Compare commits
131 Commits
test/wm
...
development
Author | SHA1 | Date |
---|---|---|
Vitaliy Filippov | 19855115ae | |
Vitaliy Filippov | 329d8ef32c | |
Vitaliy Filippov | f0ded4ea4f | |
Vitaliy Filippov | 3eea263384 | |
Vitaliy Filippov | c26d4f7d70 | |
Vitaliy Filippov | 63137e7a7b | |
Vitaliy Filippov | fdb23b1cd2 | |
Vitaliy Filippov | 4120eac127 | |
Maha Benzekri | d9bbd6cf3e | |
Maha Benzekri | 65e89d286d | |
Maha Benzekri | dcbc5ca98f | |
Maha Benzekri | 817bb836ec | |
Maha Benzekri | e3e4b2aea7 | |
Francois Ferrand | 9cd72221e8 | |
Francois Ferrand | bdcd4685ad | |
Francois Ferrand | b2b6c47ba7 | |
Jonathan Gramain | da173d53b4 | |
Jonathan Gramain | 7eb2701f21 | |
Jonathan Gramain | 6ec3c8e10d | |
Jonathan Gramain | 7aaf277db2 | |
Francois Ferrand | 67421f8c76 | |
Francois Ferrand | bf2260b1ae | |
Francois Ferrand | 11e0e1b489 | |
Anurag Mittal | f13ec2cf4c | |
Anurag Mittal | e369c7e6d2 | |
Anurag Mittal | c5c1db4568 | |
Anurag Mittal | 58f4d3cb3a | |
Anurag Mittal | b049f39e2a | |
williamlardier | 30eaaf15eb | |
williamlardier | 9d16fb0a34 | |
williamlardier | cdc612f379 | |
williamlardier | 61dd65b2c4 | |
bert-e | 2c0696322e | |
Maha Benzekri | c464a70b90 | |
Maha Benzekri | af07bb3df4 | |
Maha Benzekri | 1858654f34 | |
Maha Benzekri | 0475c8520a | |
Maha Benzekri | 31a4de5372 | |
Maha Benzekri | 0c53d13439 | |
Maha Benzekri | cad8b14df1 | |
Nicolas Humbert | fe29bacc79 | |
Nicolas Humbert | a86cff4631 | |
Kerkesni | f13a5d79ea | |
Maha Benzekri | ca8f570f15 | |
Maha Benzekri | a4bca10faf | |
Jonathan Gramain | c2ab4a2052 | |
Jonathan Gramain | fd0aa314eb | |
Jonathan Gramain | a643a3e6cc | |
Jonathan Gramain | e9d815cc9d | |
Jonathan Gramain | c86d24fc8f | |
Jonathan Gramain | 3b6d3838f5 | |
Jonathan Gramain | fcdfa889be | |
Mickael Bourgois | 5b8fcf0313 | |
Mickael Bourgois | bdfde26fe4 | |
Mickael Bourgois | e53613783a | |
Mickael Bourgois | 69dbbb143a | |
Mickael Bourgois | 403c4e5040 | |
Nicolas Humbert | a1dc2bd84d | |
Nicolas Humbert | 01409d690c | |
Nicolas Humbert | 9ee40f343b | |
bert-e | 77ed018b4f | |
bert-e | f77700236f | |
Nicolas Humbert | 43ff16b28a | |
bert-e | 05c628728d | |
Nicolas Humbert | 2a807dc4ef | |
Nicolas Humbert | 1f8b0a4032 | |
bert-e | 0dd7fe9875 | |
Mickael Bourgois | f7a6af8d9a | |
Mickael Bourgois | e6d0eff1a8 | |
Mickael Bourgois | 9d558351e7 | |
Mickael Bourgois | 68150da72e | |
Mickael Bourgois | 2b2c4bc50e | |
Mickael Bourgois | 3068086a97 | |
Mickael Bourgois | 0af7eb5530 | |
bert-e | 7e372b7bd5 | |
bert-e | a121810552 | |
bert-e | 9bf1bcc483 | |
Nicolas Humbert | 06402c6c94 | |
Nicolas Humbert | a6f3c82827 | |
Nicolas Humbert | f1891851b3 | |
bert-e | a1eed4fefb | |
Nicolas Humbert | 68204448a1 | |
Nicolas Humbert | 40e271f7e2 | |
bert-e | d8f7f18f5a | |
bert-e | 5f4d7afefb | |
bert-e | 2482fdfafc | |
bert-e | e151b3fff1 | |
Nicolas Humbert | b8bbdbbd81 | |
Nicolas Humbert | 46258bca74 | |
williamlardier | b6bc11881a | |
williamlardier | 648257612b | |
williamlardier | 7423fac674 | |
williamlardier | 9647043a02 | |
williamlardier | f9e1f91791 | |
williamlardier | 9c5bc2bfe0 | |
Jonathan Gramain | 1a0a981271 | |
bert-e | a45b2eb6a4 | |
bert-e | b00378d46d | |
Mickael Bourgois | 2c3bfb16ef | |
Jonathan Gramain | c72d8be223 | |
Jonathan Gramain | f63cb3c762 | |
bert-e | 15fd621c5c | |
bert-e | effbf63dd4 | |
bert-e | 285fe2f63b | |
bert-e | 1d8ebe6a9c | |
bert-e | 00555597e0 | |
bert-e | bddc2ccd01 | |
Jonathan Gramain | 7908654b51 | |
Jonathan Gramain | 0d7cf8d40a | |
Jonathan Gramain | c4c75e976c | |
Jonathan Gramain | 1266a14253 | |
williamlardier | 851c72bd0f | |
bert-e | 722b6ae699 | |
bert-e | 29925a15ad | |
williamlardier | 6b64f50450 | |
Jonathan Gramain | 8dc3ba7ca6 | |
bert-e | 3c2283b062 | |
Jonathan Gramain | a6a76acede | |
Jonathan Gramain | 6a116734a9 | |
Jonathan Gramain | 9325ea4996 | |
Jonathan Gramain | 33ba89f0cf | |
Jonathan Gramain | c67331d350 | |
Jonathan Gramain | 6d6f1860ef | |
Nicolas Humbert | cbe6a5e2d6 | |
Mickael Bourgois | be1557d972 | |
Mickael Bourgois | a03463061c | |
Mickael Bourgois | 8ad0ea73a7 | |
Mickael Bourgois | a94040d13b | |
Mickael Bourgois | f265ed6122 | |
Mickael Bourgois | 7301c706fd | |
Mickael Bourgois | bfc8dee559 | |
@@ -17,9 +17,9 @@ jobs:
       uses: actions/checkout@v4
 
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
+      uses: github/codeql-action/init@v3
       with:
         languages: javascript, typescript
 
     - name: Build and analyze
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v3
@@ -13,4 +13,4 @@ jobs:
       uses: actions/checkout@v4
 
     - name: 'Dependency Review'
-      uses: actions/dependency-review-action@v3
+      uses: actions/dependency-review-action@v4
@@ -46,7 +46,9 @@ jobs:
         run: yarn --silent coverage
       - name: run functional tests
         run: yarn ft_test
-      - uses: codecov/codecov-action@v3
+      - uses: codecov/codecov-action@v4
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
       - name: run executables tests
         run: yarn install && yarn test
         working-directory: 'lib/executables/pensieveCreds/'
@@ -70,7 +72,7 @@ jobs:
         run: yarn build
         continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
       - name: Upload artifacts
-        uses: scality/action-artifacts@v3
+        uses: scality/action-artifacts@v4
         with:
           url: https://artifacts.scality.net
           user: ${{ secrets.ARTIFACTS_USER }}
@@ -0,0 +1,12 @@
+{
+    "$schema": "https://swc.rs/schema.json",
+    "jsc": {
+        "parser": {
+            "syntax": "typescript"
+        },
+        "target": "es2017"
+    },
+    "module": {
+        "type": "commonjs"
+    }
+}
@@ -246,3 +246,15 @@ For capacity-enabled buckets, contains the following data:
 ### Usage
 
 Used to store bucket tagging
+
+## Model version 17
+
+### Properties Added
+
+```javascript
+this._quotaMax = quotaMax || 0;
+```
+
+### Usage
+
+Used to store bucket quota
index.ts (10 changes)
@@ -1,6 +1,9 @@
 import * as evaluators from './lib/policyEvaluator/evaluator';
 import evaluatePrincipal from './lib/policyEvaluator/principal';
-import RequestContext from './lib/policyEvaluator/RequestContext';
+import RequestContext, {
+    actionNeedQuotaCheck,
+    actionNeedQuotaCheckCopy,
+    actionWithDataDeletion } from './lib/policyEvaluator/RequestContext';
 import * as requestUtils from './lib/policyEvaluator/requestUtils';
 import * as actionMaps from './lib/policyEvaluator/utils/actionMaps';
 import { validateUserPolicy } from './lib/policy/policyValidator'

@@ -49,6 +52,8 @@ export const algorithms = {
         Skip: require('./lib/algos/list/skip'),
     },
     cache: {
+        GapSet: require('./lib/algos/cache/GapSet'),
+        GapCache: require('./lib/algos/cache/GapCache'),
         LRUCache: require('./lib/algos/cache/LRUCache'),
     },
     stream: {

@@ -65,6 +70,9 @@ export const policies = {
     RequestContext,
     requestUtils,
     actionMaps,
+    actionNeedQuotaCheck,
+    actionWithDataDeletion,
+    actionNeedQuotaCheckCopy,
 };
 
 export const testing = {
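The index.ts hunks above expose the new gap-caching structures and the quota/deletion action sets. A minimal consumption sketch (assumption: the package is imported under its published name, `arsenal`):

```ts
// Minimal sketch (assumption: the package is consumed as 'arsenal').
import { algorithms, policies } from 'arsenal';

// Gap caching structures newly exposed under algorithms.cache
const { GapSet, GapCache } = algorithms.cache;

// Action sets newly exposed under policies
const { actionNeedQuotaCheck, actionNeedQuotaCheckCopy, actionWithDataDeletion } = policies;
```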
@ -0,0 +1,363 @@
|
|||
import { OrderedSet } from '@js-sdsl/ordered-set';
|
||||
import {
|
||||
default as GapSet,
|
||||
GapSetEntry,
|
||||
} from './GapSet';
|
||||
|
||||
// the API is similar but is not strictly a superset of GapSetInterface
|
||||
// so we don't extend from it
|
||||
export interface GapCacheInterface {
|
||||
exposureDelayMs: number;
|
||||
maxGapWeight: number;
|
||||
size: number;
|
||||
|
||||
setGap: (firstKey: string, lastKey: string, weight: number) => void;
|
||||
removeOverlappingGaps: (overlappingKeys: string[]) => number;
|
||||
lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
|
||||
[Symbol.iterator]: () => Iterator<GapSetEntry>;
|
||||
toArray: () => GapSetEntry[];
|
||||
};
|
||||
|
||||
class GapCacheUpdateSet {
|
||||
newGaps: GapSet;
|
||||
updatedKeys: OrderedSet<string>;
|
||||
|
||||
constructor(maxGapWeight: number) {
|
||||
this.newGaps = new GapSet(maxGapWeight);
|
||||
this.updatedKeys = new OrderedSet();
|
||||
}
|
||||
|
||||
addUpdateBatch(updatedKeys: OrderedSet<string>): void {
|
||||
this.updatedKeys.union(updatedKeys);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Cache of listing "gaps" i.e. ranges of keys that can be skipped
|
||||
* over during listing (because they only contain delete markers as
|
||||
* latest versions).
|
||||
*
|
||||
* Typically, a single GapCache instance would be attached to a raft session.
|
||||
*
|
||||
* The API usage is as follows:
|
||||
*
|
||||
* - Initialize a GapCache instance by calling start() (this starts an internal timer)
|
||||
*
|
||||
* - Insert a gap or update an existing one via setGap()
|
||||
*
|
||||
* - Lookup existing gaps via lookupGap()
|
||||
*
|
||||
* - Invalidate gaps that overlap a specific set of keys via removeOverlappingGaps()
|
||||
*
|
||||
* - Shut down a GapCache instance by calling stop() (this stops the internal timer)
|
||||
*
|
||||
* Gaps inserted via setGap() are not exposed immediately to lookupGap(), but only:
|
||||
*
|
||||
* - after a certain delay always larger than 'exposureDelayMs' and usually shorter
|
||||
* than twice this value (but might be slightly longer in rare cases)
|
||||
*
|
||||
* - and only if they haven't been invalidated by a recent call to removeOverlappingGaps()
|
||||
*
|
||||
* This ensures atomicity between gap creation and invalidation from updates under
|
||||
* the condition that a gap is created from first key to last key within the time defined
|
||||
* by 'exposureDelayMs'.
|
||||
*
|
||||
* The implementation is based on two extra temporary "update sets" on top of the main
|
||||
* exposed gap set, one called "staging" and the other "frozen", each containing a
|
||||
* temporary updated gap set and a list of updated keys to invalidate gaps with (coming
|
||||
* from calls to removeOverlappingGaps()). Every "exposureDelayMs" milliseconds, the frozen
|
||||
* gaps are invalidated by all key updates coming from either of the "staging" or "frozen"
|
||||
* update set, then merged into the exposed gaps set, after which the staging updates become
|
||||
* the frozen updates and won't receive any new gap until the next cycle.
|
||||
*/
|
||||
export default class GapCache implements GapCacheInterface {
|
||||
_exposureDelayMs: number;
|
||||
maxGaps: number;
|
||||
|
||||
_stagingUpdates: GapCacheUpdateSet;
|
||||
_frozenUpdates: GapCacheUpdateSet;
|
||||
_exposedGaps: GapSet;
|
||||
_exposeFrozenInterval: NodeJS.Timeout | null;
|
||||
|
||||
/**
|
||||
* @constructor
|
||||
*
|
||||
* @param {number} exposureDelayMs - minimum delay between
|
||||
* insertion of a gap via setGap() and its exposure via
|
||||
* lookupGap()
|
||||
* @param {number} maxGaps - maximum number of cached gaps, after
|
||||
* which no new gap can be added by setGap(). (Note: a future
|
||||
* improvement could replace this by an eviction strategy)
|
||||
* @param {number} maxGapWeight - maximum "weight" of individual
|
||||
* cached gaps, which is also the granularity for
|
||||
* invalidation. Individual gaps can be chained together,
|
||||
* which lookupGap() transparently consolidates in the response
|
||||
* into a single large gap.
|
||||
*/
|
||||
constructor(exposureDelayMs: number, maxGaps: number, maxGapWeight: number) {
|
||||
this._exposureDelayMs = exposureDelayMs;
|
||||
this.maxGaps = maxGaps;
|
||||
|
||||
this._stagingUpdates = new GapCacheUpdateSet(maxGapWeight);
|
||||
this._frozenUpdates = new GapCacheUpdateSet(maxGapWeight);
|
||||
this._exposedGaps = new GapSet(maxGapWeight);
|
||||
this._exposeFrozenInterval = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a GapCache from an array of exposed gap entries (used in tests)
|
||||
*
|
||||
* @return {GapCache} - a new GapCache instance
|
||||
*/
|
||||
static createFromArray(
|
||||
gaps: GapSetEntry[],
|
||||
exposureDelayMs: number,
|
||||
maxGaps: number,
|
||||
maxGapWeight: number
|
||||
): GapCache {
|
||||
const gapCache = new GapCache(exposureDelayMs, maxGaps, maxGapWeight);
|
||||
gapCache._exposedGaps = GapSet.createFromArray(gaps, maxGapWeight)
|
||||
return gapCache;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal helper to remove gaps in the staging and frozen sets
|
||||
* overlapping with previously updated keys, right before the
|
||||
* frozen gaps get exposed.
|
||||
*
|
||||
* @return {undefined}
|
||||
*/
|
||||
_removeOverlappingGapsBeforeExpose(): void {
|
||||
for (const { updatedKeys } of [this._stagingUpdates, this._frozenUpdates]) {
|
||||
if (updatedKeys.size() === 0) {
|
||||
continue;
|
||||
}
|
||||
for (const { newGaps } of [this._stagingUpdates, this._frozenUpdates]) {
|
||||
if (newGaps.size === 0) {
|
||||
continue;
|
||||
}
|
||||
newGaps.removeOverlappingGaps(updatedKeys);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This function is the core mechanism that updates the exposed gaps in the
|
||||
* cache. It is called on a regular interval defined by 'exposureDelayMs'.
|
||||
*
|
||||
* It does the following in order:
|
||||
*
|
||||
* - remove gaps from the frozen set that overlap with any key present in a
|
||||
* batch passed to removeOverlappingGaps() since the last two triggers of
|
||||
* _exposeFrozen()
|
||||
*
|
||||
* - merge the remaining gaps from the frozen set to the exposed set, which
|
||||
* makes them visible from calls to lookupGap()
|
||||
*
|
||||
* - rotate by freezing the currently staging updates and initiating a new
|
||||
* staging updates set
|
||||
*
|
||||
* @return {undefined}
|
||||
*/
|
||||
_exposeFrozen(): void {
|
||||
this._removeOverlappingGapsBeforeExpose();
|
||||
for (const gap of this._frozenUpdates.newGaps) {
|
||||
// Use a trivial strategy to keep the cache size within
|
||||
// limits: refuse to add new gaps when the size is above
|
||||
// the 'maxGaps' threshold. We solely rely on
|
||||
// removeOverlappingGaps() to make space for new gaps.
|
||||
if (this._exposedGaps.size < this.maxGaps) {
|
||||
this._exposedGaps.setGap(gap.firstKey, gap.lastKey, gap.weight);
|
||||
}
|
||||
}
|
||||
this._frozenUpdates = this._stagingUpdates;
|
||||
this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
|
||||
}
|
||||
|
||||
/**
|
||||
* Start the internal GapCache timer
|
||||
*
|
||||
* @return {undefined}
|
||||
*/
|
||||
start(): void {
|
||||
if (this._exposeFrozenInterval) {
|
||||
return;
|
||||
}
|
||||
this._exposeFrozenInterval = setInterval(
|
||||
() => this._exposeFrozen(),
|
||||
this._exposureDelayMs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop the internal GapCache timer
|
||||
*
|
||||
* @return {undefined}
|
||||
*/
|
||||
stop(): void {
|
||||
if (this._exposeFrozenInterval) {
|
||||
clearInterval(this._exposeFrozenInterval);
|
||||
this._exposeFrozenInterval = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Record a gap between two keys, associated with a weight to
|
||||
* limit individual gap's spanning ranges in the cache, for a more
|
||||
* granular invalidation.
|
||||
*
|
||||
* The function handles splitting and merging existing gaps to
|
||||
* maintain an optimal weight of cache entries.
|
||||
*
|
||||
* NOTE 1: the caller must ensure that the full length of the gap
|
||||
* between 'firstKey' and 'lastKey' has been built from a listing
|
||||
* snapshot that is more recent than 'exposureDelayMs' milliseconds,
|
||||
* in order to guarantee that the exposed gap will be fully
|
||||
* covered (and potentially invalidated) from recent calls to
|
||||
* removeOverlappingGaps().
|
||||
*
|
||||
* NOTE 2: a usual pattern when building a large gap from multiple
|
||||
* calls to setGap() is to start the next gap from 'lastKey',
|
||||
* which will be passed as 'firstKey' in the next call, so that
|
||||
* gaps can be chained together and consolidated by lookupGap().
|
||||
*
|
||||
* @param {string} firstKey - first key of the gap
|
||||
* @param {string} lastKey - last key of the gap, must be greater
|
||||
* or equal than 'firstKey'
|
||||
* @param {number} weight - total weight between 'firstKey' and 'lastKey'
|
||||
* @return {undefined}
|
||||
*/
|
||||
setGap(firstKey: string, lastKey: string, weight: number): void {
|
||||
this._stagingUpdates.newGaps.setGap(firstKey, lastKey, weight);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove gaps that overlap with a given set of keys. Used to
|
||||
* invalidate gaps when keys are inserted or deleted.
|
||||
*
|
||||
* @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that
|
||||
* overlap with any of this set of keys
|
||||
* @return {number} - how many gaps were removed from the exposed
|
||||
* gaps only (overlapping gaps not yet exposed are also invalidated
|
||||
* but are not accounted for in the returned value)
|
||||
*/
|
||||
removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
|
||||
let overlappingKeysSet;
|
||||
if (Array.isArray(overlappingKeys)) {
|
||||
overlappingKeysSet = new OrderedSet(overlappingKeys);
|
||||
} else {
|
||||
overlappingKeysSet = overlappingKeys;
|
||||
}
|
||||
this._stagingUpdates.addUpdateBatch(overlappingKeysSet);
|
||||
return this._exposedGaps.removeOverlappingGaps(overlappingKeysSet);
|
||||
}
|
||||
|
||||
/**
|
||||
* Lookup the next exposed gap that overlaps with [minKey, maxKey]. Internally
|
||||
* chained gaps are coalesced in the response into a single contiguous large gap.
|
||||
*
|
||||
* @param {string} minKey - minimum key overlapping with the returned gap
|
||||
* @param {string} [maxKey] - maximum key overlapping with the returned gap
|
||||
* @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
|
||||
* was found, null otherwise, as a Promise
|
||||
*/
|
||||
lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
|
||||
return this._exposedGaps.lookupGap(minKey, maxKey);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the maximum weight setting for individual gaps.
|
||||
*
|
||||
* @return {number} - maximum weight of individual gaps
|
||||
*/
|
||||
get maxGapWeight(): number {
|
||||
return this._exposedGaps.maxWeight;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the maximum weight setting for individual gaps.
|
||||
*
|
||||
* @param {number} gapWeight - maximum weight of individual gaps
|
||||
*/
|
||||
set maxGapWeight(gapWeight: number) {
|
||||
this._exposedGaps.maxWeight = gapWeight;
|
||||
// also update transient gap sets
|
||||
this._stagingUpdates.newGaps.maxWeight = gapWeight;
|
||||
this._frozenUpdates.newGaps.maxWeight = gapWeight;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the exposure delay in milliseconds, which is the minimum
|
||||
* time after which newly cached gaps will be exposed by
|
||||
* lookupGap().
|
||||
*
|
||||
* @return {number} - exposure delay in milliseconds
|
||||
*/
|
||||
get exposureDelayMs(): number {
|
||||
return this._exposureDelayMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the exposure delay in milliseconds, which is the minimum
|
||||
* time after which newly cached gaps will be exposed by
|
||||
* lookupGap(). Setting this attribute automatically updates the
|
||||
* internal state to honor the new value.
|
||||
*
|
||||
* @param {number} - exposure delay in milliseconds
|
||||
*/
|
||||
set exposureDelayMs(exposureDelayMs: number) {
|
||||
if (exposureDelayMs !== this._exposureDelayMs) {
|
||||
this._exposureDelayMs = exposureDelayMs;
|
||||
if (this._exposeFrozenInterval) {
|
||||
// invalidate all pending gap updates, as the new interval may not be
|
||||
// safe for them
|
||||
this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
|
||||
this._frozenUpdates = new GapCacheUpdateSet(this.maxGapWeight);
|
||||
|
||||
// reinitialize the _exposeFrozenInterval timer with the updated delay
|
||||
this.stop();
|
||||
this.start();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of exposed gaps
|
||||
*
|
||||
* @return {number} number of exposed gaps
|
||||
*/
|
||||
get size(): number {
|
||||
return this._exposedGaps.size;
|
||||
}
|
||||
|
||||
/**
|
||||
* Iterate over exposed gaps
|
||||
*
|
||||
* @return {Iterator<GapSetEntry>} an iterator over exposed gaps
|
||||
*/
|
||||
[Symbol.iterator](): Iterator<GapSetEntry> {
|
||||
return this._exposedGaps[Symbol.iterator]();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get an array of all exposed gaps
|
||||
*
|
||||
* @return {GapSetEntry[]} array of exposed gaps
|
||||
*/
|
||||
toArray(): GapSetEntry[] {
|
||||
return this._exposedGaps.toArray();
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all exposed and staging gaps from the cache.
|
||||
*
|
||||
* Note: retains invalidating updates from removeOverlappingGaps()
|
||||
* for correctness of gaps inserted afterwards.
|
||||
*
|
||||
* @return {undefined}
|
||||
*/
|
||||
clear(): void {
|
||||
this._stagingUpdates.newGaps = new GapSet(this.maxGapWeight);
|
||||
this._frozenUpdates.newGaps = new GapSet(this.maxGapWeight);
|
||||
this._exposedGaps = new GapSet(this.maxGapWeight);
|
||||
}
|
||||
}
|
|
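The new module added above (lib/algos/cache/GapCache.ts, per the index.ts require path) documents its public API: start()/stop(), setGap(), removeOverlappingGaps() and lookupGap(). A minimal usage sketch based on that documentation; key names and delays are illustrative only:

```ts
import GapCache from './lib/algos/cache/GapCache';

// exposureDelayMs=100, maxGaps=1000, maxGapWeight=500
const gapCache = new GapCache(100, 1000, 500);
gapCache.start(); // starts the internal exposure timer

// record a gap of delete markers seen during a listing
gapCache.setGap('obj-0001', 'obj-0100', 100);

// invalidate gaps overlapping keys that were just updated (none here)
gapCache.removeOverlappingGaps(['unrelated-key']);

// after roughly exposureDelayMs (up to twice that), surviving gaps become visible
setTimeout(async () => {
    const gap = await gapCache.lookupGap('obj-0000', 'obj-9999');
    // gap -> { firstKey: 'obj-0001', lastKey: 'obj-0100', weight: 100 } or null
    gapCache.stop();
}, 300);
```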
@ -0,0 +1,366 @@
|
|||
import assert from 'assert';
|
||||
import { OrderedSet } from '@js-sdsl/ordered-set';
|
||||
|
||||
import errors from '../../errors';
|
||||
|
||||
export type GapSetEntry = {
|
||||
firstKey: string,
|
||||
lastKey: string,
|
||||
weight: number,
|
||||
};
|
||||
|
||||
export interface GapSetInterface {
|
||||
maxWeight: number;
|
||||
size: number;
|
||||
|
||||
setGap: (firstKey: string, lastKey: string, weight: number) => GapSetEntry;
|
||||
removeOverlappingGaps: (overlappingKeys: string[]) => number;
|
||||
lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
|
||||
[Symbol.iterator]: () => Iterator<GapSetEntry>;
|
||||
toArray: () => GapSetEntry[];
|
||||
};
|
||||
|
||||
/**
|
||||
* Specialized data structure to support caching of listing "gaps",
|
||||
* i.e. ranges of keys that can be skipped over during listing
|
||||
* (because they only contain delete markers as latest versions)
|
||||
*/
|
||||
export default class GapSet implements GapSetInterface, Iterable<GapSetEntry> {
|
||||
_gaps: OrderedSet<GapSetEntry>;
|
||||
_maxWeight: number;
|
||||
|
||||
/**
|
||||
* @constructor
|
||||
|
||||
* @param {number} maxWeight - weight threshold for each cached
|
||||
* gap (unitless). Triggers splitting gaps when reached
|
||||
*/
|
||||
constructor(maxWeight: number) {
|
||||
this._gaps = new OrderedSet(
|
||||
[],
|
||||
(left: GapSetEntry, right: GapSetEntry) => (
|
||||
left.firstKey < right.firstKey ? -1 :
|
||||
left.firstKey > right.firstKey ? 1 : 0
|
||||
)
|
||||
);
|
||||
this._maxWeight = maxWeight;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a GapSet from an array of gap entries (used in tests)
|
||||
*/
|
||||
static createFromArray(gaps: GapSetEntry[], maxWeight: number): GapSet {
|
||||
const gapSet = new GapSet(maxWeight);
|
||||
for (const gap of gaps) {
|
||||
gapSet._gaps.insert(gap);
|
||||
}
|
||||
return gapSet;
|
||||
}
|
||||
|
||||
/**
|
||||
* Record a gap between two keys, associated with a weight to limit
|
||||
* individual gap sizes in the cache.
|
||||
*
|
||||
* The function handles splitting and merging existing gaps to
|
||||
* maintain an optimal weight of cache entries.
|
||||
*
|
||||
* @param {string} firstKey - first key of the gap
|
||||
* @param {string} lastKey - last key of the gap, must be greater
|
||||
* or equal than 'firstKey'
|
||||
* @param {number} weight - total weight between 'firstKey' and 'lastKey'
|
||||
* @return {GapSetEntry} - existing or new gap entry
|
||||
*/
|
||||
setGap(firstKey: string, lastKey: string, weight: number): GapSetEntry {
|
||||
assert(lastKey >= firstKey);
|
||||
|
||||
// Step 1/4: Find the closest left-overlapping gap, and either re-use it
|
||||
// or chain it with a new gap depending on the weights if it exists (otherwise
|
||||
// just creates a new gap).
|
||||
const curGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey });
|
||||
let curGap;
|
||||
if (curGapIt.isAccessible()) {
|
||||
curGap = curGapIt.pointer;
|
||||
if (curGap.lastKey >= lastKey) {
|
||||
// return fully overlapping gap already cached
|
||||
return curGap;
|
||||
}
|
||||
}
|
||||
let remainingWeight = weight;
|
||||
if (!curGap // no previous gap
|
||||
|| curGap.lastKey < firstKey // previous gap not overlapping
|
||||
|| (curGap.lastKey === firstKey // previous gap overlapping by one key...
|
||||
&& curGap.weight + weight > this._maxWeight) // ...but we can't extend it
|
||||
) {
|
||||
// create a new gap indexed by 'firstKey'
|
||||
curGap = { firstKey, lastKey: firstKey, weight: 0 };
|
||||
this._gaps.insert(curGap);
|
||||
} else if (curGap.lastKey > firstKey && weight > this._maxWeight) {
|
||||
// previous gap is either fully or partially contained in the new gap
|
||||
// and cannot be extended: substract its weight from the total (heuristic
|
||||
// in case the previous gap doesn't start at 'firstKey', which is the
|
||||
// uncommon case)
|
||||
remainingWeight -= curGap.weight;
|
||||
|
||||
// there may be an existing chained gap starting with the previous gap's
|
||||
// 'lastKey': use it if it exists
|
||||
const chainedGapIt = this._gaps.find(<GapSetEntry>{ firstKey: curGap.lastKey });
|
||||
if (chainedGapIt.isAccessible()) {
|
||||
curGap = chainedGapIt.pointer;
|
||||
} else {
|
||||
// no existing chained gap: chain a new gap to the previous gap
|
||||
curGap = {
|
||||
firstKey: curGap.lastKey,
|
||||
lastKey: curGap.lastKey,
|
||||
weight: 0,
|
||||
};
|
||||
this._gaps.insert(curGap);
|
||||
}
|
||||
}
|
||||
// Step 2/4: Cleanup existing gaps fully included in firstKey -> lastKey, and
|
||||
// aggregate their weights in curGap to define the minimum weight up to the
|
||||
// last merged gap.
|
||||
let nextGap;
|
||||
while (true) {
|
||||
const nextGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: curGap.firstKey });
|
||||
nextGap = nextGapIt.isAccessible() && nextGapIt.pointer;
|
||||
// stop the cleanup when no more gap or if the next gap is not fully
|
||||
// included in curGap
|
||||
if (!nextGap || nextGap.lastKey > lastKey) {
|
||||
break;
|
||||
}
|
||||
this._gaps.eraseElementByIterator(nextGapIt);
|
||||
curGap.lastKey = nextGap.lastKey;
|
||||
curGap.weight += nextGap.weight;
|
||||
}
|
||||
|
||||
// Step 3/4: Extend curGap to lastKey, adjusting the weight.
|
||||
// At this point, curGap weight is the minimum weight of the finished gap, save it
|
||||
// for step 4.
|
||||
let minMergedWeight = curGap.weight;
|
||||
if (curGap.lastKey === firstKey && firstKey !== lastKey) {
|
||||
// extend the existing gap by the full amount 'firstKey -> lastKey'
|
||||
curGap.lastKey = lastKey;
|
||||
curGap.weight += remainingWeight;
|
||||
} else if (curGap.lastKey <= lastKey) {
|
||||
curGap.lastKey = lastKey;
|
||||
curGap.weight = remainingWeight;
|
||||
}
|
||||
|
||||
// Step 4/4: Find the closest right-overlapping gap, and if it exists, either merge
|
||||
// it or chain it with curGap depending on the weights.
|
||||
if (nextGap && nextGap.firstKey <= lastKey) {
|
||||
// nextGap overlaps with the new gap: check if we can merge it
|
||||
minMergedWeight += nextGap.weight;
|
||||
let mergedWeight;
|
||||
if (lastKey === nextGap.firstKey) {
|
||||
// nextGap is chained with curGap: add the full weight of nextGap
|
||||
mergedWeight = curGap.weight + nextGap.weight;
|
||||
} else {
|
||||
// strict overlap: don't add nextGap's weight unless
|
||||
// it's larger than the sum of merged ranges (as it is
|
||||
// then included in `minMergedWeight`)
|
||||
mergedWeight = Math.max(curGap.weight, minMergedWeight);
|
||||
}
|
||||
if (mergedWeight <= this._maxWeight) {
|
||||
// merge nextGap into curGap
|
||||
curGap.lastKey = nextGap.lastKey;
|
||||
curGap.weight = mergedWeight;
|
||||
this._gaps.eraseElementByKey(nextGap);
|
||||
} else {
|
||||
// adjust the last key to chain with nextGap and substract the next
|
||||
// gap's weight from curGap (heuristic)
|
||||
curGap.lastKey = nextGap.firstKey;
|
||||
curGap.weight = Math.max(mergedWeight - nextGap.weight, 0);
|
||||
curGap = nextGap;
|
||||
}
|
||||
}
|
||||
// return a copy of curGap
|
||||
return Object.assign({}, curGap);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove gaps that overlap with one or more keys in a given array or
|
||||
* OrderedSet. Used to invalidate gaps when keys are inserted or deleted.
|
||||
*
|
||||
* @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that overlap
|
||||
* with any of this set of keys
|
||||
* @return {number} - how many gaps were removed
|
||||
*/
|
||||
removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
|
||||
// To optimize processing with a large number of keys and/or gaps, this function:
|
||||
//
|
||||
// 1. converts the overlappingKeys array to a OrderedSet (if not already a OrderedSet)
|
||||
// 2. queries both the gaps set and the overlapping keys set in a loop, which allows:
|
||||
// - skipping ranges of overlapping keys at once when there is no new overlapping gap
|
||||
// - skipping ranges of gaps at once when there is no overlapping key
|
||||
//
|
||||
// This way, it is efficient when the number of non-overlapping gaps is large
|
||||
// (which is the most common case in practice).
|
||||
|
||||
let overlappingKeysSet;
|
||||
if (Array.isArray(overlappingKeys)) {
|
||||
overlappingKeysSet = new OrderedSet(overlappingKeys);
|
||||
} else {
|
||||
overlappingKeysSet = overlappingKeys;
|
||||
}
|
||||
const firstKeyIt = overlappingKeysSet.begin();
|
||||
let currentKey = firstKeyIt.isAccessible() && firstKeyIt.pointer;
|
||||
let nRemoved = 0;
|
||||
while (currentKey) {
|
||||
const closestGapIt = this._gaps.reverseUpperBound(<GapSetEntry>{ firstKey: currentKey });
|
||||
if (closestGapIt.isAccessible()) {
|
||||
const closestGap = closestGapIt.pointer;
|
||||
if (currentKey <= closestGap.lastKey) {
|
||||
// currentKey overlaps closestGap: remove the gap
|
||||
this._gaps.eraseElementByIterator(closestGapIt);
|
||||
nRemoved += 1;
|
||||
}
|
||||
}
|
||||
const nextGapIt = this._gaps.lowerBound(<GapSetEntry>{ firstKey: currentKey });
|
||||
if (!nextGapIt.isAccessible()) {
|
||||
// no more gap: we're done
|
||||
return nRemoved;
|
||||
}
|
||||
const nextGap = nextGapIt.pointer;
|
||||
// advance to the last key potentially overlapping with nextGap
|
||||
let currentKeyIt = overlappingKeysSet.reverseLowerBound(nextGap.lastKey);
|
||||
if (currentKeyIt.isAccessible()) {
|
||||
currentKey = currentKeyIt.pointer;
|
||||
if (currentKey >= nextGap.firstKey) {
|
||||
// currentKey overlaps nextGap: remove the gap
|
||||
this._gaps.eraseElementByIterator(nextGapIt);
|
||||
nRemoved += 1;
|
||||
}
|
||||
}
|
||||
// advance to the first key potentially overlapping with another gap
|
||||
currentKeyIt = overlappingKeysSet.lowerBound(nextGap.lastKey);
|
||||
currentKey = currentKeyIt.isAccessible() && currentKeyIt.pointer;
|
||||
}
|
||||
return nRemoved;
|
||||
}
|
||||
|
||||
/**
|
||||
* Internal helper to coalesce multiple chained gaps into a single gap.
|
||||
*
|
||||
* It is only used to construct lookupGap() return values and
|
||||
* doesn't modify the GapSet.
|
||||
*
|
||||
* NOTE: The function may take a noticeable amount of time and CPU
|
||||
* to execute if a large number of chained gaps have to be
|
||||
* coalesced, but it should never take more than a few seconds. In
|
||||
* most cases it should take less than a millisecond. It regularly
|
||||
* yields to the nodejs event loop to avoid blocking it during a
|
||||
* long execution.
|
||||
*
|
||||
* @param {GapSetEntry} firstGap - first gap of the chain to coalesce with
|
||||
* the next ones in the chain
|
||||
* @return {Promise<GapSetEntry>} - a new coalesced entry, as a Promise
|
||||
*/
|
||||
_coalesceGapChain(firstGap: GapSetEntry): Promise<GapSetEntry> {
|
||||
return new Promise(resolve => {
|
||||
const coalescedGap: GapSetEntry = Object.assign({}, firstGap);
|
||||
const coalesceGapChainIteration = () => {
|
||||
// efficiency trade-off: 100 iterations of log(N) complexity lookups should
|
||||
// not block the event loop for too long
|
||||
for (let opCounter = 0; opCounter < 100; ++opCounter) {
|
||||
const chainedGapIt = this._gaps.find(
|
||||
<GapSetEntry>{ firstKey: coalescedGap.lastKey });
|
||||
if (!chainedGapIt.isAccessible()) {
|
||||
// chain is complete
|
||||
return resolve(coalescedGap);
|
||||
}
|
||||
const chainedGap = chainedGapIt.pointer;
|
||||
if (chainedGap.firstKey === chainedGap.lastKey) {
|
||||
// found a single-key gap: chain is complete
|
||||
return resolve(coalescedGap);
|
||||
}
|
||||
coalescedGap.lastKey = chainedGap.lastKey;
|
||||
coalescedGap.weight += chainedGap.weight;
|
||||
}
|
||||
// yield to the event loop before continuing the process
|
||||
// of coalescing the gap chain
|
||||
return process.nextTick(coalesceGapChainIteration);
|
||||
};
|
||||
coalesceGapChainIteration();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Lookup the next gap that overlaps with [minKey, maxKey]. Internally chained
|
||||
* gaps are coalesced in the response into a single contiguous large gap.
|
||||
*
|
||||
* @param {string} minKey - minimum key overlapping with the returned gap
|
||||
* @param {string} [maxKey] - maximum key overlapping with the returned gap
|
||||
* @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
|
||||
* was found, null otherwise, as a Promise
|
||||
*/
|
||||
async lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
|
||||
let firstGap: GapSetEntry | null = null;
|
||||
const minGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey: minKey });
|
||||
const minGap = minGapIt.isAccessible() && minGapIt.pointer;
|
||||
if (minGap && minGap.lastKey >= minKey) {
|
||||
firstGap = minGap;
|
||||
} else {
|
||||
const maxGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: minKey });
|
||||
const maxGap = maxGapIt.isAccessible() && maxGapIt.pointer;
|
||||
if (maxGap && (maxKey === undefined || maxGap.firstKey <= maxKey)) {
|
||||
firstGap = maxGap;
|
||||
}
|
||||
}
|
||||
if (!firstGap) {
|
||||
return null;
|
||||
}
|
||||
return this._coalesceGapChain(firstGap);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the maximum weight setting for individual gaps.
|
||||
*
|
||||
* @return {number} - maximum weight of individual gaps
|
||||
*/
|
||||
get maxWeight(): number {
|
||||
return this._maxWeight;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the maximum weight setting for individual gaps.
|
||||
*
|
||||
* @param {number} gapWeight - maximum weight of individual gaps
|
||||
*/
|
||||
set maxWeight(gapWeight: number) {
|
||||
this._maxWeight = gapWeight;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the number of gaps stored in this set.
|
||||
*
|
||||
* @return {number} - number of gaps stored in this set
|
||||
*/
|
||||
get size(): number {
|
||||
return this._gaps.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* Iterate over each gap of the set, ordered by first key
|
||||
*
|
||||
* @return {Iterator<GapSetEntry>} - an iterator over all gaps
|
||||
* Example:
|
||||
* for (const gap of myGapSet) { ... }
|
||||
*/
|
||||
[Symbol.iterator](): Iterator<GapSetEntry> {
|
||||
return this._gaps[Symbol.iterator]();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return an array containing all gaps, ordered by first key
|
||||
*
|
||||
* NOTE: there is a toArray() method in the OrderedSet implementation
|
||||
* but it does not scale well and overflows the stack quickly. This is
|
||||
* why we provide an implementation based on an iterator.
|
||||
*
|
||||
* @return {GapSetEntry[]} - an array containing all gaps
|
||||
*/
|
||||
toArray(): GapSetEntry[] {
|
||||
return [...this];
|
||||
}
|
||||
}
|
|
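GapSet is the underlying ordered structure: gaps whose combined weight would exceed `maxWeight` are chained rather than merged, and lookupGap() coalesces a chain back into a single entry in its response. A small sketch of that behaviour (illustrative keys, based on the code above):

```ts
import GapSet from './lib/algos/cache/GapSet';

const gaps = new GapSet(10); // maxWeight per individual gap

gaps.setGap('bar', 'baz', 5);
// 5 + 7 exceeds maxWeight, so this gap is chained after the first one instead of merged
gaps.setGap('baz', 'qux', 7);

gaps.toArray();
// [ { firstKey: 'bar', lastKey: 'baz', weight: 5 },
//   { firstKey: 'baz', lastKey: 'qux', weight: 7 } ]

gaps.lookupGap('ba').then(gap => {
    // the chain is coalesced in the response:
    // { firstKey: 'bar', lastKey: 'qux', weight: 12 }
});
```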
@@ -1,6 +1,6 @@
 'use strict'; // eslint-disable-line strict
 
-const { FILTER_SKIP, SKIP_NONE } = require('./tools');
+const { FILTER_ACCEPT, SKIP_NONE } = require('./tools');
 
 // Use a heuristic to amortize the cost of JSON
 // serialization/deserialization only on largest metadata where the
@@ -92,21 +92,26 @@ class Extension {
     * @param {object} entry - a listing entry from metadata
     *                         expected format: { key, value }
     * @return {number} - result of filtering the entry:
-    *                    > 0: entry is accepted and included in the result
-    *                    = 0: entry is accepted but not included (skipping)
-    *                    < 0: entry is not accepted, listing should finish
+    *   FILTER_ACCEPT: entry is accepted and may or not be included
+    *                  in the result
+    *   FILTER_SKIP: listing may skip directly (with "gte" param) to
+    *                the key returned by the skipping() method
+    *   FILTER_END: the results are complete, listing can be stopped
     */
-    filter(entry) {
-        return entry ? FILTER_SKIP : FILTER_SKIP;
+    filter(/* entry: { key, value } */) {
+        return FILTER_ACCEPT;
     }
 
     /**
-    * Provides the insight into why filter is skipping an entry. This could be
-    * because it is skipping a range of delimited keys or a range of specific
-    * version when doing master version listing.
+    * Provides the next key at which the listing task is allowed to skip to.
+    * This could allow to skip over:
+    * - a key prefix ending with the delimiter
+    * - all remaining versions of an object when doing a current
+    *   versions listing in v0 format
+    * - a cached "gap" of deleted objects when doing a current
+    *   versions listing in v0 format
     *
-    * @return {string} - the insight: a common prefix or a master key,
-    * or SKIP_NONE if there is no insight
+    * @return {string} - the next key at which the listing task is allowed to skip to
     */
    skipping() {
        return SKIP_NONE;
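To make the revised contract concrete, here is a minimal sketch (not part of the diff) of an extension that accepts every entry and never asks the listing to skip ahead:

```ts
const { FILTER_ACCEPT, SKIP_NONE } = require('./tools');
const Extension = require('./Extension').default;

class AcceptAllExtension extends Extension {
    filter(/* entry: { key, value } */) {
        // entry is accepted; whether it ends up in the result is decided elsewhere
        return FILTER_ACCEPT;
    }

    skipping() {
        // no skip-ahead hint for the listing task
        return SKIP_NONE;
    }
}
```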
@@ -1,7 +1,7 @@
 'use strict'; // eslint-disable-line strict
 
 const { inc, checkLimit, listingParamsMasterKeysV0ToV1,
-    FILTER_END, FILTER_ACCEPT } = require('./tools');
+    FILTER_END, FILTER_ACCEPT, SKIP_NONE } = require('./tools');
 const DEFAULT_MAX_KEYS = 1000;
 const VSConst = require('../../versioning/constants').VersioningConstants;
 const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
@@ -163,7 +163,7 @@ class MultipartUploads {
     }
 
     skipping() {
-        return '';
+        return SKIP_NONE;
     }
 
     /**
@@ -2,7 +2,7 @@
 const Extension = require('./Extension').default;
 
-const { checkLimit, FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
+const { checkLimit, FILTER_END, FILTER_ACCEPT } = require('./tools');
 const DEFAULT_MAX_KEYS = 10000;
 
 /**
@@ -91,7 +91,7 @@ class List extends Extension {
     *  < 0 : listing done
     */
    filter(elem) {
-        // Check first in case of maxkeys <= 0
+        // Check if the result array is full
        if (this.keys >= this.maxKeys) {
            return FILTER_END;
        }
@@ -99,7 +99,7 @@ class List extends Extension {
            this.filterKeyStartsWith !== undefined) &&
            typeof elem === 'object' &&
            !this.customFilter(elem.value)) {
-            return FILTER_SKIP;
+            return FILTER_ACCEPT;
        }
        if (typeof elem === 'object') {
            this.res.push({
@@ -32,7 +32,7 @@ export interface DelimiterFilterState_SkippingPrefix extends FilterState {
 
 type KeyHandler = (key: string, value: string) => FilterReturnValue;
 
-type ResultObject = {
+export type ResultObject = {
     CommonPrefixes: string[];
     Contents: {
         key: string;
@@ -196,6 +196,9 @@ export class Delimiter extends Extension {
     }
 
     getCommonPrefix(key: string): string | undefined {
+        if (!this.delimiter) {
+            return undefined;
+        }
         const baseIndex = this.prefix ? this.prefix.length : 0;
         const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
         if (delimiterIndex === -1) {
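For reference, the behaviour being guarded here can be sketched as a standalone function (an approximation: the tail of the method is truncated in this view, and the assumption that the result keeps the trailing delimiter mirrors S3 CommonPrefixes semantics):

```ts
// Standalone approximation of getCommonPrefix(); not the exact code from the class.
function getCommonPrefix(key: string, prefix: string | undefined, delimiter?: string): string | undefined {
    if (!delimiter) {
        return undefined;
    }
    const baseIndex = prefix ? prefix.length : 0;
    const delimiterIndex = key.indexOf(delimiter, baseIndex);
    if (delimiterIndex === -1) {
        return undefined;
    }
    return key.substring(0, delimiterIndex + delimiter.length);
}

getCommonPrefix('photos/2021/cat.jpg', 'photos/', '/'); // 'photos/2021/'
getCommonPrefix('photos/readme.txt', 'photos/', '/');   // undefined (no delimiter past the prefix)
```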
@@ -305,7 +308,7 @@ export class Delimiter extends Extension {
        switch (this.state.id) {
        case DelimiterFilterStateId.SkippingPrefix:
            const { prefix } = <DelimiterFilterState_SkippingPrefix> this.state;
-            return prefix;
+            return inc(prefix);
 
        default:
            return SKIP_NONE;
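This fix matters because the value returned by skipping() is used as a `gte` bound: returning the prefix itself would make the listing land back on keys inside the prefix, while `inc(prefix)` points at the first key strictly past the whole prefix range. A sketch of the assumed `inc()` behaviour (from `lib/algos/list/tools`, which is not shown in this diff):

```ts
// Assumed behaviour of inc(): bump the last character so the result sorts
// right after every key sharing the given prefix.
const inc = (str: string): string =>
    str ? str.slice(0, -1) + String.fromCharCode(str.charCodeAt(str.length - 1) + 1) : str;

inc('photos/'); // 'photos0' -> resuming with gte 'photos0' skips every 'photos/...' key
```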
@@ -5,18 +5,23 @@ import {
     DelimiterFilterStateId,
     DelimiterFilterState_NotSkipping,
     DelimiterFilterState_SkippingPrefix,
+    ResultObject,
 } from './delimiter';
 const Version = require('../../versioning/Version').Version;
 const VSConst = require('../../versioning/constants').VersioningConstants;
 const { BucketVersioningKeyFormat } = VSConst;
-const { FILTER_ACCEPT, FILTER_SKIP, FILTER_END } = require('./tools');
+const { FILTER_ACCEPT, FILTER_SKIP, FILTER_END, SKIP_NONE, inc } = require('./tools');
+
+import { GapSetEntry } from '../cache/GapSet';
+import { GapCacheInterface } from '../cache/GapCache';
 
 const VID_SEP = VSConst.VersionId.Separator;
 const { DbPrefixes } = VSConst;
 
-const enum DelimiterMasterFilterStateId {
+export const enum DelimiterMasterFilterStateId {
     SkippingVersionsV0 = 101,
     WaitVersionAfterPHDV0 = 102,
+    SkippingGapV0 = 103,
 };
 
 interface DelimiterMasterFilterState_SkippingVersionsV0 extends FilterState {
@ -29,37 +34,121 @@ interface DelimiterMasterFilterState_WaitVersionAfterPHDV0 extends FilterState {
|
|||
masterKey: string,
|
||||
};
|
||||
|
||||
interface DelimiterMasterFilterState_SkippingGapV0 extends FilterState {
|
||||
id: DelimiterMasterFilterStateId.SkippingGapV0,
|
||||
};
|
||||
|
||||
export const enum GapCachingState {
|
||||
NoGapCache = 0, // there is no gap cache
|
||||
UnknownGap = 1, // waiting for a cache lookup
|
||||
GapLookupInProgress = 2, // asynchronous gap lookup in progress
|
||||
GapCached = 3, // an upcoming or already skippable gap is cached
|
||||
NoMoreGap = 4, // the cache doesn't have any more gaps inside the listed range
|
||||
};
|
||||
|
||||
type GapCachingInfo_NoGapCache = {
|
||||
state: GapCachingState.NoGapCache;
|
||||
};
|
||||
|
||||
type GapCachingInfo_NoCachedGap = {
|
||||
state: GapCachingState.UnknownGap
|
||||
| GapCachingState.GapLookupInProgress
|
||||
gapCache: GapCacheInterface;
|
||||
};
|
||||
|
||||
type GapCachingInfo_GapCached = {
|
||||
state: GapCachingState.GapCached;
|
||||
gapCache: GapCacheInterface;
|
||||
gapCached: GapSetEntry;
|
||||
};
|
||||
|
||||
type GapCachingInfo_NoMoreGap = {
|
||||
state: GapCachingState.NoMoreGap;
|
||||
};
|
||||
|
||||
type GapCachingInfo = GapCachingInfo_NoGapCache
|
||||
| GapCachingInfo_NoCachedGap
|
||||
| GapCachingInfo_GapCached
|
||||
| GapCachingInfo_NoMoreGap;
|
||||
|
||||
|
||||
export const enum GapBuildingState {
|
||||
Disabled = 0, // no gap cache or no gap building needed (e.g. in V1 versioning format)
|
||||
NotBuilding = 1, // not currently building a gap (i.e. not listing within a gap)
|
||||
Building = 2, // currently building a gap (i.e. listing within a gap)
|
||||
Expired = 3, // not allowed to build due to exposure delay timeout
|
||||
};
|
||||
|
||||
type GapBuildingInfo_NothingToBuild = {
|
||||
state: GapBuildingState.Disabled | GapBuildingState.Expired;
|
||||
};
|
||||
|
||||
type GapBuildingParams = {
|
||||
/**
|
||||
* minimum weight for a gap to be created in the cache
|
||||
*/
|
||||
minGapWeight: number;
|
||||
/**
|
||||
* trigger a cache setGap() call every N skippable keys
|
||||
*/
|
||||
triggerSaveGapWeight: number;
|
||||
/**
|
||||
* timestamp to assess whether we're still inside the validity period to
|
||||
* be allowed to build gaps
|
||||
*/
|
||||
initTimestamp: number;
|
||||
};
|
||||
|
||||
type GapBuildingInfo_NotBuilding = {
|
||||
state: GapBuildingState.NotBuilding;
|
||||
gapCache: GapCacheInterface;
|
||||
params: GapBuildingParams;
|
||||
};
|
||||
|
||||
type GapBuildingInfo_Building = {
|
||||
state: GapBuildingState.Building;
|
||||
gapCache: GapCacheInterface;
|
||||
params: GapBuildingParams;
|
||||
/**
|
||||
* Gap currently being created
|
||||
*/
|
||||
gap: GapSetEntry;
|
||||
/**
|
||||
* total current weight of the gap being created
|
||||
*/
|
||||
gapWeight: number;
|
||||
};
|
||||
|
||||
type GapBuildingInfo = GapBuildingInfo_NothingToBuild
|
||||
| GapBuildingInfo_NotBuilding
|
||||
| GapBuildingInfo_Building;
|
||||
|
||||
/**
|
||||
* Handle object listing with parameters. This extends the base class Delimiter
|
||||
* to return the raw master versions of existing objects.
|
||||
*/
|
||||
export class DelimiterMaster extends Delimiter {
|
||||
|
||||
_gapCaching: GapCachingInfo;
|
||||
_gapBuilding: GapBuildingInfo;
|
||||
_refreshedBuildingParams: GapBuildingParams | null;
|
||||
|
||||
/**
|
||||
* Delimiter listing of master versions.
|
||||
* @param {Object} parameters - listing parameters
|
||||
* @param {String} parameters.delimiter - delimiter per amazon format
|
||||
* @param {String} parameters.prefix - prefix per amazon format
|
||||
* @param {String} parameters.marker - marker per amazon format
|
||||
* @param {Number} parameters.maxKeys - number of keys to list
|
||||
* @param {Boolean} parameters.v2 - indicates whether v2 format
|
||||
* @param {String} parameters.startAfter - marker per amazon v2 format
|
||||
* @param {String} parameters.continuationToken - obfuscated amazon token
|
||||
* @param {String} [parameters.delimiter] - delimiter per amazon format
|
||||
* @param {String} [parameters.prefix] - prefix per amazon format
|
||||
* @param {String} [parameters.marker] - marker per amazon format
|
||||
* @param {Number} [parameters.maxKeys] - number of keys to list
|
||||
* @param {Boolean} [parameters.v2] - indicates whether v2 format
|
||||
* @param {String} [parameters.startAfter] - marker per amazon v2 format
|
||||
* @param {String} [parameters.continuationToken] - obfuscated amazon token
|
||||
* @param {RequestLogger} logger - The logger of the request
|
||||
* @param {String} [vFormat] - versioning key format
|
||||
* @param {String} [vFormat="v0"] - versioning key format
|
||||
*/
|
||||
constructor(parameters, logger, vFormat) {
|
||||
constructor(parameters, logger, vFormat?: string) {
|
||||
super(parameters, logger, vFormat);
|
||||
|
||||
Object.assign(this, {
|
||||
[BucketVersioningKeyFormat.v0]: {
|
||||
skipping: this.skippingV0,
|
||||
},
|
||||
[BucketVersioningKeyFormat.v1]: {
|
||||
skipping: this.skippingV1,
|
||||
},
|
||||
}[this.vFormat]);
|
||||
|
||||
if (this.vFormat === BucketVersioningKeyFormat.v0) {
|
||||
// override Delimiter's implementation of NotSkipping for
|
||||
// DelimiterMaster logic (skipping versions and special
|
||||
|
@ -77,6 +166,10 @@ export class DelimiterMaster extends Delimiter {
|
|||
DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
|
||||
this.keyHandler_WaitVersionAfterPHDV0.bind(this));
|
||||
|
||||
this.setKeyHandler(
|
||||
DelimiterMasterFilterStateId.SkippingGapV0,
|
||||
this.keyHandler_SkippingGapV0.bind(this));
|
||||
|
||||
if (this.marker) {
|
||||
// distinct initial state to include some special logic
|
||||
// before the first master key is found that does not have
|
||||
|
@ -98,6 +191,178 @@ export class DelimiterMaster extends Delimiter {
|
|||
DelimiterFilterStateId.NotSkipping,
|
||||
this.keyHandler_NotSkippingPrefixNorVersionsV1.bind(this));
|
||||
}
|
||||
// in v1, we can directly use Delimiter's implementation,
|
||||
// which is already set to the proper state
|
||||
|
||||
// default initialization of the gap cache and building states, can be
|
||||
// set by refreshGapCache()
|
||||
this._gapCaching = {
|
||||
state: GapCachingState.NoGapCache,
|
||||
};
|
||||
this._gapBuilding = {
|
||||
state: GapBuildingState.Disabled,
|
||||
};
|
||||
this._refreshedBuildingParams = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the validity period left before a refresh of the gap cache is needed
|
||||
* to continue building new gaps.
|
||||
*
|
||||
* @return {number|null} one of:
|
||||
* - the remaining time in milliseconds in which gaps can be added to the
|
||||
* cache before a call to refreshGapCache() is required
|
||||
* - or 0 if there is no time left and a call to refreshGapCache() is required
|
||||
* to resume caching gaps
|
||||
* - or null if refreshing the cache is never needed (because the gap cache
|
||||
* is either not available or not used)
|
||||
*/
|
||||
getGapBuildingValidityPeriodMs(): number | null {
|
||||
let gapBuilding;
|
||||
switch (this._gapBuilding.state) {
|
||||
case GapBuildingState.Disabled:
|
||||
return null;
|
||||
case GapBuildingState.Expired:
|
||||
return 0;
|
||||
case GapBuildingState.NotBuilding:
|
||||
gapBuilding = <GapBuildingInfo_NotBuilding> this._gapBuilding;
|
||||
break;
|
||||
case GapBuildingState.Building:
|
||||
gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
|
||||
break;
|
||||
}
|
||||
const { gapCache, params } = gapBuilding;
|
||||
const elapsedTime = Date.now() - params.initTimestamp;
|
||||
return Math.max(gapCache.exposureDelayMs - elapsedTime, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Refresh the gaps caching logic (gaps are series of current delete markers
|
||||
* in V0 bucket metadata format). It has two effects:
|
||||
*
|
||||
* - starts exposing existing and future gaps from the cache to efficiently
|
||||
* skip over series of current delete markers that have been seen and cached
|
||||
* earlier
|
||||
*
|
||||
* - enables building and caching new gaps (or extend existing ones), for a
|
||||
* limited time period defined by the `gapCacheProxy.exposureDelayMs` value
|
||||
* in milliseconds. To refresh the validity period and resume building and
|
||||
* caching new gaps, one must restart a new listing from the database (starting
|
||||
* at the current listing key, included), then call refreshGapCache() again.
|
||||
*
|
||||
* @param {GapCacheInterface} gapCacheProxy - API proxy to the gaps cache
|
||||
* (the proxy should handle prefixing object keys with the bucket name)
|
||||
* @param {number} [minGapWeight=100] - minimum weight of a gap for it to be
|
||||
* added in the cache
|
||||
* @param {number} [triggerSaveGapWeight] - cumulative weight to wait for
|
||||
* before saving the current building gap. Cannot be greater than
|
||||
* `gapCacheProxy.maxGapWeight` (the value is thresholded to `maxGapWeight`
|
||||
* otherwise). Defaults to `gapCacheProxy.maxGapWeight / 2`.
|
||||
* @return {undefined}
|
||||
*/
|
||||
refreshGapCache(
|
||||
gapCacheProxy: GapCacheInterface,
|
||||
minGapWeight?: number,
|
||||
triggerSaveGapWeight?: number
|
||||
): void {
|
||||
if (this.vFormat !== BucketVersioningKeyFormat.v0) {
|
||||
return;
|
||||
}
|
||||
if (this._gapCaching.state === GapCachingState.NoGapCache) {
|
||||
this._gapCaching = {
|
||||
state: GapCachingState.UnknownGap,
|
||||
gapCache: gapCacheProxy,
|
||||
};
|
||||
}
|
||||
const refreshedBuildingParams: GapBuildingParams = {
|
||||
minGapWeight: minGapWeight || 100,
|
||||
triggerSaveGapWeight: triggerSaveGapWeight
|
||||
|| Math.trunc(gapCacheProxy.maxGapWeight / 2),
|
||||
initTimestamp: Date.now(),
|
||||
};
|
||||
if (this._gapBuilding.state === GapBuildingState.Building) {
|
||||
// refreshed params will be applied as soon as the current building gap is saved
|
||||
this._refreshedBuildingParams = refreshedBuildingParams;
|
||||
} else {
|
||||
this._gapBuilding = {
|
||||
state: GapBuildingState.NotBuilding,
|
||||
gapCache: gapCacheProxy,
|
||||
params: refreshedBuildingParams,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Trigger a lookup of the closest upcoming or already skippable gap.
|
||||
*
|
||||
* @param {string} fromKey - lookup a gap not before 'fromKey'
|
||||
* @return {undefined} - the lookup is asynchronous and its
|
||||
* response is handled inside this function
|
||||
*/
|
||||
_triggerGapLookup(gapCaching: GapCachingInfo_NoCachedGap, fromKey: string): void {
|
||||
this._gapCaching = {
|
||||
state: GapCachingState.GapLookupInProgress,
|
||||
gapCache: gapCaching.gapCache,
|
||||
};
|
||||
const maxKey = this.prefix ? inc(this.prefix) : undefined;
|
||||
gapCaching.gapCache.lookupGap(fromKey, maxKey).then(_gap => {
|
||||
const gap = <GapSetEntry | null> _gap;
|
||||
if (gap) {
|
||||
this._gapCaching = {
|
||||
state: GapCachingState.GapCached,
|
||||
gapCache: gapCaching.gapCache,
|
||||
gapCached: gap,
|
||||
};
|
||||
} else {
|
||||
this._gapCaching = {
|
||||
state: GapCachingState.NoMoreGap,
|
||||
};
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
_checkGapOnMasterDeleteMarker(key: string): FilterReturnValue {
|
||||
switch (this._gapBuilding.state) {
|
||||
case GapBuildingState.Disabled:
|
||||
case GapBuildingState.Expired:
|
||||
break;
|
||||
case GapBuildingState.NotBuilding:
|
||||
this._createBuildingGap(key, 1);
|
||||
break;
|
||||
case GapBuildingState.Building:
|
||||
this._updateBuildingGap(key);
|
||||
break;
|
||||
}
|
||||
if (this._gapCaching.state === GapCachingState.GapCached) {
|
||||
const { gapCached } = this._gapCaching;
|
||||
if (key >= gapCached.firstKey) {
|
||||
if (key <= gapCached.lastKey) {
|
||||
// we are inside the last looked up cached gap: transition to
|
||||
// 'SkippingGapV0' state
|
||||
this.setState(<DelimiterMasterFilterState_SkippingGapV0> {
|
||||
id: DelimiterMasterFilterStateId.SkippingGapV0,
|
||||
});
|
||||
// cut the current gap before skipping, it will be merged or
|
||||
// chained with the existing one (depending on its weight)
|
||||
if (this._gapBuilding.state === GapBuildingState.Building) {
|
||||
// substract 1 from the weight because we are going to chain this gap,
|
||||
// which has an overlap of one key.
|
||||
this._gapBuilding.gap.weight -= 1;
|
||||
this._cutBuildingGap();
|
||||
}
|
||||
return FILTER_SKIP;
|
||||
}
|
||||
// as we are past the cached gap, we will need another lookup
|
||||
this._gapCaching = {
|
||||
state: GapCachingState.UnknownGap,
|
||||
gapCache: this._gapCaching.gapCache,
|
||||
};
|
||||
}
|
||||
}
|
||||
if (this._gapCaching.state === GapCachingState.UnknownGap) {
|
||||
this._triggerGapLookup(this._gapCaching, key);
|
||||
}
|
||||
return FILTER_ACCEPT;
|
||||
}
|
||||
|
||||
filter_onNewMasterKeyV0(key: string, value: string): FilterReturnValue {
|
||||
|
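Putting the pieces above together, a hypothetical caller would wire a gap cache into a V0 master-version listing roughly as follows. This is a sketch, not code from the diff: the logger is assumed to be a werelogs RequestLogger, and in practice refreshGapCache() receives a proxy that prefixes object keys with the bucket name rather than a bare GapCache.

```ts
import GapCache from '../cache/GapCache';
import { DelimiterMaster } from './delimiterMaster';

declare const logger: any; // assumed werelogs RequestLogger

const gapCache = new GapCache(100, 1000, 500); // illustrative values
gapCache.start();

const listing = new DelimiterMaster({ prefix: 'photos/', maxKeys: 1000 }, logger, 'v0');
// expose cached gaps and allow building new ones for gapCache.exposureDelayMs
listing.refreshGapCache(gapCache, 100, 250);

// ... stream metadata entries through listing.filter({ key, value }) ...
// getGapBuildingValidityPeriodMs() tells how long new gaps may still be cached;
// when it reaches 0, restart the listing and call refreshGapCache() again.
```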
@ -109,7 +374,7 @@ export class DelimiterMaster extends Delimiter {
|
|||
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
|
||||
masterKey: key,
|
||||
});
|
||||
return FILTER_ACCEPT;
|
||||
return this._checkGapOnMasterDeleteMarker(key);
|
||||
}
|
||||
if (Version.isPHD(value)) {
|
||||
// master version is a PHD version: wait for the first
|
||||
|
@ -121,6 +386,9 @@ export class DelimiterMaster extends Delimiter {
|
|||
});
|
||||
return FILTER_ACCEPT;
|
||||
}
|
||||
// cut the current gap as soon as a non-deleted entry is seen
|
||||
this._cutBuildingGap();
|
||||
|
||||
if (key.startsWith(DbPrefixes.Replay)) {
|
||||
// skip internal replay prefix entirely
|
||||
this.setState(<DelimiterFilterState_SkippingPrefix> {
|
||||
|
@ -132,6 +400,7 @@ export class DelimiterMaster extends Delimiter {
|
|||
if (this._reachedMaxKeys()) {
|
||||
return FILTER_END;
|
||||
}
|
||||
|
||||
const commonPrefix = this.addCommonPrefixOrContents(key, value);
|
||||
if (commonPrefix) {
|
||||
// transition into SkippingPrefix state to skip all following keys
|
||||
|
@ -173,6 +442,11 @@ export class DelimiterMaster extends Delimiter {
|
|||
* (<key><versionIdSeparator><version>) */
|
||||
const versionIdIndex = key.indexOf(VID_SEP);
|
||||
if (versionIdIndex !== -1) {
|
||||
// version keys count in the building gap weight because they must
|
||||
// also be listed until skipped
|
||||
if (this._gapBuilding.state === GapBuildingState.Building) {
|
||||
this._updateBuildingGap(key);
|
||||
}
|
||||
return FILTER_SKIP;
|
||||
}
|
||||
return this.filter_onNewMasterKeyV0(key, value);
|
||||
|
@ -196,14 +470,151 @@ export class DelimiterMaster extends Delimiter {
|
|||
return this.filter_onNewMasterKeyV0(key, value);
|
||||
}
|
||||
|
||||
    keyHandler_SkippingGapV0(key: string, value: string): FilterReturnValue {
        const { gapCache, gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
        if (key <= gapCached.lastKey) {
            return FILTER_SKIP;
        }
        this._gapCaching = {
            state: GapCachingState.UnknownGap,
            gapCache,
        };
        this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
            id: DelimiterMasterFilterStateId.SkippingVersionsV0,
        });
        // Start a gap with weight=0 from the latest skippable key. This will
        // allow to extend the gap just skipped with a chained gap in case
        // other delete markers are seen after the existing gap is skipped.
        this._createBuildingGap(gapCached.lastKey, 0, gapCached.weight);

        return this.handleKey(key, value);
    }
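A worked illustration of the chaining described in the comment above, with hypothetical keys and weights (none of them taken from this change):

// cached gap returned by the lookup: { firstKey: 'doc/001', lastKey: 'doc/450', weight: 30 }
// keyHandler_SkippingGapV0('doc/500', ...) finds key > lastKey, so it:
//   1. resets the gap cache to UnknownGap (a new lookup will be triggered later);
//   2. calls _createBuildingGap('doc/450', 0, 30): the new building gap starts at the
//      cached gap's last key with weight 0, but gapWeight already counts the cached 30,
//      so delete markers seen right after 'doc/450' extend the skipped range as a
//      chained gap instead of having to reach minGapWeight from scratch.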
|
||||
|
||||
skippingBase(): string | undefined {
|
||||
switch (this.state.id) {
|
||||
case DelimiterMasterFilterStateId.SkippingVersionsV0:
|
||||
const { masterKey } = <DelimiterMasterFilterState_SkippingVersionsV0> this.state;
|
||||
return masterKey + VID_SEP;
|
||||
return masterKey + inc(VID_SEP);
|
||||
|
||||
case DelimiterMasterFilterStateId.SkippingGapV0:
|
||||
const { gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
|
||||
return gapCached.lastKey;
|
||||
|
||||
default:
|
||||
return super.skippingBase();
|
||||
}
|
||||
}
|
||||
|
||||
result(): ResultObject {
|
||||
this._cutBuildingGap();
|
||||
return super.result();
|
||||
}
|
||||
|
||||
_checkRefreshedBuildingParams(params: GapBuildingParams): GapBuildingParams {
|
||||
if (this._refreshedBuildingParams) {
|
||||
const newParams = this._refreshedBuildingParams;
|
||||
this._refreshedBuildingParams = null;
|
||||
return newParams;
|
||||
}
|
||||
return params;
|
||||
}
|
||||
|
||||
/**
|
||||
* Save the gap being built if allowed (i.e. still within the
|
||||
* allocated exposure time window).
|
||||
*
|
||||
* @return {boolean} - true if the gap was saved, false if we are
|
||||
* outside the allocated exposure time window.
|
||||
*/
|
||||
_saveBuildingGap(): boolean {
|
||||
const { gapCache, params, gap, gapWeight } =
|
||||
<GapBuildingInfo_Building> this._gapBuilding;
|
||||
const totalElapsed = Date.now() - params.initTimestamp;
|
||||
if (totalElapsed >= gapCache.exposureDelayMs) {
|
||||
this._gapBuilding = {
|
||||
state: GapBuildingState.Expired,
|
||||
};
|
||||
this._refreshedBuildingParams = null;
|
||||
return false;
|
||||
}
|
||||
const { firstKey, lastKey, weight } = gap;
|
||||
gapCache.setGap(firstKey, lastKey, weight);
|
||||
this._gapBuilding = {
|
||||
state: GapBuildingState.Building,
|
||||
gapCache,
|
||||
params: this._checkRefreshedBuildingParams(params),
|
||||
gap: {
|
||||
firstKey: gap.lastKey,
|
||||
lastKey: gap.lastKey,
|
||||
weight: 0,
|
||||
},
|
||||
gapWeight,
|
||||
};
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new gap to be extended afterwards
|
||||
*
|
||||
* @param {string} newKey - gap's first key
|
||||
* @param {number} startWeight - initial weight of the building gap (usually 0 or 1)
|
||||
* @param {number} [cachedWeight] - if continuing a cached gap, weight of the existing
|
||||
* cached portion
|
||||
* @return {undefined}
|
||||
*/
|
||||
_createBuildingGap(newKey: string, startWeight: number, cachedWeight?: number): void {
|
||||
if (this._gapBuilding.state === GapBuildingState.NotBuilding) {
|
||||
const { gapCache, params } = <GapBuildingInfo_NotBuilding> this._gapBuilding;
|
||||
this._gapBuilding = {
|
||||
state: GapBuildingState.Building,
|
||||
gapCache,
|
||||
params: this._checkRefreshedBuildingParams(params),
|
||||
gap: {
|
||||
firstKey: newKey,
|
||||
lastKey: newKey,
|
||||
weight: startWeight,
|
||||
},
|
||||
gapWeight: (cachedWeight || 0) + startWeight,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
    _updateBuildingGap(newKey: string): void {
        const gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
        const { params, gap } = gapBuilding;
        gap.lastKey = newKey;
        gap.weight += 1;
        gapBuilding.gapWeight += 1;
        // the GapCache API requires updating a gap regularly because it can only split
        // it once per update, by the known last key. In practice the default behavior
        // is to trigger an update after a number of keys that is half the maximum weight.
        // It is also useful for other listings to benefit from the cache sooner.
        if (gapBuilding.gapWeight >= params.minGapWeight &&
            gap.weight >= params.triggerSaveGapWeight) {
            this._saveBuildingGap();
        }
    }
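As a rough numeric sketch of the trigger above (the parameter values are hypothetical, not from this change):

// params: { minGapWeight: 100, triggerSaveGapWeight: 50 }
// gapWeight counts every key since the gap was started (or continued from a cached gap),
// while gap.weight only counts keys since the last _saveBuildingGap():
//   keys 1..99    -> gapWeight < 100, nothing is saved yet
//   key 100       -> gapWeight = 100 and gap.weight = 100 >= 50: the gap is saved and
//                    restarts at { firstKey: lastKey, weight: 0 }
//   keys 101..149 -> gap.weight < 50, nothing saved
//   key 150       -> gap.weight = 50: saved again, extending the same cached gap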
|
||||
|
||||
_cutBuildingGap(): void {
|
||||
if (this._gapBuilding.state === GapBuildingState.Building) {
|
||||
let gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
|
||||
let { gapCache, params, gap, gapWeight } = gapBuilding;
|
||||
// only set gaps that are significant enough in weight and
|
||||
// with a non-empty extension
|
||||
if (gapWeight >= params.minGapWeight && gap.weight > 0) {
|
||||
// we're done if we were not allowed to save the gap
|
||||
if (!this._saveBuildingGap()) {
|
||||
return;
|
||||
}
|
||||
// params may have been refreshed, reload them
|
||||
gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
|
||||
params = gapBuilding.params;
|
||||
}
|
||||
this._gapBuilding = {
|
||||
state: GapBuildingState.NotBuilding,
|
||||
gapCache,
|
||||
params,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -457,11 +457,14 @@ export class DelimiterVersions extends Extension {
        switch (this.state.id) {
        case DelimiterVersionsFilterStateId.SkippingPrefix:
            const { prefix } = <DelimiterVersionsFilterState_SkippingPrefix> this.state;
-           return prefix;
+           return inc(prefix);

        case DelimiterVersionsFilterStateId.SkippingVersions:
            const { gt } = <DelimiterVersionsFilterState_SkippingVersions> this.state;
-           return gt;
+           // the contract of skipping() is to return the first key
+           // that can be skipped to, so adding a null byte to skip
+           // over the existing versioned key set in 'gt'
+           return `${gt}\0`;

        default:
            return SKIP_NONE;
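The null byte works because listing keys are compared as plain strings: '\0' is the lowest code unit, so `${gt}\0` is the immediate successor of gt and nothing can be skipped past by mistake. A quick standalone check (not part of the change):

const gt = 'mykey\u0000v100';   // hypothetical versioned key recorded in the state
const next = `${gt}\0`;
console.assert(next > gt);
// no string s satisfies gt < s < next: s would have to extend gt with a code unit
// strictly below '\0', which does not exist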
|
||||
|
|
|
@ -52,21 +52,21 @@ class Skip {
|
|||
assert(this.skipRangeCb);
|
||||
|
||||
const filteringResult = this.extension.filter(entry);
|
||||
const skippingRange = this.extension.skipping();
|
||||
const skipTo = this.extension.skipping();
|
||||
|
||||
if (filteringResult === FILTER_END) {
|
||||
this.listingEndCb();
|
||||
} else if (filteringResult === FILTER_SKIP
|
||||
&& skippingRange !== SKIP_NONE) {
|
||||
&& skipTo !== SKIP_NONE) {
|
||||
if (++this.streakLength >= MAX_STREAK_LENGTH) {
|
||||
let newRange;
|
||||
if (Array.isArray(skippingRange)) {
|
||||
if (Array.isArray(skipTo)) {
|
||||
newRange = [];
|
||||
for (let i = 0; i < skippingRange.length; ++i) {
|
||||
newRange.push(this._inc(skippingRange[i]));
|
||||
for (let i = 0; i < skipTo.length; ++i) {
|
||||
newRange.push(skipTo[i]);
|
||||
}
|
||||
} else {
|
||||
newRange = this._inc(skippingRange);
|
||||
newRange = skipTo;
|
||||
}
|
||||
/* Avoid looping over the same range again and again. */
|
||||
if (newRange === this.gteParams) {
|
||||
|
@ -79,16 +79,6 @@ class Skip {
|
|||
this.streakLength = 0;
|
||||
}
|
||||
}
|
||||
|
||||
_inc(str) {
|
||||
if (!str) {
|
||||
return str;
|
||||
}
|
||||
const lastCharValue = str.charCodeAt(str.length - 1);
|
||||
const lastCharNewValue = String.fromCharCode(lastCharValue + 1);
|
||||
|
||||
return `${str.slice(0, str.length - 1)}${lastCharNewValue}`;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ function vaultSignatureCb(
|
|||
err: Error | null,
|
||||
authInfo: { message: { body: any } },
|
||||
log: Logger,
|
||||
callback: (err: Error | null, data?: any, results?: any, params?: any) => void,
|
||||
callback: (err: Error | null, data?: any, results?: any, params?: any, infos?: any) => void,
|
||||
streamingV4Params?: any
|
||||
) {
|
||||
// vaultclient API guarantees that it returns:
|
||||
|
@ -38,7 +38,9 @@ function vaultSignatureCb(
|
|||
}
|
||||
// @ts-ignore
|
||||
log.addDefaultFields(auditLog);
|
||||
return callback(null, userInfo, authorizationResults, streamingV4Params);
|
||||
return callback(null, userInfo, authorizationResults, streamingV4Params, {
|
||||
accountQuota: info.accountQuota || {},
|
||||
});
|
||||
}
|
||||
|
||||
export type AuthV4RequestParams = {
|
||||
|
@ -384,4 +386,19 @@ export default class Vault {
|
|||
return callback(null, respBody);
|
||||
});
|
||||
}
|
||||
|
||||
report(log: Logger, callback: (err: Error | null, data?: any) => void) {
|
||||
// call the report function of the client
|
||||
if (!this.client.report) {
|
||||
return callback(null, {});
|
||||
}
|
||||
// @ts-ignore
|
||||
return this.client.report(log.getSerializedUids(), (err: Error | null, obj?: any) => {
|
||||
if (err) {
|
||||
log.debug(`error from ${this.implName}`, { error: err });
|
||||
return callback(err);
|
||||
}
|
||||
return callback(null, obj);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -134,7 +134,7 @@ export default class ChainBackend extends BaseBackend {
|
|||
}
|
||||
|
||||
const check = (policy) => {
|
||||
const key = (policy.arn || '') + (policy.versionId || '');
|
||||
const key = (policy.arn || '') + (policy.versionId || '') + (policy.action || '');
|
||||
if (!policyMap[key] || !policyMap[key].isAllowed) {
|
||||
policyMap[key] = policy;
|
||||
}
|
||||
|
@ -158,8 +158,12 @@ export default class ChainBackend extends BaseBackend {
|
|||
if (policyMap[key].versionId) {
|
||||
policyRes.versionId = policyMap[key].versionId;
|
||||
}
|
||||
if (policyMap[key].isImplicit !== undefined) {
|
||||
policyRes.isImplicit = policyMap[key].isImplicit;
|
||||
}
|
||||
if (policyMap[key].action) {
|
||||
policyRes.action = policyMap[key].action;
|
||||
}
|
||||
return policyRes;
|
||||
});
|
||||
}
|
||||
|
@ -208,4 +212,22 @@ export default class ChainBackend extends BaseBackend {
|
|||
return callback(null, res);
|
||||
});
|
||||
}
|
||||
|
||||
report(reqUid: string, callback: any) {
|
||||
this._forEachClient((client, done) =>
|
||||
client.report(reqUid, done),
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
const mergedRes = res.reduce((acc, val) => {
|
||||
Object.keys(val).forEach(k => {
|
||||
acc[k] = val[k];
|
||||
});
|
||||
return acc;
|
||||
}, {});
|
||||
|
||||
return callback(null, mergedRes);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -161,6 +161,10 @@ class InMemoryBackend extends BaseBackend {
|
|||
};
|
||||
return cb(null, vaultReturnObject);
|
||||
}
|
||||
|
||||
report(log: Logger, callback: any) {
|
||||
return callback(null, {});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -83,13 +83,15 @@ export type ResultObject = {
|
|||
export type CommandPromise = {
|
||||
resolve: (results?: ResultObject[]) => void;
|
||||
reject: (error: Error) => void;
|
||||
timeout: NodeJS.Timer | null;
|
||||
timeout: NodeJS.Timeout | null;
|
||||
};
|
||||
export type HandlerCallback = (error: Error | null | undefined, result?: any) => void;
|
||||
export type HandlerCallback = (error: (Error & { code?: number }) | null | undefined, result?: any) => void;
|
||||
export type HandlerFunction = (payload: object, uids: string, callback: HandlerCallback) => void;
|
||||
export type HandlersMap = {
|
||||
[index: string]: HandlerFunction;
|
||||
};
|
||||
export type PrimaryHandlerFunction = (worker: Worker, payload: object, uids: string, callback: HandlerCallback) => void;
|
||||
export type PrimaryHandlersMap = Record<string, PrimaryHandlerFunction>;
|
||||
|
||||
// private types
|
||||
|
||||
|
@ -106,6 +108,7 @@ type RPCCommandMessage = RPCMessage<'cluster-rpc:command', any> & {
|
|||
|
||||
type MarshalledResultObject = {
|
||||
error: string | null;
|
||||
errorCode?: number;
|
||||
result: any;
|
||||
};
|
||||
|
||||
|
@ -119,6 +122,15 @@ type RPCCommandErrorMessage = RPCMessage<'cluster-rpc:commandError', {
|
|||
error: string;
|
||||
}>;
|
||||
|
||||
interface RPCSetupOptions {
|
||||
/**
|
||||
* As werelogs is not a peerDependency, arsenal and a parent project
|
||||
* might have their own separate versions duplicated in dependencies.
|
||||
* The config is therefore not shared.
|
||||
* Use this to propagate werelogs config to arsenal's ClusterRPC.
|
||||
*/
|
||||
werelogsConfig?: Parameters<typeof werelogs.configure>[0];
|
||||
};
|
||||
|
||||
/**
|
||||
* In primary: store worker IDs that are waiting to be dispatched
|
||||
|
@@ -165,12 +177,20 @@ function _isRpcMessage(message) {
/**
 * Setup cluster RPC system on the primary
 *
+ * @param {object} [handlers] - mapping of handler names to handler functions
+ *   handler function:
+ *   `handler({Worker} worker, {object} payload, {string} uids, {function} callback)`
+ *   handler callback must be called when worker is done with the command:
+ *   `callback({Error|null} error, {any} [result])`
 * @return {undefined}
 */
-export function setupRPCPrimary() {
+export function setupRPCPrimary(handlers?: PrimaryHandlersMap, options?: RPCSetupOptions) {
+    if (options?.werelogsConfig) {
+        werelogs.configure(options.werelogsConfig);
+    }
    cluster.on('message', (worker, message) => {
        if (_isRpcMessage(message)) {
-            _handlePrimaryMessage(worker?.id, message);
+            _handlePrimaryMessage(worker, message, handlers);
        }
    });
}
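A minimal wiring sketch of the new primary-side handlers, assuming a hypothetical 'primaryEcho' handler name and the parameter order documented for sendWorkerCommand() further down; none of these names come from this change:

// primary process
setupRPCPrimary({
    primaryEcho: (worker, payload, uids, callback) => {
        // errors passed here are marshalled back to the calling worker, including `code`
        callback(null, { echoed: payload, fromWorker: worker.id });
    },
}, { werelogsConfig: { level: 'info', dump: 'error' } });

// worker process
setupRPCWorker({ /* worker-side handlers */ }, { werelogsConfig: { level: 'info', dump: 'error' } });
// later, inside an async function in the worker:
const [res] = await sendWorkerCommand('PRIMARY', 'primaryEcho', 'serialized-uids', { hello: 'world' });
// res.error is null on success and res.result holds the handler's result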
|
||||
|
@ -186,10 +206,13 @@ export function setupRPCPrimary() {
|
|||
* @return {undefined}
|
||||
* }
|
||||
*/
|
||||
export function setupRPCWorker(handlers: HandlersMap) {
|
||||
export function setupRPCWorker(handlers: HandlersMap, options?: RPCSetupOptions) {
|
||||
if (!process.send) {
|
||||
throw new Error('fatal: cannot setup cluster RPC: "process.send" is not available');
|
||||
}
|
||||
if (options?.werelogsConfig) {
|
||||
werelogs.configure(options.werelogsConfig);
|
||||
}
|
||||
process.on('message', (message: RPCCommandMessage | RPCCommandResultsMessage) => {
|
||||
if (_isRpcMessage(message)) {
|
||||
_handleWorkerMessage(message, handlers);
|
||||
|
@ -201,8 +224,9 @@ export function setupRPCWorker(handlers: HandlersMap) {
|
|||
* Send a command for workers to execute in parallel, and wait for results
|
||||
*
|
||||
* @param {string} toWorkers - which workers should execute the command
|
||||
* Currently the only supported value is "*", meaning all workers will
|
||||
* execute the command
|
||||
* Currently the supported values are:
|
||||
* - "*", meaning all workers will execute the command
|
||||
* - "PRIMARY", meaning primary process will execute the command
|
||||
* @param {string} toHandler - name of handler that will execute the
|
||||
* command in workers, as declared in setupRPCWorker() parameter object
|
||||
* @param {string} uids - unique identifier of the command, must be
|
||||
|
@ -230,7 +254,7 @@ export async function sendWorkerCommand(
|
|||
}
|
||||
rpcLogger.info('sending command', { toWorkers, toHandler, uids, payload });
|
||||
return new Promise((resolve, reject) => {
|
||||
let timeout: NodeJS.Timer | null = null;
|
||||
let timeout: NodeJS.Timeout | null = null;
|
||||
if (timeoutMs) {
|
||||
timeout = setTimeout(() => {
|
||||
delete uidsToCommandPromise[uids];
|
||||
|
@ -288,10 +312,27 @@ function _dispatchCommandErrorToWorker(
|
|||
worker.send(message);
|
||||
}
|
||||
|
||||
function _sendPrimaryCommandResult(
|
||||
worker: Worker,
|
||||
uids: string,
|
||||
error: (Error & { code?: number }) | null | undefined,
|
||||
result?: any
|
||||
): void {
|
||||
const message: RPCCommandResultsMessage = {
|
||||
type: 'cluster-rpc:commandResults',
|
||||
uids,
|
||||
payload: {
|
||||
results: [{ error: error?.message || null, errorCode: error?.code, result }],
|
||||
},
|
||||
};
|
||||
worker.send?.(message);
|
||||
}
|
||||
|
||||
function _handlePrimaryCommandMessage(
|
||||
fromWorkerId: number,
|
||||
fromWorker: Worker,
|
||||
logger: any,
|
||||
message: RPCCommandMessage
|
||||
message: RPCCommandMessage,
|
||||
handlers?: PrimaryHandlersMap
|
||||
): void {
|
||||
const { toWorkers, toHandler, uids, payload } = message;
|
||||
if (toWorkers === '*') {
|
||||
|
@ -305,7 +346,7 @@ function _handlePrimaryCommandMessage(
|
|||
for (const workerId of Object.keys(cluster.workers || {})) {
|
||||
commandResults[workerId] = null;
|
||||
}
|
||||
uidsToWorkerId[uids] = fromWorkerId;
|
||||
uidsToWorkerId[uids] = fromWorker?.id;
|
||||
uidsToCommandResults[uids] = commandResults;
|
||||
|
||||
for (const [workerId, worker] of Object.entries(cluster.workers || {})) {
|
||||
|
@ -316,11 +357,21 @@ function _handlePrimaryCommandMessage(
|
|||
worker.send(message);
|
||||
}
|
||||
}
|
||||
} else if (toWorkers === 'PRIMARY') {
|
||||
const { toHandler, uids, payload } = message;
|
||||
const cb: HandlerCallback = (err, result) => _sendPrimaryCommandResult(fromWorker, uids, err, result);
|
||||
|
||||
if (toHandler in (handlers || {})) {
|
||||
return handlers![toHandler](fromWorker, payload, uids, cb);
|
||||
}
|
||||
logger.error('no such handler in "toHandler" field from worker command message', {
|
||||
toHandler,
|
||||
});
|
||||
return cb(errors.NotImplemented);
|
||||
} else {
|
||||
logger.error('unsupported "toWorkers" field from worker command message', {
|
||||
toWorkers,
|
||||
});
|
||||
const fromWorker = cluster.workers?.[fromWorkerId];
|
||||
if (fromWorker) {
|
||||
_dispatchCommandErrorToWorker(fromWorker, uids, errors.NotImplemented);
|
||||
}
|
||||
|
@ -378,22 +429,23 @@ function _handlePrimaryCommandResultMessage(
|
|||
}
|
||||
|
||||
function _handlePrimaryMessage(
|
||||
fromWorkerId: number,
|
||||
message: RPCCommandMessage | RPCCommandResultMessage
|
||||
fromWorker: Worker,
|
||||
message: RPCCommandMessage | RPCCommandResultMessage,
|
||||
handlers?: PrimaryHandlersMap
|
||||
): void {
|
||||
const { type: messageType, uids } = message;
|
||||
const logger = rpcLogger.newRequestLoggerFromSerializedUids(uids);
|
||||
logger.debug('primary received message from worker', {
|
||||
workerId: fromWorkerId, rpcMessage: message,
|
||||
workerId: fromWorker?.id, rpcMessage: message,
|
||||
});
|
||||
if (messageType === 'cluster-rpc:command') {
|
||||
return _handlePrimaryCommandMessage(fromWorkerId, logger, message);
|
||||
return _handlePrimaryCommandMessage(fromWorker, logger, message, handlers);
|
||||
}
|
||||
if (messageType === 'cluster-rpc:commandResult') {
|
||||
return _handlePrimaryCommandResultMessage(fromWorkerId, logger, message);
|
||||
return _handlePrimaryCommandResultMessage(fromWorker?.id, logger, message);
|
||||
}
|
||||
logger.error('unsupported message type', {
|
||||
workerId: fromWorkerId, messageType, uids,
|
||||
workerId: fromWorker?.id, messageType, uids,
|
||||
});
|
||||
return undefined;
|
||||
}
|
||||
|
@ -455,6 +507,9 @@ function _handleWorkerCommandResultsMessage(
|
|||
workerError = new Error(workerResult.error);
|
||||
}
|
||||
}
|
||||
if (workerError && workerResult.errorCode) {
|
||||
(workerError as Error & { code: number }).code = workerResult.errorCode;
|
||||
}
|
||||
const unmarshalledResult: ResultObject = {
|
||||
error: workerError,
|
||||
result: workerResult.result,
|
||||
|
|
|
@ -172,3 +172,6 @@ export const maxCachedBuckets = process.env.METADATA_MAX_CACHED_BUCKETS ?
|
|||
|
||||
export const validRestoreObjectTiers = new Set(['Expedited', 'Standard', 'Bulk']);
|
||||
export const maxBatchingConcurrentOperations = 5;
|
||||
|
||||
/** For policy resource arn check we allow empty account ID to not break compatibility */
|
||||
export const policyArnAllowedEmptyAccountId = ['utapi', 'scuba'];
|
||||
|
|
|
@ -148,7 +148,7 @@ export class IndexTransaction {
|
|||
'missing condition for conditional put'
|
||||
);
|
||||
}
|
||||
if (typeof condition.notExists !== 'string') {
|
||||
if (typeof condition.notExists !== 'string' && typeof condition.exists !== 'string') {
|
||||
throw propError(
|
||||
'unsupportedConditionalOperation',
|
||||
'missing key or supported condition'
|
||||
|
|
|
@ -1042,3 +1042,15 @@ export const AuthMethodNotImplemented: ErrorFormat = {
|
|||
description: 'AuthMethodNotImplemented',
|
||||
code: 501,
|
||||
};
|
||||
|
||||
// --------------------- quotaErrors ---------------------
|
||||
|
||||
export const NoSuchQuota: ErrorFormat = {
|
||||
code: 404,
|
||||
description: 'The specified resource does not have a quota.',
|
||||
};
|
||||
|
||||
export const QuotaExceeded: ErrorFormat = {
|
||||
code: 429,
|
||||
description: 'The quota set for the resource is exceeded.',
|
||||
};
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import { legacyLocations } from '../constants';
|
||||
import escapeForXml from '../s3middleware/escapeForXml';
|
||||
|
||||
|
|
|
@ -101,6 +101,7 @@ export default class BucketInfo {
|
|||
_azureInfo: any | null;
|
||||
_ingestion: { status: 'enabled' | 'disabled' } | null;
|
||||
_capabilities?: Capabilities;
|
||||
_quotaMax: number | 0;
|
||||
|
||||
/**
|
||||
* Represents all bucket information.
|
||||
|
@ -157,6 +158,7 @@ export default class BucketInfo {
|
|||
* @param [notificationConfiguration] - bucket notification configuration
|
||||
* @param [tags] - bucket tag set
|
||||
* @param [capabilities] - capabilities for the bucket
|
||||
* @param quotaMax - bucket quota
|
||||
*/
|
||||
constructor(
|
||||
name: string,
|
||||
|
@ -185,6 +187,7 @@ export default class BucketInfo {
|
|||
notificationConfiguration?: any,
|
||||
tags?: Array<BucketTag> | [],
|
||||
capabilities?: Capabilities,
|
||||
quotaMax?: number | 0,
|
||||
) {
|
||||
assert.strictEqual(typeof name, 'string');
|
||||
assert.strictEqual(typeof owner, 'string');
|
||||
|
@ -285,6 +288,10 @@ export default class BucketInfo {
|
|||
tags = [] as BucketTag[];
|
||||
}
|
||||
assert.strictEqual(areTagsValid(tags), true);
|
||||
if (quotaMax) {
|
||||
assert.strictEqual(typeof quotaMax, 'number');
|
||||
assert(quotaMax >= 0, 'Quota cannot be negative');
|
||||
}
|
||||
|
||||
// IF UPDATING PROPERTIES, INCREMENT MODELVERSION NUMBER ABOVE
|
||||
this._acl = aclInstance;
|
||||
|
@ -313,6 +320,7 @@ export default class BucketInfo {
|
|||
this._notificationConfiguration = notificationConfiguration || null;
|
||||
this._tags = tags;
|
||||
this._capabilities = capabilities || undefined;
|
||||
this._quotaMax = quotaMax || 0;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -348,6 +356,7 @@ export default class BucketInfo {
|
|||
notificationConfiguration: this._notificationConfiguration,
|
||||
tags: this._tags,
|
||||
capabilities: this._capabilities,
|
||||
quotaMax: this._quotaMax,
|
||||
};
|
||||
const final = this._websiteConfiguration
|
||||
? {
|
||||
|
@ -374,7 +383,7 @@ export default class BucketInfo {
|
|||
obj.bucketPolicy, obj.uid, obj.readLocationConstraint, obj.isNFS,
|
||||
obj.ingestion, obj.azureInfo, obj.objectLockEnabled,
|
||||
obj.objectLockConfiguration, obj.notificationConfiguration, obj.tags,
|
||||
obj.capabilities);
|
||||
obj.capabilities, obj.quotaMax);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -401,7 +410,8 @@ export default class BucketInfo {
|
|||
data._bucketPolicy, data._uid, data._readLocationConstraint,
|
||||
data._isNFS, data._ingestion, data._azureInfo,
|
||||
data._objectLockEnabled, data._objectLockConfiguration,
|
||||
data._notificationConfiguration, data._tags, data._capabilities);
|
||||
data._notificationConfiguration, data._tags, data._capabilities,
|
||||
data._quotaMax);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -939,4 +949,22 @@ export default class BucketInfo {
        this._capabilities = capabilities;
        return this;
    }
+
+    /**
+     * Get the bucket quota information
+     * @return quotaMax
+     */
+    getQuota() {
+        return this._quotaMax;
+    }
+
+    /**
+     * Set bucket quota
+     * @param quota - quota to be set
+     * @return - bucket quota info
+     */
+    setQuota(quota: number) {
+        this._quotaMax = quota || 0;
+        return this;
+    }
}
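A small usage sketch of the new accessors, given an existing BucketInfo instance `bucket` (the values are illustrative):

const quotaInBytes = 5 * 1024 * 1024 * 1024;   // 5 GiB
bucket.setQuota(quotaInBytes);
assert.strictEqual(bucket.getQuota(), quotaInBytes);
bucket.setQuota(0);                            // 0 (or any falsy value) means "no quota"
assert.strictEqual(bucket.getQuota(), 0);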
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
import assert from 'assert';
|
||||
import UUID from 'uuid';
|
||||
|
||||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import escapeForXml from '../s3middleware/escapeForXml';
|
||||
import errors from '../errors';
|
||||
import { isValidBucketName } from '../s3routes/routesUtils';
|
||||
|
|
|
@ -435,7 +435,6 @@ export default class Server {
|
|||
this._server.on('connection', sock => {
|
||||
// Setting no delay of the socket to the value configured
|
||||
// TODO fix this
|
||||
// @ts-expect-errors
|
||||
sock.setNoDelay(this.isNoDelay());
|
||||
sock.on('error', err => this._logger.info(
|
||||
'socket error - request rejected', { error: err }));
|
||||
|
|
|
@ -62,7 +62,7 @@ export default class HealthProbeServer extends httpServer {
|
|||
_onLiveness(
|
||||
_req: http.IncomingMessage,
|
||||
res: http.ServerResponse,
|
||||
log: RequestLogger,
|
||||
log: werelogs.RequestLogger,
|
||||
) {
|
||||
if (this._livenessCheck(log)) {
|
||||
sendSuccess(res, log);
|
||||
|
@ -74,7 +74,7 @@ export default class HealthProbeServer extends httpServer {
|
|||
_onReadiness(
|
||||
_req: http.IncomingMessage,
|
||||
res: http.ServerResponse,
|
||||
log: RequestLogger,
|
||||
log: werelogs.RequestLogger,
|
||||
) {
|
||||
if (this._readinessCheck(log)) {
|
||||
sendSuccess(res, log);
|
||||
|
|
|
@ -16,7 +16,7 @@ export const DEFAULT_METRICS_ROUTE = '/metrics';
|
|||
* @param log - Werelogs instance for logging if you choose to
|
||||
*/
|
||||
|
||||
export type ProbeDelegate = (res: http.ServerResponse, log: RequestLogger) => string | void
|
||||
export type ProbeDelegate = (res: http.ServerResponse, log: werelogs.RequestLogger) => string | void
|
||||
|
||||
export type ProbeServerParams = {
|
||||
port: number;
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
import * as http from 'http';
|
||||
|
||||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import { ArsenalError } from '../../errors';
|
||||
|
||||
/**
|
||||
|
|
|
@ -119,7 +119,7 @@ export default class RESTClient {
|
|||
method: string,
|
||||
headers: http.OutgoingHttpHeaders | null,
|
||||
key: string | null,
|
||||
log: RequestLogger,
|
||||
log: werelogs.RequestLogger,
|
||||
responseCb: (res: http.IncomingMessage) => void,
|
||||
) {
|
||||
const reqHeaders = headers || {};
|
||||
|
|
|
@ -25,7 +25,7 @@ function setContentRange(
|
|||
|
||||
function sendError(
|
||||
res: http.ServerResponse,
|
||||
log: RequestLogger,
|
||||
log: werelogs.RequestLogger,
|
||||
error: ArsenalError,
|
||||
optMessage?: string,
|
||||
) {
|
||||
|
@ -141,7 +141,7 @@ export default class RESTServer extends httpServer {
|
|||
_onPut(
|
||||
req: http.IncomingMessage,
|
||||
res: http.ServerResponse,
|
||||
log: RequestLogger,
|
||||
log: werelogs.RequestLogger,
|
||||
) {
|
||||
let size: number;
|
||||
try {
|
||||
|
@ -183,7 +183,7 @@ export default class RESTServer extends httpServer {
|
|||
_onGet(
|
||||
req: http.IncomingMessage,
|
||||
res: http.ServerResponse,
|
||||
log: RequestLogger,
|
||||
log: werelogs.RequestLogger,
|
||||
) {
|
||||
let pathInfo: ReturnType<typeof parseURL>;
|
||||
let rangeSpec: ReturnType<typeof httpUtils.parseRangeSpec> | undefined =
|
||||
|
@ -266,7 +266,7 @@ export default class RESTServer extends httpServer {
|
|||
_onDelete(
|
||||
req: http.IncomingMessage,
|
||||
res: http.ServerResponse,
|
||||
log: RequestLogger,
|
||||
log: werelogs.RequestLogger,
|
||||
) {
|
||||
let pathInfo: ReturnType<typeof parseURL>;
|
||||
try {
|
||||
|
|
|
@@ -12,13 +12,39 @@ import {
    actionMapSSO,
    actionMapSTS,
    actionMapMetadata,
    actionMapScuba,
} from './utils/actionMaps';

-const _actionNeedQuotaCheck = {
+export const actionNeedQuotaCheck = {
    objectPut: true,
    objectPutVersion: true,
    objectPutPart: true,
    objectRestore: true,
};

/**
 * This variable describes APIs that change the bytes
 * stored, requiring quota updates
 */
export const actionWithDataDeletion = {
    objectDelete: true,
    objectDeleteVersion: true,
    multipartDelete: true,
    multiObjectDelete: true,
};

/**
 * The function returns true if the current API call is a copy object
 * and the action requires a quota evaluation logic, post retrieval
 * of the object metadata.
 * @param {string} action - the action being performed
 * @param {string} currentApi - the current API being called
 * @return {boolean} - whether the action requires a quota check
 */
export function actionNeedQuotaCheckCopy(action: string, currentApi: string) {
    return action === 'objectGet' && (currentApi === 'objectCopy' || currentApi === 'objectPutCopyPart');
}
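In other words, only the source-object read done on behalf of a copy operation triggers the post-metadata quota evaluation:

actionNeedQuotaCheckCopy('objectGet', 'objectCopy');        // true
actionNeedQuotaCheckCopy('objectGet', 'objectPutCopyPart'); // true
actionNeedQuotaCheckCopy('objectGet', 'objectGet');         // false: plain GET
actionNeedQuotaCheckCopy('objectPut', 'objectCopy');        // false: the write side is
                                                            // covered by actionNeedQuotaCheck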
|
||||
|
||||
function _findAction(service: string, method: string) {
|
||||
switch (service) {
|
||||
case 's3':
|
||||
|
@ -36,6 +62,8 @@ function _findAction(service: string, method: string) {
|
|||
return actionMapSTS[method];
|
||||
case 'metadata':
|
||||
return actionMapMetadata[method];
|
||||
case 'scuba':
|
||||
return actionMapScuba[method];
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
|
@ -105,6 +133,10 @@ function _buildArn(
|
|||
return `arn:scality:metadata::${requesterInfo!.accountid}:` +
|
||||
`${generalResource}/`;
|
||||
}
|
||||
case 'scuba': {
|
||||
return `arn:scality:scuba::${requesterInfo!.accountid}:` +
|
||||
`${generalResource}${specificResource ? '/' + specificResource : ''}`;
|
||||
}
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
|
@ -224,7 +256,8 @@ export default class RequestContext {
|
|||
this._securityToken = securityToken;
|
||||
this._policyArn = policyArn;
|
||||
this._action = action;
|
||||
this._needQuota = _actionNeedQuotaCheck[apiMethod] === true;
|
||||
this._needQuota = actionNeedQuotaCheck[apiMethod] === true
|
||||
|| actionWithDataDeletion[apiMethod] === true;
|
||||
this._requestObjTags = requestObjTags || null;
|
||||
this._existingObjTag = existingObjTag || null;
|
||||
this._needTagEval = needTagEval || false;
|
||||
|
|
|
@ -52,6 +52,12 @@ const sharedActionMap = {
|
|||
objectPutVersion: 's3:PutObjectVersion',
|
||||
};
|
||||
|
||||
const actionMapBucketQuotas = {
|
||||
bucketGetQuota: 'scality:GetBucketQuota',
|
||||
bucketUpdateQuota: 'scality:UpdateBucketQuota',
|
||||
bucketDeleteQuota: 'scality:DeleteBucketQuota',
|
||||
};
|
||||
|
||||
// action map used for request context
|
||||
const actionMapRQ = {
|
||||
bucketPut: 's3:CreateBucket',
|
||||
|
@ -65,6 +71,7 @@ const actionMapRQ = {
|
|||
initiateMultipartUpload: 's3:PutObject',
|
||||
objectDeleteVersion: 's3:DeleteObjectVersion',
|
||||
objectDeleteTaggingVersion: 's3:DeleteObjectVersionTagging',
|
||||
objectGetArchiveInfo: 'scality:GetObjectArchiveInfo',
|
||||
objectGetVersion: 's3:GetObjectVersion',
|
||||
objectGetACLVersion: 's3:GetObjectVersionAcl',
|
||||
objectGetTaggingVersion: 's3:GetObjectVersionTagging',
|
||||
|
@ -79,6 +86,7 @@ const actionMapRQ = {
|
|||
objectPutLegalHoldVersion: 's3:PutObjectLegalHold',
|
||||
listObjectVersions: 's3:ListBucketVersions',
|
||||
...sharedActionMap,
|
||||
...actionMapBucketQuotas,
|
||||
};
|
||||
|
||||
// action map used for bucket policies
|
||||
|
@ -151,6 +159,15 @@ const actionMonitoringMapS3 = {
|
|||
objectPutTagging: 'PutObjectTagging',
|
||||
objectRestore: 'RestoreObject',
|
||||
serviceGet: 'ListBuckets',
|
||||
bucketGetQuota: 'GetBucketQuota',
|
||||
bucketUpdateQuota: 'UpdateBucketQuota',
|
||||
bucketDeleteQuota: 'DeleteBucketQuota',
|
||||
};
|
||||
|
||||
const actionMapAccountQuotas = {
|
||||
UpdateAccountQuota : 'scality:UpdateAccountQuota',
|
||||
DeleteAccountQuota : 'scality:DeleteAccountQuota',
|
||||
GetAccountQuota : 'scality:GetAccountQuota',
|
||||
};
|
||||
|
||||
const actionMapIAM = {
|
||||
|
@ -194,6 +211,7 @@ const actionMapIAM = {
|
|||
tagUser: 'iam:TagUser',
|
||||
unTagUser: 'iam:UntagUser',
|
||||
listUserTags: 'iam:ListUserTags',
|
||||
...actionMapAccountQuotas,
|
||||
};
|
||||
|
||||
const actionMapSSO = {
|
||||
|
@ -209,6 +227,14 @@ const actionMapMetadata = {
|
|||
default: 'metadata:bucketd',
|
||||
};
|
||||
|
||||
const actionMapScuba = {
|
||||
GetMetrics: 'scuba:GetMetrics',
|
||||
AdminStartIngest: 'scuba:AdminStartIngest',
|
||||
AdminStopIngest: 'scuba:AdminStopIngest',
|
||||
AdminReadRaftCseq: 'scuba:AdminReadRaftCseq',
|
||||
AdminTriggerRepair: 'scuba:AdminTriggerRepair',
|
||||
};
|
||||
|
||||
export {
|
||||
actionMapRQ,
|
||||
actionMapBP,
|
||||
|
@ -218,4 +244,5 @@ export {
|
|||
actionMapSSO,
|
||||
actionMapSTS,
|
||||
actionMapMetadata,
|
||||
actionMapScuba,
|
||||
};
|
||||
|
|
|
@@ -1,5 +1,5 @@
import { handleWildcardInResource } from './wildcards';

import { policyArnAllowedEmptyAccountId } from '../../constants';
/**
 * Checks whether an ARN from a request matches an ARN in a policy
 * to compare against each portion of the ARN from the request

@@ -38,9 +38,10 @@ export default function checkArnMatch(
        const requestSegment = caseSensitive ? requestArnArr[j] :
            requestArnArr[j].toLowerCase();
        const policyArnArr = policyArn.split(':');
-       // We want to allow an empty account ID for utapi service ARNs to not
+       // We want to allow an empty account ID for utapi and scuba service ARNs to not
        // break compatibility.
-       if (j === 4 && policyArnArr[2] === 'utapi' && policyArnArr[4] === '') {
+       if (j === 4 && policyArnAllowedEmptyAccountId.includes(policyArnArr[2])
+           && policyArnArr[4] === '') {
            continue;
        } else if (!segmentRegEx.test(requestSegment)) {
            return false;
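For illustration, with hypothetical ARNs (the resource part is made up):

// policy ARN:  arn:scality:scuba:::admin/GetMetrics            (account ID left empty)
// request ARN: arn:scality:scuba::123456789012:admin/GetMetrics
// split(':') puts the service at index 2 and the account ID at index 4, so the empty
// account ID in the policy ARN is accepted because 'scuba' is listed in
// policyArnAllowedEmptyAccountId; every other segment is matched as usual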
|
||||
|
|
|
@ -2,6 +2,9 @@ import assert from 'assert';
|
|||
import * as crypto from 'crypto';
|
||||
import * as stream from 'stream';
|
||||
import azure from '@azure/storage-blob';
|
||||
|
||||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import ResultsCollector from './ResultsCollector';
|
||||
import SubStreamInterface from './SubStreamInterface';
|
||||
import * as objectUtils from '../objectUtils';
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
import assert from 'assert';
|
||||
|
||||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import errors from '../errors';
|
||||
import routeGET from './routes/routeGET';
|
||||
import routePUT from './routes/routePUT';
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as routesUtils from '../routesUtils';
|
||||
import errors from '../../errors';
|
||||
import StatsClient from '../../metrics/StatsClient';
|
||||
|
@ -41,6 +43,8 @@ export default function routeDELETE(
|
|||
return call('bucketDeleteEncryption');
|
||||
} else if (query?.tagging !== undefined) {
|
||||
return call('bucketDeleteTagging');
|
||||
} else if (query?.quota !== undefined) {
|
||||
return call('bucketDeleteQuota');
|
||||
}
|
||||
call('bucketDelete');
|
||||
} else {
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as routesUtils from '../routesUtils';
|
||||
import errors from '../../errors';
|
||||
import * as http from 'http';
|
||||
|
@ -58,6 +60,8 @@ export default function routerGET(
|
|||
call('bucketGetEncryption');
|
||||
} else if (query.search !== undefined) {
|
||||
call('metadataSearch')
|
||||
} else if (query.quota !== undefined) {
|
||||
call('bucketGetQuota');
|
||||
} else {
|
||||
// GET bucket
|
||||
call('bucketGet');
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as routesUtils from '../routesUtils';
|
||||
import errors from '../../errors';
|
||||
import StatsClient from '../../metrics/StatsClient';
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as routesUtils from '../routesUtils';
|
||||
import errors from '../../errors';
|
||||
import * as http from 'http';
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as routesUtils from '../routesUtils';
|
||||
import errors from '../../errors';
|
||||
import * as http from 'http';
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as routesUtils from '../routesUtils';
|
||||
import errors from '../../errors';
|
||||
import * as http from 'http';
|
||||
|
@ -103,6 +105,13 @@ export default function routePUT(
|
|||
return routesUtils.responseNoBody(err, corsHeaders,
|
||||
response, 200, log);
|
||||
});
|
||||
} else if (query.quota !== undefined) {
|
||||
api.callApiMethod('bucketUpdateQuota', request, response,
|
||||
log, (err, resHeaders) => {
|
||||
routesUtils.statsReport500(err, statsClient);
|
||||
return routesUtils.responseNoBody(err, resHeaders, response,
|
||||
200, log);
|
||||
});
|
||||
} else {
|
||||
// PUT bucket
|
||||
return api.callApiMethod('bucketPut', request, response, log,
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as routesUtils from '../routesUtils';
|
||||
import errors from '../../errors';
|
||||
import * as http from 'http';
|
||||
|
|
|
@ -1,10 +1,13 @@
|
|||
import * as url from 'url';
|
||||
import * as http from 'http';
|
||||
import { eachSeries } from 'async';
|
||||
|
||||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import * as ipCheck from '../ipCheck';
|
||||
import errors, { ArsenalError } from '../errors';
|
||||
import * as constants from '../constants';
|
||||
import { eachSeries } from 'async';
|
||||
import DataWrapper from '../storage/data/DataWrapper';
|
||||
import * as http from 'http';
|
||||
import StatsClient from '../metrics/StatsClient';
|
||||
import { objectKeyByteLimit } from '../constants';
|
||||
const jsutil = require('../jsutil');
|
||||
|
|
|
@ -2,6 +2,8 @@ const async = require('async');
|
|||
const PassThrough = require('stream').PassThrough;
|
||||
const assert = require('assert');
|
||||
|
||||
const { Logger } = require('werelogs');
|
||||
|
||||
const errors = require('../../errors').default;
|
||||
const MD5Sum = require('../../s3middleware/MD5Sum').default;
|
||||
const NullStream = require('../../s3middleware/nullStream').default;
|
||||
|
@ -27,6 +29,7 @@ class DataWrapper {
|
|||
this.metadata = metadata;
|
||||
this.locStorageCheckFn = locStorageCheckFn;
|
||||
this.vault = vault;
|
||||
this.logger = new Logger('DataWrapper');
|
||||
}
|
||||
|
||||
put(cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) {
|
||||
|
@ -127,7 +130,7 @@ class DataWrapper {
|
|||
}
|
||||
|
||||
delete(objectGetInfo, log, cb) {
|
||||
const callback = cb || log.end;
|
||||
const callback = cb || (() => {});
|
||||
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
|
||||
const isRequiredStringKey =
|
||||
constants.clientsRequireStringKey[this.implName];
|
||||
|
@ -176,7 +179,9 @@ class DataWrapper {
|
|||
newObjDataStoreName)) {
|
||||
return process.nextTick(cb);
|
||||
}
|
||||
log.trace('initiating batch delete', {
|
||||
const delLog = this.logger.newRequestLoggerFromSerializedUids(
|
||||
log.getSerializedUids());
|
||||
delLog.trace('initiating batch delete', {
|
||||
keys: locations,
|
||||
implName: this.implName,
|
||||
method: 'batchDelete',
|
||||
|
@ -202,21 +207,21 @@ class DataWrapper {
|
|||
return false;
|
||||
});
|
||||
if (shouldBatchDelete && keys.length > 1) {
|
||||
return this.client.batchDelete(backendName, { keys }, log, cb);
|
||||
return this.client.batchDelete(backendName, { keys }, delLog, cb);
|
||||
}
|
||||
return async.eachLimit(locations, 5, (loc, next) => {
|
||||
process.nextTick(() => this.delete(loc, log, next));
|
||||
process.nextTick(() => this.delete(loc, delLog, next));
|
||||
},
|
||||
err => {
|
||||
if (err) {
|
||||
log.end().error('batch delete failed', { error: err });
|
||||
delLog.end().error('batch delete failed', { error: err });
|
||||
// deletion of non-existing objects result in 204
|
||||
if (err.code === 404) {
|
||||
return cb();
|
||||
}
|
||||
return cb(err);
|
||||
}
|
||||
log.end().trace('batch delete successfully completed');
|
||||
delLog.end().trace('batch delete successfully completed');
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
const { http, https } = require('httpagent');
|
||||
const url = require('url');
|
||||
const AWS = require('aws-sdk');
|
||||
const Sproxy = require('sproxydclient');
|
||||
const Hyperdrive = require('hdclient');
|
||||
const HttpsProxyAgent = require('https-proxy-agent');
|
||||
|
||||
require("aws-sdk/lib/maintenance_mode_message").suppress = true;
|
||||
|
||||
const constants = require('../../constants');
|
||||
const DataFileBackend = require('./file/DataFileInterface');
|
||||
const inMemory = require('./in_memory/datastore').backend;
|
||||
|
@ -25,8 +25,13 @@ function parseLC(config, vault) {
|
|||
if (locationObj.type === 'file') {
|
||||
clients[location] = new DataFileBackend(config);
|
||||
}
|
||||
if (locationObj.type === 'vitastor') {
|
||||
const VitastorBackend = require('./vitastor/VitastorBackend');
|
||||
clients[location] = new VitastorBackend(location, locationObj.details);
|
||||
}
|
||||
if (locationObj.type === 'scality') {
|
||||
if (locationObj.details.connector.sproxyd) {
|
||||
const Sproxy = require('sproxydclient');
|
||||
clients[location] = new Sproxy({
|
||||
bootstrap: locationObj.details.connector
|
||||
.sproxyd.bootstrap,
|
||||
|
@ -41,6 +46,7 @@ function parseLC(config, vault) {
|
|||
});
|
||||
clients[location].clientType = 'scality';
|
||||
} else if (locationObj.details.connector.hdclient) {
|
||||
const Hyperdrive = require('hdclient');
|
||||
clients[location] = new Hyperdrive.hdcontroller.HDProxydClient(
|
||||
locationObj.details.connector.hdclient);
|
||||
clients[location].clientType = 'scality';
|
||||
|
|
|
@ -5,6 +5,7 @@ const { parseTagFromQuery } = require('../../s3middleware/tagging');
|
|||
const { externalBackendHealthCheckInterval } = require('../../constants');
|
||||
const DataFileBackend = require('./file/DataFileInterface');
|
||||
const { createLogger, checkExternalBackend } = require('./external/utils');
|
||||
const jsutil = require('../../jsutil');
|
||||
|
||||
class MultipleBackendGateway {
|
||||
constructor(clients, metadata, locStorageCheckFn) {
|
||||
|
@ -199,11 +200,12 @@ class MultipleBackendGateway {
|
|||
uploadPart(request, streamingV4Params, stream, size, location, key,
|
||||
uploadId, partNumber, bucketName, log, cb) {
|
||||
const client = this.clients[location];
|
||||
const cbOnce = jsutil.once(cb);
|
||||
|
||||
if (client.uploadPart) {
|
||||
return this.locStorageCheckFn(location, size, log, err => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
return cbOnce(err);
|
||||
}
|
||||
return client.uploadPart(request, streamingV4Params, stream,
|
||||
size, key, uploadId, partNumber, bucketName, log,
|
||||
|
@ -217,14 +219,14 @@ class MultipleBackendGateway {
|
|||
'metric following object PUT failure',
|
||||
{ error: error.message });
|
||||
}
|
||||
return cb(err);
|
||||
return cbOnce(err);
|
||||
});
|
||||
}
|
||||
return cb(null, partInfo);
|
||||
return cbOnce(null, partInfo);
|
||||
});
|
||||
});
|
||||
}
|
||||
return cb();
|
||||
return cbOnce();
|
||||
}
|
||||
|
||||
listParts(key, uploadId, location, bucketName, partNumberMarker, maxParts,
|
||||
|
|
|
@ -8,6 +8,7 @@ const getMetaHeaders =
|
|||
const { prepareStream } = require('../../../s3middleware/prepareStream');
|
||||
const { createLogger, logHelper, removeQuotes, trimXMetaPrefix } =
|
||||
require('./utils');
|
||||
const jsutil = require('../../../jsutil');
|
||||
|
||||
const missingVerIdInternalError = errors.InternalError.customizeDescription(
|
||||
'Invalid state. Please ensure versioning is enabled ' +
|
||||
|
@ -317,9 +318,11 @@ class AwsClient {
|
|||
uploadPart(request, streamingV4Params, stream, size, key, uploadId,
|
||||
partNumber, bucketName, log, callback) {
|
||||
let hashedStream = stream;
|
||||
const cbOnce = jsutil.once(callback);
|
||||
|
||||
if (request) {
|
||||
const partStream = prepareStream(request, streamingV4Params,
|
||||
this._vault, log, callback);
|
||||
this._vault, log, cbOnce);
|
||||
hashedStream = new MD5Sum();
|
||||
partStream.pipe(hashedStream);
|
||||
}
|
||||
|
@ -333,7 +336,7 @@ class AwsClient {
|
|||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend ' +
|
||||
'on uploadPart', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
return cbOnce(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
|
@ -347,7 +350,7 @@ class AwsClient {
|
|||
dataStoreName: this._dataStoreName,
|
||||
dataStoreETag: noQuotesETag,
|
||||
};
|
||||
return callback(null, dataRetrievalInfo);
|
||||
return cbOnce(null, dataRetrievalInfo);
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,696 @@
|
|||
// Zenko CloudServer Vitastor data storage backend adapter
|
||||
// Copyright (c) Vitaliy Filippov, 2019+
|
||||
// License: VNPL-1.1 (see README.md for details)
|
||||
|
||||
const stream = require('stream');
|
||||
|
||||
const vitastor = require('vitastor');
|
||||
|
||||
const VOLUME_MAGIC = 'VstS3Vol';
|
||||
const OBJECT_MAGIC = 'VstS3Obj';
|
||||
const FLAG_DELETED = 2n;
|
||||
|
||||
type Volume = {
|
||||
id: number,
|
||||
partial_sectors: {
|
||||
[key: string]: {
|
||||
buffer: Buffer,
|
||||
refs: number,
|
||||
},
|
||||
},
|
||||
header: {
|
||||
location: string,
|
||||
bucket: string,
|
||||
max_size: number,
|
||||
create_ts: number,
|
||||
used_ts: number,
|
||||
size: number,
|
||||
objects: number,
|
||||
removed_objects: number,
|
||||
object_bytes: number,
|
||||
removed_bytes: number,
|
||||
},
|
||||
};
|
||||
|
||||
type ObjectHeader = {
|
||||
size: number,
|
||||
key: string,
|
||||
part_num?: number,
|
||||
};
|
||||
|
||||
class VitastorBackend
|
||||
{
|
||||
locationName: string;
|
||||
config: {
|
||||
pool_id: number,
|
||||
metadata_image: string,
|
||||
metadata_pool_id: number,
|
||||
metadata_inode_num: number,
|
||||
size_buckets: number[],
|
||||
size_bucket_mul: number,
|
||||
id_batch_size: number,
|
||||
sector_size: number,
|
||||
write_chunk_size: number,
|
||||
read_chunk_size: number,
|
||||
pack_objects: boolean,
|
||||
// and also other parameters for vitastor itself
|
||||
};
|
||||
next_id: number;
|
||||
alloc_id: number;
|
||||
opened: boolean;
|
||||
on_open: ((...args: any[]) => void)[] | null;
|
||||
open_error: Error | null;
|
||||
cli: any;
|
||||
kv: any;
|
||||
volumes: {
|
||||
[bucket: string]: {
|
||||
[max_size: string]: Volume,
|
||||
},
|
||||
};
|
||||
volumes_by_id: {
|
||||
[id: string]: Volume,
|
||||
};
|
||||
volume_delete_stats: {
|
||||
[id: string]: {
|
||||
count: number,
|
||||
bytes: number,
|
||||
},
|
||||
};
|
||||
|
||||
constructor(locationName, config)
|
||||
{
|
||||
this.locationName = locationName;
|
||||
this.config = config;
|
||||
// validate config
|
||||
this.config.pool_id = Number(this.config.pool_id) || 0;
|
||||
if (!this.config.pool_id)
|
||||
throw new Error('pool_id is required for Vitastor');
|
||||
if (!this.config.metadata_image && (!this.config.metadata_pool_id || !this.config.metadata_inode_num))
|
||||
throw new Error('metadata_image or metadata_inode is required for Vitastor');
|
||||
if (!this.config.size_buckets || !this.config.size_buckets.length)
|
||||
this.config.size_buckets = [ 32*1024, 128*1024, 512*1024, 2*1024*1024, 8*1024*1024 ];
|
||||
this.config.size_bucket_mul = Number(this.config.size_bucket_mul) || 2;
|
||||
this.config.id_batch_size = Number(this.config.id_batch_size) || 100;
|
||||
this.config.sector_size = Number(this.config.sector_size) || 0;
|
||||
if (this.config.sector_size < 4096)
|
||||
this.config.sector_size = 4096;
|
||||
this.config.write_chunk_size = Number(this.config.write_chunk_size) || 0;
|
||||
if (this.config.write_chunk_size < this.config.sector_size)
|
||||
this.config.write_chunk_size = 4*1024*1024; // 4 MB
|
||||
this.config.read_chunk_size = Number(this.config.read_chunk_size) || 0;
|
||||
if (this.config.read_chunk_size < this.config.sector_size)
|
||||
this.config.read_chunk_size = 4*1024*1024; // 4 MB
|
||||
this.config.pack_objects = !!this.config.pack_objects;
|
||||
// state
|
||||
this.next_id = 1;
|
||||
this.alloc_id = 0;
|
||||
this.opened = false;
|
||||
this.on_open = null;
|
||||
this.open_error = null;
|
||||
this.cli = new vitastor.Client(config);
|
||||
this.kv = new vitastor.KV(this.cli);
|
||||
// we group objects into volumes by bucket and size
|
||||
this.volumes = {};
|
||||
this.volumes_by_id = {};
|
||||
this.volume_delete_stats = {};
|
||||
}
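A sketch of how the defaults above drive volume selection in _getVolume(), assuming the last two default size buckets are the 2 MiB and 8 MiB steps:

// size_buckets act as strict upper bounds; objects are grouped per (bucket name, size bucket):
//   10 KiB object  -> volume for the 32 KiB bucket
//   32 KiB object  -> volume for the 128 KiB bucket (equal sizes move to the next bucket)
//   600 KiB object -> volume for the 2 MiB bucket
//   20 MiB object  -> beyond the table; the largest bucket is meant to be multiplied by
//                     size_bucket_mul (2) until it exceeds the object size
// each (bucket, size bucket) pair lazily gets its own volume id via _makeVolumeId()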
|
||||
|
||||
async _makeVolumeId()
|
||||
{
|
||||
if (this.next_id <= this.alloc_id)
|
||||
{
|
||||
return this.next_id++;
|
||||
}
|
||||
const id_key = 'id'+this.config.pool_id;
|
||||
const [ err, prev ] = await new Promise<[ any, string ]>(ok => this.kv.get(id_key, (err, value) => ok([ err, value ])));
|
||||
if (err && err != vitastor.ENOENT)
|
||||
{
|
||||
throw new Error(err);
|
||||
}
|
||||
const new_id = (parseInt(prev) || 0) + 1;
|
||||
this.next_id = new_id;
|
||||
this.alloc_id = this.next_id + this.config.id_batch_size - 1;
|
||||
await new Promise((ok, no) => this.kv.set(id_key, this.alloc_id, err => (err ? no(new Error(err)) : ok(null)), cas_old => cas_old === prev));
|
||||
return this.next_id;
|
||||
}
|
||||
|
||||
async _getVolume(bucketName, size)
|
||||
{
|
||||
if (!this.opened)
|
||||
{
|
||||
if (this.on_open)
|
||||
{
|
||||
await new Promise(ok => this.on_open!.push(ok));
|
||||
}
|
||||
else
|
||||
{
|
||||
this.on_open = [];
|
||||
if (this.config.metadata_image)
|
||||
{
|
||||
const img = new vitastor.Image(this.cli, this.config.metadata_image);
|
||||
const info = await new Promise<{ pool_id: number, inode_num: number }>(ok => img.get_info(ok));
|
||||
this.config.metadata_pool_id = info.pool_id;
|
||||
this.config.metadata_inode_num = info.inode_num;
|
||||
}
|
||||
const kv_config = {};
|
||||
for (const key in this.config)
|
||||
{
|
||||
if (key.substr(0, 3) === 'kv_')
|
||||
kv_config[key] = this.config[key];
|
||||
}
|
||||
this.open_error = await new Promise(ok => this.kv.open(
|
||||
this.config.metadata_pool_id, this.config.metadata_inode_num,
|
||||
kv_config, err => ok(err ? new Error(err) : null)
|
||||
));
|
||||
this.opened = true;
|
||||
this.on_open.map(cb => setImmediate(cb));
|
||||
this.on_open = null;
|
||||
}
|
||||
}
|
||||
if (this.open_error)
|
||||
{
|
||||
throw this.open_error;
|
||||
}
|
||||
let i;
|
||||
for (i = 0; i < this.config.size_buckets.length && size >= this.config.size_buckets[i]; i++) {}
|
||||
let s;
|
||||
if (i < this.config.size_buckets.length)
|
||||
s = this.config.size_buckets[i];
|
||||
else if (this.config.size_bucket_mul > 1)
|
||||
{
|
||||
while (size >= s)
|
||||
s = Math.floor(this.config.size_bucket_mul * s);
|
||||
}
|
||||
if (!this.volumes[bucketName])
|
||||
{
|
||||
this.volumes[bucketName] = {};
|
||||
}
|
||||
if (this.volumes[bucketName][s])
|
||||
{
|
||||
return this.volumes[bucketName][s];
|
||||
}
|
||||
const new_id = await this._makeVolumeId();
|
||||
const new_vol = this.volumes[bucketName][s] = {
|
||||
id: new_id,
|
||||
// FIXME: partial_sectors should be written with CAS because otherwise we may lose quick deletes
|
||||
partial_sectors: {},
|
||||
header: {
|
||||
location: this.locationName,
|
||||
bucket: bucketName,
|
||||
max_size: s,
|
||||
create_ts: Date.now(),
|
||||
used_ts: Date.now(),
|
||||
size: this.config.sector_size, // initial position is right after header
|
||||
objects: 0,
|
||||
removed_objects: 0,
|
||||
object_bytes: 0,
|
||||
removed_bytes: 0,
|
||||
},
|
||||
};
|
||||
this.volumes_by_id[new_id] = new_vol;
|
||||
const header_text = JSON.stringify(this.volumes[bucketName][s].header);
|
||||
const buf = Buffer.alloc(this.config.sector_size);
|
||||
buf.write(VOLUME_MAGIC + header_text, 0);
|
||||
await new Promise((ok, no) => this.cli.write(
|
||||
this.config.pool_id, new_id, 0, buf, err => (err ? no(new Error(err)) : ok(null))
|
||||
));
|
||||
await new Promise((ok, no) => this.kv.set(
|
||||
'vol_'+this.config.pool_id+'_'+new_id, header_text, err => (err ? no(new Error(err)) : ok(null)), cas_old => !cas_old
|
||||
));
|
||||
return new_vol;
|
||||
}
|
||||
|
||||
toObjectGetInfo(objectKey, bucketName, storageLocation)
|
||||
{
|
||||
return null;
|
||||
}
|
||||
|
||||
_bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs)
|
||||
{
|
||||
if ((cur_pos % this.config.sector_size) ||
|
||||
Math.floor((cur_pos + cur_size) / this.config.sector_size) == Math.floor(cur_pos / this.config.sector_size))
|
||||
{
|
||||
const sect_pos = Math.floor(cur_pos / this.config.sector_size) * this.config.sector_size;
|
||||
const sect = vol.partial_sectors[sect_pos]
|
||||
? vol.partial_sectors[sect_pos].buffer
|
||||
: Buffer.alloc(this.config.sector_size);
|
||||
if (this.config.pack_objects)
|
||||
{
|
||||
// Save only if <pack_objects>
|
||||
if (!vol.partial_sectors[sect_pos])
|
||||
vol.partial_sectors[sect_pos] = { buffer: sect, refs: 0 };
|
||||
vol.partial_sectors[sect_pos].refs++;
|
||||
sector_refs.push(sect_pos);
|
||||
}
|
||||
let off = cur_pos % this.config.sector_size;
|
||||
let i = 0;
|
||||
for (; i < cur_chunks.length; i++)
|
||||
{
|
||||
let copy_len = this.config.sector_size - off;
|
||||
copy_len = copy_len > cur_chunks[i].length ? cur_chunks[i].length : copy_len;
|
||||
cur_chunks[i].copy(sect, off, 0, copy_len);
|
||||
off += copy_len;
|
||||
if (copy_len < cur_chunks[i].length)
|
||||
{
|
||||
cur_chunks[i] = cur_chunks[i].slice(copy_len);
|
||||
cur_size -= copy_len;
|
||||
break;
|
||||
}
|
||||
else
|
||||
cur_size -= cur_chunks[i].length;
|
||||
}
|
||||
cur_chunks.splice(0, i, sect);
|
||||
cur_size += this.config.sector_size;
|
||||
cur_pos = sect_pos;
|
||||
}
|
||||
return [ cur_pos, cur_size ];
|
||||
}
|
||||
|
||||
_bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, write_all)
|
||||
{
|
||||
const write_pos = cur_pos;
|
||||
const write_chunks = cur_chunks;
|
||||
let write_size = cur_size;
|
||||
cur_chunks = [];
|
||||
cur_pos += cur_size;
|
||||
cur_size = 0;
|
||||
let remain = (cur_pos % this.config.sector_size);
|
||||
if (remain > 0)
|
||||
{
|
||||
cur_pos -= remain;
|
||||
let last_sect = null;
|
||||
if (write_all)
|
||||
{
|
||||
last_sect = vol.partial_sectors[cur_pos]
|
||||
? vol.partial_sectors[cur_pos].buffer
|
||||
: Buffer.alloc(this.config.sector_size);
|
||||
if (this.config.pack_objects)
|
||||
{
|
||||
// Save only if <pack_objects>
|
||||
if (!vol.partial_sectors[cur_pos])
|
||||
vol.partial_sectors[cur_pos] = { buffer: last_sect, refs: 0 };
|
||||
vol.partial_sectors[cur_pos].refs++;
|
||||
sector_refs.push(cur_pos);
|
||||
}
|
||||
}
|
||||
write_size -= remain;
|
||||
if (write_size < 0)
|
||||
write_size = 0;
|
||||
for (let i = write_chunks.length-1; i >= 0 && remain > 0; i--)
|
||||
{
|
||||
if (write_chunks[i].length <= remain)
|
||||
{
|
||||
remain -= write_chunks[i].length;
|
||||
if (write_all)
|
||||
write_chunks[i].copy(last_sect, remain);
|
||||
else
|
||||
cur_chunks.unshift(write_chunks[i]);
|
||||
write_chunks.pop();
|
||||
}
|
||||
else
|
||||
{
|
||||
if (write_all)
|
||||
write_chunks[i].copy(last_sect, 0, write_chunks[i].length - remain);
|
||||
else
|
||||
cur_chunks.unshift(write_chunks[i].slice(write_chunks[i].length - remain));
|
||||
write_chunks[i] = write_chunks[i].slice(0, write_chunks[i].length - remain);
|
||||
remain = 0;
|
||||
i++;
|
||||
}
|
||||
}
|
||||
if (write_all)
|
||||
{
|
||||
write_chunks.push(last_sect);
|
||||
write_size += this.config.sector_size;
|
||||
}
|
||||
}
|
||||
for (const chunk of cur_chunks)
|
||||
{
|
||||
cur_size += chunk.length;
|
||||
}
|
||||
return [ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ];
|
||||
}
|
||||
|
||||
/**
|
||||
* reqUids: string, // request-ids for log, usually joined by ':'
|
||||
* keyContext: {
|
||||
* // a lot of shit, basically all metadata
|
||||
* bucketName,
|
||||
* objectKey,
|
||||
* owner?,
|
||||
* namespace?,
|
||||
* partNumber?,
|
||||
* uploadId?,
|
||||
* metaHeaders?,
|
||||
* isDeleteMarker?,
|
||||
* tagging?,
|
||||
* contentType?,
|
||||
* cacheControl?,
|
||||
* contentDisposition?,
|
||||
* contentEncoding?,
|
||||
* },
|
||||
* callback: (error, objectGetInfo: any) => void,
|
||||
*/
|
||||
put(stream, size, keyContext, reqUids, callback)
|
||||
{
|
||||
callback = once(callback);
|
||||
this._getVolume(keyContext.bucketName, size)
|
||||
.then(vol => this._put(vol, stream, size, keyContext, reqUids, callback))
|
||||
.catch(callback);
|
||||
}
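A hedged usage sketch of the put() entry point, given a constructed VitastorBackend instance `backend`; the key context fields follow the comment block above and the bucket/key names are made up:

const body = stream.Readable.from([ Buffer.from('hello world') ]);
backend.put(body, 11, {
    bucketName: 'my-bucket',
    objectKey: 'docs/readme.txt',
}, 'serialized-req-uids', (err, objectGetInfo) => {
    // objectGetInfo is { volume, offset, hdrlen, size }: this is what later get() and
    // delete() calls receive as objectGetInfo.key
    if (err) {
        throw err;
    }
    console.log('stored at', objectGetInfo);
});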
|
||||
|
||||
_put(vol, stream, size, keyContext, reqUids, callback)
|
||||
{
|
||||
const object_header: ObjectHeader = {
|
||||
size,
|
||||
key: keyContext.objectKey,
|
||||
};
|
||||
if (keyContext.partNumber)
|
||||
{
|
||||
object_header.part_num = keyContext.partNumber;
|
||||
}
|
||||
// header is: <8 bytes magic> <8 bytes flags> <8 bytes json length> <json>
|
||||
const hdr_begin_buf = Buffer.alloc(24);
|
||||
const hdr_json_buf = Buffer.from(JSON.stringify(object_header), 'utf-8');
|
||||
hdr_begin_buf.write(OBJECT_MAGIC);
|
||||
hdr_begin_buf.writeBigInt64LE(BigInt(hdr_json_buf.length), 16);
|
||||
const object_header_buf = Buffer.concat([ hdr_begin_buf, hdr_json_buf ]);
|
||||
const object_pos = vol.header.size;
|
||||
const object_get_info = { volume: vol.id, offset: object_pos, hdrlen: object_header_buf.length, size };
|
||||
let cur_pos = object_pos;
|
||||
let cur_chunks = [ object_header_buf ];
|
||||
let cur_size = object_header_buf.length;
|
||||
let err: Error|null = null;
|
||||
let waiting = 1; // 1 for end or error, 1 for each write request
|
||||
vol.header.size += object_header_buf.length + size;
|
||||
if (!this.config.pack_objects && (vol.header.size % this.config.sector_size))
|
||||
{
|
||||
vol.header.size += this.config.sector_size - (vol.header.size % this.config.sector_size);
|
||||
}
|
||||
const writeChunk = (last) =>
|
||||
{
|
||||
const sector_refs = [];
|
||||
// Handle partial beginning
|
||||
[ cur_pos, cur_size ] = this._bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs);
|
||||
// Handle partial end
|
||||
let write_pos, write_chunks, write_size;
|
||||
[ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ] = this._bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, last);
|
||||
waiting++;
|
||||
// FIXME: pool_id: maybe it should be stored in volume metadata to allow to migrate volumes?
|
||||
this.cli.write(this.config.pool_id, vol.id, write_pos, write_chunks, (res) =>
|
||||
{
|
||||
for (const sect of sector_refs)
|
||||
{
|
||||
vol.partial_sectors[sect].refs--;
|
||||
if (!vol.partial_sectors[sect].refs &&
|
||||
vol.header.size >= sect+this.config.sector_size)
|
||||
{
|
||||
// Forget partial data when it's not needed anymore
|
||||
delete(vol.partial_sectors[sect]);
|
||||
}
|
||||
}
|
||||
waiting--;
|
||||
if (res)
|
||||
{
|
||||
err = new Error(res);
|
||||
waiting--;
|
||||
}
|
||||
if (!waiting)
|
||||
{
|
||||
callback(err, err ? null : object_get_info);
|
||||
}
|
||||
});
|
||||
};
|
||||
// Stream data
|
||||
stream.on('error', (e) =>
|
||||
{
|
||||
err = e;
|
||||
waiting--;
|
||||
if (!waiting)
|
||||
{
|
||||
callback(err, null);
|
||||
}
|
||||
});
|
||||
stream.on('end', () =>
|
||||
{
|
||||
if (err)
|
||||
{
|
||||
return;
|
||||
}
|
||||
waiting--;
|
||||
if (cur_size)
|
||||
{
|
||||
// write last chunk
|
||||
writeChunk(true);
|
||||
}
|
||||
if (!waiting)
|
||||
{
|
||||
callback(null, object_get_info);
|
||||
}
|
||||
});
|
||||
stream.on('data', (chunk) =>
|
||||
{
|
||||
if (err)
|
||||
{
|
||||
return;
|
||||
}
|
||||
cur_chunks.push(chunk);
|
||||
cur_size += chunk.length;
|
||||
if (cur_size >= this.config.write_chunk_size)
|
||||
{
|
||||
// got a complete chunk, write it out
|
||||
writeChunk(false);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* objectGetInfo: {
|
||||
* key: { volume, offset, hdrlen, size }, // from put
|
||||
* size,
|
||||
* start,
|
||||
* dataStoreName,
|
||||
* dataStoreETag,
|
||||
* range,
|
||||
* response: ServerResponse,
|
||||
* },
|
||||
* range?: [ start, end ], // like in HTTP - first byte index, last byte index
|
||||
* callback: (error, readStream) => void,
|
||||
*/
|
||||
get(objectGetInfo, range, reqUids, callback)
|
||||
{
|
||||
if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
|
||||
!(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
|
||||
!objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
|
||||
{
|
||||
throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
|
||||
}
|
||||
const [ start, end ] = range || [];
|
||||
if (start < 0 || end < 0 || end != null && start != null && end < start || start >= objectGetInfo.key.size)
|
||||
{
|
||||
throw new Error('Invalid range: '+start+'-'+end);
|
||||
}
|
||||
let offset = objectGetInfo.key.offset + objectGetInfo.key.hdrlen + (start || 0);
|
||||
let len = objectGetInfo.key.size - (start || 0);
|
||||
if (end)
|
||||
{
|
||||
const len2 = end - (start || 0) + 1;
|
||||
if (len2 < len)
|
||||
len = len2;
|
||||
}
|
||||
callback(null, new VitastorReadStream(this.cli, objectGetInfo.key.volume, offset, len, this.config));
|
||||
}
|
||||
|
||||
/**
|
||||
* objectGetInfo: {
|
||||
* key: { volume, offset, hdrlen, size }, // from put
|
||||
* size,
|
||||
* start,
|
||||
* dataStoreName,
|
||||
* dataStoreETag,
|
||||
* range,
|
||||
* response: ServerResponse,
|
||||
* },
|
||||
* callback: (error) => void,
|
||||
*/
|
||||
delete(objectGetInfo, reqUids, callback)
|
||||
{
|
||||
callback = once(callback);
|
||||
this._delete(objectGetInfo, reqUids)
|
||||
.then(callback)
|
||||
.catch(callback);
|
||||
}
|
||||
|
||||
async _delete(objectGetInfo, reqUids)
|
||||
{
|
||||
if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
|
||||
!(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
|
||||
!objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
|
||||
{
|
||||
throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
|
||||
}
|
||||
const in_sect_pos = (objectGetInfo.key.offset % this.config.sector_size);
|
||||
const sect_pos = objectGetInfo.key.offset - in_sect_pos;
|
||||
const vol = this.volumes_by_id[objectGetInfo.key.volume];
|
||||
if (vol && vol.partial_sectors[sect_pos])
|
||||
{
|
||||
// The sector may still be written to in corner cases
|
||||
const sect = vol.partial_sectors[sect_pos];
|
||||
const flags = sect.buffer.readBigInt64LE(in_sect_pos + 8);
|
||||
if (!(flags & FLAG_DELETED))
|
||||
{
|
||||
const del_stat = this.volume_delete_stats[vol.id] = (this.volume_delete_stats[vol.id] || { count: 0, bytes: 0 });
|
||||
del_stat.count++;
|
||||
del_stat.bytes += objectGetInfo.key.size;
|
||||
sect.buffer.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
|
||||
sect.refs++;
|
||||
const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, sect.buffer, ok));
|
||||
sect.refs--;
|
||||
if (err)
|
||||
{
|
||||
sect.buffer.writeBigInt64LE(0n, in_sect_pos + 8);
|
||||
throw new Error(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// RMW with CAS
|
||||
const [ err, buf, version ] = await new Promise<[ any, Buffer, bigint ]>(ok => this.cli.read(
|
||||
this.config.pool_id, objectGetInfo.key.volume, sect_pos, this.config.sector_size,
|
||||
(err, buf, version) => ok([ err, buf, version ])
|
||||
));
|
||||
if (err)
|
||||
{
|
||||
throw new Error(err);
|
||||
}
|
||||
// FIXME What if JSON crosses sector boundary? Prevent it if we want to pack objects
|
||||
const magic = buf.slice(in_sect_pos, in_sect_pos+8).toString();
|
||||
const flags = buf.readBigInt64LE(in_sect_pos+8);
|
||||
const json_len = Number(buf.readBigInt64LE(in_sect_pos+16));
|
||||
let json_hdr;
|
||||
if (in_sect_pos+24+json_len <= buf.length)
|
||||
{
|
||||
try
|
||||
{
|
||||
json_hdr = JSON.parse(buf.slice(in_sect_pos+24, in_sect_pos+24+json_len).toString());
|
||||
}
|
||||
catch (e)
|
||||
{
|
||||
}
|
||||
}
|
||||
if (magic !== OBJECT_MAGIC || !json_hdr || json_hdr.size !== objectGetInfo.key.size)
|
||||
{
|
||||
throw new Error(
|
||||
'header of object with size '+objectGetInfo.key.size+
|
||||
' bytes not found in volume '+objectGetInfo.key.volume+' at '+objectGetInfo.key.offset
|
||||
);
|
||||
}
|
||||
else if (!(flags & FLAG_DELETED))
|
||||
{
|
||||
buf.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
|
||||
const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, buf, { version: version+1n }, ok));
|
||||
if (err == vitastor.EINTR)
|
||||
{
|
||||
// Retry
|
||||
await this._delete(objectGetInfo, reqUids);
|
||||
}
|
||||
else if (err)
|
||||
{
|
||||
throw new Error(err);
|
||||
}
|
||||
else
|
||||
{
|
||||
// FIXME: Write deletion statistics to volumes
|
||||
// FIXME: Implement defragmentation
|
||||
const del_stat = this.volume_delete_stats[objectGetInfo.key.volume] = (this.volume_delete_stats[objectGetInfo.key.volume] || { count: 0, bytes: 0 });
|
||||
del_stat.count++;
|
||||
del_stat.bytes += objectGetInfo.key.size;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* config: full zenko server config,
|
||||
* callback: (error, stats) => void, // stats is the returned statistics in arbitrary format
|
||||
*/
|
||||
getDiskUsage(config, reqUids, callback)
|
||||
{
|
||||
// FIXME: Iterate over all volumes and return their sizes and deletion statistics, or maybe just sizes
|
||||
callback(null, {});
|
||||
}
|
||||
}
|
||||
|
||||
class VitastorReadStream extends stream.Readable
|
||||
{
|
||||
constructor(cli, volume_id, offset, len, config, options = undefined)
|
||||
{
|
||||
super(options);
|
||||
this.cli = cli;
|
||||
this.volume_id = volume_id;
|
||||
this.offset = offset;
|
||||
this.end = offset + len;
|
||||
this.pos = offset;
|
||||
this.config = config;
|
||||
this._reading = false;
|
||||
}
|
||||
|
||||
_read(n)
|
||||
{
|
||||
if (this._reading)
|
||||
{
|
||||
return;
|
||||
}
|
||||
// FIXME: Validate object header
|
||||
const chunk_size = n && this.config.read_chunk_size < n ? n : this.config.read_chunk_size;
|
||||
const read_offset = this.pos;
|
||||
const round_offset = read_offset - (read_offset % this.config.sector_size);
|
||||
let read_end = this.end <= read_offset+chunk_size ? this.end : read_offset+chunk_size;
|
||||
const round_end = (read_end % this.config.sector_size)
|
||||
? read_end + this.config.sector_size - (read_end % this.config.sector_size)
|
||||
: read_end;
|
||||
if (round_end <= this.end)
|
||||
read_end = round_end;
|
||||
this.pos = read_end;
|
||||
if (read_end <= read_offset)
|
||||
{
|
||||
// EOF
|
||||
this.push(null);
|
||||
return;
|
||||
}
|
||||
this._reading = true;
|
||||
this.cli.read(this.config.pool_id, this.volume_id, round_offset, round_end-round_offset, (err, buf, version) =>
|
||||
{
|
||||
this._reading = false;
|
||||
if (err)
|
||||
{
|
||||
this.destroy(new Error(err));
|
||||
return;
|
||||
}
|
||||
if (read_offset != round_offset || round_end != read_end)
|
||||
{
|
||||
buf = buf.subarray(read_offset-round_offset, buf.length-(round_end-read_end));
|
||||
}
|
||||
if (this.push(buf))
|
||||
{
|
||||
this._read(n);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function once(callback)
|
||||
{
|
||||
let called = false;
|
||||
return function()
|
||||
{
|
||||
if (!called)
|
||||
{
|
||||
called = true;
|
||||
callback.apply(null, arguments);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
module.exports = VitastorBackend;
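
For reference, a minimal sketch (not part of the diff) of how the on-disk object header written by _put() could be read back, following the layout noted above: 8 bytes magic, 8 bytes flags, 8 bytes JSON length, then the JSON body. OBJECT_MAGIC and FLAG_DELETED are defined elsewhere in this backend, so placeholder values are used here.

const OBJECT_MAGIC_EXAMPLE = 'VITASTOR'; // placeholder: the real constant lives in the backend
const FLAG_DELETED_EXAMPLE = 1n;         // placeholder: deletion flag bit

function parseObjectHeader(buf, pos = 0)
{
    const magic = buf.slice(pos, pos + 8).toString();
    if (magic !== OBJECT_MAGIC_EXAMPLE)
        throw new Error('no object header at offset '+pos);
    const flags = buf.readBigInt64LE(pos + 8);
    const json_len = Number(buf.readBigInt64LE(pos + 16));
    const header = JSON.parse(buf.slice(pos + 24, pos + 24 + json_len).toString());
    return { deleted: !!(flags & FLAG_DELETED_EXAMPLE), hdrlen: 24 + json_len, header };
}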
|
|
@ -226,6 +226,19 @@ class MetadataWrapper {
|
|||
});
|
||||
}
|
||||
|
||||
getBucketQuota(bucketName, log, cb) {
|
||||
log.debug('getting bucket quota from metadata');
|
||||
this.client.getBucketAttributes(bucketName, log, (err, data) => {
|
||||
if (err) {
|
||||
log.debug('error from metadata', { implName: this.implName,
|
||||
error: err });
|
||||
return cb(err);
|
||||
}
|
||||
const bucketInfo = BucketInfo.fromObj(data);
|
||||
return cb(err, { quota: bucketInfo.getQuota() });
|
||||
});
|
||||
}
|
||||
|
||||
deleteBucket(bucketName, log, cb) {
|
||||
log.debug('deleting bucket from metadata');
|
||||
this.client.deleteBucket(bucketName, log, err => {
|
||||
|
|
|
@ -899,35 +899,130 @@ class MongoClientInterface {
|
|||
return cb(errors.InternalError);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Put object when versioning is not enabled
|
||||
* @param {Object} c bucket collection
|
||||
* @param {String} bucketName bucket name
|
||||
* @param {String} objName object name
|
||||
* @param {Object} objVal object metadata
|
||||
* @param {Object} params params
|
||||
* @param {Object} log logger
|
||||
* @param {Function} cb callback
|
||||
* @return {undefined}
|
||||
* Puts an object into a MongoDB collection.
|
||||
* Depending on the parameters, the object is either directly put into the collection
|
||||
* or the existing object is marked as deleted and a new object is inserted.
|
||||
*
|
||||
* @param {Object} collection - The MongoDB collection to put the object into.
|
||||
* @param {string} bucketName - The name of the bucket the object belongs to.
|
||||
* @param {string} objName - The name of the object.
|
||||
* @param {Object} value - The value of the object.
|
||||
* @param {Object} params - Additional parameters.
|
||||
* @param {string} params.vFormat - object key format.
|
||||
 * @param {boolean} params.needOplogUpdate - If true, the object is put into the collection
 * and the operation log is updated.
|
||||
* @param {Object} log - The logger to use.
|
||||
* @param {Function} cb - The callback function to call when the operation is complete. It is called with an error
|
||||
* if there is an issue with the operation.
|
||||
* @returns {Promise} A promise that resolves when the operation is complete. The promise is rejected with an error
|
||||
* if there is an issue with the operation.
|
||||
*/
|
||||
putObjectNoVer(c, bucketName, objName, objVal, params, log, cb) {
|
||||
const masterKey = formatMasterKey(objName, params.vFormat);
|
||||
c.updateOne({
|
||||
_id: masterKey,
|
||||
}, {
|
||||
putObjectNoVer(collection, bucketName, objName, value, params, log, cb) {
|
||||
if (params?.needOplogUpdate) {
|
||||
return this.putObjectNoVerWithOplogUpdate(collection, bucketName, objName, value, params, log, cb);
|
||||
}
|
||||
const key = formatMasterKey(objName, params.vFormat);
|
||||
const putFilter = { _id: key };
|
||||
return collection.updateOne(putFilter, {
|
||||
$set: {
|
||||
_id: masterKey,
|
||||
value: objVal,
|
||||
_id: key,
|
||||
value,
|
||||
},
|
||||
}, {
|
||||
upsert: true,
|
||||
}).then(() => cb()).catch((err) => {
|
||||
}).then(() => cb()).catch(err => {
|
||||
log.error('putObjectNoVer: error putting object with no versioning', { error: err.message });
|
||||
return cb(errors.InternalError);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates an object in a MongoDB collection without changing its version.
|
||||
* If the object doesn't exist, it will be created (upsert is true for the second update operation).
|
||||
* The operation is logged in the oplog.
|
||||
*
|
||||
* @param {Object} collection - The MongoDB collection to update the object in.
|
||||
* @param {string} bucketName - The name of the bucket the object belongs to.
|
||||
* @param {string} objName - The name of the object.
|
||||
* @param {Object} value - The new value of the object.
|
||||
* @param {Object} params - Additional parameters.
|
||||
* @param {string} params.vFormat - object key format
|
||||
* @param {string} params.originOp - origin operation
|
||||
* @param {Object} log - The logger to use.
|
||||
* @param {Function} cb - The callback function to call when the operation is complete.
|
||||
* It is called with an error if there is an issue with the operation.
|
||||
* @returns {void}
|
||||
*/
|
||||
putObjectNoVerWithOplogUpdate(collection, bucketName, objName, value, params, log, cb) {
|
||||
const key = formatMasterKey(objName, params.vFormat);
|
||||
const putFilter = { _id: key };
|
||||
// filter used when finding and updating object
|
||||
const findFilter = {
|
||||
...putFilter,
|
||||
$or: [
|
||||
{ 'value.deleted': { $exists: false } },
|
||||
{ 'value.deleted': { $eq: false } },
|
||||
],
|
||||
};
|
||||
const updateDeleteFilter = {
|
||||
...putFilter,
|
||||
'value.deleted': true,
|
||||
};
|
||||
return async.waterfall([
|
||||
// Adding delete flag when getting the object
|
||||
// to avoid having race conditions.
|
||||
next => collection.findOneAndUpdate(findFilter, {
|
||||
$set: updateDeleteFilter,
|
||||
}, {
|
||||
upsert: false,
|
||||
}).then(doc => {
|
||||
if (!doc.value) {
|
||||
log.error('internalPutObject: unable to find target object to update',
|
||||
{ bucket: bucketName, object: key });
|
||||
return next(errors.NoSuchKey);
|
||||
}
|
||||
const obj = doc.value;
|
||||
const objMetadata = new ObjectMD(obj.value);
|
||||
objMetadata.setOriginOp(params.originOp);
|
||||
objMetadata.setDeleted(true);
|
||||
return next(null, objMetadata.getValue());
|
||||
}).catch(err => {
|
||||
log.error('internalPutObject: error getting object',
|
||||
{ bucket: bucketName, object: key, error: err.message });
|
||||
return next(errors.InternalError);
|
||||
}),
|
||||
// We update the full object to get the whole object metadata
|
||||
// in the oplog update event
|
||||
(objMetadata, next) => collection.bulkWrite([
|
||||
{
|
||||
updateOne: {
|
||||
filter: updateDeleteFilter,
|
||||
update: {
|
||||
$set: { _id: key, value: objMetadata },
|
||||
},
|
||||
upsert: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
updateOne: {
|
||||
filter: putFilter,
|
||||
update: {
|
||||
$set: { _id: key, value },
|
||||
},
|
||||
upsert: true,
|
||||
},
|
||||
},
|
||||
], { ordered: true }).then(() => next(null)).catch(next),
|
||||
], (err) => {
|
||||
if (err) {
|
||||
log.error('internalPutObject: error updating object',
|
||||
{ bucket: bucketName, object: key, error: err.message });
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
}
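// Illustrative call (not part of the diff): updating the metadata of an existing
// key through the oplog-updating path. The collection, objVal, log and callback
// are assumed to come from the surrounding MongoClientInterface code; the
// vFormat and originOp values are examples only.
//
//   this.putObjectNoVer(collection, 'my-bucket', 'my-key', objVal, {
//       vFormat: 'v1',
//       needOplogUpdate: true,
//       originOp: 's3:ObjectCreated:Put',
//   }, log, err => { /* handle error */ });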
|
||||
/**
|
||||
* Returns the putObjectVerCase function to use
|
||||
* depending on params
|
||||
|
@ -973,8 +1068,7 @@ class MongoClientInterface {
|
|||
return putObjectVer(c, bucketName, objName, objVal, _params, log,
|
||||
cb);
|
||||
}
|
||||
return this.putObjectNoVer(c, bucketName, objName, objVal,
|
||||
_params, log, cb);
|
||||
return this.putObjectNoVer(c, bucketName, objName, objVal, _params, log, cb);
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -2056,14 +2150,20 @@ class MongoClientInterface {
|
|||
return cb(err);
|
||||
}
|
||||
const { bucketCount, bucketInfos } = res;
|
||||
let bucketWithQuotaCount = 0;
|
||||
|
||||
const retBucketInfos = bucketInfos.map(bucket => ({
|
||||
const retBucketInfos = bucketInfos.map(bucket => {
|
||||
if (bucket.getQuota()) {
|
||||
bucketWithQuotaCount++;
|
||||
}
|
||||
return {
|
||||
name: bucket.getName(),
|
||||
location: bucket.getLocationConstraint(),
|
||||
isVersioned: !!bucket.getVersioningConfiguration(),
|
||||
ownerCanonicalId: bucket.getOwner(),
|
||||
ingestion: bucket.isIngestionBucket(),
|
||||
}));
|
||||
};
|
||||
});
|
||||
|
||||
return this.readCountItems(log, (err, results) => {
|
||||
if (err) {
|
||||
|
@ -2073,6 +2173,7 @@ class MongoClientInterface {
|
|||
/* eslint-disable */
|
||||
results.bucketList = retBucketInfos;
|
||||
results.buckets = bucketCount;
|
||||
results.bucketWithQuotaCount = bucketWithQuotaCount;
|
||||
/* eslint-enable */
|
||||
return cb(null, results);
|
||||
});
|
||||
|
|
|
@ -10,21 +10,21 @@ function trySetDirSyncFlag(path) {
|
|||
|
||||
const GETFLAGS = 2148034049;
|
||||
const SETFLAGS = 1074292226;
|
||||
const FS_DIRSYNC_FL = 65536;
|
||||
const FS_DIRSYNC_FL = 65536n;
|
||||
const buffer = Buffer.alloc(8, 0);
|
||||
const pathFD = fs.openSync(path, 'r');
|
||||
const status = ioctl(pathFD, GETFLAGS, buffer);
|
||||
assert.strictEqual(status, 0);
|
||||
const currentFlags = buffer.readUIntLE(0, 8);
|
||||
const currentFlags = buffer.readBigInt64LE(0);
|
||||
const flags = currentFlags | FS_DIRSYNC_FL;
|
||||
buffer.writeUIntLE(flags, 0, 8);
|
||||
buffer.writeBigInt64LE(flags, 0);
|
||||
const status2 = ioctl(pathFD, SETFLAGS, buffer);
|
||||
assert.strictEqual(status2, 0);
|
||||
fs.closeSync(pathFD);
|
||||
const pathFD2 = fs.openSync(path, 'r');
|
||||
const confirmBuffer = Buffer.alloc(8, 0);
|
||||
ioctl(pathFD2, GETFLAGS, confirmBuffer);
|
||||
assert.strictEqual(confirmBuffer.readUIntLE(0, 8),
|
||||
assert.strictEqual(confirmBuffer.readBigInt64LE(0),
|
||||
currentFlags | FS_DIRSYNC_FL, 'FS_DIRSYNC_FL not set');
|
||||
fs.closeSync(pathFD2);
|
||||
}
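
The switch to BigInt accessors above is needed because Buffer.readUIntLE()/writeUIntLE() only support integers up to 6 bytes, while the ioctl flags field is 8 bytes wide. A small illustration (not part of the diff):

const flagsBuf = Buffer.alloc(8, 0);
flagsBuf.writeBigInt64LE(65536n, 0);      // FS_DIRSYNC_FL as a BigInt
// flagsBuf.readUIntLE(0, 8);             // throws ERR_OUT_OF_RANGE: byteLength must be <= 6
console.log(flagsBuf.readBigInt64LE(0));  // 65536n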
|
||||
|
|
|
@ -3,7 +3,7 @@ import { VersioningConstants } from './constants';
|
|||
const VID_SEP = VersioningConstants.VersionId.Separator;
|
||||
/**
|
||||
* Class for manipulating an object version.
|
||||
* The format of a version: { isNull, isDeleteMarker, versionId, otherInfo }
|
||||
* The format of a version: { isNull, isNull2, isDeleteMarker, versionId, otherInfo }
|
||||
*
|
||||
* @note Some of these functions are optimized based on string search
|
||||
* prior to a full JSON parse/stringify. (Vinh: 18K op/s are achieved
|
||||
|
@ -13,24 +13,31 @@ const VID_SEP = VersioningConstants.VersionId.Separator;
|
|||
export class Version {
|
||||
version: {
|
||||
isNull?: boolean;
|
||||
isNull2?: boolean;
|
||||
isDeleteMarker?: boolean;
|
||||
versionId?: string;
|
||||
isPHD?: boolean;
|
||||
nullVersionId?: string;
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a new version instantiation from its data object.
|
||||
* @param version - the data object to instantiate
|
||||
* @param version.isNull - is a null version
|
||||
* @param version.isNull2 - Whether new version is null or not AND has
|
||||
* been put with a Cloudserver handling null keys (i.e. supporting
|
||||
* S3C-7352)
|
||||
* @param version.isDeleteMarker - is a delete marker
|
||||
* @param version.versionId - the version id
|
||||
* @constructor
|
||||
*/
|
||||
constructor(version?: {
|
||||
isNull?: boolean;
|
||||
isNull2?: boolean;
|
||||
isDeleteMarker?: boolean;
|
||||
versionId?: string;
|
||||
isPHD?: boolean;
|
||||
nullVersionId?: string;
|
||||
}) {
|
||||
this.version = version || {};
|
||||
}
|
||||
|
@ -83,6 +90,33 @@ export class Version {
|
|||
return `{ "isPHD": true, "versionId": "${versionId}" }`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a key-value pair to a JSON object represented as a string. It adds
|
||||
* a comma if the object is not empty (i.e., not just '{}'). It assumes the input
|
||||
* string is formatted as a JSON object.
|
||||
*
|
||||
* @param {string} stringifiedObject The JSON object as a string to which the key-value pair will be appended.
|
||||
* @param {string} key The key to append to the JSON object.
|
||||
* @param {string} value The value associated with the key to append to the JSON object.
|
||||
* @returns {string} The updated JSON object as a string with the new key-value pair appended.
|
||||
* @example
|
||||
* _jsonAppend('{"existingKey":"existingValue"}', 'newKey', 'newValue');
|
||||
* // returns '{"existingKey":"existingValue","newKey":"newValue"}'
|
||||
*/
|
||||
static _jsonAppend(stringifiedObject: string, key: string, value: string): string {
|
||||
// stringifiedObject value has the format of '{...}'
|
||||
let index = stringifiedObject.length - 2;
|
||||
while (stringifiedObject.charAt(index) === ' ') {
|
||||
index -= 1;
|
||||
}
|
||||
const needComma = stringifiedObject.charAt(index) !== '{';
|
||||
return (
|
||||
`${stringifiedObject.slice(0, stringifiedObject.length - 1)}` +
|
||||
(needComma ? ',' : '') +
|
||||
`"${key}":"${value}"}`
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Put versionId into an object in the (cheap) way of string manipulation,
|
||||
* instead of the more expensive alternative parsing and stringification.
|
||||
|
@ -93,14 +127,32 @@ export class Version {
|
|||
*/
|
||||
static appendVersionId(value: string, versionId: string): string {
|
||||
// assuming value has the format of '{...}'
|
||||
let index = value.length - 2;
|
||||
while (value.charAt(index--) === ' ');
|
||||
const comma = value.charAt(index + 1) !== '{';
|
||||
return (
|
||||
`${value.slice(0, value.length - 1)}` + // eslint-disable-line
|
||||
(comma ? ',' : '') +
|
||||
`"versionId":"${versionId}"}`
|
||||
);
|
||||
return Version._jsonAppend(value, 'versionId', versionId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates or appends a `nullVersionId` property to a JSON-formatted string.
|
||||
* This function first checks if the `nullVersionId` property already exists within the input string.
|
||||
* If it exists, the function updates the `nullVersionId` with the new value provided.
|
||||
* If it does not exist, the function appends a `nullVersionId` property with the provided value.
|
||||
*
|
||||
* @static
|
||||
* @param {string} value - The JSON-formatted string that may already contain a `nullVersionId` property.
|
||||
* @param {string} nullVersionId - The new value for the `nullVersionId` property to be updated or appended.
|
||||
* @returns {string} The updated JSON-formatted string with the new `nullVersionId` value.
|
||||
*/
|
||||
static updateOrAppendNullVersionId(value: string, nullVersionId: string): string {
|
||||
// Check if "nullVersionId" already exists in the string
|
||||
const nullVersionIdPattern = /"nullVersionId":"[^"]*"/;
|
||||
const nullVersionIdExists = nullVersionIdPattern.test(value);
|
||||
|
||||
if (nullVersionIdExists) {
|
||||
// Replace the existing nullVersionId with the new one
|
||||
return value.replace(nullVersionIdPattern, `"nullVersionId":"${nullVersionId}"`);
|
||||
} else {
|
||||
// Append nullVersionId
|
||||
return Version._jsonAppend(value, 'nullVersionId', nullVersionId);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -121,6 +173,19 @@ export class Version {
|
|||
return this.version.isNull ?? false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a version is a null version and has
|
||||
* been put with a Cloudserver handling null keys (i.e. supporting
|
||||
* S3C-7352).
|
||||
*
|
||||
* @return - stating if the value is a null version and has
|
||||
* been put with a Cloudserver handling null keys (i.e. supporting
|
||||
* S3C-7352).
|
||||
*/
|
||||
isNull2Version(): boolean {
|
||||
return this.version.isNull2 ?? false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a stringified object is a delete marker.
|
||||
*
|
||||
|
@ -190,6 +255,19 @@ export class Version {
|
|||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark that the null version has been put with a Cloudserver handling null keys (i.e. supporting S3C-7352)
|
||||
*
|
||||
* If `isNull2` is set, `isNull` is also set to maintain consistency.
|
||||
* Explicitly setting both avoids misunderstandings and mistakes in future updates or fixes.
|
||||
* @return - the updated version
|
||||
*/
|
||||
setNull2Version() {
|
||||
this.version.isNull2 = true;
|
||||
this.version.isNull = true;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serialize the version.
|
||||
*
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import errors, { ArsenalError } from '../errors';
|
||||
import { Version } from './Version';
|
||||
import { generateVersionId as genVID } from './VersionID';
|
||||
import { generateVersionId as genVID, getInfVid } from './VersionID';
|
||||
import WriteCache from './WriteCache';
|
||||
import WriteGatheringManager from './WriteGatheringManager';
|
||||
|
||||
|
@ -481,19 +483,113 @@ export default class VersioningRequestProcessor {
|
|||
const versionId = request.options.versionId;
|
||||
const versionKey = formatVersionKey(key, versionId);
|
||||
const ops: any = [];
|
||||
if (!request.options.isNull) {
|
||||
ops.push({ key: versionKey, value: request.value });
|
||||
const masterVersion = data !== undefined &&
|
||||
Version.from(data);
|
||||
// push a version key if we're not updating the null
|
||||
// version (or in legacy Cloudservers not sending the
|
||||
// 'isNull' parameter, but this has an issue, see S3C-7526)
|
||||
if (request.options.isNull !== true) {
|
||||
const versionOp = { key: versionKey, value: request.value };
|
||||
ops.push(versionOp);
|
||||
}
|
||||
if (data === undefined ||
|
||||
(Version.from(data).getVersionId() ?? '') >= versionId) {
|
||||
// master does not exist or is not newer than put
|
||||
// version and needs to be updated as well.
|
||||
// Note that older versions have a greater version ID.
|
||||
ops.push({ key: request.key, value: request.value });
|
||||
} else if (request.options.isNull) {
|
||||
if (masterVersion) {
|
||||
// master key exists
|
||||
// note that older versions have a greater version ID
|
||||
const versionIdFromMaster = masterVersion.getVersionId();
|
||||
if (versionIdFromMaster === undefined ||
|
||||
versionIdFromMaster >= versionId) {
|
||||
let value = request.value;
|
||||
logger.debug('version to put is not older than master');
|
||||
// Delete the deprecated, null key for backward compatibility
|
||||
// to avoid storing both deprecated and new null keys.
|
||||
// If master null version was put with an older Cloudserver (or in compat mode),
|
||||
// there is a possibility that it also has a null versioned key
|
||||
// associated, so we need to delete it as we write the null key.
|
||||
// Deprecated null key gets deleted when the new CloudServer:
|
||||
// - updates metadata of a null master (options.isNull=true)
|
||||
// - puts metadata on top of a master null key (options.isNull=false)
|
||||
if (request.options.isNull !== undefined && // new null key behavior when isNull is defined.
|
||||
masterVersion.isNullVersion() && // master is null
|
||||
!masterVersion.isNull2Version()) { // master does not support the new null key behavior yet.
|
||||
const masterNullVersionId = masterVersion.getVersionId();
|
||||
// The deprecated null key is referenced in the "versionId" property of the master key.
|
||||
if (masterNullVersionId) {
|
||||
const oldNullVersionKey = formatVersionKey(key, masterNullVersionId);
|
||||
ops.push({ key: oldNullVersionKey, type: 'del' });
|
||||
}
|
||||
}
|
||||
// new behavior when isNull is defined is to only
|
||||
// update the master key if it is the latest
|
||||
// version, old behavior needs to copy master to
|
||||
// the null version because older Cloudservers
|
||||
// rely on version-specific PUT to copy master
|
||||
// contents to a new null version key (newer ones
|
||||
// use special versionId="null" requests for this
|
||||
// purpose).
|
||||
if (versionIdFromMaster !== versionId ||
|
||||
request.options.isNull === undefined) {
|
||||
// master key is strictly older than the put version
|
||||
let masterVersionId;
|
||||
if (masterVersion.isNullVersion() && versionIdFromMaster) {
|
||||
logger.debug('master key is a null version');
|
||||
masterVersionId = versionIdFromMaster;
|
||||
} else if (versionIdFromMaster === undefined) {
|
||||
logger.debug('master key is nonversioned');
|
||||
// master key does not have a versionID
|
||||
// => create one with the "infinite" version ID
|
||||
masterVersionId = getInfVid(this.replicationGroupId);
|
||||
masterVersion.setVersionId(masterVersionId);
|
||||
} else {
|
||||
logger.debug('master key is a regular version');
|
||||
}
|
||||
if (request.options.isNull === true) {
|
||||
if (!masterVersionId) {
|
||||
// master is a regular version: delete the null key that
|
||||
// may exist (older null version)
|
||||
logger.debug('delete null key');
|
||||
const nullKey = formatVersionKey(key, '');
|
||||
ops.push({ key: nullKey, type: 'del' });
|
||||
}
|
||||
} else if (masterVersionId) {
|
||||
logger.debug('create version key from master version');
|
||||
// isNull === false means Cloudserver supports null keys,
|
||||
// so create a null key in this case, and a version key otherwise
|
||||
const masterKeyVersionId = request.options.isNull === false ?
|
||||
'' : masterVersionId;
|
||||
const masterVersionKey = formatVersionKey(key, masterKeyVersionId);
|
||||
masterVersion.setNullVersion();
|
||||
// isNull === false means Cloudserver supports null keys,
|
||||
// so create a null key with the isNull2 flag
|
||||
if (request.options.isNull === false) {
|
||||
masterVersion.setNull2Version();
|
||||
// else isNull === undefined means Cloudserver does not support null keys,
|
||||
// and versionIdFromMaster !== versionId means that a version is PUT on top of a null version
|
||||
// hence set/update the new master nullVersionId for backward compatibility
|
||||
} else if (versionIdFromMaster !== versionId) {
|
||||
// => set the nullVersionId to the master version if put version on top of null version.
|
||||
value = Version.updateOrAppendNullVersionId(request.value, masterVersionId);
|
||||
}
|
||||
ops.push({ key: masterVersionKey,
|
||||
value: masterVersion.toString() });
|
||||
}
|
||||
} else {
|
||||
logger.debug('version to put is the master');
|
||||
}
|
||||
ops.push({ key, value: value });
|
||||
} else {
|
||||
logger.debug('version to put is older than master');
|
||||
if (request.options.isNull === true && !masterVersion.isNullVersion()) {
|
||||
logger.debug('create or update null key');
|
||||
const nullKey = formatVersionKey(key, '');
|
||||
ops.push({ key: nullKey, value: request.value });
|
||||
const nullKeyOp = { key: nullKey, value: request.value };
|
||||
ops.push(nullKeyOp);
|
||||
// for backward compatibility: remove null version key
|
||||
ops.push({ key: versionKey, type: 'del' });
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// master key does not exist: create it
|
||||
ops.push({ key, value: request.value });
|
||||
}
|
||||
return callback(null, ops, versionId);
|
||||
});
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import errors, { ArsenalError } from '../errors';
|
||||
import WriteGatheringManager from './WriteGatheringManager';
|
||||
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
import { RequestLogger } from 'werelogs';
|
||||
|
||||
import { ArsenalError } from '../errors';
|
||||
|
||||
const WG_TIMEOUT = 5; // batching period in milliseconds
|
||||
|
|
package.json
|
@ -3,7 +3,7 @@
|
|||
"engines": {
|
||||
"node": ">=16"
|
||||
},
|
||||
"version": "8.1.121",
|
||||
"version": "8.1.134",
|
||||
"description": "Common utilities for the S3 project components",
|
||||
"main": "build/index.js",
|
||||
"repository": {
|
||||
|
@ -19,39 +19,38 @@
|
|||
"dependencies": {
|
||||
"@azure/identity": "^3.1.1",
|
||||
"@azure/storage-blob": "^12.12.0",
|
||||
"@types/async": "^3.2.12",
|
||||
"@types/utf8": "^3.0.1",
|
||||
"JSONStream": "^1.0.0",
|
||||
"@js-sdsl/ordered-set": "^4.4.2",
|
||||
"@swc/cli": "^0.4.0",
|
||||
"@swc/core": "^1.7.4",
|
||||
"agentkeepalive": "^4.1.3",
|
||||
"ajv": "6.12.3",
|
||||
"async": "~2.6.4",
|
||||
"ajv": "^6.12.3",
|
||||
"async": "^2.6.4",
|
||||
"aws-sdk": "^2.1005.0",
|
||||
"backo": "^1.1.0",
|
||||
"base-x": "3.0.8",
|
||||
"base62": "2.0.1",
|
||||
"bson": "4.0.0",
|
||||
"debug": "~4.1.0",
|
||||
"base-x": "^3.0.8",
|
||||
"base62": "^2.0.1",
|
||||
"bson": "^4.0.0",
|
||||
"debug": "^4.1.0",
|
||||
"diskusage": "^1.1.1",
|
||||
"fcntl": "github:scality/node-fcntl#0.2.2",
|
||||
"hdclient": "scality/hdclient#1.1.7",
|
||||
"httpagent": "scality/httpagent#1.0.6",
|
||||
"fcntl": "git+https://git.yourcmc.ru/vitalif/zenko-fcntl.git",
|
||||
"httpagent": "git+https://git.yourcmc.ru/vitalif/zenko-httpagent.git#development/1.0",
|
||||
"https-proxy-agent": "^2.2.0",
|
||||
"ioredis": "^4.28.5",
|
||||
"ipaddr.js": "1.9.1",
|
||||
"ipaddr.js": "^1.9.1",
|
||||
"joi": "^17.6.0",
|
||||
"level": "~5.0.1",
|
||||
"level-sublevel": "~6.6.5",
|
||||
"JSONStream": "^1.0.0",
|
||||
"level": "^5.0.1",
|
||||
"level-sublevel": "^6.6.5",
|
||||
"mongodb": "^5.2.0",
|
||||
"node-forge": "^1.3.0",
|
||||
"prom-client": "14.2.0",
|
||||
"prom-client": "^14.2.0",
|
||||
"simple-glob": "^0.2.0",
|
||||
"socket.io": "~4.6.1",
|
||||
"socket.io-client": "~4.6.1",
|
||||
"sproxydclient": "git+https://github.com/scality/sproxydclient#8.0.10",
|
||||
"utf8": "3.0.0",
|
||||
"socket.io": "^4.6.1",
|
||||
"socket.io-client": "^4.6.1",
|
||||
"utf8": "^3.0.0",
|
||||
"uuid": "^3.0.1",
|
||||
"werelogs": "scality/werelogs#8.1.2",
|
||||
"xml2js": "~0.4.23"
|
||||
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
|
||||
"xml2js": "^0.4.23"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"ioctl": "^2.0.2"
|
||||
|
@ -60,22 +59,24 @@
|
|||
"@babel/preset-env": "^7.16.11",
|
||||
"@babel/preset-typescript": "^7.16.7",
|
||||
"@sinonjs/fake-timers": "^6.0.1",
|
||||
"@types/async": "^3.2.12",
|
||||
"@types/utf8": "^3.0.1",
|
||||
"@types/ioredis": "^4.28.10",
|
||||
"@types/jest": "^27.4.1",
|
||||
"@types/node": "^17.0.21",
|
||||
"@types/node": "^18.19.41",
|
||||
"@types/xml2js": "^0.4.11",
|
||||
"eslint": "^8.14.0",
|
||||
"eslint-config-airbnb": "6.2.0",
|
||||
"eslint-config-scality": "scality/Guidelines#ec33dfb",
|
||||
"eslint-config-airbnb-base": "^15.0.0",
|
||||
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
|
||||
"eslint-plugin-react": "^4.3.0",
|
||||
"jest": "^27.5.1",
|
||||
"mongodb-memory-server": "^8.12.2",
|
||||
"nyc": "^15.1.0",
|
||||
"sinon": "^9.0.2",
|
||||
"temp": "0.9.1",
|
||||
"temp": "^0.9.1",
|
||||
"ts-jest": "^27.1.3",
|
||||
"ts-node": "^10.6.0",
|
||||
"typescript": "^4.6.2"
|
||||
"typescript": "^4.9.5"
|
||||
},
|
||||
"scripts": {
|
||||
"lint": "eslint $(git ls-files '*.js')",
|
||||
|
@ -83,7 +84,8 @@
|
|||
"lint_yml": "yamllint $(git ls-files '*.yml')",
|
||||
"test": "jest tests/unit",
|
||||
"build": "tsc",
|
||||
"prepare": "yarn build",
|
||||
"prepack": "tsc",
|
||||
"postinstall": "[ -d build ] || swc -d build --copy-files package.json index.ts lib",
|
||||
"ft_test": "jest tests/functional --testTimeout=120000 --forceExit",
|
||||
"coverage": "nyc --clean jest tests --coverage --testTimeout=120000 --forceExit",
|
||||
"build_doc": "cd documentation/listingAlgos/pics; dot -Tsvg delimiterStateChart.dot > delimiterStateChart.svg; dot -Tsvg delimiterMasterV0StateChart.dot > delimiterMasterV0StateChart.svg; dot -Tsvg delimiterVersionsStateChart.dot > delimiterVersionsStateChart.svg"
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
const async = require('async');
|
||||
const assert = require('assert');
|
||||
const cluster = require('cluster');
|
||||
const http = require('http');
|
||||
|
||||
|
@ -65,6 +66,15 @@ const rpcHandlers = {
|
|||
TestHandlerWithNoResponse: () => {},
|
||||
};
|
||||
|
||||
const primaryHandlers = {
|
||||
echoHandler: (worker, payload, uids, callback) => {
|
||||
callback(null, { workerId: worker.id, payload, uids });
|
||||
},
|
||||
errorWithHttpCodeHandler: (_worker, _payload, _uids, callback) => {
|
||||
callback({ name: 'ErrorMock', code: 418, message: 'An error message from primary' });
|
||||
},
|
||||
};
|
||||
|
||||
function respondOnTestFailure(message, error, results) {
|
||||
console.error('After sendWorkerCommand() resolve/reject: ' +
|
||||
`${message}, error=${error}, results=${JSON.stringify(results)}`);
|
||||
|
@ -214,6 +224,27 @@ async function workerTimeoutTest() {
|
|||
}
|
||||
}
|
||||
|
||||
async function workerToPrimaryEcho() {
|
||||
const uids = genUIDS();
|
||||
const payload = { testing: true };
|
||||
const expected = { workerId: cluster.worker.id, payload, uids };
|
||||
|
||||
const results = await sendWorkerCommand('PRIMARY', 'echoHandler', uids, payload);
|
||||
assert.strictEqual(results.length, 1, 'There is 1 and only 1 primary');
|
||||
assert.ifError(results[0].error);
|
||||
assert.deepStrictEqual(results[0].result, expected);
|
||||
}
|
||||
|
||||
async function workerToPrimaryErrorWithHttpCode() {
|
||||
const uids = genUIDS();
|
||||
const payload = { testing: true };
|
||||
const results = await sendWorkerCommand('PRIMARY', 'errorWithHttpCodeHandler', uids, payload);
|
||||
assert.strictEqual(results.length, 1, 'There is 1 and only 1 primary');
|
||||
assert.ok(results[0].error);
|
||||
assert.strictEqual(results[0].error.message, 'An error message from primary');
|
||||
assert.strictEqual(results[0].error.code, 418);
|
||||
}
|
||||
|
||||
const TEST_URLS = {
|
||||
'/successful-command': successfulCommandTest,
|
||||
'/successful-command-with-extra-worker': successfulCommandWithExtraWorkerTest,
|
||||
|
@ -223,6 +254,8 @@ const TEST_URLS = {
|
|||
'/duplicate-uids': duplicateUidsTest,
|
||||
'/unsuccessful-worker': unsuccessfulWorkerTest,
|
||||
'/worker-timeout': workerTimeoutTest,
|
||||
'/worker-to-primary/echo': workerToPrimaryEcho,
|
||||
'/worker-to-primary/error-with-http-code': workerToPrimaryErrorWithHttpCode,
|
||||
};
|
||||
|
||||
if (process.argv.length !== 4) {
|
||||
|
@ -247,7 +280,7 @@ if (cluster.isPrimary) {
|
|||
N_WORKERS,
|
||||
(i, wcb) => cluster.fork().on('online', wcb),
|
||||
() => {
|
||||
setupRPCPrimary();
|
||||
setupRPCPrimary(primaryHandlers);
|
||||
},
|
||||
);
|
||||
} else {
|
||||
|
@ -263,8 +296,22 @@ if (cluster.isPrimary) {
|
|||
res.writeHead(200);
|
||||
res.end();
|
||||
}).catch(err => {
|
||||
res.writeHead(err.code);
|
||||
// serialize AssertionError to be displayed nicely in jest
|
||||
if (err instanceof assert.AssertionError) {
|
||||
const serializedErr = JSON.stringify({
|
||||
code: err.code,
|
||||
message: err.message,
|
||||
stack: err.stack,
|
||||
actual: err.actual,
|
||||
expected: err.expected,
|
||||
operator: err.operator,
|
||||
});
|
||||
res.writeHead(500);
|
||||
res.end(serializedErr);
|
||||
} else {
|
||||
res.writeHead(err.code || 500);
|
||||
res.end(err.message);
|
||||
}
|
||||
});
|
||||
}
|
||||
console.error(`Invalid test URL ${req.url}`);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
'use strict'; // eslint-disable-line
|
||||
|
||||
const assert = require('assert');
|
||||
const http = require('http');
|
||||
const readline = require('readline');
|
||||
const spawn = require('child_process').spawn;
|
||||
|
@ -46,13 +46,45 @@ function stopTestServer(done) {
|
|||
testServer.on('close', done);
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to deserialize and recreate AssertionError with stackTrace from spawned server
|
||||
* @param {string} responseBody maybe serialized AssertionError
|
||||
* @throws {assert.AssertionError}
|
||||
* @returns {undefined}
|
||||
*/
|
||||
function handleAssertionError(responseBody) {
|
||||
let parsed;
|
||||
try {
|
||||
parsed = JSON.parse(responseBody);
|
||||
} catch (_) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (parsed && parsed.code === 'ERR_ASSERTION') {
|
||||
const err = new assert.AssertionError(parsed);
|
||||
err.stack = parsed.stack;
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
function runTest(testUrl, cb) {
|
||||
const req = http.request(`http://localhost:${TEST_SERVER_PORT}/${testUrl}`, res => {
|
||||
let responseBody = '';
|
||||
res
|
||||
.on('data', () => {})
|
||||
.on('data', (chunk) => {
|
||||
responseBody += chunk;
|
||||
})
|
||||
.on('end', () => {
|
||||
try {
|
||||
handleAssertionError(responseBody);
|
||||
expect(res.statusCode).toEqual(200);
|
||||
cb();
|
||||
} catch (err) {
|
||||
if (!(err instanceof assert.AssertionError)) {
|
||||
err.message += `\n\nBody:\n${responseBody}`;
|
||||
}
|
||||
return cb(err);
|
||||
}
|
||||
return cb();
|
||||
})
|
||||
.on('error', err => cb(err));
|
||||
});
|
||||
|
@ -106,4 +138,14 @@ describe('ClusterRPC', () => {
|
|||
// The test server spawns a new worker when it receives SIGUSR1
|
||||
testServer.kill('SIGUSR1');
|
||||
});
|
||||
|
||||
describe('worker to primary', () => {
|
||||
it('should succeed and return a result', done => {
|
||||
runTest('worker-to-primary/echo', done);
|
||||
});
|
||||
|
||||
it('should return an error with a code', done => {
|
||||
runTest('worker-to-primary/error-with-http-code', done);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@ -0,0 +1,265 @@
|
|||
import GapCache from '../../../../lib/algos/cache/GapCache';
|
||||
|
||||
describe('GapCache', () => {
|
||||
let gapCache;
|
||||
|
||||
beforeEach(() => {
|
||||
// exposureDelayMs=100, maxGaps=10, maxGapWeight=100
|
||||
gapCache = new GapCache(100, 10, 100);
|
||||
gapCache.start();
|
||||
});
|
||||
afterEach(() => {
|
||||
gapCache.stop();
|
||||
});
|
||||
|
||||
describe('getters and setters', () => {
|
||||
it('maxGapWeight getter', () => {
|
||||
expect(gapCache.maxGapWeight).toEqual(100);
|
||||
});
|
||||
|
||||
it('maxGapWeight setter', () => {
|
||||
gapCache.maxGapWeight = 123;
|
||||
expect(gapCache.maxGapWeight).toEqual(123);
|
||||
// check that internal gap sets have also been updated
|
||||
expect(gapCache._stagingUpdates.newGaps.maxWeight).toEqual(123);
|
||||
expect(gapCache._frozenUpdates.newGaps.maxWeight).toEqual(123);
|
||||
});
|
||||
|
||||
it('exposureDelayMs getter', () => {
|
||||
expect(gapCache.exposureDelayMs).toEqual(100);
|
||||
});
|
||||
|
||||
it('exposureDelayMs setter', async () => {
|
||||
// insert a first gap
|
||||
gapCache.setGap('bar', 'baz', 10);
|
||||
|
||||
// change the exposure delay to 50ms
|
||||
gapCache.exposureDelayMs = 50;
|
||||
expect(gapCache.exposureDelayMs).toEqual(50);
|
||||
|
||||
gapCache.setGap('qux', 'quz', 10);
|
||||
|
||||
// wait for more than twice the new exposure delay
|
||||
await new Promise(resolve => setTimeout(resolve, 200));
|
||||
|
||||
// only the second gap should have been exposed, due to the change of
|
||||
// exposure delay subsequent to the first call to setGap()
|
||||
expect(await gapCache.lookupGap('ape', 'zoo')).toEqual(
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 10 }
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('clear()', () => {
|
||||
it('should clear all exposed gaps', async () => {
|
||||
gapCache.setGap('bar', 'baz', 10);
|
||||
gapCache.setGap('qux', 'quz', 20);
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
|
||||
expect(await gapCache.lookupGap('ape', 'zoo')).toEqual(
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 }
|
||||
);
|
||||
gapCache.clear();
|
||||
expect(await gapCache.lookupGap('ape', 'zoo')).toBeNull();
|
||||
});
|
||||
|
||||
it('should clear all staging gaps', async () => {
|
||||
gapCache.setGap('bar', 'baz', 10);
|
||||
gapCache.setGap('qux', 'quz', 20);
|
||||
|
||||
gapCache.clear();
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
|
||||
expect(await gapCache.lookupGap('ape', 'zoo')).toBeNull();
|
||||
});
|
||||
|
||||
it('should keep existing invalidating updates against the next new gaps', async () => {
|
||||
// invalidate future gaps containing 'dog'
|
||||
expect(gapCache.removeOverlappingGaps(['dog'])).toEqual(0);
|
||||
|
||||
// then, clear the cache
|
||||
gapCache.clear();
|
||||
|
||||
// wait for 50ms (half of exposure delay of 100ms) before
|
||||
// setting a new gap overlapping with 'dog'
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
gapCache.setGap('cat', 'fox', 10);
|
||||
|
||||
// also set a non-overlapping gap to make sure it is not invalidated
|
||||
gapCache.setGap('goat', 'hog', 20);
|
||||
|
||||
// wait an extra 250ms to ensure all valid gaps have been exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 250));
|
||||
// the next gap is indeed 'goat'... because 'cat'... should have been invalidated
|
||||
expect(await gapCache.lookupGap('bat', 'zoo')).toEqual(
|
||||
{ firstKey: 'goat', lastKey: 'hog', weight: 20 });
|
||||
});
|
||||
});
|
||||
|
||||
it('should expose gaps after at least exposureDelayMs milliseconds', async () => {
|
||||
gapCache.setGap('bar', 'baz', 10);
|
||||
expect(await gapCache.lookupGap('ape', 'cat')).toBeNull();
|
||||
|
||||
// wait for 50ms which is half of the minimum time to exposure
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
// the gap should not be exposed yet
|
||||
expect(await gapCache.lookupGap('ape', 'cat')).toBeNull();
|
||||
|
||||
// wait for an extra 250ms (total 300ms): the upper bound for exposure of any
|
||||
// setGap() call is twice the exposureDelayMs value, so 200ms, wait an extra
|
||||
// 100ms to cope with scheduling uncertainty and GapSet processing time, after
|
||||
// which the gap introduced by setGap() should always be exposed.
|
||||
await new Promise(resolve => setTimeout(resolve, 250));
|
||||
expect(await gapCache.lookupGap('ape', 'cat')).toEqual(
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
|
||||
// check getters
|
||||
expect(gapCache.maxGaps).toEqual(10);
|
||||
expect(gapCache.maxGapWeight).toEqual(100);
|
||||
expect(gapCache.size).toEqual(1);
|
||||
|
||||
// check iteration over the exposed gaps
|
||||
let nGaps = 0;
|
||||
for (const gap of gapCache) {
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
nGaps += 1;
|
||||
}
|
||||
expect(nGaps).toEqual(1);
|
||||
|
||||
// check toArray()
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('removeOverlappingGaps() should invalidate all overlapping gaps that are already exposed',
|
||||
async () => {
|
||||
gapCache.setGap('cat', 'fox', 10);
|
||||
gapCache.setGap('lion', 'seal', 20);
|
||||
// wait for 3x100ms to ensure all setGap() calls have been exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
// expect 0 gap removed because 'hog' is not in any gap
|
||||
expect(gapCache.removeOverlappingGaps(['hog'])).toEqual(0);
|
||||
// expect 1 gap removed because 'cat' -> 'fox' should be already exposed
|
||||
expect(gapCache.removeOverlappingGaps(['dog'])).toEqual(1);
|
||||
// the gap should have been invalidated permanently
|
||||
expect(await gapCache.lookupGap('dog', 'fox')).toBeNull();
|
||||
// the other gap should still be present
|
||||
expect(await gapCache.lookupGap('rat', 'tiger')).toEqual(
|
||||
{ firstKey: 'lion', lastKey: 'seal', weight: 20 });
|
||||
});
|
||||
|
||||
it('removeOverlappingGaps() should invalidate all overlapping gaps that are not yet exposed',
|
||||
async () => {
|
||||
gapCache.setGap('cat', 'fox', 10);
|
||||
gapCache.setGap('lion', 'seal', 20);
|
||||
// make the following calls asynchronous for the sake of the
|
||||
// test, but not waiting for the exposure delay
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
// expect 0 gap removed because 'hog' is not in any gap
|
||||
expect(gapCache.removeOverlappingGaps(['hog'])).toEqual(0);
|
||||
// expect 0 gap removed because 'cat' -> 'fox' is not exposed yet,
|
||||
// but internally it should have been removed from the staging or
|
||||
// frozen gap set
|
||||
expect(gapCache.removeOverlappingGaps(['dog'])).toEqual(0);
|
||||
|
||||
// wait for 3x100ms to ensure all non-invalidated setGap() calls have been exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
// the gap should have been invalidated permanently
|
||||
expect(await gapCache.lookupGap('dog', 'fox')).toBeNull();
|
||||
// the other gap should now be exposed
|
||||
expect(await gapCache.lookupGap('rat', 'tiger')).toEqual(
|
||||
{ firstKey: 'lion', lastKey: 'seal', weight: 20 });
|
||||
});
|
||||
|
||||
it('removeOverlappingGaps() should invalidate gaps created later by setGap() but ' +
|
||||
'within the exposure delay', async () => {
|
||||
// wait for 80ms (slightly less than exposure delay of 100ms)
|
||||
// before calling removeOverlappingGaps(), so that the next
|
||||
// exposure timer kicks in before the call to setGap()
|
||||
await new Promise(resolve => setTimeout(resolve, 80));
|
||||
|
||||
// there is no exposed gap yet, so expect 0 gap removed
|
||||
expect(gapCache.removeOverlappingGaps(['dog'])).toEqual(0);
|
||||
|
||||
// wait for 50ms (half of exposure delay of 100ms) before
|
||||
// setting a new gap overlapping with 'dog'
|
||||
await new Promise(resolve => setTimeout(resolve, 50));
|
||||
gapCache.setGap('cat', 'fox', 10);
|
||||
|
||||
// also set a non-overlapping gap to make sure it is not invalidated
|
||||
gapCache.setGap('goat', 'hog', 20);
|
||||
|
||||
// wait an extra 250ms to ensure all valid gaps have been exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 250));
|
||||
// the next gap is indeed 'goat'... because 'cat'... should have been invalidated
|
||||
expect(await gapCache.lookupGap('bat', 'zoo')).toEqual(
|
||||
{ firstKey: 'goat', lastKey: 'hog', weight: 20 });
|
||||
});
|
||||
|
||||
it('removeOverlappingGaps() should not invalidate gaps created more than twice ' +
|
||||
'the exposure delay later', async () => {
|
||||
// there is no exposed gap yet, so expect 0 gap removed
|
||||
expect(gapCache.removeOverlappingGaps(['dog'])).toEqual(0);
|
||||
|
||||
// wait for 250ms (more than twice the exposure delay of 100ms) before
|
||||
// setting a new gap overlapping with 'dog'
|
||||
await new Promise(resolve => setTimeout(resolve, 250));
|
||||
gapCache.setGap('cat', 'fox', 10);
|
||||
|
||||
// also set a non-overlapping gap to make sure it is not invalidated
|
||||
gapCache.setGap('goat', 'hog', 20);
|
||||
|
||||
// wait for an extra 250ms to ensure the new gap is exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 250));
|
||||
// should find the inserted gap as it should not have been invalidated
|
||||
expect(await gapCache.lookupGap('bat', 'zoo')).toEqual(
|
||||
{ firstKey: 'cat', lastKey: 'fox', weight: 10 });
|
||||
});
|
||||
|
||||
it('exposed gaps should be merged when possible', async () => {
|
||||
gapCache.setGap('bar', 'baz', 10);
|
||||
gapCache.setGap('baz', 'qux', 10);
|
||||
// wait until the merged gap is exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
expect(await gapCache.lookupGap('ape', 'cat')).toEqual(
|
||||
{ firstKey: 'bar', lastKey: 'qux', weight: 20 });
|
||||
});
|
||||
|
||||
it('exposed gaps should be split when above maxGapWeight', async () => {
|
||||
gapCache.setGap('bar', 'baz', gapCache.maxGapWeight - 1);
|
||||
gapCache.setGap('baz', 'qux', 10);
|
||||
// wait until the gaps are exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
expect(await gapCache.lookupGap('cat', 'dog')).toEqual(
|
||||
{ firstKey: 'baz', lastKey: 'qux', weight: 10 });
|
||||
});
|
||||
|
||||
it('gaps should not be exposed when reaching the maxGaps limit', async () => {
|
||||
const gapsArray = new Array(gapCache.maxGaps).fill(undefined).map(
|
||||
(_, i) => {
|
||||
const firstKey = `0000${i}`.slice(-4);
|
||||
return {
|
||||
firstKey,
|
||||
lastKey: `${firstKey}foo`,
|
||||
weight: 10,
|
||||
};
|
||||
}
|
||||
);
|
||||
for (const gap of gapsArray) {
|
||||
gapCache.setGap(gap.firstKey, gap.lastKey, gap.weight);
|
||||
}
|
||||
// wait until the gaps are exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
expect(gapCache.size).toEqual(gapCache.maxGaps);
|
||||
|
||||
gapCache.setGap('noroomforthisgap', 'noroomforthisgapfoo');
|
||||
// wait until the gaps are exposed
|
||||
await new Promise(resolve => setTimeout(resolve, 300));
|
||||
|
||||
// the number of gaps should still be 'maxGaps'
|
||||
expect(gapCache.size).toEqual(gapCache.maxGaps);
|
||||
// the gaps should correspond to the original array
|
||||
expect(gapCache.toArray()).toEqual(gapsArray);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,878 @@
|
|||
import { OrderedSet } from '@js-sdsl/ordered-set';
|
||||
import GapSet from '../../../../lib/algos/cache/GapSet';
|
||||
|
||||
function genRandomKey(): string {
|
||||
const CHARS = 'abcdefghijklmnopqrstuvwxyz0123456789';
|
||||
return new Array(16).fill(undefined).map(
|
||||
() => CHARS[Math.trunc(Math.random() * CHARS.length)]
|
||||
).join('');
|
||||
}
|
||||
|
||||
function genRandomUnchainedGaps(nGaps) {
|
||||
const gapBounds = new Array(nGaps * 2).fill(undefined).map(
|
||||
() => genRandomKey()
|
||||
);
|
||||
gapBounds.sort();
|
||||
const gapsArray = new Array(nGaps).fill(undefined).map(
|
||||
(_, i) => ({
|
||||
firstKey: gapBounds[2 * i],
|
||||
lastKey: gapBounds[2 * i + 1],
|
||||
weight: 10,
|
||||
})
|
||||
);
|
||||
return gapsArray;
|
||||
}
|
||||
|
||||
function genRandomChainedGaps(nGaps) {
|
||||
const gapBounds = new Array(nGaps + 1).fill(undefined).map(
|
||||
() => genRandomKey()
|
||||
);
|
||||
gapBounds.sort();
|
||||
const gapsArray = new Array(nGaps).fill(undefined).map(
|
||||
(_, i) => ({
|
||||
firstKey: gapBounds[i],
|
||||
lastKey: gapBounds[i + 1],
|
||||
weight: 10,
|
||||
})
|
||||
);
|
||||
return gapsArray;
|
||||
}
|
||||
|
||||
/**
|
||||
* Shuffle an array in-place
|
||||
*
|
||||
* @param {any[]} - The array to shuffle
|
||||
* @return {undefined}
|
||||
*/
|
||||
function shuffleArray(array) {
|
||||
for (let i = array.length - 1; i > 0; i--) {
|
||||
const randIndex = Math.trunc(Math.random() * (i + 1));
|
||||
/* eslint-disable no-param-reassign */
|
||||
const randIndexVal = array[randIndex];
|
||||
array[randIndex] = array[i];
|
||||
array[i] = randIndexVal;
|
||||
/* eslint-enable no-param-reassign */
|
||||
}
|
||||
}
|
||||
|
||||
describe('GapSet', () => {
|
||||
const INITIAL_GAPSET = [
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
];
|
||||
const INITIAL_GAPSET_WITH_CHAIN = [
|
||||
// single-key gap
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
// start of chain
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
{ firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
// end of chain
|
||||
];
|
||||
|
||||
let gapsArray;
|
||||
let gapSet;
|
||||
let gapsArrayWithChain;
|
||||
let gapSetWithChain;
|
||||
beforeEach(() => {
|
||||
gapsArray = JSON.parse(
|
||||
JSON.stringify(INITIAL_GAPSET)
|
||||
);
|
||||
gapSet = GapSet.createFromArray(gapsArray, 100);
|
||||
gapsArrayWithChain = JSON.parse(
|
||||
JSON.stringify(INITIAL_GAPSET_WITH_CHAIN)
|
||||
);
|
||||
gapSetWithChain = GapSet.createFromArray(gapsArrayWithChain, 100);
|
||||
});
|
||||
|
||||
describe('GapSet::size', () => {
|
||||
it('should return 0 for an empty gap set', () => {
|
||||
const emptyGapSet = new GapSet(100);
|
||||
expect(emptyGapSet.size).toEqual(0);
|
||||
});
|
||||
|
||||
it('should return the size of the gap set', () => {
|
||||
expect(gapSet.size).toEqual(2);
|
||||
});
|
||||
|
||||
it('should reflect the new size after removal of gaps', () => {
|
||||
gapSet._gaps.eraseElementByKey({ firstKey: 'bar' });
|
||||
expect(gapSet.size).toEqual(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe('GapSet::maxWeight', () => {
|
||||
it('getter', () => {
|
||||
const emptyGapSet = new GapSet(123);
|
||||
expect(emptyGapSet.maxWeight).toEqual(123);
|
||||
});
|
||||
|
||||
it('setter', () => {
|
||||
const emptyGapSet = new GapSet(123);
|
||||
emptyGapSet.maxWeight = 456;
|
||||
expect(emptyGapSet.maxWeight).toEqual(456);
|
||||
});
|
||||
});
|
||||
|
||||
describe('GapSet::setGap()', () => {
|
||||
it('should start a gap with a single key in empty gap set', () => {
|
||||
const emptyGapSet = new GapSet(100);
|
||||
const gap = emptyGapSet.setGap('foo', 'foo', 1);
|
||||
expect(gap).toEqual({ firstKey: 'foo', lastKey: 'foo', weight: 1 });
|
||||
expect(emptyGapSet.toArray()).toEqual([
|
||||
{ firstKey: 'foo', lastKey: 'foo', weight: 1 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should start a gap with a single key in non-empty gap set', () => {
|
||||
const gap = gapSet.setGap('foo', 'foo', 1);
|
||||
expect(gap).toEqual({ firstKey: 'foo', lastKey: 'foo', weight: 1 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'foo', lastKey: 'foo', weight: 1 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should start a gap with multiple keys in empty gap set', () => {
|
||||
const emptyGapSet = new GapSet(100);
|
||||
const gap = emptyGapSet.setGap('foo', 'qux', 5);
|
||||
expect(gap).toEqual({ firstKey: 'foo', lastKey: 'qux', weight: 5 });
|
||||
expect(emptyGapSet.toArray()).toEqual([
|
||||
{ firstKey: 'foo', lastKey: 'qux', weight: 5 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should return a new object rather than a gap managed by GapSet', () => {
|
||||
const emptyGapSet = new GapSet(100);
|
||||
const gap = emptyGapSet.setGap('foo', 'qux', 5);
|
||||
gap.lastKey = 'quz';
|
||||
// check that modifying the returned gap doesn't affect the GapSet
|
||||
expect(emptyGapSet.toArray()).toEqual([
|
||||
{ firstKey: 'foo', lastKey: 'qux', weight: 5 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should return an existing gap that includes the wanted gap', () => {
|
||||
const gap = gapSet.setGap('bat', 'bay', 5);
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
});
|
||||
|
||||
it('should return an existing gap that starts with the wanted gap first key', () => {
|
||||
const gap = gapSet.setGap('bar', 'bay', 5);
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
});
|
||||
|
||||
it('should return an existing gap that ends with the wanted gap last key', () => {
|
||||
const gap = gapSet.setGap('bat', 'baz', 5);
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
});
|
||||
|
||||
it('should return the existing chained gap that starts with the first key', () => {
|
||||
const gap = gapSetWithChain.setGap('baz', 'quo', 10);
|
||||
expect(gap).toEqual({ firstKey: 'baz', lastKey: 'qux', weight: 15 });
|
||||
expect(gapSetWithChain.toArray()).toEqual(INITIAL_GAPSET_WITH_CHAIN);
|
||||
});
|
||||
|
||||
it('should extend a single-key gap with no other gap', () => {
|
||||
const singleKeyGap = { firstKey: 'foo', lastKey: 'foo', weight: 1 };
|
||||
const singleKeyGapSet = GapSet.createFromArray([singleKeyGap], 100);
|
||||
|
||||
const extendedGap = singleKeyGapSet.setGap('foo', 'qux', 30);
|
||||
expect(extendedGap).toEqual({ firstKey: 'foo', lastKey: 'qux', weight: 31 });
|
||||
expect(singleKeyGapSet.toArray()).toEqual([
|
||||
{ firstKey: 'foo', lastKey: 'qux', weight: 31 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a gap with no next gap', () => {
|
||||
// existing gap: 'qux' -> 'quz'
|
||||
const extendedGap = gapSet.setGap('qux', 'rat', 25);
|
||||
expect(extendedGap).toEqual({ firstKey: 'qux', lastKey: 'rat', weight: 25 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'rat', weight: 25 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a gap without overlap with next gap', () => {
|
||||
// existing gap: 'bar' -> 'baz'
|
||||
const extendedGap = gapSet.setGap('bar', 'dog', 15);
|
||||
expect(extendedGap).toEqual({ firstKey: 'bar', lastKey: 'dog', weight: 15 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'dog', weight: 15 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a gap starting from its last key', () => {
|
||||
// existing gap: 'qux' -> 'quz'
|
||||
const extendedGap = gapSet.setGap('quz', 'rat', 5);
|
||||
expect(extendedGap).toEqual({ firstKey: 'qux', lastKey: 'rat', weight: 25 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'rat', weight: 25 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge with next gap with single-key overlap if total weight is ' +
|
||||
'under maxWeight', () => {
|
||||
const extendedGap = gapSet.setGap('bar', 'qux', 80);
|
||||
// updated weight is accurately set as the sum of
|
||||
// overlapping individual gap weights
|
||||
expect(extendedGap).toEqual({ firstKey: 'bar', lastKey: 'quz', weight: 80 + 20 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'quz', weight: 80 + 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should chain with next gap with single-key overlap if total weight is ' +
|
||||
'above maxWeight', () => {
|
||||
const extendedGap = gapSet.setGap('bar', 'qux', 90);
|
||||
expect(extendedGap).toEqual({ firstKey: 'qux', lastKey: 'quz', weight: 20 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'qux', weight: 90 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge with both previous and next gap if bounds overlap by a ' +
|
||||
'single key and total weight is under maxWeight', () => {
|
||||
const extendedGap = gapSet.setGap('baz', 'qux', 30);
|
||||
// updated weight is accurately set as the sum of
|
||||
// overlapping individual gap weights
|
||||
expect(extendedGap).toEqual({ firstKey: 'bar', lastKey: 'quz', weight: 10 + 30 + 20 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'quz', weight: 10 + 30 + 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge with previous gap and chain with next gap if bounds overlap by a ' +
|
||||
'single key on either side and weight is above maxWeight when merging on right side', () => {
|
||||
const extendedGap = gapSet.setGap('baz', 'qux', 90);
|
||||
expect(extendedGap).toEqual({ firstKey: 'qux', lastKey: 'quz', weight: 20 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'qux', weight: 100 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should chain with previous gap and merge with next gap if bounds overlap by a ' +
|
||||
'single key on either side and weight is above maxWeight when merging on left side', () => {
|
||||
// modified version of the common test set with increased weight
|
||||
// for 'bar' -> 'baz'
|
||||
const gapSet = GapSet.createFromArray([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 80 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
], 100);
|
||||
const extendedGap = gapSet.setGap('baz', 'qux', 70);
|
||||
expect(extendedGap).toEqual({ firstKey: 'baz', lastKey: 'quz', weight: 90 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 80 },
|
||||
{ firstKey: 'baz', lastKey: 'quz', weight: 90 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge with both previous and next gap if left bound overlaps by a ' +
|
||||
'single key and total weight is under maxWeight', () => {
|
||||
const extendedGap = gapSet.setGap('baz', 'quxxx', 40);
|
||||
// updated weight is heuristically set as the sum of the
|
||||
// previous chained gap's weight and the new weight
|
||||
// (excluding the overlapping gap on right side)
|
||||
expect(extendedGap).toEqual({ firstKey: 'bar', lastKey: 'quz', weight: 10 + 40 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'quz', weight: 10 + 40 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should chain with previous gap and merge with next gap if left bound overlaps by a ' +
|
||||
'single key and total weight is above maxWeight', () => {
|
||||
const extendedGap = gapSet.setGap('baz', 'quxxx', 95);
|
||||
// updated weight is accurately set as the sum of
|
||||
// overlapping individual gap weights
|
||||
expect(extendedGap).toEqual({ firstKey: 'baz', lastKey: 'quz', weight: 95 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'quz', weight: 95 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a gap with overlap with next gap and large weight', () => {
|
||||
const extendedGap = gapSet.setGap('bar', 'quxxx', 80);
|
||||
// updated weight is heuristically chosen to be the new
|
||||
// gap weight which is larger than the sum of the existing merged
|
||||
// gap weights
|
||||
expect(extendedGap).toEqual({ firstKey: 'bar', lastKey: 'quz', weight: 80 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'quz', weight: 80 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a gap with overlap with next gap and small weight', () => {
|
||||
const extendedGap = gapSet.setGap('bar', 'quxxx', 11);
|
||||
// updated weight is heuristically chosen to be the sum of the existing merged
|
||||
// gap weights which is larger than the new gap weight
|
||||
expect(extendedGap).toEqual({ firstKey: 'bar', lastKey: 'quz', weight: 10 + 20 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'quz', weight: 10 + 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a gap with overlap beyond last key of next gap', () => {
|
||||
const extendedGap = gapSet.setGap('bar', 'rat', 80);
|
||||
// updated weight is the new gap weight
|
||||
expect(extendedGap).toEqual({ firstKey: 'bar', lastKey: 'rat', weight: 80 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'rat', weight: 80 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a gap with overlap beyond last key of next gap with a chained gap ' +
|
||||
'if above maxWeight', () => {
|
||||
// gapSet was initialized with maxWeight=100
|
||||
const extendedGap = gapSet.setGap('bar', 'rat', 105);
|
||||
// returned new gap is the right-side chained gap
|
||||
// updated weight is the new gap weight minus the left-side chained gap's weight
|
||||
expect(extendedGap).toEqual({ firstKey: 'baz', lastKey: 'rat', weight: 105 - 10 });
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'rat', weight: 105 - 10 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a single-key gap with overlap on chained gaps', () => {
|
||||
// existing gap: 'ape' -> 'ape' (weight=1)
|
||||
const extendedGap = gapSetWithChain.setGap('ape', 'dog', 30);
|
||||
// updated weight heuristically including the new gap
|
||||
// weight, which is larger than the overlapping gaps cumulated
|
||||
// weights (10+15=25)
|
||||
expect(extendedGap).toEqual({ firstKey: 'ape', lastKey: 'qux', weight: 30 });
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'qux', weight: 30 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
{ firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge and extend + update weight a gap with overlap not past end of chained gaps',
|
||||
() => {
|
||||
const extendedGap = gapSetWithChain.setGap('baz', 'sea', 80);
|
||||
expect(extendedGap).toEqual({ firstKey: 'baz', lastKey: 'yak', weight: 90 });
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'yak', weight: 90 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge and extend + update weight a gap with overlap past end of chained gaps',
|
||||
() => {
|
||||
const extendedGap = gapSetWithChain.setGap('baz', 'zoo', 95);
|
||||
expect(extendedGap).toEqual({ firstKey: 'baz', lastKey: 'zoo', weight: 95 });
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'zoo', weight: 95 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend gap + update weight with overlap past end of chained gaps and ' +
|
||||
'above maxWeight', () => {
|
||||
const extendedGap = gapSetWithChain.setGap('baz', 'zoo', 105);
|
||||
// updated weight is the new gap weight minus the left-side chained gap's weight
|
||||
expect(extendedGap).toEqual({ firstKey: 'qux', lastKey: 'zoo', weight: 105 - 15 });
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
{ firstKey: 'qux', lastKey: 'zoo', weight: 105 - 15 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should return existing chained gap with overlap above maxWeight', () => {
|
||||
const chainedGapsArray = [
|
||||
{ firstKey: 'ant', lastKey: 'cat', weight: 90 },
|
||||
{ firstKey: 'cat', lastKey: 'fox', weight: 40 },
|
||||
];
|
||||
const chainedGapsSet = GapSet.createFromArray(chainedGapsArray, 100);
|
||||
const extendedGap = chainedGapsSet.setGap('bat', 'dog', 105);
|
||||
expect(extendedGap).toEqual({ firstKey: 'cat', lastKey: 'fox', weight: 40 });
|
||||
expect(chainedGapsSet.toArray()).toEqual([
|
||||
{ firstKey: 'ant', lastKey: 'cat', weight: 90 },
|
||||
{ firstKey: 'cat', lastKey: 'fox', weight: 40 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should merge but not extend nor update weight with overlap on chained gaps', () => {
|
||||
// existing chained gap: 'baz' -> 'qux'
|
||||
const extendedGap = gapSetWithChain.setGap('baz', 'quxxx', 25);
|
||||
// updated weight is the sum of the two merged gap's weights
|
||||
expect(extendedGap).toEqual({ firstKey: 'baz', lastKey: 'quz', weight: 15 + 20 });
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'quz', weight: 15 + 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
{ firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('GapSet::removeOverlappingGaps()', () => {
|
||||
describe('with zero key as parameter', () => {
|
||||
it('passed in an array: should not remove any gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps([]);
|
||||
expect(nRemoved).toEqual(0);
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
});
|
||||
it('passed in an OrderedSet: should not remove any gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(new OrderedSet());
|
||||
expect(nRemoved).toEqual(0);
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
});
|
||||
});
|
||||
describe('with an array of one key as parameter', () => {
|
||||
it('should not remove any gap if no overlap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['rat']);
|
||||
expect(nRemoved).toEqual(0);
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
});
|
||||
|
||||
it('should remove a single gap if overlaps', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['bat']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove a single gap if overlaps with first key of first gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['bar']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove a single gap if overlaps with first key of non-first gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['qux']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
// removed: { firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove a single gap if overlaps with last key', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['quz']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
// removed: { firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove a single gap in chain if it overlaps with one chained gap', () => {
|
||||
const nRemoved = gapSetWithChain.removeOverlappingGaps(['dog']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
// removed: { firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
{ firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove two gaps in chain if they overlap with two chained gaps', () => {
|
||||
const nRemoved = gapSetWithChain.removeOverlappingGaps(['qux']);
|
||||
expect(nRemoved).toEqual(2);
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
// removed: { firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
// removed: { firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
{ firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('with an array of two keys as parameter', () => {
|
||||
it('should not remove any gap if no overlap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['rat', `rat\0v100`]);
|
||||
expect(nRemoved).toEqual(0);
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
});
|
||||
|
||||
it('should remove a single gap if both keys overlap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['bat', 'bat\0v100']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove a single gap if min key overlaps with first key of first gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['bar\0v100', 'bar']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove a single gap if max key overlaps with first key of first gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['ape', 'bar']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should not remove any gap if both keys straddle an existing gap without overlap',
|
||||
() => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['cow', 'ape']);
|
||||
expect(nRemoved).toEqual(0);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove the two last gaps in chained gaps if last gap bounds match ' +
|
||||
'the two keys', () => {
|
||||
const nRemoved = gapSetWithChain.removeOverlappingGaps(['yak', 'rat']);
|
||||
expect(nRemoved).toEqual(2);
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
// removed: { firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
// removed: { firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove first and last gap in chained gaps if their bounds match ' +
|
||||
'the two keys', () => {
|
||||
const nRemoved = gapSetWithChain.removeOverlappingGaps(['yak', 'bar']);
|
||||
expect(nRemoved).toEqual(2);
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
// removed: { firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('with an array of three keys as parameter', () => {
|
||||
it('should remove a single gap if only median key overlaps with gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['ape', 'bat', 'cow']);
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove a single-key gap and two contiguous chained gaps each overlapping ' +
|
||||
'with one key', () => {
|
||||
const nRemoved = gapSetWithChain.removeOverlappingGaps(['ape', 'bat', 'cow']);
|
||||
expect(nRemoved).toEqual(3);
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
// removed: { firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
// removed: { firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
{ firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should not remove any gap if all keys are intermingled but do not overlap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(['ape', 'rat', 'cow']);
|
||||
expect(nRemoved).toEqual(0);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
{ firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should remove three discontiguous chained gaps each overlapping with one key', () => {
|
||||
const nRemoved = gapSetWithChain.removeOverlappingGaps(['bat', 'quxxx', 'tiger']);
|
||||
expect(nRemoved).toEqual(3);
|
||||
expect(gapSetWithChain.toArray()).toEqual([
|
||||
{ firstKey: 'ape', lastKey: 'ape', weight: 1 },
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'baz', lastKey: 'qux', weight: 15 },
|
||||
// removed: { firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
{ firstKey: 'quz', lastKey: 'rat', weight: 25 },
|
||||
// removed: { firstKey: 'rat', lastKey: 'yak', weight: 30 },
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('with an OrderedSet of three keys as parameter', () => {
|
||||
it('should remove a single gap if only median key overlaps with gap', () => {
|
||||
const nRemoved = gapSet.removeOverlappingGaps(
|
||||
new OrderedSet(['ape', 'bat', 'cow']));
|
||||
expect(nRemoved).toEqual(1);
|
||||
expect(gapSet.toArray()).toEqual([
|
||||
// removed: { firstKey: 'bar', lastKey: 'baz', weight: 10 },
|
||||
{ firstKey: 'qux', lastKey: 'quz', weight: 20 },
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
// this helper checks that:
|
||||
// - the gaps not overlapping with any key are still present in newGapsArray
|
||||
// - and the gaps overlapping with at least one key have been removed from oldGapsArray
|
||||
// NOTE: It uses a sorted list of keys for efficiency, otherwise it would require
|
||||
// O(n^2) compute time which would be expensive with 50K keys.
|
||||
function checkOverlapInvariant(sortedKeys, oldGapsArray, newGapsArray) {
|
||||
let oldGapIdx = 0;
|
||||
let newGapIdx = 0;
|
||||
for (const key of sortedKeys) {
|
||||
// for all gaps not overlapping with any key in 'sortedKeys',
|
||||
// check that they are still in 'newGapsArray'
|
||||
while (oldGapIdx < oldGapsArray.length &&
|
||||
oldGapsArray[oldGapIdx].lastKey < key) {
|
||||
expect(oldGapsArray[oldGapIdx]).toEqual(newGapsArray[newGapIdx]);
|
||||
oldGapIdx += 1;
|
||||
newGapIdx += 1;
|
||||
}
|
||||
// for the gap(s) overlapping with the current key,
|
||||
// check that they have been removed from 'newGapsArray'
|
||||
while (oldGapIdx < oldGapsArray.length &&
|
||||
oldGapsArray[oldGapIdx].firstKey <= key) {
|
||||
if (newGapIdx < newGapsArray.length) {
|
||||
expect(oldGapsArray[oldGapIdx]).not.toEqual(newGapsArray[newGapIdx]);
|
||||
}
|
||||
++oldGapIdx;
|
||||
}
|
||||
}
|
||||
// check the range after the last key in 'sortedKeys'
|
||||
while (oldGapIdx < oldGapsArray.length) {
|
||||
expect(oldGapsArray[oldGapIdx]).toEqual(newGapsArray[newGapIdx]);
|
||||
oldGapIdx += 1;
|
||||
newGapIdx += 1;
|
||||
}
|
||||
// check that no extra range is in newGapsArray
|
||||
expect(newGapIdx).toEqual(newGapsArray.length);
|
||||
}
|
||||
|
||||
[false, true].forEach(chained => {
|
||||
describe(`with 10K random ${chained ? 'chained' : 'unchained'} gaps`, () => {
|
||||
let largeGapsArray;
|
||||
let largeGapSet;
|
||||
beforeEach(() => {
|
||||
largeGapsArray = chained ?
|
||||
genRandomChainedGaps(10000) :
|
||||
genRandomUnchainedGaps(10000);
|
||||
largeGapSet = GapSet.createFromArray(largeGapsArray, 100);
|
||||
});
|
||||
|
||||
[{
|
||||
desc: 'equal to their first key',
|
||||
getGapKey: gap => gap.firstKey,
|
||||
}, {
|
||||
desc: 'equal to their last key',
|
||||
getGapKey: gap => gap.lastKey,
|
||||
}, {
|
||||
desc: 'neither their first nor last key',
|
||||
getGapKey: gap => `${gap.firstKey}/foo`,
|
||||
}].forEach(testCase => {
|
||||
it(`should remove the overlapping gap(s) with one key ${testCase.desc}`, () => {
|
||||
const gapIndex = 5000;
|
||||
const gap = largeGapsArray[gapIndex];
|
||||
const overlappingKey = testCase.getGapKey(gap);
|
||||
const nRemoved = largeGapSet.removeOverlappingGaps([overlappingKey]);
|
||||
let firstRemovedGapIndex, lastRemovedGapIndex;
|
||||
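// with chained gaps, the overlapping bound key is shared with the adjacent
// gap, so two gaps get removed instead of one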
if (chained && overlappingKey === gap.firstKey) {
|
||||
expect(nRemoved).toEqual(2);
|
||||
[firstRemovedGapIndex, lastRemovedGapIndex] = [4999, 5000];
|
||||
} else if (chained && overlappingKey === gap.lastKey) {
|
||||
expect(nRemoved).toEqual(2);
|
||||
[firstRemovedGapIndex, lastRemovedGapIndex] = [5000, 5001];
|
||||
} else {
|
||||
expect(nRemoved).toEqual(1);
|
||||
[firstRemovedGapIndex, lastRemovedGapIndex] = [5000, 5000];
|
||||
}
|
||||
const expectedGaps = [
|
||||
...largeGapsArray.slice(0, firstRemovedGapIndex),
|
||||
...largeGapsArray.slice(lastRemovedGapIndex + 1)
|
||||
];
|
||||
const newGaps = largeGapSet.toArray();
|
||||
expect(newGaps).toEqual(expectedGaps);
|
||||
});
|
||||
|
||||
it(`should remove all gaps when they all overlap with one key ${testCase.desc}`,
|
||||
() => {
|
||||
// simulate a scenario made of 200 batches of 50 operations, each with
|
||||
// random keys scattered across all gaps that each overlaps a distinct gap
|
||||
// (supposedly a worst-case performance scenario for such batch sizes)
|
||||
const overlappingKeys = largeGapsArray.map(testCase.getGapKey);
|
||||
shuffleArray(overlappingKeys);
|
||||
for (let i = 0; i < overlappingKeys.length; i += 50) {
|
||||
const nRemoved = largeGapSet.removeOverlappingGaps(
|
||||
overlappingKeys.slice(i, i + 50));
|
||||
// with unchained gaps, we expect to have removed exactly
|
||||
// 50 gaps (the size of 'overlappingKeys').
|
||||
if (!chained) {
|
||||
expect(nRemoved).toEqual(50);
|
||||
}
|
||||
}
|
||||
const newGaps = largeGapSet.toArray();
|
||||
expect(newGaps).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
it('should remove only and all overlapping gaps with 50K randomized keys', () => {
|
||||
const randomizedKeys = new Array(50000).fill(undefined).map(
|
||||
() => genRandomKey()
|
||||
);
|
||||
for (let i = 0; i < randomizedKeys.length; i += 50) {
|
||||
largeGapSet.removeOverlappingGaps(
|
||||
randomizedKeys.slice(i, i + 50));
|
||||
}
|
||||
const newGaps = largeGapSet.toArray();
|
||||
randomizedKeys.sort();
|
||||
checkOverlapInvariant(randomizedKeys, largeGapsArray, newGaps);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('GapSet::_coalesceGapChain()', () => {
|
||||
afterEach(() => {
|
||||
// check that the gap sets were not modified by the operation
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
expect(gapSetWithChain.toArray()).toEqual(INITIAL_GAPSET_WITH_CHAIN);
|
||||
});
|
||||
it('should not coalesce if gaps are not chained', async () => {
|
||||
const gap = { firstKey: 'bar', lastKey: 'baz', weight: 10 };
|
||||
const coalescedGap = await gapSet._coalesceGapChain(gap);
|
||||
expect(coalescedGap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
});
|
||||
|
||||
it('should coalesce one chained gap', async () => {
|
||||
const gap = { firstKey: 'quz', lastKey: 'rat', weight: 25 };
|
||||
const coalescedGap = await gapSetWithChain._coalesceGapChain(gap);
|
||||
expect(coalescedGap).toEqual({ firstKey: 'quz', lastKey: 'yak', weight: 55 });
|
||||
});
|
||||
|
||||
it('should coalesce a chain of five gaps', async () => {
|
||||
const gap = { firstKey: 'bar', lastKey: 'baz', weight: 10 };
|
||||
const coalescedGap = await gapSetWithChain._coalesceGapChain(gap);
|
||||
expect(coalescedGap).toEqual({ firstKey: 'bar', lastKey: 'yak', weight: 100 });
|
||||
});
|
||||
|
||||
it('should coalesce a chain of one thousand gaps', async () => {
|
||||
const getKey = i => `000${i}`.slice(-4);
|
||||
const thousandGapsArray = new Array(1000).fill(undefined).map(
|
||||
(_, i) => ({ firstKey: getKey(i), lastKey: getKey(i + 1), weight: 10 })
|
||||
);
|
||||
const thousandGapsSet = GapSet.createFromArray(thousandGapsArray, 100);
|
||||
const gap = { firstKey: '0000', lastKey: '0001', weight: 10 };
|
||||
const coalescedGap = await thousandGapsSet._coalesceGapChain(gap);
|
||||
expect(coalescedGap).toEqual({ firstKey: '0000', lastKey: '1000', weight: 10000 });
|
||||
});
|
||||
|
||||
it('should coalesce a single-key gap', async () => {
|
||||
const singleKeyGapSet = GapSet.createFromArray([
|
||||
{ firstKey: '0000', lastKey: '0000', weight: 1 },
|
||||
], 100);
|
||||
const gap = { firstKey: '0000', lastKey: '0000', weight: 1 };
|
||||
const coalescedGap = await singleKeyGapSet._coalesceGapChain(gap);
|
||||
expect(coalescedGap).toEqual({ firstKey: '0000', lastKey: '0000', weight: 1 });
|
||||
});
|
||||
|
||||
it('should coalesce a chain of two gaps ending with a single-key gap', async () => {
|
||||
const singleKeyGapSet = GapSet.createFromArray([
|
||||
{ firstKey: '0000', lastKey: '0003', weight: 9 },
|
||||
{ firstKey: '0003', lastKey: '0003', weight: 1 },
|
||||
], 100);
|
||||
const gap = { firstKey: '0000', lastKey: '0003', weight: 9 };
|
||||
const coalescedGap = await singleKeyGapSet._coalesceGapChain(gap);
|
||||
expect(coalescedGap).toEqual({ firstKey: '0000', lastKey: '0003', weight: 9 });
|
||||
});
|
||||
});
|
||||
|
||||
describe('GapSet::lookupGap()', () => {
|
||||
afterEach(() => {
|
||||
// check that the gap sets were not modified by the operation
|
||||
expect(gapSet.toArray()).toEqual(INITIAL_GAPSET);
|
||||
expect(gapSetWithChain.toArray()).toEqual(INITIAL_GAPSET_WITH_CHAIN);
|
||||
});
|
||||
|
||||
it('should return null with empty cache', async () => {
|
||||
const emptyGapSet = new GapSet(100);
|
||||
const gap = await emptyGapSet.lookupGap('cat', 'dog');
|
||||
expect(gap).toBeNull();
|
||||
});
|
||||
|
||||
it('should return null if no gap overlaps [minKey, maxKey]', async () => {
|
||||
const gap = await gapSet.lookupGap('cat', 'dog');
|
||||
expect(gap).toBeNull();
|
||||
});
|
||||
|
||||
it('should return the first gap that overlaps if all gaps overlap', async () => {
|
||||
const gap = await gapSet.lookupGap('ape', 'zoo');
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
});
|
||||
|
||||
it('should return an existing gap that contains [minKey, maxKey]', async () => {
|
||||
const gap1 = await gapSet.lookupGap('bat', 'bay');
|
||||
expect(gap1).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
const gap2 = await gapSet.lookupGap('quxxx', 'quy');
|
||||
expect(gap2).toEqual({ firstKey: 'qux', lastKey: 'quz', weight: 20 });
|
||||
});
|
||||
|
||||
it('should return an existing gap that overlaps with minKey but not maxKey', async () => {
|
||||
const gap = await gapSet.lookupGap('ape', 'bat');
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
});
|
||||
|
||||
it('should return an existing gap that overlaps just with minKey when no maxKey is provided',
|
||||
async () => {
|
||||
const gap = await gapSet.lookupGap('ape');
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
});
|
||||
|
||||
it('should return an existing gap that overlaps with maxKey but not minKey', async () => {
|
||||
const gap = await gapSet.lookupGap('bat', 'cat');
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'baz', weight: 10 });
|
||||
});
|
||||
|
||||
it('should return an existing gap that is contained in [minKey, maxKey] strictly', async () => {
|
||||
const gap = await gapSet.lookupGap('dog', 'rat');
|
||||
expect(gap).toEqual({ firstKey: 'qux', lastKey: 'quz', weight: 20 });
|
||||
});
|
||||
|
||||
it('should return a coalesced gap from chained gaps that fully overlaps [minKey, maxKey]', async () => {
|
||||
const gap = await gapSetWithChain.lookupGap('bat', 'zoo');
|
||||
expect(gap).toEqual({ firstKey: 'bar', lastKey: 'yak', weight: 100 });
|
||||
});
|
||||
|
||||
it('should return a coalesced gap from chained gaps that contain [minKey, maxKey] strictly',
|
||||
async () => {
|
||||
const gap = await gapSetWithChain.lookupGap('bog', 'dog');
|
||||
expect(gap).toEqual({ firstKey: 'baz', lastKey: 'yak', weight: 90 });
|
||||
});
|
||||
});
|
||||
});
|
|
@ -727,7 +727,7 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
});
|
||||
}
|
||||
assert.strictEqual(delimiter.skipping(),
|
||||
`${vFormat === 'v1' ? DbPrefixes.Master : ''}foo/`);
|
||||
`${vFormat === 'v1' ? DbPrefixes.Master : ''}foo0`);
|
||||
});
|
||||
|
||||
tests.forEach(test => {
|
||||
|
|
|
@ -2,8 +2,12 @@
|
|||
|
||||
const assert = require('assert');
|
||||
|
||||
const DelimiterMaster =
|
||||
require('../../../../lib/algos/list/delimiterMaster').DelimiterMaster;
|
||||
import {
|
||||
DelimiterMaster,
|
||||
DelimiterMasterFilterStateId,
|
||||
GapCachingState,
|
||||
GapBuildingState,
|
||||
} from '../../../../lib/algos/list/delimiterMaster';
|
||||
const {
|
||||
FILTER_ACCEPT,
|
||||
FILTER_SKIP,
|
||||
|
@ -11,6 +15,8 @@ const {
|
|||
SKIP_NONE,
|
||||
inc,
|
||||
} = require('../../../../lib/algos/list/tools');
|
||||
import { default as GapSet, GapSetEntry } from '../../../../lib/algos/cache/GapSet';
|
||||
import { GapCacheInterface } from '../../../../lib/algos/cache/GapCache';
|
||||
const VSConst =
|
||||
require('../../../../lib/versioning/constants').VersioningConstants;
|
||||
const Version = require('../../../../lib/versioning/Version').Version;
|
||||
|
@ -184,7 +190,7 @@ function getListingKey(key, vFormat) {
|
|||
});
|
||||
|
||||
if (vFormat === 'v0') {
|
||||
it('should return <key><VersionIdSeparator> for DelimiterMaster when ' +
|
||||
it('skipping() should return <key>inc(<VersionIdSeparator>) for DelimiterMaster when ' +
|
||||
'NextMarker is set and there is a delimiter', () => {
|
||||
const key = 'key';
|
||||
const delimiter = new DelimiterMaster(
|
||||
|
@ -195,14 +201,10 @@ function getListingKey(key, vFormat) {
|
|||
const listingKey = getListingKey(key, vFormat);
|
||||
delimiter.filter({ key: listingKey, value: '' });
|
||||
assert.strictEqual(delimiter.nextMarker, key);
|
||||
|
||||
/* With a delimiter skipping should return previous key + VID_SEP
|
||||
* (except when a delimiter is set and the NextMarker ends with the
|
||||
* delimiter) . */
|
||||
assert.strictEqual(delimiter.skipping(), listingKey + VID_SEP);
|
||||
assert.strictEqual(delimiter.skipping(), `${listingKey}${inc(VID_SEP)}`);
|
||||
});
|
||||
|
||||
it('should return <key><VersionIdSeparator> for DelimiterMaster when ' +
|
||||
it('skipping() should return <key>inc(<VersionIdSeparator>) for DelimiterMaster when ' +
|
||||
'NextContinuationToken is set and there is a delimiter', () => {
|
||||
const key = 'key';
|
||||
const delimiter = new DelimiterMaster(
|
||||
|
@ -214,7 +216,7 @@ function getListingKey(key, vFormat) {
|
|||
delimiter.filter({ key: listingKey, value: '' });
|
||||
assert.strictEqual(delimiter.nextMarker, key);
|
||||
|
||||
assert.strictEqual(delimiter.skipping(), listingKey + VID_SEP);
|
||||
assert.strictEqual(delimiter.skipping(), `${listingKey}${inc(VID_SEP)}`);
|
||||
});
|
||||
|
||||
it('should accept a PHD version as first input', () => {
|
||||
|
@ -445,7 +447,7 @@ function getListingKey(key, vFormat) {
|
|||
}),
|
||||
FILTER_SKIP);
|
||||
// ...it should skip the whole replay prefix
|
||||
assert.strictEqual(delimiter.skipping(), DbPrefixes.Replay);
|
||||
assert.strictEqual(delimiter.skipping(), inc(DbPrefixes.Replay));
|
||||
|
||||
// simulate a listing that reaches regular object keys
|
||||
// beyond the replay prefix, ...
|
||||
|
@ -460,8 +462,27 @@ function getListingKey(key, vFormat) {
|
|||
// as usual
|
||||
assert.strictEqual(delimiter.skipping(),
|
||||
delimiterChar ?
|
||||
`${inc(DbPrefixes.Replay)}foo/` :
|
||||
`${inc(DbPrefixes.Replay)}foo/bar${VID_SEP}`);
|
||||
`${inc(DbPrefixes.Replay)}foo0` :
|
||||
`${inc(DbPrefixes.Replay)}foo/bar${inc(VID_SEP)}`);
|
||||
});
|
||||
});
|
||||
|
||||
it('should not crash if key contains "undefined" with no delimiter', () => {
|
||||
const delimiter = new DelimiterMaster({}, fakeLogger, vFormat);
|
||||
const listingKey = getListingKey('undefinedfoo', vFormat);
|
||||
assert.strictEqual(
|
||||
delimiter.filter({
|
||||
key: listingKey,
|
||||
value: '{}',
|
||||
}),
|
||||
FILTER_ACCEPT);
|
||||
|
||||
assert.deepStrictEqual(delimiter.result(), {
|
||||
CommonPrefixes: [],
|
||||
Contents: [{ key: 'undefinedfoo', value: '{}' }],
|
||||
IsTruncated: false,
|
||||
NextMarker: undefined,
|
||||
Delimiter: undefined,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
@ -487,12 +508,12 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/deleted${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/deleted${VID_SEP}`,
|
||||
skipping: `foo/deleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: `foo/deleted${VID_SEP}v2`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/deleted${VID_SEP}`,
|
||||
skipping: `foo/deleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/notdeleted',
|
||||
|
@ -501,7 +522,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/notdeleted${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/notdeleted${VID_SEP}`,
|
||||
skipping: `foo/notdeleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/subprefix/key-1',
|
||||
|
@ -510,7 +531,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/subprefix/key-1${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/subprefix/key-1${VID_SEP}`,
|
||||
skipping: `foo/subprefix/key-1${inc(VID_SEP)}`,
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -541,7 +562,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/01${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP, // versions get skipped after master
|
||||
skipping: `foo/01${VID_SEP}`,
|
||||
skipping: `foo/01${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/02',
|
||||
|
@ -552,7 +573,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/02${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/02${VID_SEP}`,
|
||||
skipping: `foo/02${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/03',
|
||||
|
@ -561,7 +582,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/03${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/03${VID_SEP}`,
|
||||
skipping: `foo/03${inc(VID_SEP)}`,
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -591,7 +612,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/bar/01${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP, // versions get skipped after master
|
||||
skipping: `foo/bar/01${VID_SEP}`,
|
||||
skipping: `foo/bar/01${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/bar/02',
|
||||
|
@ -602,12 +623,12 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/bar/02${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/bar/02${VID_SEP}`,
|
||||
skipping: `foo/bar/02${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: `foo/bar/02${VID_SEP}v2`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/bar/02${VID_SEP}`,
|
||||
skipping: `foo/bar/02${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/bar/03',
|
||||
|
@ -617,19 +638,19 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/bar/03${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
// from now on, skip the 'foo/bar/' prefix because we have already seen it
|
||||
skipping: 'foo/bar/',
|
||||
skipping: 'foo/bar0',
|
||||
},
|
||||
{
|
||||
key: 'foo/bar/04',
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/bar/',
|
||||
skipping: 'foo/bar0',
|
||||
},
|
||||
{
|
||||
key: `foo/bar/04${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/bar/',
|
||||
skipping: 'foo/bar0',
|
||||
},
|
||||
{
|
||||
key: 'foo/baz/01',
|
||||
|
@ -639,7 +660,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/baz/01${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
// skip the 'foo/baz/' prefix because we have already seen it
|
||||
skipping: 'foo/baz/',
|
||||
skipping: 'foo/baz0',
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -668,7 +689,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/${VID_SEP}`,
|
||||
skipping: `foo/${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/deleted',
|
||||
|
@ -679,12 +700,12 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/deleted${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/deleted${VID_SEP}`,
|
||||
skipping: `foo/deleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: `foo/deleted${VID_SEP}v2`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/deleted${VID_SEP}`,
|
||||
skipping: `foo/deleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/notdeleted',
|
||||
|
@ -693,7 +714,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/notdeleted${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/notdeleted${VID_SEP}`,
|
||||
skipping: `foo/notdeleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/subprefix/key-1',
|
||||
|
@ -702,17 +723,17 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/subprefix/key-1${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/subprefix/',
|
||||
skipping: 'foo/subprefix0',
|
||||
},
|
||||
{
|
||||
key: 'foo/subprefix/key-2',
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/subprefix/',
|
||||
skipping: 'foo/subprefix0',
|
||||
},
|
||||
{
|
||||
key: `foo/subprefix/key-2${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/subprefix/',
|
||||
skipping: 'foo/subprefix0',
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -743,7 +764,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo${VID_SEP}`,
|
||||
skipping: `foo${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/deleted',
|
||||
|
@ -754,12 +775,12 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/deleted${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/deleted${VID_SEP}`,
|
||||
skipping: `foo/deleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: `foo/deleted${VID_SEP}v2`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/deleted${VID_SEP}`,
|
||||
skipping: `foo/deleted${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/notdeleted',
|
||||
|
@ -768,17 +789,17 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/notdeleted${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/',
|
||||
skipping: 'foo0',
|
||||
},
|
||||
{
|
||||
key: 'foo/subprefix/key-1',
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/',
|
||||
skipping: 'foo0',
|
||||
},
|
||||
{
|
||||
key: `foo/subprefix/key-1${VID_SEP}v1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/',
|
||||
skipping: 'foo0',
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -810,7 +831,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/${VID_SEP}`,
|
||||
skipping: `foo/${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/subprefix',
|
||||
|
@ -823,7 +844,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: 'foo/subprefix/02',
|
||||
res: FILTER_SKIP,
|
||||
skipping: 'foo/subprefix/', // already added to common prefix
|
||||
skipping: 'foo/subprefix0', // already added to common prefix
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -858,7 +879,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `foo/01${VID_SEP}v2`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/01${VID_SEP}`,
|
||||
skipping: `foo/01${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: 'foo/02',
|
||||
|
@ -869,12 +890,12 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
key: `foo/02${VID_SEP}v1`,
|
||||
isDeleteMarker: true,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/02${VID_SEP}`,
|
||||
skipping: `foo/02${inc(VID_SEP)}`,
|
||||
},
|
||||
{
|
||||
key: `foo/02${VID_SEP}v2`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `foo/02${VID_SEP}`,
|
||||
skipping: `foo/02${inc(VID_SEP)}`,
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -949,7 +970,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `${DbPrefixes.Master}foo/subprefix/key-2`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `${DbPrefixes.Master}foo/subprefix/`,
|
||||
skipping: `${DbPrefixes.Master}foo/subprefix0`,
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -984,7 +1005,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
{
|
||||
key: `${DbPrefixes.Master}foo/subprefix/key-1`,
|
||||
res: FILTER_SKIP,
|
||||
skipping: `${DbPrefixes.Master}foo/`,
|
||||
skipping: `${DbPrefixes.Master}foo0`,
|
||||
},
|
||||
],
|
||||
result: {
|
||||
|
@ -1004,7 +1025,7 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
it(`vFormat=${testCase.vFormat}: ${testCase.desc}`, () => {
|
||||
const delimiter = new DelimiterMaster(testCase.params, fakeLogger, testCase.vFormat);
|
||||
const resultEntries = testCase.entries.map(testEntry => {
|
||||
const entry = {
|
||||
const entry: any = {
|
||||
key: testEntry.key,
|
||||
};
|
||||
if (testEntry.isDeleteMarker) {
|
||||
|
@ -1028,3 +1049,567 @@ describe('DelimiterMaster listing algorithm: sequence of filter() scenarii', ()
|
|||
});
|
||||
});
|
||||
});
|
||||
|
||||
/**
|
||||
* Test class that provides a GapCache-compatible interface via a
|
||||
* GapSet implementation, i.e. without introducing a delay to expose
|
||||
* gaps like the GapCache class does, so tests can check more easily
|
||||
* which gaps have been updated.
|
||||
*/
|
||||
class GapCacheAsSet extends GapSet implements GapCacheInterface {
|
||||
exposureDelayMs: number;
|
||||
|
||||
constructor(maxGapWeight: number) {
|
||||
super(maxGapWeight);
|
||||
this.exposureDelayMs = 1000;
|
||||
}
|
||||
|
||||
static createFromArray(gaps: GapSetEntry[], maxWeight: number): GapCacheAsSet {
|
||||
const gs = new GapCacheAsSet(maxWeight);
|
||||
for (const gap of gaps) {
|
||||
gs._gaps.insert(gap);
|
||||
}
|
||||
return gs;
|
||||
}
|
||||
|
||||
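// expose GapSet's 'maxWeight' attribute under the name expected by the
// GapCacheInterface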
get maxGapWeight(): number {
|
||||
return super.maxWeight;
|
||||
}
|
||||
}
|
||||
|
||||
type FilterEntriesResumeState = {
|
||||
i: number,
|
||||
version: number,
|
||||
};
|
||||
|
||||
/**
|
||||
* Convenience test helper to build listing entries and pass them to
|
||||
* the DelimiterMaster.filter() function in order, and checks the
|
||||
* return code. It is also useful to check the state of the gap cache
|
||||
* afterwards.
|
||||
*
|
||||
* The first object key is "pre/0001" and is incremented on each master key.
|
||||
*
|
||||
* The current object version is "v100" and the version is then incremented
|
||||
* for each noncurrent version ("v101" etc.).
|
||||
*
|
||||
* @param {DelimiterMaster} listing - listing algorithm instance
|
||||
* @param {string} pattern - pattern of keys to create:
|
||||
* - an upper-case letter is a master key
|
||||
* - a lower-case letter is a version key
|
||||
* - a 'd' (or 'D') letter is a delete marker
|
||||
* - any other letter (e.g. 'v' or 'V') is a regular version
|
||||
* - space characters ' ' are allowed and must be matched by
|
||||
* a space character at the same position in 'expectedCodes'
|
||||
|
||||
* @param {string} expectedCodes - string of expected codes from filter()
|
||||
* matching each entry from 'pattern':
|
||||
* - 'a' stands for FILTER_ACCEPT
|
||||
* - 's' stands for FILTER_SKIP
|
||||
* - 'e' stands for FILTER_END
|
||||
* - ' ' must be matched by a space character in 'pattern'
|
||||
* @return {FilterEntriesResumeState} - a state that can be passed in
|
||||
* the next call as 'resumeFromState' to resume filtering the next
|
||||
* keys
|
||||
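*
* @example
* // a master key with one noncurrent version, then a delete-marker master
* // with one noncurrent version: masters are accepted, versions are skipped
* // (codes as observed in the v0 listings exercised below)
* filterEntries(listing, 'Vv Ddv', 'as ass');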
*/
|
||||
function filterEntries(
|
||||
listing: DelimiterMaster,
|
||||
pattern: string,
|
||||
expectedCodes: string,
|
||||
resumeFromState?: FilterEntriesResumeState,
|
||||
): FilterEntriesResumeState {
|
||||
const ExpectedCodeMap: string[] = [];
|
||||
ExpectedCodeMap[FILTER_ACCEPT] = 'a';
|
||||
ExpectedCodeMap[FILTER_SKIP] = 's';
|
||||
ExpectedCodeMap[FILTER_END] = 'e';
|
||||
let { i, version } = resumeFromState || { i: 0, version: 100 };
|
||||
const obtainedCodes = pattern.split('').map(p => {
|
||||
if (p === ' ') {
|
||||
return ' ';
|
||||
}
|
||||
if (p.toUpperCase() === p) {
|
||||
// master key
|
||||
i += 1;
|
||||
version = 100;
|
||||
}
|
||||
const keyId = `0000${i}`.slice(-4);
|
||||
const key = `pre/${keyId}`;
|
||||
const md: any = ('Dd'.includes(p)) ? { isDeleteMarker: true } : {};
|
||||
md.versionId = `v${version}`;
|
||||
const value = JSON.stringify(md);
|
||||
const entry = (p.toUpperCase() === p) ? { key, value } : { key: `${key}\0v${version}`, value };
|
||||
const ret = listing.filter(entry);
|
||||
if (p.toLowerCase() === p) {
|
||||
// version key
|
||||
version += 1;
|
||||
}
|
||||
return ExpectedCodeMap[<number> <unknown> ret];
|
||||
}).join('');
|
||||
expect(obtainedCodes).toEqual(expectedCodes);
|
||||
|
||||
return { i, version };
|
||||
}
|
||||
|
||||
describe('DelimiterMaster listing algorithm: gap caching and lookup', () => {
|
||||
it('should not cache a gap of weight smaller than minGapWeight', () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
listing.refreshGapCache(gapCache, 7); // minGapWeight=7
|
||||
|
||||
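// the run of delete markers "Ddv Ddv" only spans 6 listing entries,
// below minGapWeight=7, hence no gap should be cached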
filterEntries(listing, 'Vv Ddv Ddv Vv Ddv', 'as ass ass as ass');
|
||||
expect(gapCache.toArray()).toEqual([]);
|
||||
});
|
||||
|
||||
it('should cache a gap of weight equal to minGapWeight', () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
listing.refreshGapCache(gapCache, 9); // minGapWeight=9
|
||||
|
||||
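// the run "Ddv Ddv Ddv" spans exactly 9 entries, matching minGapWeight,
// so a gap of weight 9 starting at 'pre/0002' should be cached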
filterEntries(listing, 'Vv Ddv Ddv Ddv Vv Ddv', 'as ass ass ass as ass');
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0004${VID_SEP}v101`, weight: 9 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should cache a gap of weight equal to maxWeight in a single gap', () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(13); // maxWeight=13
|
||||
listing.refreshGapCache(gapCache, 5); // minGapWeight=5
|
||||
|
||||
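// the run "Ddv Ddvv Ddv Ddv" spans 13 entries (3+4+3+3), equal to maxWeight,
// so it should fit in a single cached gap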
filterEntries(listing, 'Vv Ddv Ddvv Ddv Ddv Vv Ddv', 'as ass asss ass ass as ass');
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0005${VID_SEP}v101`, weight: 13 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should not cache a gap if listing has been running for more than exposureDelayMs',
|
||||
async () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapsArray = [
|
||||
{ firstKey: 'pre/0006', lastKey: `pre/0007${VID_SEP}v100`, weight: 6 },
|
||||
];
|
||||
const gapCache = GapCacheAsSet.createFromArray(JSON.parse(
|
||||
JSON.stringify(gapsArray)
|
||||
), 100);
|
||||
listing.refreshGapCache(gapCache, 1, 1);
|
||||
|
||||
let resumeFromState = filterEntries(listing, 'Vv', 'as');
|
||||
let validityPeriod = listing.getGapBuildingValidityPeriodMs();
|
||||
expect(validityPeriod).toBeGreaterThan(gapCache.exposureDelayMs - 10);
|
||||
expect(validityPeriod).toBeLessThan(gapCache.exposureDelayMs + 10);
|
||||
|
||||
await new Promise(resolve => setTimeout(resolve, gapCache.exposureDelayMs + 10));
|
||||
validityPeriod = listing.getGapBuildingValidityPeriodMs();
|
||||
expect(validityPeriod).toEqual(0);
|
||||
resumeFromState = filterEntries(listing, 'Ddv Ddv Ddv Vvv', 'ass ass ass ass',
|
||||
resumeFromState);
|
||||
expect(gapCache.toArray()).toEqual(gapsArray);
|
||||
// gap building should be in expired state
|
||||
expect(listing._gapBuilding.state).toEqual(GapBuildingState.Expired);
|
||||
// remaining validity period should still be 0 because gap building has expired
|
||||
validityPeriod = listing.getGapBuildingValidityPeriodMs();
|
||||
expect(validityPeriod).toEqual(0);
|
||||
|
||||
// we should still be able to skip over the existing cached gaps
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapLookupInProgress);
|
||||
await new Promise(resolve => setTimeout(resolve, 1));
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapCached);
|
||||
filterEntries(listing, 'Ddv Ddv Ddv', 'sss sss ass', resumeFromState);
|
||||
});
|
||||
|
||||
[1, 3, 5, 10].forEach(triggerSaveGapWeight => {
|
||||
it('should cache a gap of weight maxWeight + 1 in two chained gaps ' +
|
||||
`(triggerSaveGapWeight=${triggerSaveGapWeight})`, () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(12); // maxWeight=12
|
||||
listing.refreshGapCache(gapCache, 5, triggerSaveGapWeight);
|
||||
|
||||
filterEntries(listing, 'Vv Ddv Ddvv Ddv Ddv Vv Ddv', 'as ass asss ass ass as ass');
|
||||
if (triggerSaveGapWeight === 1) {
|
||||
// trigger=1 guarantees that the weight of split gaps is maximized
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0005${VID_SEP}v100`, weight: 12 },
|
||||
{ firstKey: `pre/0005${VID_SEP}v100`, lastKey: `pre/0005${VID_SEP}v101`, weight: 1 },
|
||||
]);
|
||||
} else if (triggerSaveGapWeight === 3) {
|
||||
// - the first trigger happens after 'minGapWeight' listing entries, so 5
|
||||
// - the second and third triggers happen after 'triggerSaveGapWeight' listing
|
||||
// entries, so 3 then 3 - same gap because 5+3+3=11 and 11 <= 12 (maxWeight)
|
||||
// - finally, 2 more entries to complete the gap, at which point the
|
||||
// entry is split, hence we get two entries weights 11 and 2 respectively.
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: 'pre/0005', weight: 11 },
|
||||
{ firstKey: 'pre/0005', lastKey: `pre/0005${VID_SEP}v101`, weight: 2 },
|
||||
]);
|
||||
} else {
|
||||
// trigger=5|10
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0004${VID_SEP}v101`, weight: 10 },
|
||||
{ firstKey: `pre/0004${VID_SEP}v101`, lastKey: `pre/0005${VID_SEP}v101`, weight: 3 },
|
||||
]);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
[1, 2, 3].forEach(triggerSaveGapWeight => {
|
||||
it('should cache a gap of weight more than twice maxWeight in as many chained gaps ' +
|
||||
`as needed (triggerSaveGapWeight=${triggerSaveGapWeight})`, () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(5); // maxWeight=5
|
||||
// minGapWeight=4 prevents the last gap starting at "0008" from being cached
|
||||
listing.refreshGapCache(gapCache, 4, triggerSaveGapWeight);
|
||||
|
||||
filterEntries(listing, 'Vv Ddv Ddvv Ddv Ddv Ddv Vv Ddv', 'as ass asss ass ass ass as ass');
|
||||
// the slight differences in weight between different values of
|
||||
// 'triggerSaveGapWeight' are due to the combination of the trigger
|
||||
// frequency and the 'minGapWeight' value (3), but in all cases a
|
||||
// reasonable splitting job should be obtained.
|
||||
//
|
||||
// NOTE: in practice, the default trigger is half the maximum weight, any value
|
||||
// equal or lower should yield gap weights close enough to the maximum allowed.
|
||||
if (triggerSaveGapWeight === 1) {
|
||||
// a trigger at every key guarantees gaps to maximize their weight
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0003${VID_SEP}v100`, weight: 5 },
|
||||
{ firstKey: `pre/0003${VID_SEP}v100`, lastKey: `pre/0004${VID_SEP}v101`, weight: 5 },
|
||||
{ firstKey: `pre/0004${VID_SEP}v101`, lastKey: `pre/0006${VID_SEP}v100`, weight: 5 },
|
||||
{ firstKey: `pre/0006${VID_SEP}v100`, lastKey: `pre/0006${VID_SEP}v101`, weight: 1 },
|
||||
]);
|
||||
} else if (triggerSaveGapWeight === 2) {
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: 'pre/0003', weight: 4 },
|
||||
{ firstKey: 'pre/0003', lastKey: 'pre/0004', weight: 4 },
|
||||
{ firstKey: 'pre/0004', lastKey: `pre/0005${VID_SEP}v100`, weight: 4 },
|
||||
{ firstKey: `pre/0005${VID_SEP}v100`, lastKey: `pre/0006${VID_SEP}v101`, weight: 4 },
|
||||
]);
|
||||
} else {
|
||||
// trigger=3
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: 'pre/0003', weight: 4 },
|
||||
{ firstKey: 'pre/0003', lastKey: `pre/0003${VID_SEP}v102`, weight: 3 },
|
||||
{ firstKey: `pre/0003${VID_SEP}v102`, lastKey: `pre/0004${VID_SEP}v101`, weight: 3 },
|
||||
{ firstKey: `pre/0004${VID_SEP}v101`, lastKey: `pre/0005${VID_SEP}v101`, weight: 3 },
|
||||
{ firstKey: `pre/0005${VID_SEP}v101`, lastKey: `pre/0006${VID_SEP}v101`, weight: 3 },
|
||||
]);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
it('should cut the current gap when seeing a non-deleted object, and start a new ' +
|
||||
'gap on the next deleted object', () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
listing.refreshGapCache(gapCache, 2); // minGapWeight=2
|
||||
|
||||
filterEntries(listing, 'Vv Ddv Vv Ddv Vv', 'as ass as ass as');
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0002${VID_SEP}v101`, weight: 3 },
|
||||
{ firstKey: 'pre/0004', lastKey: `pre/0004${VID_SEP}v101`, weight: 3 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should complete the current gap when returning a result', () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
listing.refreshGapCache(gapCache, 2); // ensure the gap above minGapWeight=2 gets saved
|
||||
|
||||
filterEntries(listing, 'Vv Ddv Ddv', 'as ass ass');
|
||||
const result = listing.result();
|
||||
expect(result).toEqual({
|
||||
CommonPrefixes: [],
|
||||
Contents: [
|
||||
{ key: 'pre/0001', value: '{"versionId":"v100"}' },
|
||||
],
|
||||
Delimiter: undefined,
|
||||
IsTruncated: false,
|
||||
NextMarker: undefined,
|
||||
});
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0003${VID_SEP}v101`, weight: 6 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should refresh the building params when refreshGapCache() is called in NonBuilding state',
|
||||
() => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
// ensure the first gap with weight=9 gets saved
|
||||
listing.refreshGapCache(gapCache, 9);
|
||||
let resumeFromState = filterEntries(listing, 'Vv', 'as');
|
||||
// refresh with a different value for minGapWeight (12)
|
||||
listing.refreshGapCache(gapCache, 12);
|
||||
|
||||
resumeFromState = filterEntries(listing, 'Ddv Ddv Ddv Vv', 'ass ass ass as',
|
||||
resumeFromState);
|
||||
// for the building gap, minGapWeight should have been updated to 12, hence the
|
||||
// gap should not have been created
|
||||
expect(gapCache.toArray()).toEqual([]);
|
||||
filterEntries(listing, 'Ddv Ddv Ddv Ddv Vv', 'ass ass ass ass as', resumeFromState);
|
||||
// there should now be a new gap with weight=12
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0006', lastKey: `pre/0009${VID_SEP}v101`, weight: 12 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should save the refreshed building params when refreshGapCache() is called in Building state',
|
||||
() => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
// ensure the first gap with weight=9 gets saved
|
||||
listing.refreshGapCache(gapCache, 9);
|
||||
|
||||
let resumeFromState = filterEntries(listing, 'Vv Ddv Ddv', 'as ass ass');
|
||||
// refresh with a different value for minGapWeight (12)
|
||||
listing.refreshGapCache(gapCache, 12);
|
||||
resumeFromState = filterEntries(listing, 'Ddv Vv', 'ass as', resumeFromState);
|
||||
// for the building gap, minGapWeight should still be 9, hence the gap should
|
||||
// have been created
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0004${VID_SEP}v101`, weight: 9 },
|
||||
]);
|
||||
filterEntries(listing, 'Ddv Ddv Ddv Vv', 'ass ass ass as', resumeFromState);
|
||||
// there should still be only one gap because the next gap's weight is 9 and 9 < 12
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0004${VID_SEP}v101`, weight: 9 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should not build a new gap when skipping a prefix', () => {
|
||||
const listing = new DelimiterMaster({
|
||||
delimiter: '/',
|
||||
}, fakeLogger, 'v0');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
// force immediate creation of gaps with 1, 1
|
||||
listing.refreshGapCache(gapCache, 1, 1);
|
||||
|
||||
// prefix should be skipped, but no new gap should be created
|
||||
filterEntries(listing, 'Vv Ddv Ddv Ddv', 'as sss sss sss');
|
||||
expect(gapCache.toArray()).toEqual([]);
|
||||
});
|
||||
|
||||
it('should trigger gap lookup and continue filtering without skipping when encountering ' +
|
||||
'a delete marker', () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapsArray = [
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0003${VID_SEP}v100`, weight: 6 },
|
||||
];
|
||||
const gapCache = GapCacheAsSet.createFromArray(JSON.parse(
|
||||
JSON.stringify(gapsArray)
|
||||
), 100);
|
||||
listing.refreshGapCache(gapCache);
|
||||
|
||||
let resumeState = filterEntries(listing, 'Vv', 'as');
|
||||
// state should still be UnknownGap since no delete marker has been seen yet
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.UnknownGap);
|
||||
|
||||
resumeState = filterEntries(listing, 'D', 'a', resumeState);
|
||||
// since the lookup is asynchronous (Promise-based), it should now be in progress
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapLookupInProgress);
|
||||
|
||||
filterEntries(listing, 'dv Ddv Vv Ddv', 'ss ass as ass', resumeState);
|
||||
// the lookup should still be in progress
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapLookupInProgress);
|
||||
|
||||
// the gap cache shouldn't have been updated
|
||||
expect(gapCache.toArray()).toEqual(gapsArray);
|
||||
});
|
||||
|
||||
it('should cache a gap after lookup completes, and use it to skip over keys ' +
|
||||
'within the gap range', async () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapsArray = [
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0006${VID_SEP}v101`, weight: 14 },
|
||||
];
|
||||
const gapCache = GapCacheAsSet.createFromArray(JSON.parse(
|
||||
JSON.stringify(gapsArray)
|
||||
), 100);
|
||||
listing.refreshGapCache(gapCache);
|
||||
|
||||
let resumeState = filterEntries(listing, 'Vv D', 'as a');
|
||||
// since the lookup is asynchronous (Promise-based), it should now be in progress
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapLookupInProgress);
|
||||
expect(listing.state.id).toEqual(DelimiterMasterFilterStateId.SkippingVersionsV0);
|
||||
// wait until the lookup completes (should happen in the next
|
||||
// event loop iteration so always quicker than a non-immediate timer)
|
||||
await new Promise(resolve => setTimeout(resolve, 1));
|
||||
|
||||
// the lookup should have completed now and the next gap should be cached
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapCached);
|
||||
expect(listing.state.id).toEqual(DelimiterMasterFilterStateId.SkippingVersionsV0);
|
||||
|
||||
// the state should stay in SkippingVersionsV0 until filter() is called with
|
||||
// a new master delete marker
|
||||
resumeState = filterEntries(listing, 'dvvv', 'ssss', resumeState);
|
||||
expect(listing.state.id).toEqual(DelimiterMasterFilterStateId.SkippingVersionsV0);
|
||||
|
||||
// here comes the next master delete marker, it should be skipped as it is still within
|
||||
// the cached gap's range (its key is "0003" and version "v100")
|
||||
resumeState = filterEntries(listing, 'D', 's', resumeState);
|
||||
// the listing algorithm should now be actively skipping the gap
|
||||
expect(listing.state.id).toEqual(DelimiterMasterFilterStateId.SkippingGapV0);
|
||||
|
||||
// the skipping() function should return the gap's last key.
|
||||
// NOTE: returning a key to jump to that is the actual gap's last key
|
||||
// (instead of a key just after) allows the listing algorithm to build
|
||||
// a chained gap when the database listing is restarted from that point
|
||||
// and there are more delete markers to skip.
|
||||
expect(listing.skipping()).toEqual(`pre/0006${VID_SEP}v101`);
|
||||
|
||||
// - The next master delete markers with key "0004" and "0005" are still within the
|
||||
// gap's range, so filter() should return FILTER_SKIP ('s')
|
||||
//
|
||||
// - Master key "0006" is NOT a delete marker, although this means that the update
|
||||
// happened after the gap was looked up and the listing is allowed to skip it as
|
||||
// well (it actually doesn't even check so doesn't know what type of key it is).
|
||||
//
|
||||
// - The following master delete marker "0007" is past the gap so returns
|
||||
// FILTER_ACCEPT ('a') and should have triggered a new cache lookup, and
|
||||
// the listing state should have been switched back to SkippingVersionsV0.
|
||||
resumeState = filterEntries(listing, 'dv Ddv Ddv Vvvv Ddv', 'ss sss sss ssss ass',
|
||||
resumeState);
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapLookupInProgress);
|
||||
expect(listing.state.id).toEqual(DelimiterMasterFilterStateId.SkippingVersionsV0);
|
||||
|
||||
// the gap cache must not have been updated in the process
|
||||
expect(gapCache.toArray()).toEqual(gapsArray);
|
||||
});
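
// Why skipping() returns the gap's exact last key rather than the key just
// after it: when the raw database listing is restarted from that key, the
// gap's last entry is listed again, so the next delete marker can start a gap
// that chains onto the cached one with no hole in between. Hedged sketch of
// how a caller might resume the underlying stream (db API and option name assumed):
function resumeAfterGapSketch(db: any, listing: any) {
    const resumeKey = listing.skipping(); // e.g. `pre/0006${VID_SEP}v101`
    // inclusive lower bound: the gap's last key is re-listed on purpose
    return db.createReadStream({ gte: resumeKey });
}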
|
||||
|
||||
it('should extend a cached gap forward if current delete markers are listed beyond',
|
||||
async () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapsArray = [
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0005${VID_SEP}v100`, weight: 12 },
|
||||
];
|
||||
const gapCache = GapCacheAsSet.createFromArray(JSON.parse(
|
||||
JSON.stringify(gapsArray)
|
||||
), 100);
|
||||
listing.refreshGapCache(gapCache, 2);
|
||||
|
||||
let resumeState = filterEntries(listing, 'Vv D', 'as a');
|
||||
// wait until the lookup completes (should happen in the next
|
||||
// event loop iteration so always quicker than a non-immediate timer)
|
||||
await new Promise(resolve => setTimeout(resolve, 1));
|
||||
|
||||
// the lookup should have completed now and the next gap should be cached,
|
||||
// continue with filtering
|
||||
resumeState = filterEntries(listing, 'dv Ddv Ddv Ddv Ddv Ddvvv Vv Ddv Vv',
|
||||
'ss sss sss sss ass assss as ass as',
|
||||
resumeState);
|
||||
// the cached gap should be extended to the last key before the last regular
|
||||
// master version ('V')
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
// this gap has been extended forward up to right before the first non-deleted
|
||||
// current version following the gap, and its weight updated with how many
|
||||
// extra keys are skippable
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0007${VID_SEP}v103`, weight: 21 },
|
||||
// this gap has been created from the next deleted current version
|
||||
{ firstKey: 'pre/0009', lastKey: `pre/0009${VID_SEP}v101`, weight: 3 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should extend a cached gap backwards if current delete markers are listed ahead, ' +
|
||||
'and forward if more skippable keys are seen', async () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapsArray = [
|
||||
{ firstKey: 'pre/0004', lastKey: `pre/0005${VID_SEP}v100`, weight: 4 },
|
||||
];
|
||||
const gapCache = GapCacheAsSet.createFromArray(JSON.parse(
|
||||
JSON.stringify(gapsArray)
|
||||
), 100);
|
||||
listing.refreshGapCache(gapCache, 2);
|
||||
|
||||
let resumeState = filterEntries(listing, 'Vv D', 'as a');
|
||||
// wait until the lookup completes (should happen in the next
|
||||
// event loop iteration so always quicker than a non-immediate timer)
|
||||
await new Promise(resolve => setTimeout(resolve, 1));
|
||||
|
||||
// the lookup should have completed now and the next gap should be cached,
|
||||
// continue with filtering
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapCached);
|
||||
resumeState = filterEntries(listing, 'dv Ddv Ddv Ddv Vv Ddv Vv',
|
||||
'ss ass sss sss as ass as', resumeState);
|
||||
// the cached gap should be extended to the last key before the last regular
|
||||
// master version ('V')
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
// this gap has been extended:
|
||||
// - backwards up to the first listed delete marker
|
||||
// - forward up to the last skippable key
|
||||
// and its weight updated with how many extra keys are skippable
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0005${VID_SEP}v101`, weight: 11 },
|
||||
// this gap has been created from the next deleted current version
|
||||
{ firstKey: 'pre/0007', lastKey: `pre/0007${VID_SEP}v101`, weight: 3 },
|
||||
]);
|
||||
});
|
||||
|
||||
it('should not extend a cached gap forward if extension weight is 0',
|
||||
async () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapsArray = [
|
||||
{ firstKey: 'pre/0002', lastKey: `pre/0005${VID_SEP}v101`, weight: 13 },
|
||||
];
|
||||
const gapCache = GapCacheAsSet.createFromArray(JSON.parse(
|
||||
JSON.stringify(gapsArray)
|
||||
), 100);
|
||||
listing.refreshGapCache(gapCache, 2);
|
||||
|
||||
let resumeState = filterEntries(listing, 'Vv D', 'as a');
|
||||
// wait until the lookup completes (should happen in the next
|
||||
// event loop iteration so always quicker than a non-immediate timer)
|
||||
await new Promise(resolve => setTimeout(resolve, 1));
|
||||
|
||||
// the lookup should have completed now and the next gap should
|
||||
// be cached, simulate a concurrent invalidation by removing the
|
||||
// existing gap immediately, then continue with filtering
|
||||
resumeState = filterEntries(listing, 'dv Ddv Ddv Ddv',
|
||||
'ss sss sss sss', resumeState);
|
||||
gapCache.removeOverlappingGaps(['pre/0002']);
|
||||
resumeState = filterEntries(listing, 'Vv', 'as', resumeState);
|
||||
// no new gap should have been added
|
||||
expect(gapCache.toArray()).toEqual([]);
|
||||
});
|
||||
|
||||
it('should ignore gap with 0 listed key in it (e.g. due to skipping a prefix)', async () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v0');
|
||||
const gapsArray = [
|
||||
{ firstKey: 'pre/0004/a', lastKey: 'pre/0004/b', weight: 10 },
|
||||
];
|
||||
const gapCache = GapCacheAsSet.createFromArray(JSON.parse(
|
||||
JSON.stringify(gapsArray)
|
||||
), 100);
|
||||
listing.refreshGapCache(gapCache);
|
||||
|
||||
let resumeState = filterEntries(listing, 'Dd Vv Vv', 'as as as');
|
||||
// wait until the lookup completes (should happen in the next
|
||||
// event loop iteration so always quicker than a non-immediate timer)
|
||||
await new Promise(resolve => setTimeout(resolve, 1));
|
||||
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapCached);
|
||||
expect(gapCache.toArray()).toEqual([
|
||||
{ firstKey: 'pre/0004/a', lastKey: 'pre/0004/b', weight: 10 },
|
||||
]);
|
||||
// "0004" keys are still prior to the gap's first key
|
||||
resumeState = filterEntries(listing, 'Ddv', 'ass', resumeState);
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapCached);
|
||||
|
||||
// the next delete marker "0005" should trigger a new lookup...
|
||||
resumeState = filterEntries(listing, 'D', 'a', resumeState);
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.GapLookupInProgress);
|
||||
await new Promise(resolve => setTimeout(resolve, 1));
|
||||
|
||||
// ...which returns 'null' and sets the state to NoMoreGap
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.NoMoreGap);
|
||||
filterEntries(listing, 'dv Vv', 'ss as', resumeState);
|
||||
});
|
||||
|
||||
it('should disable gap fetching and building if using V1 format', async () => {
|
||||
const listing = new DelimiterMaster({}, fakeLogger, 'v1');
|
||||
const gapCache = new GapCacheAsSet(100);
|
||||
listing.refreshGapCache(gapCache);
|
||||
|
||||
expect(listing.getGapBuildingValidityPeriodMs()).toBeNull();
|
||||
expect(listing._gapCaching.state).toEqual(GapCachingState.NoGapCache);
|
||||
// mimic V1 listing of master prefix
|
||||
filterEntries(listing, 'V V', 'a a');
|
||||
expect(listing._gapBuilding.state).toEqual(GapBuildingState.Disabled);
|
||||
});
|
||||
});
|
|
@@ -846,11 +846,11 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
}
|
||||
if (vFormat === 'v1') {
|
||||
assert.deepStrictEqual(delimiter.skipping(), [
|
||||
`${DbPrefixes.Master}foo/`,
|
||||
`${DbPrefixes.Version}foo/`,
|
||||
`${DbPrefixes.Master}foo0`,
|
||||
`${DbPrefixes.Version}foo0`,
|
||||
]);
|
||||
} else {
|
||||
assert.strictEqual(delimiter.skipping(), 'foo/');
|
||||
assert.strictEqual(delimiter.skipping(), 'foo0');
|
||||
}
|
||||
});
|
||||
|
||||
|
@@ -871,7 +871,7 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
}),
|
||||
FILTER_SKIP);
|
||||
// ...it should skip the whole replay prefix
|
||||
assert.strictEqual(delimiter.skipping(), DbPrefixes.Replay);
|
||||
assert.strictEqual(delimiter.skipping(), inc(DbPrefixes.Replay));
|
||||
|
||||
// simulate a listing that reaches regular object keys
|
||||
// beyond the replay prefix, ...
|
||||
|
@@ -882,7 +882,7 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
}),
|
||||
FILTER_ACCEPT);
|
||||
// ...it should return to skipping by prefix as usual
|
||||
assert.strictEqual(delimiter.skipping(), `${inc(DbPrefixes.Replay)}foo/`);
|
||||
assert.strictEqual(delimiter.skipping(), `${inc(DbPrefixes.Replay)}foo0`);
|
||||
});
|
||||
}
|
||||
|
||||
|
@@ -937,11 +937,11 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
assert.strictEqual(delimiter.nextKeyMarker, 'foo/');
|
||||
|
||||
if (vFormat === 'v0') {
|
||||
assert.strictEqual(delimiter.skipping(), 'foo/');
|
||||
assert.strictEqual(delimiter.skipping(), 'foo0');
|
||||
} else {
|
||||
assert.deepStrictEqual(delimiter.skipping(), [
|
||||
`${DbPrefixes.Master}foo/`,
|
||||
`${DbPrefixes.Version}foo/`,
|
||||
`${DbPrefixes.Master}foo0`,
|
||||
`${DbPrefixes.Version}foo0`,
|
||||
]);
|
||||
}
|
||||
});
|
||||
|
@@ -958,11 +958,11 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
assert.strictEqual(delimiter.nextKeyMarker, 'foo/');
|
||||
|
||||
if (vFormat === 'v0') {
|
||||
assert.strictEqual(delimiter.skipping(), 'foo/');
|
||||
assert.strictEqual(delimiter.skipping(), 'foo0');
|
||||
} else {
|
||||
assert.deepStrictEqual(delimiter.skipping(), [
|
||||
`${DbPrefixes.Master}foo/`,
|
||||
`${DbPrefixes.Version}foo/`,
|
||||
`${DbPrefixes.Master}foo0`,
|
||||
`${DbPrefixes.Version}foo0`,
|
||||
]);
|
||||
}
|
||||
});
|
||||
|
@@ -1249,11 +1249,11 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
}), FILTER_SKIP);
|
||||
|
||||
if (vFormat === 'v0') {
|
||||
assert.deepStrictEqual(listing.skipping(), `key${VID_SEP}version3`);
|
||||
assert.deepStrictEqual(listing.skipping(), `key${VID_SEP}version3\0`);
|
||||
} else {
|
||||
assert.deepStrictEqual(listing.skipping(), [
|
||||
`${DbPrefixes.Master}key${VID_SEP}version3`,
|
||||
`${DbPrefixes.Version}key${VID_SEP}version3`,
|
||||
`${DbPrefixes.Master}key${VID_SEP}version3\0`,
|
||||
`${DbPrefixes.Version}key${VID_SEP}version3\0`,
|
||||
]);
|
||||
}
|
||||
|
||||
|
@@ -1335,11 +1335,11 @@ function getTestListing(mdParams, data, vFormat) {
|
|||
}), FILTER_SKIP);
|
||||
|
||||
if (vFormat === 'v0') {
|
||||
assert.deepStrictEqual(listing.skipping(), `key${VID_SEP}version3`);
|
||||
assert.deepStrictEqual(listing.skipping(), `key${VID_SEP}version3\0`);
|
||||
} else {
|
||||
assert.deepStrictEqual(listing.skipping(), [
|
||||
`${DbPrefixes.Master}key${VID_SEP}version3`,
|
||||
`${DbPrefixes.Version}key${VID_SEP}version3`,
|
||||
`${DbPrefixes.Master}key${VID_SEP}version3\0`,
|
||||
`${DbPrefixes.Version}key${VID_SEP}version3\0`,
|
||||
]);
|
||||
}
|
||||
|
||||
|
|
|
@@ -116,7 +116,7 @@ describe('Skip Algorithm', () => {
|
|||
// Skipping algo params
|
||||
const extension = {
|
||||
filter: () => FILTER_SKIP,
|
||||
skipping: () => 'entry0',
|
||||
skipping: () => 'entry1',
|
||||
};
|
||||
const gte = 'some-other-entry';
|
||||
// Setting spy functions
|
||||
|
@@ -138,7 +138,7 @@ describe('Skip Algorithm', () => {
|
|||
// Skipping algo params
|
||||
const extension = {
|
||||
filter: () => FILTER_SKIP,
|
||||
skipping: () => ['first-entry-0', 'second-entry-0'],
|
||||
skipping: () => ['first-entry-1', 'second-entry-1'],
|
||||
};
|
||||
const gte = 'some-other-entry';
|
||||
// Setting spy functions
|
||||
|
@@ -160,7 +160,7 @@ describe('Skip Algorithm', () => {
|
|||
// Skipping algo params
|
||||
const extension = {
|
||||
filter: () => FILTER_SKIP,
|
||||
skipping: () => 'entry-0',
|
||||
skipping: () => 'entry-1',
|
||||
};
|
||||
const gte = 'entry-1';
|
||||
// Setting spy functions
|
||||
|
|
|
@@ -306,6 +306,31 @@ describe('Auth Backend: Chain Backend', () => {
|
|||
],
|
||||
);
|
||||
});
|
||||
|
||||
it('should correctly merge policies with implicit denies and actions', () => {
|
||||
const policyResps = [
|
||||
{ message: { body: [
|
||||
{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/true1', action: 'action1', isImplicit: true },
|
||||
{ isAllowed: true, arn: 'arn:aws:s3:::policybucket/true2', action: 'action2', isImplicit: false },
|
||||
{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/false1', action: 'action3', isImplicit: false },
|
||||
] } },
|
||||
{ message: { body: [
|
||||
{ isAllowed: true, arn: 'arn:aws:s3:::policybucket/true1', action: 'action1', isImplicit: false },
|
||||
{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/true2', action: 'action1', isImplicit: true },
|
||||
{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/false2', action: 'action1', isImplicit: true },
|
||||
] } },
|
||||
];
|
||||
assert.deepStrictEqual(
|
||||
ChainBackend._mergePolicies(policyResps),
|
||||
[
|
||||
{ isAllowed: true, arn: 'arn:aws:s3:::policybucket/true1', action: 'action1', isImplicit: false },
|
||||
{ isAllowed: true, arn: 'arn:aws:s3:::policybucket/true2', action: 'action2', isImplicit: false },
|
||||
{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/false1', action: 'action3', isImplicit: false },
|
||||
{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/true2', action: 'action1', isImplicit: true },
|
||||
{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/false2', action: 'action1', isImplicit: true },
|
||||
],
|
||||
);
|
||||
});
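
// A hedged reading of the merge expected above (a sketch, not the actual
// ChainBackend._mergePolicies implementation): results are keyed by
// (arn, action); an explicit result (isImplicit: false) overrides an implicit
// one for the same key, and ordering otherwise follows first appearance
// across the backends' responses.
type PolicyResultSketch = { isAllowed: boolean, arn: string, action: string, isImplicit: boolean };
function mergePoliciesSketch(
    resps: { message: { body: PolicyResultSketch[] } }[],
): PolicyResultSketch[] {
    const byKey = new Map<string, PolicyResultSketch>();
    resps.forEach(resp => resp.message.body.forEach(res => {
        const key = `${res.arn}#${res.action}`;
        const prev = byKey.get(key);
        if (!prev || (prev.isImplicit && !res.isImplicit)) {
            byKey.set(key, res);
        }
    }));
    return [...byKey.values()];
}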
|
||||
});
|
||||
|
||||
describe('::checkhealth', () => {
|
||||
|
|
|
@@ -53,12 +53,21 @@ function checkKeyNotExistsInDB(db, key, cb) {
|
|||
return cb(err);
|
||||
}
|
||||
if (value) {
|
||||
return cb(errors.PreconditionFailed);
|
||||
return cb(errors.EntityAlreadyExists);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
|
||||
function checkKeyExistsInDB(db, key, callback) {
|
||||
return db.get(key, err => {
|
||||
if (err) {
|
||||
return callback(err.notFound ? errors.NoSuchEntity : err);
|
||||
}
|
||||
return callback();
|
||||
});
|
||||
}
|
||||
|
||||
class ConditionalLevelDB {
|
||||
constructor() {
|
||||
this.db = createDb();
|
||||
|
@@ -70,6 +79,9 @@ class ConditionalLevelDB {
|
|||
case ('notExists' in cond):
|
||||
checkKeyNotExistsInDB(this.db, cond.notExists, asyncCallback);
|
||||
break;
|
||||
case ('exists' in cond):
|
||||
checkKeyExistsInDB(this.db, cond.exists, asyncCallback);
|
||||
break;
|
||||
default:
|
||||
asyncCallback(new Error('unsupported conditional operation'));
|
||||
}
|
||||
|
@@ -425,7 +437,7 @@ describe('IndexTransaction', () => {
|
|||
value: value3,
|
||||
});
|
||||
return transaction.commit(err => {
|
||||
if (!err || !err.is.PreconditionFailed) {
|
||||
if (!err || !err.is.EntityAlreadyExists) {
|
||||
return done(new Error('should not be able to conditional put for duplicate key'));
|
||||
}
|
||||
return async.parallel([
|
||||
|
@@ -457,11 +469,87 @@ describe('IndexTransaction', () => {
|
|||
it('should not allow batch operation with unsupported condition', done => {
|
||||
const transaction = new IndexTransaction();
|
||||
try {
|
||||
transaction.addCondition({ exists: key1 });
|
||||
transaction.addCondition({ like: key1 });
|
||||
done(new Error('should fail for unsupported condition, currently supported - notExists'));
|
||||
} catch (err) {
|
||||
assert.strictEqual(err.unsupportedConditionalOperation, true);
|
||||
done();
|
||||
}
|
||||
});
|
||||
|
||||
it('should allow batch operation with key specified in exists condition is present in db', done => {
|
||||
const db = new ConditionalLevelDB();
|
||||
const { client } = db;
|
||||
let transaction = new IndexTransaction(db);
|
||||
transaction.put(key1, value1);
|
||||
return async.series([
|
||||
next => transaction.commit(next),
|
||||
next => client.get(key1, next),
|
||||
], err => {
|
||||
assert.ifError(err);
|
||||
// create new transaction as previous transaction is already committed
|
||||
transaction = new IndexTransaction(db);
|
||||
transaction.addCondition({ exists: key1 });
|
||||
transaction.push({
|
||||
type: 'put',
|
||||
key: key1,
|
||||
value: value2,
|
||||
});
|
||||
return async.series([
|
||||
next => transaction.commit(next),
|
||||
next => client.get(key1, next),
|
||||
], (err, res) => {
|
||||
assert.ifError(err);
|
||||
assert.strictEqual(res[1], value2);
|
||||
return done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('should not allow batch operation with key specified in exists condition is not in db', done => {
|
||||
const db = new ConditionalLevelDB();
|
||||
const { client } = db;
|
||||
const transaction = new IndexTransaction(db);
|
||||
transaction.addCondition({ exists: key1 });
|
||||
transaction.push({
|
||||
type: 'put',
|
||||
key: key1,
|
||||
value: value1,
|
||||
});
|
||||
return transaction.commit(err => {
|
||||
assert.strictEqual(err && err.NoSuchEntity, true);
|
||||
return checkKeyNotExistsInDB(client, key1, done);
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle batch operations with multiple conditions correctly', done => {
|
||||
const db = new ConditionalLevelDB();
|
||||
const { client } = db;
|
||||
let transaction = new IndexTransaction(db);
|
||||
transaction.put(key1, value1);
|
||||
return async.series([
|
||||
next => transaction.commit(next),
|
||||
next => client.get(key1, next),
|
||||
], err => {
|
||||
assert.ifError(err);
|
||||
// create new transaction as previous transaction is already committed
|
||||
transaction = new IndexTransaction(db);
|
||||
transaction.addCondition({ exists: key1 });
|
||||
transaction.addCondition({ notExists: key2 });
|
||||
transaction.push({
|
||||
type: 'put',
|
||||
key: key1,
|
||||
value: value2,
|
||||
});
|
||||
|
||||
return async.series([
|
||||
next => transaction.commit(next),
|
||||
next => client.get(key1, next),
|
||||
], (err, res) => {
|
||||
assert.ifError(err);
|
||||
assert.strictEqual(res[1], value2);
|
||||
return done();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@@ -228,6 +228,8 @@ const testBucketCapabilities = {
|
|||
},
|
||||
};
|
||||
|
||||
const testBucketQuota = 100000;
|
||||
|
||||
// create a dummy bucket to test getters and setters
|
||||
Object.keys(acl).forEach(
|
||||
aclObj => describe(`different acl configurations : ${aclObj}`, () => {
|
||||
|
@@ -252,6 +254,7 @@ Object.keys(acl).forEach(
|
|||
testNotificationConfiguration,
|
||||
testBucketTagging,
|
||||
testBucketCapabilities,
|
||||
testBucketQuota,
|
||||
);
|
||||
|
||||
describe('serialize/deSerialize on BucketInfo class', () => {
|
||||
|
@@ -290,6 +293,7 @@ Object.keys(acl).forEach(
|
|||
notificationConfiguration: dummyBucket._notificationConfiguration,
|
||||
tags: dummyBucket._tags,
|
||||
capabilities: dummyBucket._capabilities,
|
||||
quotaMax: dummyBucket._quotaMax,
|
||||
};
|
||||
assert.strictEqual(serialized, JSON.stringify(bucketInfos));
|
||||
done();
|
||||
|
@@ -339,6 +343,7 @@ Object.keys(acl).forEach(
|
|||
dummyBucket._notificationConfiguration,
|
||||
_tags: dummyBucket._tags,
|
||||
_capabilities: dummyBucket._capabilities,
|
||||
_quotaMax: dummyBucket._quotaMax,
|
||||
};
|
||||
const fromObj = BucketInfo.fromObj(dataObj);
|
||||
assert(fromObj instanceof BucketInfo);
|
||||
|
@@ -694,6 +699,17 @@ Object.keys(acl).forEach(
|
|||
assert.deepStrictEqual(
|
||||
dummyBucket.getCapabilities(), testCapabilities);
|
||||
});
|
||||
it('setQuota should set bucket quota', () => {
|
||||
const testQuota = testBucketQuota;
|
||||
dummyBucket.setQuota(testQuota);
|
||||
assert.deepStrictEqual(
|
||||
dummyBucket.getQuota(), testQuota);
|
||||
});
|
||||
it('setQuota should set bucket quota', () => {
|
||||
dummyBucket.setQuota();
|
||||
assert.deepStrictEqual(
|
||||
dummyBucket.getQuota(), 0);
|
||||
});
|
||||
});
|
||||
}),
|
||||
);
|
||||
|
|
|
@@ -0,0 +1,72 @@
|
|||
const { Version } = require('../../../lib/versioning/Version');
|
||||
|
||||
describe('Version', () => {
|
||||
describe('_jsonAppend', () => {
|
||||
it('should append key-value pair to an empty object', () => {
|
||||
const result = Version._jsonAppend('{}', 'versionId', '123');
|
||||
expect(result).toBe('{"versionId":"123"}');
|
||||
});
|
||||
|
||||
it('should append key-value pair to an object with existing properties', () => {
|
||||
const result = Version._jsonAppend('{"existingKey":"existingValue"}', 'versionId', '123');
|
||||
expect(result).toBe('{"existingKey":"existingValue","versionId":"123"}');
|
||||
});
|
||||
|
||||
it('should append key-value pair to an object with existing key', () => {
|
||||
const result = Version._jsonAppend('{"versionId":"0"}', 'versionId', '123');
|
||||
expect(result).toBe('{"versionId":"0","versionId":"123"}');
|
||||
});
|
||||
});
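
// The duplicated "versionId" in the expectation above is deliberate:
// JSON.parse keeps the last occurrence of a repeated key, so the appended
// versionId wins without the helper having to reparse and rewrite the whole
// document. Quick illustration (not part of the test suite):
const duplicateKeyExample = JSON.parse('{"versionId":"0","versionId":"123"}');
// duplicateKeyExample.versionId === '123'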
|
||||
|
||||
describe('appendVersionId', () => {
|
||||
it('should append versionId to an empty object', () => {
|
||||
const emptyObject = '{}';
|
||||
const versionId = '123';
|
||||
const expected = '{"versionId":"123"}';
|
||||
const result = Version.appendVersionId(emptyObject, versionId);
|
||||
expect(result).toEqual(expected);
|
||||
});
|
||||
|
||||
it('should append versionId to an object with existing properties', () => {
|
||||
const existingObject = '{"key":"value"}';
|
||||
const versionId = '456';
|
||||
const expected = '{"key":"value","versionId":"456"}';
|
||||
const result = Version.appendVersionId(existingObject, versionId);
|
||||
expect(result).toEqual(expected);
|
||||
});
|
||||
|
||||
it('should append versionId to an object with existing versionId', () => {
|
||||
const objectWithVersionId = '{"key":"value","versionId":"old"}';
|
||||
const versionId = 'new';
|
||||
const expected = '{"key":"value","versionId":"old","versionId":"new"}';
|
||||
const result = Version.appendVersionId(objectWithVersionId, versionId);
|
||||
expect(result).toEqual(expected);
|
||||
});
|
||||
});
|
||||
|
||||
describe('updateOrAppendNullVersionId', () => {
|
||||
it('should append nullVersionId when it does not exist', () => {
|
||||
const initialValue = '{"key":"value"}';
|
||||
const nullVersionId = '12345';
|
||||
const expectedValue = '{"key":"value","nullVersionId":"12345"}';
|
||||
const result = Version.updateOrAppendNullVersionId(initialValue, nullVersionId);
|
||||
expect(result).toEqual(expectedValue);
|
||||
});
|
||||
|
||||
it('should update nullVersionId when it exists', () => {
|
||||
const initialValue = '{"key":"value","nullVersionId":"initial"}';
|
||||
const nullVersionId = 'updated12345';
|
||||
const expectedValue = '{"key":"value","nullVersionId":"updated12345"}';
|
||||
const result = Version.updateOrAppendNullVersionId(initialValue, nullVersionId);
|
||||
expect(result).toEqual(expectedValue);
|
||||
});
|
||||
|
||||
it('should handle empty string by appending nullVersionId', () => {
|
||||
const initialValue = '{}';
|
||||
const nullVersionId = 'emptyCase12345';
|
||||
const expectedValue = '{"nullVersionId":"emptyCase12345"}';
|
||||
const result = Version.updateOrAppendNullVersionId(initialValue, nullVersionId);
|
||||
expect(result).toEqual(expectedValue);
|
||||
});
|
||||
});
|
||||
});
|
|
@@ -254,6 +254,588 @@ describe('test VRP', () => {
|
|||
}],
|
||||
done);
|
||||
});
|
||||
|
||||
it('should be able to put Metadata on top of a standalone null version', done => {
|
||||
const versionId = '00000000000000999999PARIS ';
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a standalone null version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz"}',
|
||||
options: {},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
// simulate a BackbeatClient.putMetadata
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
options: {
|
||||
versioning: true,
|
||||
versionId,
|
||||
// isNull === false means Cloudserver supports the new "null key" logic.
|
||||
isNull: false,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
// master version should have the provided version id
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
},
|
||||
// The null version will get the highest version number.
|
||||
// It should have "isNull" and "isNull2" set to true,
|
||||
// showing it's a null version made by Cloudserver that works with null keys.
|
||||
{
|
||||
key: `bar${VID_SEP}`,
|
||||
value: '{"qux":"quz","versionId":"99999999999999999999PARIS ","isNull":true,"isNull2":true}',
|
||||
},
|
||||
// the new version
|
||||
{
|
||||
key: `bar${VID_SEP}${versionId}`,
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz2',
|
||||
versionId,
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
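
// Sketch of the key layout asserted in the listing above (VID_SEP is '\x00',
// as elsewhere in these tests; the version id below is a placeholder): the
// master key holds the latest metadata, the "null key" uses an empty version
// id so it sorts before any real version key of the same object, and every
// regular version gets its own suffixed key.
const sketchVidSep = '\x00';
const sketchMasterKey = 'bar';                              // latest version metadata
const sketchNullKey = `bar${sketchVidSep}`;                 // the null version ("null key")
const sketchVersionKey = `bar${sketchVidSep}someVersionId`; // one key per regular version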
|
||||
|
||||
it('should be able to put Metadata on top of a standalone null version in backward compatibility mode', done => {
|
||||
const versionId = '00000000000000999999PARIS ';
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a standalone null version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz"}',
|
||||
options: {},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
// simulate a BackbeatClient.putMetadata
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
options: {
|
||||
versioning: true,
|
||||
versionId,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
// master version should have the provided version id and a reference of the null version id.
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}","nullVersionId":"99999999999999999999PARIS "}`,
|
||||
},
|
||||
// the "internal" master version should have the provided version id.
|
||||
{
|
||||
key: `bar${VID_SEP}${versionId}`,
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
},
|
||||
// should create a version that represents the old null master with the infinite version id and
|
||||
// the isNull property set to true.
|
||||
{
|
||||
key: `bar${VID_SEP}99999999999999999999PARIS `,
|
||||
value: '{"qux":"quz","versionId":"99999999999999999999PARIS ","isNull":true}',
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz2',
|
||||
versionId,
|
||||
nullVersionId: '99999999999999999999PARIS ',
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
|
||||
|
||||
it('should be able to put Metadata on top of a null suspended version', done => {
|
||||
const versionId = '00000000000000999999PARIS ';
|
||||
let nullVersionId;
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a null suspended version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz","isNull":true}',
|
||||
options: {
|
||||
versionId: '',
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
nullVersionId = JSON.parse(res).versionId;
|
||||
// simulate a BackbeatClient.putMetadata
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
options: {
|
||||
versioning: true,
|
||||
versionId,
|
||||
// isNull === false means Cloudserver supports the new "null key" logic.
|
||||
isNull: false,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
// master version should have the provided version id
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
},
|
||||
// The null version will get the highest version number.
|
||||
// It should have "isNull" and "isNull2" set to true,
|
||||
// showing it's a null version made by Cloudserver that works with null keys.
|
||||
{
|
||||
key: `bar${VID_SEP}`,
|
||||
value: `{"qux":"quz","isNull":true,"versionId":"${nullVersionId}","isNull2":true}`,
|
||||
},
|
||||
// the new version
|
||||
{
|
||||
key: `bar${VID_SEP}${versionId}`,
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz2',
|
||||
versionId,
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
|
||||
|
||||
it('should be able to put Metadata on top of a null suspended version in backward compatibility mode', done => {
|
||||
const versionId = '00000000000000999999PARIS ';
|
||||
let nullVersionId;
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a null suspended version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz","isNull":true}',
|
||||
options: {
|
||||
versionId: '',
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
nullVersionId = JSON.parse(res).versionId;
|
||||
// simulate a BackbeatClient.putMetadata
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
options: {
|
||||
versioning: true,
|
||||
versionId,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
// master version should have the provided version id and a reference of the null version id.
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}","nullVersionId":"${nullVersionId}"}`,
|
||||
},
|
||||
// the "internal" master version should have the provided version id.
|
||||
{
|
||||
key: `bar${VID_SEP}${versionId}`,
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
},
|
||||
// should create a version that represents the old null master with the infinite version id and
|
||||
// the isNull property set to true.
|
||||
{
|
||||
key: `bar${VID_SEP}${nullVersionId}`,
|
||||
value: `{"qux":"quz","isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz2',
|
||||
versionId,
|
||||
nullVersionId,
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
|
||||
|
||||
it('should be able to update a null suspended version in backward compatibility mode', done => {
|
||||
let nullVersionId;
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a null suspended version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz","isNull":true}',
|
||||
options: {
|
||||
versionId: '',
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
nullVersionId = JSON.parse(res).versionId;
|
||||
// simulate update null version with BackbeatClient.putMetadata
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
options: {
|
||||
versioning: true,
|
||||
versionId: nullVersionId,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
// NOTE: should not set nullVersionId to the master version if updating a null version.
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
},
|
||||
{
|
||||
key: `bar\x00${nullVersionId}`,
|
||||
value: `{"qux":"quz","isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz2',
|
||||
isNull: true,
|
||||
versionId: nullVersionId,
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
|
||||
|
||||
it('should delete the deprecated null key after put Metadata on top of an old null master', done => {
|
||||
const versionId = '00000000000000999999PARIS ';
|
||||
let nullVersionId;
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a null suspended version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz","isNull":true}',
|
||||
options: {
|
||||
versionId: '',
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
nullVersionId = JSON.parse(res).versionId;
|
||||
// update metadata of the same null version with compat mode (options.isNull not defined)
|
||||
// to generate a deprecated null key.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
options: {
|
||||
versionId: nullVersionId,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
// put metadata with the new keys implementation (options.isNull defined)
|
||||
// on top of the null master with a deprecated null key.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz3","versionId":"${versionId}"}`,
|
||||
options: {
|
||||
versionId,
|
||||
isNull: false,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
// master version should have the provided version id.
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz3","versionId":"${versionId}"}`,
|
||||
},
|
||||
// the null key
|
||||
{
|
||||
key: `bar${VID_SEP}`,
|
||||
value: `{"qux":"quz2","isNull":true,"versionId":"${nullVersionId}","isNull2":true}`,
|
||||
},
|
||||
// version key
|
||||
{
|
||||
key: `bar${VID_SEP}${versionId}`,
|
||||
value: `{"qux":"quz3","versionId":"${versionId}"}`,
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz3',
|
||||
versionId,
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
|
||||
|
||||
it('should delete the deprecated null key after updating metadata of an old null master', done => {
|
||||
let nullVersionId;
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a null suspended version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz","isNull":true}',
|
||||
options: {
|
||||
versionId: '',
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
nullVersionId = JSON.parse(res).versionId;
|
||||
// update metadata of the same null version with compat mode (options.isNull not defined)
|
||||
// to generate a deprecated null key.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
options: {
|
||||
versionId: nullVersionId,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
// update the null version metadata with the new keys implementation (options.isNull defined)
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz3","isNull2":true,"isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
options: {
|
||||
versionId: nullVersionId,
|
||||
isNull: true,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
// the internal null version should be deleted.
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz3","isNull2":true,"isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz3',
|
||||
isNull2: true,
|
||||
isNull: true,
|
||||
versionId: nullVersionId,
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
|
||||
|
||||
it('should delete the deprecated null key after updating a non-latest null key', done => {
|
||||
const versionId = '00000000000000999999PARIS ';
|
||||
let nullVersionId;
|
||||
|
||||
async.waterfall([next => {
|
||||
// simulate the creation of a null suspended version.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: '{"qux":"quz","isNull":true}',
|
||||
options: {
|
||||
versionId: '',
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
nullVersionId = JSON.parse(res).versionId;
|
||||
// simulate a BackbeatClient.putMetadata
|
||||
// null key is not the latest = master is not null.
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
options: {
|
||||
versioning: true,
|
||||
versionId,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
// update the null version metadata with the new keys implementation (options.isNull defined)
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz3","isNull2":true,"isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
options: {
|
||||
versionId: nullVersionId,
|
||||
isNull: true,
|
||||
},
|
||||
};
|
||||
vrp.put(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
wgm.list({}, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedListing = [
|
||||
{
|
||||
key: 'bar',
|
||||
value: `{"qux":"quz2","versionId":"${versionId}","nullVersionId":"${nullVersionId}"}`,
|
||||
},
|
||||
{
|
||||
key: 'bar\x00',
|
||||
value: `{"qux":"quz3","isNull2":true,"isNull":true,"versionId":"${nullVersionId}"}`,
|
||||
},
|
||||
{
|
||||
key: `bar\x00${versionId}`,
|
||||
value: `{"qux":"quz2","versionId":"${versionId}"}`,
|
||||
},
|
||||
];
|
||||
assert.deepStrictEqual(res, expectedListing);
|
||||
|
||||
const request = {
|
||||
db: 'foo',
|
||||
key: 'bar',
|
||||
};
|
||||
vrp.get(request, logger, next);
|
||||
},
|
||||
(res, next) => {
|
||||
const expectedGet = {
|
||||
qux: 'quz2',
|
||||
versionId,
|
||||
nullVersionId,
|
||||
};
|
||||
assert.deepStrictEqual(JSON.parse(res), expectedGet);
|
||||
next();
|
||||
}],
|
||||
done);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
|
|
|
@@ -1,6 +1,6 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"target": "es6",
|
||||
"target": "es2020",
|
||||
"module": "commonjs",
|
||||
"rootDir": "./",
|
||||
"resolveJsonModule": true,
|
||||
|
|