Compare commits

..

1 Commit

Author: Kerkesni
SHA1: 1bb8e86679
Message: ARSN-299 add location pause status model
Date: 2023-02-14 11:33:56 +01:00
137 changed files with 8912 additions and 17213 deletions

View File

@@ -1,6 +1 @@
-{
-    "extends": "scality",
-    "parserOptions": {
-        "ecmaVersion": 2020
-    }
-}
+{ "extends": "scality" }

View File

@@ -1,25 +0,0 @@
---
name: codeQL
on:
push:
branches: [development/*, stabilization/*, hotfix/*]
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
workflow_dispatch:
jobs:
analyze:
name: Static analysis with CodeQL
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript, typescript
- name: Build and analyze
uses: github/codeql-action/analyze@v3

View File

@@ -1,16 +0,0 @@
---
name: dependency review
on:
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4

View File

@@ -25,8 +25,8 @@ jobs:
         - 6379:6379
     steps:
     - name: Checkout
-      uses: actions/checkout@v4
-    - uses: actions/setup-node@v4
+      uses: actions/checkout@v2
+    - uses: actions/setup-node@v2
       with:
         node-version: '16'
         cache: 'yarn'
@@ -46,9 +46,7 @@ jobs:
       run: yarn --silent coverage
     - name: run functional tests
       run: yarn ft_test
-    - uses: codecov/codecov-action@v4
-      with:
-        token: ${{ secrets.CODECOV_TOKEN }}
+    - uses: codecov/codecov-action@v2
    - name: run executables tests
      run: yarn install && yarn test
      working-directory: 'lib/executables/pensieveCreds/'
@@ -59,9 +57,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout
-      uses: actions/checkout@v4
+      uses: actions/checkout@v2
     - name: Install NodeJS
-      uses: actions/setup-node@v4
+      uses: actions/setup-node@v2
       with:
         node-version: '16'
         cache: yarn
@@ -72,7 +70,7 @@ jobs:
       run: yarn build
       continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
     - name: Upload artifacts
-      uses: scality/action-artifacts@v4
+      uses: scality/action-artifacts@v2
       with:
         url: https://artifacts.scality.net
         user: ${{ secrets.ARTIFACTS_USER }}

.swcrc
View File

@@ -1,12 +0,0 @@
{
"$schema": "https://swc.rs/schema.json",
"jsc": {
"parser": {
"syntax": "typescript"
},
"target": "es2017"
},
"module": {
"type": "commonjs"
}
}

View File

@@ -245,16 +245,4 @@ For capacity-enabled buckets, contains the following data:
 ### Usage

 Used to store bucket tagging

-## Model version 17
-
-### Properties Added
-
-```javascript
-this._quotaMax = quotaMax || 0;
-```
-
-### Usage
-
-Used to store bucket quota
-Used to store bucket tagging

View File

@@ -1,33 +0,0 @@
# DelimiterVersions
The DelimiterVersions class handles raw listings from the database of a
versioned or non-versioned bucket with an optional delimiter, and
fills in a curated listing with "Versions" and "CommonPrefixes" as a
result.
## Expected Behavior
- lists individual distinct versions of versioned buckets
- only lists keys belonging to the given **prefix** (if provided)
- groups listed keys that have a common prefix ending with a delimiter
inside CommonPrefixes
- can take a **keyMarker** and optionally a **versionIdMarker** to
list from a specific key or version
- can take a **maxKeys** parameter to limit how many keys can be returned
- skips internal keys like replay keys
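
As a concrete illustration of the behavior above (a sketch only; the exact result field shapes are assumed, not taken from the class), a bucket holding two versions of `readme.txt` plus several keys under `photos/`, listed with delimiter `/`, would be curated into something like:

```typescript
// hypothetical curated listing produced by DelimiterVersions
const result = {
    CommonPrefixes: ['photos/'],    // keys sharing the 'photos/' prefix, grouped
    Versions: [
        // both distinct versions of the same key are listed individually
        { key: 'readme.txt', versionId: 'v2' },
        { key: 'readme.txt', versionId: 'v1' },
    ],
    IsTruncated: false,             // fewer than maxKeys keys were returned
};
```
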
## State Chart
- States with grey background are *Idle* states, which are waiting for
a new listing key
- States with blue background are *Processing* states, which are
actively processing a new listing key passed by the filter()
function
![DelimiterVersions State Chart](./pics/delimiterVersionsStateChart.svg)

View File

@@ -1,50 +0,0 @@
digraph {
node [shape="box",style="filled,rounded",fontsize=16,fixedsize=true,width=3];
edge [fontsize=14];
rankdir=TB;
START [shape="circle",width=0.2,label="",style="filled",fillcolor="black"]
END [shape="circle",width=0.2,label="",style="filled",fillcolor="black",peripheries=2]
node [fillcolor="lightgrey"];
"NotSkipping.Idle" [label="NotSkipping",group="NotSkipping",width=4];
"SkippingPrefix.Idle" [label="SkippingPrefix",group="SkippingPrefix"];
"WaitForNullKey.Idle" [label="WaitForNullKey",group="WaitForNullKey"];
"SkippingVersions.Idle" [label="SkippingVersions",group="SkippingVersions"];
node [fillcolor="lightblue"];
"NotSkipping.Processing" [label="NotSkipping",group="NotSkipping",width=4];
"NotSkippingV0.Processing" [label="NotSkippingV0",group="NotSkipping",width=4];
"NotSkippingV1.Processing" [label="NotSkippingV1",group="NotSkipping",width=4];
"NotSkippingCommon.Processing" [label="NotSkippingCommon",group="NotSkipping",width=4];
"SkippingPrefix.Processing" [label="SkippingPrefix",group="SkippingPrefix"];
"WaitForNullKey.Processing" [label="WaitForNullKey",group="WaitForNullKey"];
"SkippingVersions.Processing" [label="SkippingVersions",group="SkippingVersions"];
START -> "WaitForNullKey.Idle" [label="[versionIdMarker != undefined]"]
START -> "NotSkipping.Idle" [label="[versionIdMarker == undefined]"]
"NotSkipping.Idle" -> "NotSkipping.Processing" [label="filter(key, value)"]
"SkippingPrefix.Idle" -> "SkippingPrefix.Processing" [label="filter(key, value)"]
"WaitForNullKey.Idle" -> "WaitForNullKey.Processing" [label="filter(key, value)"]
"SkippingVersions.Idle" -> "SkippingVersions.Processing" [label="filter(key, value)"]
"NotSkipping.Processing" -> "NotSkippingV0.Processing" [label="vFormat='v0'"]
"NotSkipping.Processing" -> "NotSkippingV1.Processing" [label="vFormat='v1'"]
"WaitForNullKey.Processing" -> "NotSkipping.Processing" [label="master(key) != keyMarker"]
"WaitForNullKey.Processing" -> "SkippingVersions.Processing" [label="master(key) == keyMarker"]
"NotSkippingV0.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(<ReplayPrefix>)]\n/ prefix <- <ReplayPrefix>\n-> FILTER_SKIP"]
"NotSkippingV0.Processing" -> "NotSkipping.Idle" [label="[Version.isPHD(value)]\n-> FILTER_ACCEPT"]
"NotSkippingV0.Processing" -> "NotSkippingCommon.Processing" [label="[not key.startsWith(<ReplayPrefix>)\nand not Version.isPHD(value)]"]
"NotSkippingV1.Processing" -> "NotSkippingCommon.Processing" [label="[always]"]
"NotSkippingCommon.Processing" -> END [label="[isListableKey(key, value) and\nKeys == maxKeys]\n-> FILTER_END"]
"NotSkippingCommon.Processing" -> "SkippingPrefix.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nhasDelimiter(key)]\n/ prefix <- prefixOf(key)\n/ CommonPrefixes.append(prefixOf(key))\n-> FILTER_ACCEPT"]
"NotSkippingCommon.Processing" -> "NotSkipping.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nnot hasDelimiter(key)]\n/ Contents.append(key, versionId, value)\n-> FILTER_ACCEPT"]
"SkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(prefix)]\n-> FILTER_SKIP"]
"SkippingPrefix.Processing" -> "NotSkipping.Processing" [label="[not key.startsWith(prefix)]"]
"SkippingVersions.Processing" -> "NotSkipping.Processing" [label="master(key) !== keyMarker or \nversionId > versionIdMarker"]
"SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="master(key) === keyMarker and \nversionId < versionIdMarker\n-> FILTER_SKIP"]
"SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="master(key) === keyMarker and \nversionId == versionIdMarker\n-> FILTER_ACCEPT"]
}

View File

@@ -1,265 +0,0 @@
[deleted file: pics/delimiterVersionsStateChart.svg, the Graphviz-generated SVG rendering (265 lines, 21 KiB) of the DelimiterVersions state chart; its source is the .gv file above]

View File

@@ -1,9 +1,6 @@
 import * as evaluators from './lib/policyEvaluator/evaluator';
 import evaluatePrincipal from './lib/policyEvaluator/principal';
-import RequestContext, {
-    actionNeedQuotaCheck,
-    actionNeedQuotaCheckCopy,
-    actionWithDataDeletion } from './lib/policyEvaluator/RequestContext';
+import RequestContext from './lib/policyEvaluator/RequestContext';
 import * as requestUtils from './lib/policyEvaluator/requestUtils';
 import * as actionMaps from './lib/policyEvaluator/utils/actionMaps';
 import { validateUserPolicy } from './lib/policy/policyValidator'
@@ -28,7 +25,6 @@ import * as objectRestore from './lib/s3middleware/objectRestore';
 import * as lifecycleHelpers from './lib/s3middleware/lifecycleHelpers';
 export { default as errors } from './lib/errors';
 export { default as Clustering } from './lib/Clustering';
-export * as ClusterRPC from './lib/clustering/ClusterRPC';
 export * as ipCheck from './lib/ipCheck';
 export * as auth from './lib/auth/auth';
 export * as constants from './lib/constants';
@@ -52,15 +48,12 @@ export const algorithms = {
         Skip: require('./lib/algos/list/skip'),
     },
     cache: {
-        GapSet: require('./lib/algos/cache/GapSet'),
-        GapCache: require('./lib/algos/cache/GapCache'),
         LRUCache: require('./lib/algos/cache/LRUCache'),
     },
     stream: {
         MergeStream: require('./lib/algos/stream/MergeStream'),
     },
     SortedSet: require('./lib/algos/set/SortedSet'),
-    Heap: require('./lib/algos/heap/Heap'),
 };
@@ -70,9 +63,6 @@ export const policies = {
     RequestContext,
     requestUtils,
     actionMaps,
-    actionNeedQuotaCheck,
-    actionWithDataDeletion,
-    actionNeedQuotaCheckCopy,
 };
 export const testing = {

View File

@@ -1,363 +0,0 @@
import { OrderedSet } from '@js-sdsl/ordered-set';
import {
default as GapSet,
GapSetEntry,
} from './GapSet';
// the API is similar but is not strictly a superset of GapSetInterface
// so we don't extend from it
export interface GapCacheInterface {
exposureDelayMs: number;
maxGapWeight: number;
size: number;
setGap: (firstKey: string, lastKey: string, weight: number) => void;
removeOverlappingGaps: (overlappingKeys: string[]) => number;
lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
[Symbol.iterator]: () => Iterator<GapSetEntry>;
toArray: () => GapSetEntry[];
};
class GapCacheUpdateSet {
newGaps: GapSet;
updatedKeys: OrderedSet<string>;
constructor(maxGapWeight: number) {
this.newGaps = new GapSet(maxGapWeight);
this.updatedKeys = new OrderedSet();
}
addUpdateBatch(updatedKeys: OrderedSet<string>): void {
this.updatedKeys.union(updatedKeys);
}
};
/**
* Cache of listing "gaps" i.e. ranges of keys that can be skipped
* over during listing (because they only contain delete markers as
* latest versions).
*
* Typically, a single GapCache instance would be attached to a raft session.
*
* The API usage is as follows:
*
* - Initialize a GapCache instance by calling start() (this starts an internal timer)
*
* - Insert a gap or update an existing one via setGap()
*
* - Lookup existing gaps via lookupGap()
*
* - Invalidate gaps that overlap a specific set of keys via removeOverlappingGaps()
*
* - Shut down a GapCache instance by calling stop() (this stops the internal timer)
*
* Gaps inserted via setGap() are not exposed immediately to lookupGap(), but only:
*
* - after a certain delay always larger than 'exposureDelayMs' and usually shorter
* than twice this value (but might be slightly longer in rare cases)
*
* - and only if they haven't been invalidated by a recent call to removeOverlappingGaps()
*
* This ensures atomicity between gap creation and invalidation from updates under
* the condition that a gap is created from first key to last key within the time defined
* by 'exposureDelayMs'.
*
* The implementation is based on two extra temporary "update sets" on top of the main
* exposed gap set, one called "staging" and the other "frozen", each containing a
* temporary updated gap set and a list of updated keys to invalidate gaps with (coming
* from calls to removeOverlappingGaps()). Every "exposureDelayMs" milliseconds, the frozen
* gaps are invalidated by all key updates coming from either of the "staging" or "frozen"
* update set, then merged into the exposed gaps set, after which the staging updates become
* the frozen updates and won't receive any new gap until the next cycle.
*/
export default class GapCache implements GapCacheInterface {
_exposureDelayMs: number;
maxGaps: number;
_stagingUpdates: GapCacheUpdateSet;
_frozenUpdates: GapCacheUpdateSet;
_exposedGaps: GapSet;
_exposeFrozenInterval: NodeJS.Timeout | null;
/**
* @constructor
*
* @param {number} exposureDelayMs - minimum delay between
* insertion of a gap via setGap() and its exposure via
* lookupGap()
* @param {number} maxGaps - maximum number of cached gaps, after
* which no new gap can be added by setGap(). (Note: a future
* improvement could replace this by an eviction strategy)
* @param {number} maxGapWeight - maximum "weight" of individual
* cached gaps, which is also the granularity for
* invalidation. Individual gaps can be chained together,
* which lookupGap() transparently consolidates in the response
* into a single large gap.
*/
constructor(exposureDelayMs: number, maxGaps: number, maxGapWeight: number) {
this._exposureDelayMs = exposureDelayMs;
this.maxGaps = maxGaps;
this._stagingUpdates = new GapCacheUpdateSet(maxGapWeight);
this._frozenUpdates = new GapCacheUpdateSet(maxGapWeight);
this._exposedGaps = new GapSet(maxGapWeight);
this._exposeFrozenInterval = null;
}
/**
* Create a GapCache from an array of exposed gap entries (used in tests)
*
* @return {GapCache} - a new GapCache instance
*/
static createFromArray(
gaps: GapSetEntry[],
exposureDelayMs: number,
maxGaps: number,
maxGapWeight: number
): GapCache {
const gapCache = new GapCache(exposureDelayMs, maxGaps, maxGapWeight);
gapCache._exposedGaps = GapSet.createFromArray(gaps, maxGapWeight)
return gapCache;
}
/**
* Internal helper to remove gaps in the staging and frozen sets
* overlapping with previously updated keys, right before the
* frozen gaps get exposed.
*
* @return {undefined}
*/
_removeOverlappingGapsBeforeExpose(): void {
for (const { updatedKeys } of [this._stagingUpdates, this._frozenUpdates]) {
if (updatedKeys.size() === 0) {
continue;
}
for (const { newGaps } of [this._stagingUpdates, this._frozenUpdates]) {
if (newGaps.size === 0) {
continue;
}
newGaps.removeOverlappingGaps(updatedKeys);
}
}
}
/**
* This function is the core mechanism that updates the exposed gaps in the
* cache. It is called on a regular interval defined by 'exposureDelayMs'.
*
* It does the following in order:
*
* - remove gaps from the frozen set that overlap with any key present in a
* batch passed to removeOverlappingGaps() since the last two triggers of
* _exposeFrozen()
*
* - merge the remaining gaps from the frozen set to the exposed set, which
* makes them visible from calls to lookupGap()
*
* - rotate by freezing the currently staging updates and initiating a new
* staging updates set
*
* @return {undefined}
*/
_exposeFrozen(): void {
this._removeOverlappingGapsBeforeExpose();
for (const gap of this._frozenUpdates.newGaps) {
// Use a trivial strategy to keep the cache size within
// limits: refuse to add new gaps when the size is above
// the 'maxGaps' threshold. We solely rely on
// removeOverlappingGaps() to make space for new gaps.
if (this._exposedGaps.size < this.maxGaps) {
this._exposedGaps.setGap(gap.firstKey, gap.lastKey, gap.weight);
}
}
this._frozenUpdates = this._stagingUpdates;
this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
}
/**
* Start the internal GapCache timer
*
* @return {undefined}
*/
start(): void {
if (this._exposeFrozenInterval) {
return;
}
this._exposeFrozenInterval = setInterval(
() => this._exposeFrozen(),
this._exposureDelayMs);
}
/**
* Stop the internal GapCache timer
*
* @return {undefined}
*/
stop(): void {
if (this._exposeFrozenInterval) {
clearInterval(this._exposeFrozenInterval);
this._exposeFrozenInterval = null;
}
}
/**
* Record a gap between two keys, associated with a weight to
* limit individual gap's spanning ranges in the cache, for a more
* granular invalidation.
*
* The function handles splitting and merging existing gaps to
* maintain an optimal weight of cache entries.
*
* NOTE 1: the caller must ensure that the full length of the gap
* between 'firstKey' and 'lastKey' has been built from a listing
* snapshot that is more recent than 'exposureDelayMs' milliseconds,
* in order to guarantee that the exposed gap will be fully
* covered (and potentially invalidated) from recent calls to
* removeOverlappingGaps().
*
* NOTE 2: a usual pattern when building a large gap from multiple
* calls to setGap() is to start the next gap from 'lastKey',
* which will be passed as 'firstKey' in the next call, so that
* gaps can be chained together and consolidated by lookupGap().
*
* @param {string} firstKey - first key of the gap
* @param {string} lastKey - last key of the gap, must be greater
* or equal than 'firstKey'
* @param {number} weight - total weight between 'firstKey' and 'lastKey'
* @return {undefined}
*/
setGap(firstKey: string, lastKey: string, weight: number): void {
this._stagingUpdates.newGaps.setGap(firstKey, lastKey, weight);
}
/**
* Remove gaps that overlap with a given set of keys. Used to
* invalidate gaps when keys are inserted or deleted.
*
* @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that
* overlap with any of this set of keys
* @return {number} - how many gaps were removed from the exposed
* gaps only (overlapping gaps not yet exposed are also invalidated
* but are not accounted for in the returned value)
*/
removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
let overlappingKeysSet;
if (Array.isArray(overlappingKeys)) {
overlappingKeysSet = new OrderedSet(overlappingKeys);
} else {
overlappingKeysSet = overlappingKeys;
}
this._stagingUpdates.addUpdateBatch(overlappingKeysSet);
return this._exposedGaps.removeOverlappingGaps(overlappingKeysSet);
}
/**
* Lookup the next exposed gap that overlaps with [minKey, maxKey]. Internally
* chained gaps are coalesced in the response into a single contiguous large gap.
*
* @param {string} minKey - minimum key overlapping with the returned gap
* @param {string} [maxKey] - maximum key overlapping with the returned gap
* @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
* was found, null otherwise, as a Promise
*/
lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
return this._exposedGaps.lookupGap(minKey, maxKey);
}
/**
* Get the maximum weight setting for individual gaps.
*
* @return {number} - maximum weight of individual gaps
*/
get maxGapWeight(): number {
return this._exposedGaps.maxWeight;
}
/**
* Set the maximum weight setting for individual gaps.
*
* @param {number} gapWeight - maximum weight of individual gaps
*/
set maxGapWeight(gapWeight: number) {
this._exposedGaps.maxWeight = gapWeight;
// also update transient gap sets
this._stagingUpdates.newGaps.maxWeight = gapWeight;
this._frozenUpdates.newGaps.maxWeight = gapWeight;
}
/**
* Get the exposure delay in milliseconds, which is the minimum
* time after which newly cached gaps will be exposed by
* lookupGap().
*
* @return {number} - exposure delay in milliseconds
*/
get exposureDelayMs(): number {
return this._exposureDelayMs;
}
/**
* Set the exposure delay in milliseconds, which is the minimum
* time after which newly cached gaps will be exposed by
* lookupGap(). Setting this attribute automatically updates the
* internal state to honor the new value.
*
* @param {number} - exposure delay in milliseconds
*/
set exposureDelayMs(exposureDelayMs: number) {
if (exposureDelayMs !== this._exposureDelayMs) {
this._exposureDelayMs = exposureDelayMs;
if (this._exposeFrozenInterval) {
// invalidate all pending gap updates, as the new interval may not be
// safe for them
this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
this._frozenUpdates = new GapCacheUpdateSet(this.maxGapWeight);
// reinitialize the _exposeFrozenInterval timer with the updated delay
this.stop();
this.start();
}
}
}
/**
* Get the number of exposed gaps
*
* @return {number} number of exposed gaps
*/
get size(): number {
return this._exposedGaps.size;
}
/**
* Iterate over exposed gaps
*
* @return {Iterator<GapSetEntry>} an iterator over exposed gaps
*/
[Symbol.iterator](): Iterator<GapSetEntry> {
return this._exposedGaps[Symbol.iterator]();
}
/**
* Get an array of all exposed gaps
*
* @return {GapSetEntry[]} array of exposed gaps
*/
toArray(): GapSetEntry[] {
return this._exposedGaps.toArray();
}
/**
* Clear all exposed and staging gaps from the cache.
*
* Note: retains invalidating updates from removeOverlappingGaps()
* for correctness of gaps inserted afterwards.
*
* @return {undefined}
*/
clear(): void {
this._stagingUpdates.newGaps = new GapSet(this.maxGapWeight);
this._frozenUpdates.newGaps = new GapSet(this.maxGapWeight);
this._exposedGaps = new GapSet(this.maxGapWeight);
}
}
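
A minimal usage sketch of the GapCache API documented in the class comment above (illustrative keys, weights, and timer values; not part of the diff):

```typescript
import GapCache from './lib/algos/cache/GapCache';

// exposureDelayMs=100, maxGaps=1000, maxGapWeight=500
const cache = new GapCache(100, 1000, 500);
cache.start();

// record a gap spanning 200 keys' worth of weight
cache.setGap('key0001', 'key0200', 200);

// gaps become visible to lookupGap() only after the exposure delay
// (always more than exposureDelayMs, usually less than twice that value)
setTimeout(async () => {
    const gap = await cache.lookupGap('key0050', 'key0100');
    // -> { firstKey: 'key0001', lastKey: 'key0200', weight: 200 }

    // a write landing inside the gap invalidates it
    cache.removeOverlappingGaps(['key0100']);
    cache.stop();
}, 250);
```
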

View File

@@ -1,366 +0,0 @@
import assert from 'assert';
import { OrderedSet } from '@js-sdsl/ordered-set';
import errors from '../../errors';
export type GapSetEntry = {
firstKey: string,
lastKey: string,
weight: number,
};
export interface GapSetInterface {
maxWeight: number;
size: number;
setGap: (firstKey: string, lastKey: string, weight: number) => GapSetEntry;
removeOverlappingGaps: (overlappingKeys: string[]) => number;
lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
[Symbol.iterator]: () => Iterator<GapSetEntry>;
toArray: () => GapSetEntry[];
};
/**
* Specialized data structure to support caching of listing "gaps",
* i.e. ranges of keys that can be skipped over during listing
* (because they only contain delete markers as latest versions)
*/
export default class GapSet implements GapSetInterface, Iterable<GapSetEntry> {
_gaps: OrderedSet<GapSetEntry>;
_maxWeight: number;
/**
* @constructor
* @param {number} maxWeight - weight threshold for each cached
* gap (unitless). Triggers splitting gaps when reached
*/
constructor(maxWeight: number) {
this._gaps = new OrderedSet(
[],
(left: GapSetEntry, right: GapSetEntry) => (
left.firstKey < right.firstKey ? -1 :
left.firstKey > right.firstKey ? 1 : 0
)
);
this._maxWeight = maxWeight;
}
/**
* Create a GapSet from an array of gap entries (used in tests)
*/
static createFromArray(gaps: GapSetEntry[], maxWeight: number): GapSet {
const gapSet = new GapSet(maxWeight);
for (const gap of gaps) {
gapSet._gaps.insert(gap);
}
return gapSet;
}
/**
* Record a gap between two keys, associated with a weight to limit
* individual gap sizes in the cache.
*
* The function handles splitting and merging existing gaps to
* maintain an optimal weight of cache entries.
*
* @param {string} firstKey - first key of the gap
* @param {string} lastKey - last key of the gap, must be greater
* or equal than 'firstKey'
* @param {number} weight - total weight between 'firstKey' and 'lastKey'
* @return {GapSetEntry} - existing or new gap entry
*/
setGap(firstKey: string, lastKey: string, weight: number): GapSetEntry {
assert(lastKey >= firstKey);
// Step 1/4: Find the closest left-overlapping gap, and either re-use it
// or chain it with a new gap depending on the weights if it exists (otherwise
// just creates a new gap).
const curGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey });
let curGap;
if (curGapIt.isAccessible()) {
curGap = curGapIt.pointer;
if (curGap.lastKey >= lastKey) {
// return fully overlapping gap already cached
return curGap;
}
}
let remainingWeight = weight;
if (!curGap // no previous gap
|| curGap.lastKey < firstKey // previous gap not overlapping
|| (curGap.lastKey === firstKey // previous gap overlapping by one key...
&& curGap.weight + weight > this._maxWeight) // ...but we can't extend it
) {
// create a new gap indexed by 'firstKey'
curGap = { firstKey, lastKey: firstKey, weight: 0 };
this._gaps.insert(curGap);
} else if (curGap.lastKey > firstKey && weight > this._maxWeight) {
// previous gap is either fully or partially contained in the new gap
// and cannot be extended: substract its weight from the total (heuristic
// in case the previous gap doesn't start at 'firstKey', which is the
// uncommon case)
remainingWeight -= curGap.weight;
// there may be an existing chained gap starting with the previous gap's
// 'lastKey': use it if it exists
const chainedGapIt = this._gaps.find(<GapSetEntry>{ firstKey: curGap.lastKey });
if (chainedGapIt.isAccessible()) {
curGap = chainedGapIt.pointer;
} else {
// no existing chained gap: chain a new gap to the previous gap
curGap = {
firstKey: curGap.lastKey,
lastKey: curGap.lastKey,
weight: 0,
};
this._gaps.insert(curGap);
}
}
// Step 2/4: Cleanup existing gaps fully included in firstKey -> lastKey, and
// aggregate their weights in curGap to define the minimum weight up to the
// last merged gap.
let nextGap;
while (true) {
const nextGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: curGap.firstKey });
nextGap = nextGapIt.isAccessible() && nextGapIt.pointer;
// stop the cleanup when no more gap or if the next gap is not fully
// included in curGap
if (!nextGap || nextGap.lastKey > lastKey) {
break;
}
this._gaps.eraseElementByIterator(nextGapIt);
curGap.lastKey = nextGap.lastKey;
curGap.weight += nextGap.weight;
}
// Step 3/4: Extend curGap to lastKey, adjusting the weight.
// At this point, curGap weight is the minimum weight of the finished gap, save it
// for step 4.
let minMergedWeight = curGap.weight;
if (curGap.lastKey === firstKey && firstKey !== lastKey) {
// extend the existing gap by the full amount 'firstKey -> lastKey'
curGap.lastKey = lastKey;
curGap.weight += remainingWeight;
} else if (curGap.lastKey <= lastKey) {
curGap.lastKey = lastKey;
curGap.weight = remainingWeight;
}
// Step 4/4: Find the closest right-overlapping gap, and if it exists, either merge
// it or chain it with curGap depending on the weights.
if (nextGap && nextGap.firstKey <= lastKey) {
// nextGap overlaps with the new gap: check if we can merge it
minMergedWeight += nextGap.weight;
let mergedWeight;
if (lastKey === nextGap.firstKey) {
// nextGap is chained with curGap: add the full weight of nextGap
mergedWeight = curGap.weight + nextGap.weight;
} else {
// strict overlap: don't add nextGap's weight unless
// it's larger than the sum of merged ranges (as it is
// then included in `minMergedWeight`)
mergedWeight = Math.max(curGap.weight, minMergedWeight);
}
if (mergedWeight <= this._maxWeight) {
// merge nextGap into curGap
curGap.lastKey = nextGap.lastKey;
curGap.weight = mergedWeight;
this._gaps.eraseElementByKey(nextGap);
} else {
// adjust the last key to chain with nextGap and substract the next
// gap's weight from curGap (heuristic)
curGap.lastKey = nextGap.firstKey;
curGap.weight = Math.max(mergedWeight - nextGap.weight, 0);
curGap = nextGap;
}
}
// return a copy of curGap
return Object.assign({}, curGap);
}
/**
* Remove gaps that overlap with one or more keys in a given array or
* OrderedSet. Used to invalidate gaps when keys are inserted or deleted.
*
* @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that overlap
* with any of this set of keys
* @return {number} - how many gaps were removed
*/
removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
// To optimize processing with a large number of keys and/or gaps, this function:
//
// 1. converts the overlappingKeys array to a OrderedSet (if not already a OrderedSet)
// 2. queries both the gaps set and the overlapping keys set in a loop, which allows:
// - skipping ranges of overlapping keys at once when there is no new overlapping gap
// - skipping ranges of gaps at once when there is no overlapping key
//
// This way, it is efficient when the number of non-overlapping gaps is large
// (which is the most common case in practice).
let overlappingKeysSet;
if (Array.isArray(overlappingKeys)) {
overlappingKeysSet = new OrderedSet(overlappingKeys);
} else {
overlappingKeysSet = overlappingKeys;
}
const firstKeyIt = overlappingKeysSet.begin();
let currentKey = firstKeyIt.isAccessible() && firstKeyIt.pointer;
let nRemoved = 0;
while (currentKey) {
const closestGapIt = this._gaps.reverseUpperBound(<GapSetEntry>{ firstKey: currentKey });
if (closestGapIt.isAccessible()) {
const closestGap = closestGapIt.pointer;
if (currentKey <= closestGap.lastKey) {
// currentKey overlaps closestGap: remove the gap
this._gaps.eraseElementByIterator(closestGapIt);
nRemoved += 1;
}
}
const nextGapIt = this._gaps.lowerBound(<GapSetEntry>{ firstKey: currentKey });
if (!nextGapIt.isAccessible()) {
// no more gap: we're done
return nRemoved;
}
const nextGap = nextGapIt.pointer;
// advance to the last key potentially overlapping with nextGap
let currentKeyIt = overlappingKeysSet.reverseLowerBound(nextGap.lastKey);
if (currentKeyIt.isAccessible()) {
currentKey = currentKeyIt.pointer;
if (currentKey >= nextGap.firstKey) {
// currentKey overlaps nextGap: remove the gap
this._gaps.eraseElementByIterator(nextGapIt);
nRemoved += 1;
}
}
// advance to the first key potentially overlapping with another gap
currentKeyIt = overlappingKeysSet.lowerBound(nextGap.lastKey);
currentKey = currentKeyIt.isAccessible() && currentKeyIt.pointer;
}
return nRemoved;
}
/**
* Internal helper to coalesce multiple chained gaps into a single gap.
*
* It is only used to construct lookupGap() return values and
* doesn't modify the GapSet.
*
* NOTE: The function may take a noticeable amount of time and CPU
* to execute if a large number of chained gaps have to be
* coalesced, but it should never take more than a few seconds. In
* most cases it should take less than a millisecond. It regularly
* yields to the nodejs event loop to avoid blocking it during a
* long execution.
*
* @param {GapSetEntry} firstGap - first gap of the chain to coalesce with
* the next ones in the chain
* @return {Promise<GapSetEntry>} - a new coalesced entry, as a Promise
*/
_coalesceGapChain(firstGap: GapSetEntry): Promise<GapSetEntry> {
return new Promise(resolve => {
const coalescedGap: GapSetEntry = Object.assign({}, firstGap);
const coalesceGapChainIteration = () => {
// efficiency trade-off: 100 iterations of log(N) complexity lookups should
// not block the event loop for too long
for (let opCounter = 0; opCounter < 100; ++opCounter) {
const chainedGapIt = this._gaps.find(
<GapSetEntry>{ firstKey: coalescedGap.lastKey });
if (!chainedGapIt.isAccessible()) {
// chain is complete
return resolve(coalescedGap);
}
const chainedGap = chainedGapIt.pointer;
if (chainedGap.firstKey === chainedGap.lastKey) {
// found a single-key gap: chain is complete
return resolve(coalescedGap);
}
coalescedGap.lastKey = chainedGap.lastKey;
coalescedGap.weight += chainedGap.weight;
}
// yield to the event loop before continuing the process
// of coalescing the gap chain
return process.nextTick(coalesceGapChainIteration);
};
coalesceGapChainIteration();
});
}
/**
* Lookup the next gap that overlaps with [minKey, maxKey]. Internally chained
* gaps are coalesced in the response into a single contiguous large gap.
*
* @param {string} minKey - minimum key overlapping with the returned gap
* @param {string} [maxKey] - maximum key overlapping with the returned gap
* @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
* was found, null otherwise, as a Promise
*/
async lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
let firstGap: GapSetEntry | null = null;
const minGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey: minKey });
const minGap = minGapIt.isAccessible() && minGapIt.pointer;
if (minGap && minGap.lastKey >= minKey) {
firstGap = minGap;
} else {
const maxGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: minKey });
const maxGap = maxGapIt.isAccessible() && maxGapIt.pointer;
if (maxGap && (maxKey === undefined || maxGap.firstKey <= maxKey)) {
firstGap = maxGap;
}
}
if (!firstGap) {
return null;
}
return this._coalesceGapChain(firstGap);
}
/**
* Get the maximum weight setting for individual gaps.
*
* @return {number} - maximum weight of individual gaps
*/
get maxWeight(): number {
return this._maxWeight;
}
/**
* Set the maximum weight setting for individual gaps.
*
* @param {number} gapWeight - maximum weight of individual gaps
*/
set maxWeight(gapWeight: number) {
this._maxWeight = gapWeight;
}
/**
* Get the number of gaps stored in this set.
*
* @return {number} - number of gaps stored in this set
*/
get size(): number {
return this._gaps.size();
}
/**
* Iterate over each gap of the set, ordered by first key
*
* @return {Iterator<GapSetEntry>} - an iterator over all gaps
* Example:
* for (const gap of myGapSet) { ... }
*/
[Symbol.iterator](): Iterator<GapSetEntry> {
return this._gaps[Symbol.iterator]();
}
/**
* Return an array containing all gaps, ordered by first key
*
* NOTE: there is a toArray() method in the OrderedSet implementation
* but it does not scale well and overflows the stack quickly. This is
* why we provide an implementation based on an iterator.
*
* @return {GapSetEntry[]} - an array containing all gaps
*/
toArray(): GapSetEntry[] {
return [...this];
}
}
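
A small sketch of the chaining and coalescing behavior implemented by setGap() and lookupGap() above (illustrative keys; the weights are chosen so the second gap chains rather than merges):

```typescript
import GapSet from './lib/algos/cache/GapSet';

const gaps = new GapSet(10);  // maxWeight = 10
gaps.setGap('a', 'f', 6);     // -> { firstKey: 'a', lastKey: 'f', weight: 6 }
gaps.setGap('f', 'k', 6);     // 6 + 6 > 10: chained as { a..f } and { f..k }
gaps.toArray();
// -> [{ firstKey: 'a', lastKey: 'f', weight: 6 },
//     { firstKey: 'f', lastKey: 'k', weight: 6 }]

// lookupGap() transparently coalesces the chain into one contiguous gap
gaps.lookupGap('c').then(gap => {
    // gap -> { firstKey: 'a', lastKey: 'k', weight: 12 }
});

// invalidation: a key falling inside a gap removes that gap
gaps.removeOverlappingGaps(['d']);  // -> 1 ({ a..f } is removed)
```
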

View File

@@ -1,124 +0,0 @@
export enum HeapOrder {
Min = -1,
Max = 1,
}
export enum CompareResult {
LT = -1,
EQ = 0,
GT = 1,
}
export type CompareFunction = (x: any, y: any) => CompareResult;
export class Heap {
size: number;
_maxSize: number;
_order: HeapOrder;
_heap: any[];
_cmpFn: CompareFunction;
constructor(size: number, order: HeapOrder, cmpFn: CompareFunction) {
this.size = 0;
this._maxSize = size;
this._order = order;
this._cmpFn = cmpFn;
this._heap = new Array<any>(this._maxSize);
}
_parent(i: number): number {
return Math.floor((i - 1) / 2);
}
_left(i: number): number {
return Math.floor((2 * i) + 1);
}
_right(i: number): number {
return Math.floor((2 * i) + 2);
}
_shouldSwap(childIdx: number, parentIdx: number): boolean {
return this._cmpFn(this._heap[childIdx], this._heap[parentIdx]) as number === this._order as number;
}
_swap(i: number, j: number) {
const tmp = this._heap[i];
this._heap[i] = this._heap[j];
this._heap[j] = tmp;
}
_heapify(i: number) {
const l = this._left(i);
const r = this._right(i);
let c = i;
if (l < this.size && this._shouldSwap(l, c)) {
c = l;
}
if (r < this.size && this._shouldSwap(r, c)) {
c = r;
}
if (c != i) {
this._swap(c, i);
this._heapify(c);
}
}
add(item: any): any {
if (this.size >= this._maxSize) {
return new Error('Max heap size reached');
}
++this.size;
let c = this.size - 1;
this._heap[c] = item;
while (c > 0) {
if (!this._shouldSwap(c, this._parent(c))) {
return null;
}
this._swap(c, this._parent(c));
c = this._parent(c);
}
return null;
};
remove(): any {
if (this.size <= 0) {
return null;
}
const ret = this._heap[0];
this._heap[0] = this._heap[this.size - 1];
this._heapify(0);
--this.size;
return ret;
};
peek(): any {
if (this.size <= 0) {
return null;
}
return this._heap[0];
};
}
export class MinHeap extends Heap {
constructor(size: number, cmpFn: CompareFunction) {
super(size, HeapOrder.Min, cmpFn);
}
}
export class MaxHeap extends Heap {
constructor(size: number, cmpFn: CompareFunction) {
super(size, HeapOrder.Max, cmpFn);
}
}
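
A brief usage sketch of the MinHeap defined above (the comparator must map to CompareResult values):

```typescript
import { MinHeap, CompareResult } from './lib/algos/heap/Heap';

const heap = new MinHeap(16, (x: number, y: number) =>
    x < y ? CompareResult.LT : x > y ? CompareResult.GT : CompareResult.EQ);

heap.add(5);      // add() returns null on success,
heap.add(1);      // or an Error once _maxSize is reached
heap.add(3);
heap.peek();      // -> 1 (smallest element, not removed)
heap.remove();    // -> 1; subsequent calls yield 3, then 5
```
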

View File

@ -1,6 +1,6 @@
'use strict'; // eslint-disable-line strict
const { FILTER_ACCEPT, SKIP_NONE } = require('./tools');
const { FILTER_SKIP, SKIP_NONE } = require('./tools');
// Use a heuristic to amortize the cost of JSON
// serialization/deserialization only on largest metadata where the
@ -92,26 +92,21 @@ class Extension {
* @param {object} entry - a listing entry from metadata
* expected format: { key, value }
* @return {number} - result of filtering the entry:
* FILTER_ACCEPT: entry is accepted and may or not be included
* in the result
* FILTER_SKIP: listing may skip directly (with "gte" param) to
* the key returned by the skipping() method
* FILTER_END: the results are complete, listing can be stopped
* > 0: entry is accepted and included in the result
* = 0: entry is accepted but not included (skipping)
* < 0: entry is not accepted, listing should finish
*/
filter(/* entry: { key, value } */) {
return FILTER_ACCEPT;
filter(entry) {
// the base implementation unconditionally skips; subclasses
// override this with their own filtering logic
return FILTER_SKIP;
}
/**
* Provides the next key at which the listing task is allowed to skip to.
* This could allow skipping over:
* - a key prefix ending with the delimiter
* - all remaining versions of an object when doing a current
* versions listing in v0 format
* - a cached "gap" of deleted objects when doing a current
* versions listing in v0 format
* Provides insight into why filter() is skipping an entry. This could be
* because it is skipping a range of delimited keys, or a range of specific
* versions when doing a master version listing.
*
* @return {string} - the next key at which the listing task is allowed to skip to
* @return {string} - the insight: a common prefix or a master key,
* or SKIP_NONE if there is no insight
*/
skipping() {
return SKIP_NONE;

View File

@@ -1,7 +1,7 @@
'use strict'; // eslint-disable-line strict
const { inc, checkLimit, listingParamsMasterKeysV0ToV1,
FILTER_END, FILTER_ACCEPT, SKIP_NONE } = require('./tools');
FILTER_END, FILTER_ACCEPT } = require('./tools');
const DEFAULT_MAX_KEYS = 1000;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
@@ -163,7 +163,7 @@ class MultipartUploads {
}
skipping() {
return SKIP_NONE;
return '';
}
/**

View File

@@ -2,7 +2,7 @@
const Extension = require('./Extension').default;
const { checkLimit, FILTER_END, FILTER_ACCEPT } = require('./tools');
const { checkLimit, FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
const DEFAULT_MAX_KEYS = 10000;
/**
@@ -91,7 +91,7 @@ class List extends Extension {
* < 0 : listing done
*/
filter(elem) {
// Check if the result array is full
// Check first, in case maxKeys <= 0
if (this.keys >= this.maxKeys) {
return FILTER_END;
}
@@ -99,7 +99,7 @@
this.filterKeyStartsWith !== undefined) &&
typeof elem === 'object' &&
!this.customFilter(elem.value)) {
return FILTER_ACCEPT;
return FILTER_SKIP;
}
if (typeof elem === 'object') {
this.res.push({

View File

@@ -32,7 +32,7 @@ export interface DelimiterFilterState_SkippingPrefix extends FilterState {
type KeyHandler = (key: string, value: string) => FilterReturnValue;
export type ResultObject = {
type ResultObject = {
CommonPrefixes: string[];
Contents: {
key: string;
@@ -196,9 +196,6 @@ export class Delimiter extends Extension {
}
getCommonPrefix(key: string): string | undefined {
if (!this.delimiter) {
return undefined;
}
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
if (delimiterIndex === -1) {
@@ -308,7 +305,7 @@ export class Delimiter extends Extension {
switch (this.state.id) {
case DelimiterFilterStateId.SkippingPrefix:
const { prefix } = <DelimiterFilterState_SkippingPrefix> this.state;
return inc(prefix);
return prefix;
default:
return SKIP_NONE;

View File

@@ -1,127 +0,0 @@
const { DelimiterMaster } = require('./delimiterMaster');
const { FILTER_ACCEPT, FILTER_END } = require('./tools');
type ResultObject = {
Contents: {
key: string;
value: string;
}[];
IsTruncated: boolean;
NextMarker ?: string;
};
/**
* Handle object listing with parameters. This extends the base class DelimiterMaster
* to return the master/current versions.
*/
class DelimiterCurrent extends DelimiterMaster {
/**
* Delimiter listing of current versions.
* @param {Object} parameters - listing parameters
* @param {String} parameters.beforeDate - limit the response to keys older than beforeDate
* @param {String} parameters.excludedDataStoreName - excluded datastore name
* @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
this.beforeDate = parameters.beforeDate;
this.excludedDataStoreName = parameters.excludedDataStoreName;
this.maxScannedLifecycleListingEntries = parameters.maxScannedLifecycleListingEntries;
this.scannedKeys = 0;
}
genMDParamsV0() {
const params = super.genMDParamsV0();
// the lastModified and dataStoreName parameters are used by metadata
// backends that support built-in filtering, currently only MongoDB
if (this.beforeDate) {
params.lastModified = {
lt: this.beforeDate,
};
}
if (this.excludedDataStoreName) {
params.dataStoreName = {
ne: this.excludedDataStoreName,
};
}
return params;
}
/**
* Parses the stringified entry's value.
* @param s - stringified value
* @return - undefined if parsing fails, otherwise it contains the parsed value.
*/
_parse(s) {
let p;
try {
p = JSON.parse(s);
} catch (e: any) {
this.logger.warn(
'Could not parse Object Metadata while listing',
{ err: e.toString() });
}
return p;
}
/**
* check if the max keys count has been reached and set the
* final state of the result if it is the case
*
* specialized implementation on DelimiterCurrent to also check
* the number of scanned keys
*
* @return {Boolean} - indicates if the iteration has to stop
*/
_reachedMaxKeys(): boolean {
if (this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries) {
this.IsTruncated = true;
this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
{
maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
scannedKeys: this.scannedKeys,
});
return true;
}
return super._reachedMaxKeys();
}
addContents(key, value) {
++this.scannedKeys;
const parsedValue = this._parse(value);
// if parsing fails, skip the key.
if (parsedValue) {
const lastModified = parsedValue['last-modified'];
const dataStoreName = parsedValue.dataStoreName;
// We then check that the current version is older than "beforeDate", and that
// "excludedDataStoreName" is either not specified or differs from the version's data store name.
if ((!this.beforeDate || (lastModified && lastModified < this.beforeDate)) &&
(!this.excludedDataStoreName || dataStoreName !== this.excludedDataStoreName)) {
super.addContents(key, value);
}
// In the event of a timeout occurring before any content is added,
// NextMarker is updated even if the object is not eligible.
// It minimizes the amount of data that the client needs to re-process if the request times out.
this.nextMarker = key;
}
}
result(): object {
const result: ResultObject = {
Contents: this.Contents,
IsTruncated: this.IsTruncated,
};
if (this.IsTruncated) {
result.NextMarker = this.nextMarker;
}
return result;
}
}
module.exports = { DelimiterCurrent };
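A sketch of how the extra lifecycle parameters surface in the metadata query (parameter values are hypothetical, and `logger` is assumed to be a request logger instance):

```typescript
const { DelimiterCurrent } = require('./delimiterCurrent');

const listing = new DelimiterCurrent({
    beforeDate: '2023-01-01T00:00:00.000Z',
    excludedDataStoreName: 'cold-site',
    maxScannedLifecycleListingEntries: 10000,
}, logger, 'v0');

listing.genMDParamsV0();
// => along with DelimiterMaster's usual range params:
// {
//     lastModified: { lt: '2023-01-01T00:00:00.000Z' },
//     dataStoreName: { ne: 'cold-site' },
//     ...
// }
```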

View File

@@ -5,23 +5,18 @@ import {
DelimiterFilterStateId,
DelimiterFilterState_NotSkipping,
DelimiterFilterState_SkippingPrefix,
ResultObject,
} from './delimiter';
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { BucketVersioningKeyFormat } = VSConst;
const { FILTER_ACCEPT, FILTER_SKIP, FILTER_END, SKIP_NONE, inc } = require('./tools');
import { GapSetEntry } from '../cache/GapSet';
import { GapCacheInterface } from '../cache/GapCache';
const { FILTER_ACCEPT, FILTER_SKIP, FILTER_END } = require('./tools');
const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes } = VSConst;
export const enum DelimiterMasterFilterStateId {
const enum DelimiterMasterFilterStateId {
SkippingVersionsV0 = 101,
WaitVersionAfterPHDV0 = 102,
SkippingGapV0 = 103,
};
interface DelimiterMasterFilterState_SkippingVersionsV0 extends FilterState {
@@ -34,121 +29,37 @@ interface DelimiterMasterFilterState_WaitVersionAfterPHDV0 extends FilterState {
masterKey: string,
};
interface DelimiterMasterFilterState_SkippingGapV0 extends FilterState {
id: DelimiterMasterFilterStateId.SkippingGapV0,
};
export const enum GapCachingState {
NoGapCache = 0, // there is no gap cache
UnknownGap = 1, // waiting for a cache lookup
GapLookupInProgress = 2, // asynchronous gap lookup in progress
GapCached = 3, // an upcoming or already skippable gap is cached
NoMoreGap = 4, // the cache doesn't have any more gaps inside the listed range
};
type GapCachingInfo_NoGapCache = {
state: GapCachingState.NoGapCache;
};
type GapCachingInfo_NoCachedGap = {
state: GapCachingState.UnknownGap
| GapCachingState.GapLookupInProgress
gapCache: GapCacheInterface;
};
type GapCachingInfo_GapCached = {
state: GapCachingState.GapCached;
gapCache: GapCacheInterface;
gapCached: GapSetEntry;
};
type GapCachingInfo_NoMoreGap = {
state: GapCachingState.NoMoreGap;
};
type GapCachingInfo = GapCachingInfo_NoGapCache
| GapCachingInfo_NoCachedGap
| GapCachingInfo_GapCached
| GapCachingInfo_NoMoreGap;
export const enum GapBuildingState {
Disabled = 0, // no gap cache or no gap building needed (e.g. in V1 versioning format)
NotBuilding = 1, // not currently building a gap (i.e. not listing within a gap)
Building = 2, // currently building a gap (i.e. listing within a gap)
Expired = 3, // not allowed to build due to exposure delay timeout
};
type GapBuildingInfo_NothingToBuild = {
state: GapBuildingState.Disabled | GapBuildingState.Expired;
};
type GapBuildingParams = {
/**
* minimum weight for a gap to be created in the cache
*/
minGapWeight: number;
/**
* trigger a cache setGap() call every N skippable keys
*/
triggerSaveGapWeight: number;
/**
* timestamp to assess whether we're still inside the validity period to
* be allowed to build gaps
*/
initTimestamp: number;
};
type GapBuildingInfo_NotBuilding = {
state: GapBuildingState.NotBuilding;
gapCache: GapCacheInterface;
params: GapBuildingParams;
};
type GapBuildingInfo_Building = {
state: GapBuildingState.Building;
gapCache: GapCacheInterface;
params: GapBuildingParams;
/**
* Gap currently being created
*/
gap: GapSetEntry;
/**
* total current weight of the gap being created
*/
gapWeight: number;
};
type GapBuildingInfo = GapBuildingInfo_NothingToBuild
| GapBuildingInfo_NotBuilding
| GapBuildingInfo_Building;
/**
* Handle object listing with parameters. This extends the base class Delimiter
* to return the raw master versions of existing objects.
*/
export class DelimiterMaster extends Delimiter {
_gapCaching: GapCachingInfo;
_gapBuilding: GapBuildingInfo;
_refreshedBuildingParams: GapBuildingParams | null;
/**
* Delimiter listing of master versions.
* @param {Object} parameters - listing parameters
* @param {String} [parameters.delimiter] - delimiter per amazon format
* @param {String} [parameters.prefix] - prefix per amazon format
* @param {String} [parameters.marker] - marker per amazon format
* @param {Number} [parameters.maxKeys] - number of keys to list
* @param {Boolean} [parameters.v2] - indicates whether v2 format
* @param {String} [parameters.startAfter] - marker per amazon v2 format
* @param {String} [parameters.continuationToken] - obfuscated amazon token
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat="v0"] - versioning key format
* @param {String} parameters.delimiter - delimiter per amazon format
* @param {String} parameters.prefix - prefix per amazon format
* @param {String} parameters.marker - marker per amazon format
* @param {Number} parameters.maxKeys - number of keys to list
* @param {Boolean} parameters.v2 - indicates whether v2 format
* @param {String} parameters.startAfter - marker per amazon v2 format
* @param {String} parameters.continuationToken - obfuscated amazon token
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat?: string) {
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
skipping: this.skippingV1,
},
}[this.vFormat]);
if (this.vFormat === BucketVersioningKeyFormat.v0) {
// override Delimiter's implementation of NotSkipping for
// DelimiterMaster logic (skipping versions and special
@@ -166,10 +77,6 @@ export class DelimiterMaster extends Delimiter {
DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
this.keyHandler_WaitVersionAfterPHDV0.bind(this));
this.setKeyHandler(
DelimiterMasterFilterStateId.SkippingGapV0,
this.keyHandler_SkippingGapV0.bind(this));
if (this.marker) {
// distinct initial state to include some special logic
// before the first master key is found that does not have
@@ -183,186 +90,9 @@ export class DelimiterMaster extends Delimiter {
id: DelimiterFilterStateId.NotSkipping,
};
}
} else {
// save base implementation of the `NotSkipping` state in
// Delimiter before overriding it with ours, to be able to call it from there
this.keyHandler_NotSkipping_Delimiter = this.keyHandlers[DelimiterFilterStateId.NotSkipping];
this.setKeyHandler(
DelimiterFilterStateId.NotSkipping,
this.keyHandler_NotSkippingPrefixNorVersionsV1.bind(this));
}
// in v1, we can directly use Delimiter's implementation,
// which is already set to the proper state
// default initialization of the gap cache and building states, can be
// set by refreshGapCache()
this._gapCaching = {
state: GapCachingState.NoGapCache,
};
this._gapBuilding = {
state: GapBuildingState.Disabled,
};
this._refreshedBuildingParams = null;
}
/**
* Get the validity period left before a refresh of the gap cache is needed
* to continue building new gaps.
*
* @return {number|null} one of:
* - the remaining time in milliseconds in which gaps can be added to the
* cache before a call to refreshGapCache() is required
* - or 0 if there is no time left and a call to refreshGapCache() is required
* to resume caching gaps
* - or null if refreshing the cache is never needed (because the gap cache
* is either not available or not used)
*/
getGapBuildingValidityPeriodMs(): number | null {
let gapBuilding;
switch (this._gapBuilding.state) {
case GapBuildingState.Disabled:
return null;
case GapBuildingState.Expired:
return 0;
case GapBuildingState.NotBuilding:
gapBuilding = <GapBuildingInfo_NotBuilding> this._gapBuilding;
break;
case GapBuildingState.Building:
gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
break;
}
const { gapCache, params } = gapBuilding;
const elapsedTime = Date.now() - params.initTimestamp;
return Math.max(gapCache.exposureDelayMs - elapsedTime, 0);
}
/**
* Refresh the gaps caching logic (gaps are series of current delete markers
* in V0 bucket metadata format). It has two effects:
*
* - starts exposing existing and future gaps from the cache to efficiently
* skip over series of current delete markers that have been seen and cached
* earlier
*
* - enables building and caching new gaps (or extend existing ones), for a
* limited time period defined by the `gapCacheProxy.exposureDelayMs` value
* in milliseconds. To refresh the validity period and resume building and
* caching new gaps, one must restart a new listing from the database (starting
* at the current listing key, included), then call refreshGapCache() again.
*
* @param {GapCacheInterface} gapCacheProxy - API proxy to the gaps cache
* (the proxy should handle prefixing object keys with the bucket name)
* @param {number} [minGapWeight=100] - minimum weight of a gap for it to be
* added in the cache
* @param {number} [triggerSaveGapWeight] - cumulative weight to wait for
* before saving the current building gap. Cannot be greater than
* `gapCacheProxy.maxGapWeight` (the value is thresholded to `maxGapWeight`
* otherwise). Defaults to `gapCacheProxy.maxGapWeight / 2`.
* @return {undefined}
*/
refreshGapCache(
gapCacheProxy: GapCacheInterface,
minGapWeight?: number,
triggerSaveGapWeight?: number
): void {
if (this.vFormat !== BucketVersioningKeyFormat.v0) {
return;
}
if (this._gapCaching.state === GapCachingState.NoGapCache) {
this._gapCaching = {
state: GapCachingState.UnknownGap,
gapCache: gapCacheProxy,
};
}
const refreshedBuildingParams: GapBuildingParams = {
minGapWeight: minGapWeight || 100,
triggerSaveGapWeight: triggerSaveGapWeight
|| Math.trunc(gapCacheProxy.maxGapWeight / 2),
initTimestamp: Date.now(),
};
if (this._gapBuilding.state === GapBuildingState.Building) {
// refreshed params will be applied as soon as the current building gap is saved
this._refreshedBuildingParams = refreshedBuildingParams;
} else {
this._gapBuilding = {
state: GapBuildingState.NotBuilding,
gapCache: gapCacheProxy,
params: refreshedBuildingParams,
};
}
}
/**
* Trigger a lookup of the closest upcoming or already skippable gap.
*
* @param {string} fromKey - lookup a gap not before 'fromKey'
* @return {undefined} - the lookup is asynchronous and its
* response is handled inside this function
*/
_triggerGapLookup(gapCaching: GapCachingInfo_NoCachedGap, fromKey: string): void {
this._gapCaching = {
state: GapCachingState.GapLookupInProgress,
gapCache: gapCaching.gapCache,
};
const maxKey = this.prefix ? inc(this.prefix) : undefined;
gapCaching.gapCache.lookupGap(fromKey, maxKey).then(_gap => {
const gap = <GapSetEntry | null> _gap;
if (gap) {
this._gapCaching = {
state: GapCachingState.GapCached,
gapCache: gapCaching.gapCache,
gapCached: gap,
};
} else {
this._gapCaching = {
state: GapCachingState.NoMoreGap,
};
}
});
}
_checkGapOnMasterDeleteMarker(key: string): FilterReturnValue {
switch (this._gapBuilding.state) {
case GapBuildingState.Disabled:
case GapBuildingState.Expired:
break;
case GapBuildingState.NotBuilding:
this._createBuildingGap(key, 1);
break;
case GapBuildingState.Building:
this._updateBuildingGap(key);
break;
}
if (this._gapCaching.state === GapCachingState.GapCached) {
const { gapCached } = this._gapCaching;
if (key >= gapCached.firstKey) {
if (key <= gapCached.lastKey) {
// we are inside the last looked up cached gap: transition to
// 'SkippingGapV0' state
this.setState(<DelimiterMasterFilterState_SkippingGapV0> {
id: DelimiterMasterFilterStateId.SkippingGapV0,
});
// cut the current gap before skipping, it will be merged or
// chained with the existing one (depending on its weight)
if (this._gapBuilding.state === GapBuildingState.Building) {
// subtract 1 from the weight because we are going to chain this gap,
// which has an overlap of one key.
this._gapBuilding.gap.weight -= 1;
this._cutBuildingGap();
}
return FILTER_SKIP;
}
// as we are past the cached gap, we will need another lookup
this._gapCaching = {
state: GapCachingState.UnknownGap,
gapCache: this._gapCaching.gapCache,
};
}
}
if (this._gapCaching.state === GapCachingState.UnknownGap) {
this._triggerGapLookup(this._gapCaching, key);
}
return FILTER_ACCEPT;
}
filter_onNewMasterKeyV0(key: string, value: string): FilterReturnValue {
@@ -374,7 +104,7 @@ export class DelimiterMaster extends Delimiter {
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
masterKey: key,
});
return this._checkGapOnMasterDeleteMarker(key);
return FILTER_ACCEPT;
}
if (Version.isPHD(value)) {
// master version is a PHD version: wait for the first
@@ -386,9 +116,6 @@
});
return FILTER_ACCEPT;
}
// cut the current gap as soon as a non-deleted entry is seen
this._cutBuildingGap();
if (key.startsWith(DbPrefixes.Replay)) {
// skip internal replay prefix entirely
this.setState(<DelimiterFilterState_SkippingPrefix> {
@@ -400,7 +127,6 @@
if (this._reachedMaxKeys()) {
return FILTER_END;
}
const commonPrefix = this.addCommonPrefixOrContents(key, value);
if (commonPrefix) {
// transition into SkippingPrefix state to skip all following keys
@@ -423,30 +149,11 @@
return this.filter_onNewMasterKeyV0(key, value);
}
filter_onNewMasterKeyV1(key: string, value: string): FilterReturnValue {
// if this master key is a delete marker, accept it without
// adding the version to the contents
if (Version.isDeleteMarker(value)) {
return FILTER_ACCEPT;
}
// use base Delimiter's implementation
return this.keyHandler_NotSkipping_Delimiter(key, value);
}
keyHandler_NotSkippingPrefixNorVersionsV1(key: string, value: string): FilterReturnValue {
return this.filter_onNewMasterKeyV1(key, value);
}
keyHandler_SkippingVersionsV0(key: string, value: string): FilterReturnValue {
/* In the SkippingVersionsV0 state, skip all version keys
* (<key><versionIdSeparator><version>) */
const versionIdIndex = key.indexOf(VID_SEP);
if (versionIdIndex !== -1) {
// version keys count in the building gap weight because they must
// also be listed until skipped
if (this._gapBuilding.state === GapBuildingState.Building) {
this._updateBuildingGap(key);
}
return FILTER_SKIP;
}
return this.filter_onNewMasterKeyV0(key, value);
@@ -470,151 +177,14 @@
return this.filter_onNewMasterKeyV0(key, value);
}
keyHandler_SkippingGapV0(key: string, value: string): FilterReturnValue {
const { gapCache, gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
if (key <= gapCached.lastKey) {
return FILTER_SKIP;
}
this._gapCaching = {
state: GapCachingState.UnknownGap,
gapCache,
};
this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
});
// Start a gap with weight=0 from the latest skippable key. This will
// allow extending the gap just skipped with a chained gap in case
// other delete markers are seen after the existing gap is skipped.
this._createBuildingGap(gapCached.lastKey, 0, gapCached.weight);
return this.handleKey(key, value);
}
skippingBase(): string | undefined {
switch (this.state.id) {
case DelimiterMasterFilterStateId.SkippingVersionsV0:
const { masterKey } = <DelimiterMasterFilterState_SkippingVersionsV0> this.state;
return masterKey + inc(VID_SEP);
case DelimiterMasterFilterStateId.SkippingGapV0:
const { gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
return gapCached.lastKey;
return masterKey + VID_SEP;
default:
return super.skippingBase();
}
}
result(): ResultObject {
this._cutBuildingGap();
return super.result();
}
_checkRefreshedBuildingParams(params: GapBuildingParams): GapBuildingParams {
if (this._refreshedBuildingParams) {
const newParams = this._refreshedBuildingParams;
this._refreshedBuildingParams = null;
return newParams;
}
return params;
}
/**
* Save the gap being built if allowed (i.e. still within the
* allocated exposure time window).
*
* @return {boolean} - true if the gap was saved, false if we are
* outside the allocated exposure time window.
*/
_saveBuildingGap(): boolean {
const { gapCache, params, gap, gapWeight } =
<GapBuildingInfo_Building> this._gapBuilding;
const totalElapsed = Date.now() - params.initTimestamp;
if (totalElapsed >= gapCache.exposureDelayMs) {
this._gapBuilding = {
state: GapBuildingState.Expired,
};
this._refreshedBuildingParams = null;
return false;
}
const { firstKey, lastKey, weight } = gap;
gapCache.setGap(firstKey, lastKey, weight);
this._gapBuilding = {
state: GapBuildingState.Building,
gapCache,
params: this._checkRefreshedBuildingParams(params),
gap: {
firstKey: gap.lastKey,
lastKey: gap.lastKey,
weight: 0,
},
gapWeight,
};
return true;
}
/**
* Create a new gap to be extended afterwards
*
* @param {string} newKey - gap's first key
* @param {number} startWeight - initial weight of the building gap (usually 0 or 1)
* @param {number} [cachedWeight] - if continuing a cached gap, weight of the existing
* cached portion
* @return {undefined}
*/
_createBuildingGap(newKey: string, startWeight: number, cachedWeight?: number): void {
if (this._gapBuilding.state === GapBuildingState.NotBuilding) {
const { gapCache, params } = <GapBuildingInfo_NotBuilding> this._gapBuilding;
this._gapBuilding = {
state: GapBuildingState.Building,
gapCache,
params: this._checkRefreshedBuildingParams(params),
gap: {
firstKey: newKey,
lastKey: newKey,
weight: startWeight,
},
gapWeight: (cachedWeight || 0) + startWeight,
};
}
}
_updateBuildingGap(newKey: string): void {
const gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
const { params, gap } = gapBuilding;
gap.lastKey = newKey;
gap.weight += 1;
gapBuilding.gapWeight += 1;
// the GapCache API requires updating a gap regularly because it can only split
// it once per update, by the known last key. In practice the default behavior
// is to trigger an update after a number of keys that is half the maximum weight.
// It also lets other listings benefit from the cache sooner.
if (gapBuilding.gapWeight >= params.minGapWeight &&
gap.weight >= params.triggerSaveGapWeight) {
this._saveBuildingGap();
}
}
_cutBuildingGap(): void {
if (this._gapBuilding.state === GapBuildingState.Building) {
let gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
let { gapCache, params, gap, gapWeight } = gapBuilding;
// only set gaps that are significant enough in weight and
// with a non-empty extension
if (gapWeight >= params.minGapWeight && gap.weight > 0) {
// we're done if we were not allowed to save the gap
if (!this._saveBuildingGap()) {
return;
}
// params may have been refreshed, reload them
gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
params = gapBuilding.params;
}
this._gapBuilding = {
state: GapBuildingState.NotBuilding,
gapCache,
params,
};
}
}
}
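A sketch of how a caller could wire a gap cache into this class, based on the `refreshGapCache()` and `getGapBuildingValidityPeriodMs()` contracts documented above (`gapCacheProxy`, `params`, `logger` and the raw entry stream are assumed, not part of this excerpt):

```typescript
const { DelimiterMaster } = require('./delimiterMaster');
const { FILTER_END } = require('./tools');

const listing = new DelimiterMaster(params, logger, 'v0');
// gapCacheProxy is assumed to implement GapCacheInterface:
// exposureDelayMs, maxGapWeight, lookupGap(), setGap()
listing.refreshGapCache(gapCacheProxy, 100 /* minGapWeight */);

for (const entry of rawDbEntries) {
    if (listing.filter(entry) === FILTER_END) {
        break;
    }
    if (listing.getGapBuildingValidityPeriodMs() === 0) {
        // exposure window elapsed: to keep building gaps, restart a
        // listing from the current key and call refreshGapCache() again
        break;
    }
}
const res = listing.result(); // also cuts any gap still being built
```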

View File

@@ -1,202 +0,0 @@
const { DelimiterVersions } = require('./delimiterVersions');
const { FILTER_END, FILTER_SKIP } = require('./tools');
const TRIM_METADATA_MIN_BLOB_SIZE = 10000;
/**
* Handle object listing with parameters. This extends the base class DelimiterVersions
* to return the raw non-current versions objects.
*/
class DelimiterNonCurrent extends DelimiterVersions {
/**
* Delimiter listing of non-current versions.
* @param {Object} parameters - listing parameters
* @param {String} parameters.keyMarker - key marker
* @param {String} parameters.versionIdMarker - version id marker
* @param {String} parameters.beforeDate - limit the response to keys with a stale date older than beforeDate.
* The stale date is the date when a version becomes non-current.
* @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
* @param {String} parameters.excludedDataStoreName - exclude dataStoreName matches from the versions
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
this.beforeDate = parameters.beforeDate;
this.excludedDataStoreName = parameters.excludedDataStoreName;
this.maxScannedLifecycleListingEntries = parameters.maxScannedLifecycleListingEntries;
// internal state
this.prevKey = null;
this.staleDate = null;
this.scannedKeys = 0;
}
getLastModified(value) {
let lastModified;
try {
const v = JSON.parse(value);
lastModified = v['last-modified'];
} catch (e) {
this.logger.warn('could not parse Object Metadata while listing',
{
method: 'getLastModified',
err: e.toString(),
});
}
return lastModified;
}
// Override keyHandler_SkippingVersions to include the last version from the previous listing.
// The creation (last-modified) date of this version will be the stale date of the following version.
// eslint-disable-next-line camelcase
keyHandler_SkippingVersions(key, versionId, value) {
if (key === this.keyMarker) {
// since the nonversioned key equals the marker, there is
// necessarily a versionId in this key
const _versionId = versionId;
if (_versionId < this.versionIdMarker) {
// skip all versions until marker
return FILTER_SKIP;
}
}
this.setState({
id: 1 /* NotSkipping */,
});
return this.handleKey(key, versionId, value);
}
filter(obj) {
if (this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries) {
this.IsTruncated = true;
this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
{
maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
scannedKeys: this.scannedKeys,
});
return FILTER_END;
}
++this.scannedKeys;
return super.filter(obj);
}
/**
* NOTE: Versions of a given key are sorted from latest to oldest,
* thanks to the way version ids are generated.
* DESCRIPTION: Skip the version if it represents the master key, but keep its last-modified date in memory,
* which will be the stale date of the following version.
* The following version is pushed only:
* - if the "stale date" (picked up from the previous version) is available (JSON.parse has not failed),
* - if "beforeDate" is not specified, or the "stale date" is older than it,
* - if "excludedDataStoreName" is not specified, or the data store name differs from it.
* The in-memory "stale date" is then updated with the version's last-modified date, to be used for
* the following version.
* The process stops and returns the available results if either:
* - no more metadata keys are left to be processed
* - the listing reaches the maximum number of keys to be returned
* - the internal timeout is reached
* @param {String} key - The key to add
* @param {String} versionId - The version id
* @param {String} value - The value of the key
* @return {undefined}
*/
addVersion(key, versionId, value) {
this.nextKeyMarker = key;
this.nextVersionIdMarker = versionId;
// Skip the version if it represents the current version, but keep its last-modified date,
// which will be the stale date of the following version.
const isCurrentVersion = key !== this.prevKey;
if (isCurrentVersion) {
this.staleDate = this.getLastModified(value);
this.prevKey = key;
return;
}
// The following version is pushed only:
// - if the "stale date" (picked up from the previous version) is available (JSON.parse has not failed),
// - if "beforeDate" is not specified or if specified and the "stale date" is older.
// - if "excludedDataStoreName" is not specified or if specified and the data store name is different
let lastModified;
if (this.staleDate && (!this.beforeDate || this.staleDate < this.beforeDate)) {
const parsedValue = this._parse(value);
// if parsing fails, skip the key.
if (parsedValue) {
const dataStoreName = parsedValue.dataStoreName;
lastModified = parsedValue['last-modified'];
if (!this.excludedDataStoreName || dataStoreName !== this.excludedDataStoreName) {
const s = this._stringify(parsedValue, this.staleDate);
// check that _stringify succeeds to only push objects with a defined staleDate.
if (s) {
this.Versions.push({ key, value: s });
++this.keys;
}
}
}
}
// The in-memory "stale date" is then updated with the version's last-modified date to be used for
// the following version.
this.staleDate = lastModified || this.getLastModified(value);
return;
}
/**
* Parses the stringified entry's value and remove the location property if too large.
* @param {string} s - stringified value
* @return {object} p - undefined if parsing fails, otherwise it contains the parsed value.
*/
_parse(s) {
let p;
try {
p = JSON.parse(s);
if (s.length >= TRIM_METADATA_MIN_BLOB_SIZE) {
delete p.location;
}
} catch (e) {
this.logger.warn('Could not parse Object Metadata while listing', {
method: 'DelimiterNonCurrent._parse',
err: e.toString(),
});
}
return p;
}
_stringify(parsedMD, staleDate) {
const p = parsedMD;
let s = undefined;
p.staleDate = staleDate;
try {
s = JSON.stringify(p);
} catch (e) {
this.logger.warn('could not stringify Object Metadata while listing', {
method: 'DelimiterNonCurrent._stringify',
err: e.toString(),
});
}
return s;
}
result() {
const { Versions, IsTruncated, NextKeyMarker, NextVersionIdMarker } = super.result();
const result = {
Contents: Versions,
IsTruncated,
};
if (NextKeyMarker) {
result.NextKeyMarker = NextKeyMarker;
}
if (NextVersionIdMarker) {
result.NextVersionIdMarker = NextVersionIdMarker;
}
return result;
}
}
module.exports = { DelimiterNonCurrent };
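The stale-date handoff in `addVersion()` can be illustrated with a worked trace (key names, version ids and dates are hypothetical):

```typescript
// Versions of 'doc' arrive newest first:
//   v3 (current), last-modified 2023-03-01
//   v2,           last-modified 2023-02-01
//   v1,           last-modified 2023-01-01
//
// addVersion('doc', v3, ...) -> current version, not listed;
//     staleDate = 2023-03-01 (the date v2 became non-current)
// addVersion('doc', v2, ...) -> listed with staleDate 2023-03-01;
//     staleDate updated to 2023-02-01
// addVersion('doc', v1, ...) -> listed with staleDate 2023-02-01
```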

View File

@@ -1,204 +0,0 @@
const DelimiterVersions = require('./delimiterVersions').DelimiterVersions;
const { FILTER_END } = require('./tools');
const TRIM_METADATA_MIN_BLOB_SIZE = 10000;
/**
* Handle object listing with parameters. This extends the base class DelimiterVersions
* to return the orphan delete markers. Orphan delete markers are also
* referred to as expired object delete markers.
* They are delete markers with zero noncurrent versions.
*/
class DelimiterOrphanDeleteMarker extends DelimiterVersions {
/**
* Delimiter listing of orphan delete markers.
* @param {Object} parameters - listing parameters
* @param {String} parameters.beforeDate - limit the response to keys older than beforeDate
* @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
const {
marker,
maxKeys,
prefix,
beforeDate,
maxScannedLifecycleListingEntries,
} = parameters;
const versionParams = {
// The orphan delete marker logic uses the term 'marker' instead of 'keyMarker',
// as the latter could suggest the presence of a 'versionIdMarker'.
keyMarker: marker,
maxKeys,
prefix,
};
super(versionParams, logger, vFormat);
this.maxScannedLifecycleListingEntries = maxScannedLifecycleListingEntries;
this.beforeDate = beforeDate;
// this.prevKeyName is used as a marker for the next listing when the current one reaches its entry limit.
// We cannot rely on this.keyName, as it contains the name of the current key.
// In the event of a listing interruption due to reaching the maximum scanned entries,
// relying on this.keyName would cause the next listing to skip the current key because S3 starts
// listing after the marker.
this.prevKeyName = null;
this.keyName = null;
this.value = null;
this.scannedKeys = 0;
}
_reachedMaxKeys() {
if (this.keys >= this.maxKeys) {
return true;
}
return false;
}
_addOrphan() {
const parsedValue = this._parse(this.value);
// if parsing fails, skip the key.
if (parsedValue) {
const lastModified = parsedValue['last-modified'];
const isDeleteMarker = parsedValue.isDeleteMarker;
// We then check if the orphan version is a delete marker and if it is older than the "beforeDate"
if ((!this.beforeDate || (lastModified && lastModified < this.beforeDate)) && isDeleteMarker) {
// Prefer returning untrimmed data rather than failing the listing if re-stringifying the value fails.
const s = this._stringify(parsedValue) || this.value;
this.Versions.push({ key: this.keyName, value: s });
this.nextKeyMarker = this.keyName;
++this.keys;
}
}
}
/**
* Parses the stringified entry's value and remove the location property if too large.
* @param {string} s - stringified value
* @return {object} p - undefined if parsing fails, otherwise it contains the parsed value.
*/
_parse(s) {
let p;
try {
p = JSON.parse(s);
if (s.length >= TRIM_METADATA_MIN_BLOB_SIZE) {
delete p.location;
}
} catch (e) {
this.logger.warn('Could not parse Object Metadata while listing', {
method: 'DelimiterOrphanDeleteMarker._parse',
err: e.toString(),
});
}
return p;
}
_stringify(value) {
const p = value;
let s = undefined;
try {
s = JSON.stringify(p);
} catch (e) {
this.logger.warn('could not stringify Object Metadata while listing',
{
method: 'DelimiterOrphanDeleteMarker._stringify',
err: e.toString(),
});
}
return s;
}
/**
* The purpose of _isMaxScannedEntriesReached is to restrict the number of scanned entries,
* thus controlling resource overhead (CPU...).
* @return {boolean} isMaxScannedEntriesReached - true if the maximum limit on the number
* of entries scanned has been reached, false otherwise.
*/
_isMaxScannedEntriesReached() {
return this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries;
}
filter(obj) {
if (this._isMaxScannedEntriesReached()) {
this.nextKeyMarker = this.prevKeyName;
this.IsTruncated = true;
this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
{
maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
scannedKeys: this.scannedKeys,
});
return FILTER_END;
}
++this.scannedKeys;
return super.filter(obj);
}
/**
* NOTE: Versions of a given key are sorted from latest to oldest,
* thanks to the way version ids are generated.
* DESCRIPTION: For a given key, the latest version is kept in memory since it is the current version.
* If the following version references a new key, it means that the previous one was an orphan version.
* We then check whether the orphan version is a delete marker and whether it is older than the "beforeDate".
* The process stops and returns the available results if either:
* - no more metadata keys are left to be processed
* - the listing reaches the maximum number of keys to be returned
* - the internal timeout is reached
* NOTE: we cannot leverage MongoDB to list keys older than "beforeDate",
* because we would then be unable to assess whether they are orphans.
* @param {String} key - The object key.
* @param {String} versionId - The object version id.
* @param {String} value - The value of the key
* @return {undefined}
*/
addVersion(key, versionId, value) {
// For a given key, the youngest version is kept in memory since it represents the current version.
if (key !== this.keyName) {
// If this.value is defined, it means that <this.keyName, this.value> pair is "allowed" to be an orphan.
if (this.value) {
this._addOrphan();
}
this.prevKeyName = this.keyName;
this.keyName = key;
this.value = value;
return;
}
// If the key is not the current version, we can skip it in the next listing
// in the case where the current listing is interrupted due to reaching the maximum scanned entries.
this.prevKeyName = key;
this.keyName = key;
this.value = null;
return;
}
result() {
// Only check for remaining last orphan delete marker if the listing is not interrupted.
// This will help avoid false positives.
if (!this._isMaxScannedEntriesReached()) {
// The following check makes sure the last orphan delete marker is not forgotten.
if (this.keys < this.maxKeys) {
if (this.value) {
this._addOrphan();
}
// The following makes sure that if maxKeys is reached, IsTruncated is set to true.
// We moved the "IsTruncated" logic out of _reachedMaxKeys to make sure we take into account the last entity
// if the listing is truncated right before the last entity and that entity is an orphan delete marker.
} else {
this.IsTruncated = this.maxKeys > 0;
}
}
const result = {
Contents: this.Versions,
IsTruncated: this.IsTruncated,
};
if (this.IsTruncated) {
result.NextMarker = this.nextKeyMarker;
}
return result;
}
}
module.exports = { DelimiterOrphanDeleteMarker };
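A worked trace of the orphan detection in `addVersion()` (hypothetical keys; a key change is what reveals whether the previous key had a lone version):

```typescript
// addVersion('a', <vid>, dm)  -> 'a' remembered (keyName='a', value=dm)
// addVersion('b', <vid>, dm)  -> key changed and 'a' had a single version:
//                                _addOrphan() lists 'a' if it is a delete
//                                marker older than "beforeDate"
// addVersion('b', <vid2>, v)  -> same key again: 'b' has a noncurrent
//                                version, so value is reset to null and
//                                'b' will not be listed as an orphan
//
// result() flushes the last remembered candidate only when the scan limit
// was not reached, to avoid a false positive on an interrupted listing.
```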

View File

@@ -0,0 +1,304 @@
'use strict'; // eslint-disable-line strict
const Delimiter = require('./delimiter').Delimiter;
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } =
require('./tools');
const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
/**
* Handle object listing with parameters
*
* @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
* @prop {String[]} Contents - 'files' to list
* @prop {Boolean} IsTruncated - truncated listing flag
* @prop {String|undefined} NextMarker - marker per amazon format
* @prop {Number} keys - count of listed keys
* @prop {String|undefined} delimiter - separator per amazon format
* @prop {String|undefined} prefix - prefix per amazon format
* @prop {Number} maxKeys - number of keys to list
*/
class DelimiterVersions extends Delimiter {
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
// specific to version listing
this.keyMarker = parameters.keyMarker;
this.versionIdMarker = parameters.versionIdMarker;
// internal state
this.masterKey = undefined;
this.masterVersionId = undefined;
// listing results
this.NextMarker = parameters.keyMarker;
this.NextVersionIdMarker = undefined;
this.inReplayPrefix = false;
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
genMDParams: this.genMDParamsV0,
filter: this.filterV0,
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
genMDParams: this.genMDParamsV1,
filter: this.filterV1,
skipping: this.skippingV1,
},
}[this.vFormat]);
}
genMDParamsV0() {
const params = {};
if (this.parameters.prefix) {
params.gte = this.parameters.prefix;
params.lt = inc(this.parameters.prefix);
}
if (this.parameters.keyMarker) {
if (params.gte && params.gte > this.parameters.keyMarker) {
return params;
}
delete params.gte;
if (this.parameters.versionIdMarker) {
// versionIdMarker should always come with keyMarker
// but not necessarily the other way around
params.gt = this.parameters.keyMarker
+ VID_SEP
+ this.parameters.versionIdMarker;
} else {
params.gt = inc(this.parameters.keyMarker + VID_SEP);
}
}
return params;
}
genMDParamsV1() {
// return an array of two listing params sets to ask for
// synchronized listing of M and V ranges
const params = [{}, {}];
if (this.parameters.prefix) {
params[0].gte = DbPrefixes.Master + this.parameters.prefix;
params[0].lt = DbPrefixes.Master + inc(this.parameters.prefix);
params[1].gte = DbPrefixes.Version + this.parameters.prefix;
params[1].lt = DbPrefixes.Version + inc(this.parameters.prefix);
} else {
params[0].gte = DbPrefixes.Master;
params[0].lt = inc(DbPrefixes.Master); // stop after the last master key
params[1].gte = DbPrefixes.Version;
params[1].lt = inc(DbPrefixes.Version); // stop after the last version key
}
if (this.parameters.keyMarker) {
if (params[1].gte <= DbPrefixes.Version + this.parameters.keyMarker) {
delete params[0].gte;
delete params[1].gte;
params[0].gt = DbPrefixes.Master + inc(this.parameters.keyMarker + VID_SEP);
if (this.parameters.versionIdMarker) {
// versionIdMarker should always come with keyMarker
// but not necessarily the other way around
params[1].gt = DbPrefixes.Version
+ this.parameters.keyMarker
+ VID_SEP
+ this.parameters.versionIdMarker;
} else {
params[1].gt = DbPrefixes.Version
+ inc(this.parameters.keyMarker + VID_SEP);
}
}
}
return params;
}
/**
* Used to synchronize listing of M and V prefixes by object key
*
* @param {object} masterObj object listed from first range
* returned by genMDParamsV1() (the master keys range)
* @param {object} versionObj object listed from second range
* returned by genMDParamsV1() (the version keys range)
* @return {number} comparison result:
* * -1 if master key < version key
* * 1 if master key > version key
*/
compareObjects(masterObj, versionObj) {
const masterKey = masterObj.key.slice(DbPrefixes.Master.length);
const versionKey = versionObj.key.slice(DbPrefixes.Version.length);
return masterKey < versionKey ? -1 : 1;
}
/**
* Add a (key, versionId, value) tuple to the listing.
* Set the NextMarker to the current key
* Increment the keys counter
* @param {object} obj - the entry to add to the listing result
* @param {String} obj.key - The key to add
* @param {String} obj.versionId - versionId
* @param {String} obj.value - The value of the key
* @return {Boolean} - indicates if iteration should continue
*/
addContents(obj) {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
this.Contents.push({
key: obj.key,
value: this.trimMetadata(obj.value),
versionId: obj.versionId,
});
this.NextMarker = obj.key;
this.NextVersionIdMarker = obj.versionId;
++this.keys;
return FILTER_ACCEPT;
}
/**
* Add a Common Prefix in the list
* @param {String} key - object name
* @param {Number} index - after prefix starting point
* @return {Boolean} - indicates if iteration should continue
*/
addCommonPrefix(key, index) {
const commonPrefix = key.substring(0, index + this.delimiter.length);
if (this.CommonPrefixes.indexOf(commonPrefix) === -1
&& this.NextMarker !== commonPrefix) {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
this.CommonPrefixes.push(commonPrefix);
this.NextMarker = commonPrefix;
++this.keys;
return FILTER_ACCEPT;
}
return FILTER_SKIP;
}
/**
* Filter to apply on each iteration if bucket is in v0
* versioning key format, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filterV0(obj) {
if (obj.key.startsWith(DbPrefixes.Replay)) {
this.inReplayPrefix = true;
return FILTER_SKIP;
}
this.inReplayPrefix = false;
if (Version.isPHD(obj.value)) {
// return accept to avoid skipping the next values in range
return FILTER_ACCEPT;
}
return this.filterCommon(obj.key, obj.value);
}
/**
* Filter to apply on each iteration if bucket is in v1
* versioning key format, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filterV1(obj) {
if (Version.isPHD(obj.value)) {
// return accept to avoid skipping the next values in range
return FILTER_ACCEPT;
}
// this function receives both M and V keys, but their prefix
// length is the same so we can remove their prefix without
// looking at the type of key
return this.filterCommon(obj.key.slice(DbPrefixes.Master.length),
obj.value);
}
filterCommon(key, value) {
if (this.prefix && !key.startsWith(this.prefix)) {
return FILTER_SKIP;
}
let nonversionedKey;
let versionId = undefined;
const versionIdIndex = key.indexOf(VID_SEP);
if (versionIdIndex < 0) {
nonversionedKey = key;
this.masterKey = key;
this.masterVersionId =
Version.from(value).getVersionId() || 'null';
versionId = this.masterVersionId;
} else {
nonversionedKey = key.slice(0, versionIdIndex);
versionId = key.slice(versionIdIndex + 1);
// skip a version key if it is the master version
if (this.masterKey === nonversionedKey && this.masterVersionId === versionId) {
return FILTER_SKIP;
}
this.masterKey = undefined;
this.masterVersionId = undefined;
}
if (this.delimiter) {
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = nonversionedKey.indexOf(this.delimiter, baseIndex);
if (delimiterIndex >= 0) {
return this.addCommonPrefix(nonversionedKey, delimiterIndex);
}
}
return this.addContents({ key: nonversionedKey, value, versionId });
}
skippingV0() {
if (this.inReplayPrefix) {
return DbPrefixes.Replay;
}
if (this.NextMarker) {
const index = this.NextMarker.lastIndexOf(this.delimiter);
if (index === this.NextMarker.length - 1) {
return this.NextMarker;
}
}
return SKIP_NONE;
}
skippingV1() {
const skipV0 = this.skippingV0();
if (skipV0 === SKIP_NONE) {
return SKIP_NONE;
}
// skip to the same object key in both M and V range listings
return [DbPrefixes.Master + skipV0,
DbPrefixes.Version + skipV0];
}
/**
* Return an object containing all mandatory fields once the
* iteration is done; the NextMarker field is omitted if the output
* isn't truncated
* @return {Object} - following amazon format
*/
result() {
/* NextMarker is only provided when delimiter is used.
* specified in v1 listing documentation
* http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
*/
return {
CommonPrefixes: this.CommonPrefixes,
Versions: this.Contents,
IsTruncated: this.IsTruncated,
NextKeyMarker: this.IsTruncated ? this.NextMarker : undefined,
NextVersionIdMarker: this.IsTruncated ?
this.NextVersionIdMarker : undefined,
Delimiter: this.delimiter,
};
}
}
module.exports = { DelimiterVersions };
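A sketch of driving this class end to end (bucket contents are hypothetical, `logger` is assumed, and the raw entries would normally come from the metadata backend):

```typescript
const { DelimiterVersions } = require('./delimiterVersions');

const listing = new DelimiterVersions(
    { prefix: 'photos/', delimiter: '/', maxKeys: 1000 }, logger, 'v0');

// feed raw { key, value } entries through listing.filter(entry), honoring
// skipping() hints, until it returns a negative value or the stream ends:
listing.result();
// => {
//     CommonPrefixes: ['photos/2023/'],
//     Versions: [{ key: 'photos/cover.jpg', value: '{...}', versionId: '...' }],
//     IsTruncated: false,
//     NextKeyMarker: undefined,
//     NextVersionIdMarker: undefined,
//     Delimiter: '/',
// }
```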

View File

@@ -1,535 +0,0 @@
'use strict'; // eslint-disable-line strict
const Extension = require('./Extension').default;
import {
FilterState,
FilterReturnValue,
} from './delimiter';
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } =
require('./tools');
const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
export const enum DelimiterVersionsFilterStateId {
NotSkipping = 1,
SkippingPrefix = 2,
SkippingVersions = 3,
};
export interface DelimiterVersionsFilterState_NotSkipping extends FilterState {
id: DelimiterVersionsFilterStateId.NotSkipping,
};
export interface DelimiterVersionsFilterState_SkippingPrefix extends FilterState {
id: DelimiterVersionsFilterStateId.SkippingPrefix,
prefix: string;
};
export interface DelimiterVersionsFilterState_SkippingVersions extends FilterState {
id: DelimiterVersionsFilterStateId.SkippingVersions,
gt: string;
};
type KeyHandler = (key: string, versionId: string | undefined, value: string) => FilterReturnValue;
type ResultObject = {
CommonPrefixes: string[],
Versions: {
key: string;
value: string;
versionId: string;
}[];
IsTruncated: boolean;
Delimiter ?: string;
NextKeyMarker ?: string;
NextVersionIdMarker ?: string;
};
type GenMDParamsItem = {
gt ?: string,
gte ?: string,
lt ?: string,
};
/**
* Handle object listing with parameters
*
* @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
* @prop {String[]} Contents - 'files' to list
* @prop {Boolean} IsTruncated - truncated listing flag
* @prop {String|undefined} NextMarker - marker per amazon format
* @prop {Number} keys - count of listed keys
* @prop {String|undefined} delimiter - separator per amazon format
* @prop {String|undefined} prefix - prefix per amazon format
* @prop {Number} maxKeys - number of keys to list
*/
export class DelimiterVersions extends Extension {
state: FilterState;
keyHandlers: { [id: number]: KeyHandler };
constructor(parameters, logger, vFormat) {
super(parameters, logger);
// original listing parameters
this.delimiter = parameters.delimiter;
this.prefix = parameters.prefix;
this.maxKeys = parameters.maxKeys || 1000;
// specific to version listing
this.keyMarker = parameters.keyMarker;
this.versionIdMarker = parameters.versionIdMarker;
// internal state
this.masterKey = undefined;
this.masterVersionId = undefined;
this.nullKey = null;
this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
// listing results
this.CommonPrefixes = [];
this.Versions = [];
this.IsTruncated = false;
this.nextKeyMarker = parameters.keyMarker;
this.nextVersionIdMarker = undefined;
this.keyHandlers = {};
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
genMDParams: this.genMDParamsV0,
getObjectKey: this.getObjectKeyV0,
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
genMDParams: this.genMDParamsV1,
getObjectKey: this.getObjectKeyV1,
skipping: this.skippingV1,
},
}[this.vFormat]);
if (this.vFormat === BucketVersioningKeyFormat.v0) {
this.setKeyHandler(
DelimiterVersionsFilterStateId.NotSkipping,
this.keyHandler_NotSkippingV0.bind(this));
} else {
this.setKeyHandler(
DelimiterVersionsFilterStateId.NotSkipping,
this.keyHandler_NotSkippingV1.bind(this));
}
this.setKeyHandler(
DelimiterVersionsFilterStateId.SkippingPrefix,
this.keyHandler_SkippingPrefix.bind(this));
this.setKeyHandler(
DelimiterVersionsFilterStateId.SkippingVersions,
this.keyHandler_SkippingVersions.bind(this));
if (this.versionIdMarker) {
this.state = <DelimiterVersionsFilterState_SkippingVersions> {
id: DelimiterVersionsFilterStateId.SkippingVersions,
gt: `${this.keyMarker}${VID_SEP}${this.versionIdMarker}`,
};
} else {
this.state = <DelimiterVersionsFilterState_NotSkipping> {
id: DelimiterVersionsFilterStateId.NotSkipping,
};
}
}
genMDParamsV0() {
const params: GenMDParamsItem = {};
if (this.prefix) {
params.gte = this.prefix;
params.lt = inc(this.prefix);
}
if (this.keyMarker && this.delimiter) {
const commonPrefix = this.getCommonPrefix(this.keyMarker);
if (commonPrefix) {
const afterPrefix = inc(commonPrefix);
if (!params.gte || afterPrefix > params.gte) {
params.gte = afterPrefix;
}
}
}
if (this.keyMarker && (!params.gte || this.keyMarker >= params.gte)) {
delete params.gte;
if (this.versionIdMarker) {
// start from the beginning of versions so we can
// check if there's a null key and fetch it
// (afterwards, we can skip the rest of versions until
// we reach versionIdMarker)
params.gte = `${this.keyMarker}${VID_SEP}`;
} else {
params.gt = `${this.keyMarker}${inc(VID_SEP)}`;
}
}
return params;
}
genMDParamsV1() {
// return an array of two listing params sets to ask for
// synchronized listing of M and V ranges
const v0Params: GenMDParamsItem = this.genMDParamsV0();
const mParams: GenMDParamsItem = {};
const vParams: GenMDParamsItem = {};
if (v0Params.gt) {
mParams.gt = `${DbPrefixes.Master}${v0Params.gt}`;
vParams.gt = `${DbPrefixes.Version}${v0Params.gt}`;
} else if (v0Params.gte) {
mParams.gte = `${DbPrefixes.Master}${v0Params.gte}`;
vParams.gte = `${DbPrefixes.Version}${v0Params.gte}`;
} else {
mParams.gte = DbPrefixes.Master;
vParams.gte = DbPrefixes.Version;
}
if (v0Params.lt) {
mParams.lt = `${DbPrefixes.Master}${v0Params.lt}`;
vParams.lt = `${DbPrefixes.Version}${v0Params.lt}`;
} else {
mParams.lt = inc(DbPrefixes.Master);
vParams.lt = inc(DbPrefixes.Version);
}
return [mParams, vParams];
}
/**
* check if the max keys count has been reached and set the
* final state of the result if it is the case
* @return {Boolean} - indicates if the iteration has to stop
*/
_reachedMaxKeys(): boolean {
if (this.keys >= this.maxKeys) {
// In cases of maxKeys <= 0 -> IsTruncated = false
this.IsTruncated = this.maxKeys > 0;
return true;
}
return false;
}
/**
* Used to synchronize listing of M and V prefixes by object key
*
* @param {object} masterObj object listed from first range
* returned by genMDParamsV1() (the master keys range)
* @param {object} versionObj object listed from second range
* returned by genMDParamsV1() (the version keys range)
* @return {number} comparison result:
* * -1 if master key < version key
* * 1 if master key > version key
*/
compareObjects(masterObj, versionObj) {
const masterKey = masterObj.key.slice(DbPrefixes.Master.length);
const versionKey = versionObj.key.slice(DbPrefixes.Version.length);
return masterKey < versionKey ? -1 : 1;
}
/**
* Parse a listing key into its nonversioned key and version ID components
*
* @param {string} fullKey - full listing key
* @return {object} obj
* @return {string} obj.key - nonversioned part of key
* @return {string} [obj.versionId] - version ID in the key
*/
parseKey(fullKey: string): { key: string, versionId ?: string } {
const versionIdIndex = fullKey.indexOf(VID_SEP);
if (versionIdIndex === -1) {
return { key: fullKey };
}
const nonversionedKey: string = fullKey.slice(0, versionIdIndex);
const versionId: string = fullKey.slice(versionIdIndex + 1);
return { key: nonversionedKey, versionId };
}
/**
* Include a key in the listing output, in the Versions or CommonPrefix result
*
* @param {string} key - key (without version ID)
* @param {string} versionId - version ID
* @param {string} value - metadata value
* @return {undefined}
*/
addKey(key: string, versionId: string, value: string) {
// add the subprefix to the common prefixes if the key has the delimiter
const commonPrefix = this.getCommonPrefix(key);
if (commonPrefix) {
this.addCommonPrefix(commonPrefix);
// transition into SkippingPrefix state to skip all following keys
// while they start with the same prefix
this.setState(<DelimiterVersionsFilterState_SkippingPrefix> {
id: DelimiterVersionsFilterStateId.SkippingPrefix,
prefix: commonPrefix,
});
} else {
this.addVersion(key, versionId, value);
}
}
/**
* Add a (key, versionId, value) tuple to the listing.
* Set the NextMarker to the current key
* Increment the keys counter
* @param {String} key - The key to add
* @param {String} versionId - versionId
* @param {String} value - The value of the key
* @return {undefined}
*/
addVersion(key: string, versionId: string, value: string) {
this.Versions.push({
key,
versionId,
value: this.trimMetadata(value),
});
this.nextKeyMarker = key;
this.nextVersionIdMarker = versionId;
++this.keys;
}
getCommonPrefix(key: string): string | undefined {
if (!this.delimiter) {
return undefined;
}
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
if (delimiterIndex === -1) {
return undefined;
}
return key.substring(0, delimiterIndex + this.delimiter.length);
}
/**
* Add a Common Prefix in the list
* @param {String} commonPrefix - common prefix to add
* @return {undefined}
*/
addCommonPrefix(commonPrefix: string): void {
// add the new prefix to the list
this.CommonPrefixes.push(commonPrefix);
++this.keys;
this.nextKeyMarker = commonPrefix;
this.nextVersionIdMarker = undefined;
}
/**
* Cache the current null key, to save it for outputting it later at
* the correct position
*
* @param {String} key - nonversioned key of the null key
* @param {String} versionId - real version ID of the null key
* @param {String} value - value of the null key
* @return {undefined}
*/
cacheNullKey(key: string, versionId: string, value: string): void {
this.nullKey = { key, versionId, value };
}
getObjectKeyV0(obj: { key: string }): string {
return obj.key;
}
getObjectKeyV1(obj: { key: string }): string {
return obj.key.slice(DbPrefixes.Master.length);
}
/**
* Filter to apply on each iteration, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is handled directly by LevelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - FILTER_ACCEPT to keep iterating, FILTER_SKIP to
* skip a range, or FILTER_END to stop the iteration
*/
filter(obj: { key: string, value: string }): FilterReturnValue {
const key = this.getObjectKey(obj);
const value = obj.value;
const { key: nonversionedKey, versionId: keyVersionId } = this.parseKey(key);
if (this.nullKey) {
if (this.nullKey.key !== nonversionedKey
|| this.nullKey.versionId < <string> keyVersionId) {
this.handleKey(
this.nullKey.key, this.nullKey.versionId, this.nullKey.value);
this.nullKey = null;
}
}
if (keyVersionId === '') {
// null key
this.cacheNullKey(nonversionedKey, Version.from(value).getVersionId(), value);
if (this.state.id === DelimiterVersionsFilterStateId.SkippingVersions) {
return FILTER_SKIP;
}
return FILTER_ACCEPT;
}
return this.handleKey(nonversionedKey, keyVersionId, value);
}
setState(state: FilterState): void {
this.state = state;
}
setKeyHandler(stateId: number, keyHandler: KeyHandler): void {
this.keyHandlers[stateId] = keyHandler;
}
handleKey(key: string, versionId: string | undefined, value: string): FilterReturnValue {
return this.keyHandlers[this.state.id](key, versionId, value);
}
keyHandler_NotSkippingV0(key: string, versionId: string | undefined, value: string): FilterReturnValue {
if (key.startsWith(DbPrefixes.Replay)) {
// skip internal replay prefix entirely
this.setState(<DelimiterVersionsFilterState_SkippingPrefix> {
id: DelimiterVersionsFilterStateId.SkippingPrefix,
prefix: DbPrefixes.Replay,
});
return FILTER_SKIP;
}
if (Version.isPHD(value)) {
return FILTER_ACCEPT;
}
return this.filter_onNewKey(key, versionId, value);
}
keyHandler_NotSkippingV1(key: string, versionId: string | undefined, value: string): FilterReturnValue {
// NOTE: this check on PHD is only useful for Artesca; S3C
// does not use PHDs in V1 format
if (Version.isPHD(value)) {
return FILTER_ACCEPT;
}
return this.filter_onNewKey(key, versionId, value);
}
filter_onNewKey(key: string, versionId: string | undefined, value: string): FilterReturnValue {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
if (versionId === undefined) {
this.masterKey = key;
this.masterVersionId = Version.from(value).getVersionId() || 'null';
this.addKey(this.masterKey, this.masterVersionId, value);
} else {
if (this.masterKey === key && this.masterVersionId === versionId) {
// do not add a version key if it is the master version
return FILTER_ACCEPT;
}
this.addKey(key, versionId, value);
}
return FILTER_ACCEPT;
}
keyHandler_SkippingPrefix(key: string, versionId: string | undefined, value: string): FilterReturnValue {
const { prefix } = <DelimiterVersionsFilterState_SkippingPrefix> this.state;
if (key.startsWith(prefix)) {
return FILTER_SKIP;
}
this.setState(<DelimiterVersionsFilterState_NotSkipping> {
id: DelimiterVersionsFilterStateId.NotSkipping,
});
return this.handleKey(key, versionId, value);
}
keyHandler_SkippingVersions(key: string, versionId: string | undefined, value: string): FilterReturnValue {
if (key === this.keyMarker) {
// since the nonversioned key equals the marker, there is
// necessarily a versionId in this key
const _versionId = <string> versionId;
if (_versionId < this.versionIdMarker) {
// skip all versions until marker
return FILTER_SKIP;
}
if (_versionId === this.versionIdMarker) {
// nothing left to skip, so return ACCEPT, but don't add this version
return FILTER_ACCEPT;
}
}
this.setState(<DelimiterVersionsFilterState_NotSkipping> {
id: DelimiterVersionsFilterStateId.NotSkipping,
});
return this.handleKey(key, versionId, value);
}
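// e.g. with keyMarker 'foo' and versionIdMarker '<vid2>': versions
// of 'foo' whose version ID sorts before '<vid2>' are skipped, the
// '<vid2>' version itself is accepted but not re-listed, and any
// other entry transitions back into the NotSkipping state.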
skippingBase(): string | undefined {
switch (this.state.id) {
case DelimiterVersionsFilterStateId.SkippingPrefix: {
const { prefix } = <DelimiterVersionsFilterState_SkippingPrefix> this.state;
return inc(prefix);
}
case DelimiterVersionsFilterStateId.SkippingVersions: {
const { gt } = <DelimiterVersionsFilterState_SkippingVersions> this.state;
// the contract of skipping() is to return the first key that
// can be skipped to, so we add a null byte to skip over the
// existing versioned key set in 'gt'
return `${gt}\0`;
}
default:
return SKIP_NONE;
}
}
skippingV0() {
return this.skippingBase();
}
skippingV1() {
const skipTo = this.skippingBase();
if (skipTo === SKIP_NONE) {
return SKIP_NONE;
}
// skip to the same object key in both M and V range listings
return [
`${DbPrefixes.Master}${skipTo}`,
`${DbPrefixes.Version}${skipTo}`,
];
}
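// e.g. while in SkippingPrefix state on 'photos/2021/', and assuming
// inc() bumps the last character as in the listing tools module:
//   skippingV0() => 'photos/20210'
//   skippingV1() => ['\x7fMphotos/20210', '\x7fVphotos/20210']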
/**
* Return an object containing all mandatory fields to use once the
* iteration is done. The NextKeyMarker field is only present when
* the output is truncated.
* @return {Object} - result following the Amazon S3 format
*/
result() {
// Add the last null key if still in cache (when it is the
// last version of the last key)
//
// NOTE: _reachedMaxKeys sets IsTruncated to true when it
// returns true. Here this is what we want, because either:
//
// - we did not reach the max keys yet, so the result is not
//   truncated and there is still room for the null key in
//   the results
//
// - OR we reached it already while having to process a new
//   key (so the result is truncated even without the null key)
//
// - OR we are *just* below the limit but the null key to add
//   does not fit, so we know the result is now truncated
//   because there remains the null key to be output.
//
if (this.nullKey) {
this.handleKey(this.nullKey.key, this.nullKey.versionId, this.nullKey.value);
}
const result: ResultObject = {
CommonPrefixes: this.CommonPrefixes,
Versions: this.Versions,
IsTruncated: this.IsTruncated,
};
if (this.delimiter) {
result.Delimiter = this.delimiter;
}
if (this.IsTruncated) {
result.NextKeyMarker = this.nextKeyMarker;
if (this.nextVersionIdMarker) {
result.NextVersionIdMarker = this.nextVersionIdMarker;
}
}
return result;
}
}
module.exports = { DelimiterVersions };
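For reference, a minimal sketch of how a caller can drive this class over a raw key/value stream. The constructor arguments shown (a parameters object, a logger, and a vFormat string) are assumed from the sibling listing algorithms in this directory, and FILTER_END is assumed to be the only negative filter value, as in ./tools:

const { DelimiterVersions } = require('./delimiterVersions');

function listVersions(rawEntries, logger) {
    // hypothetical driver loop: feed raw metadata entries to filter()
    // until it asks to stop, then collect the curated result
    const extension = new DelimiterVersions(
        { maxKeys: 1000, delimiter: '/' }, logger, 'v0');
    for (const entry of rawEntries) {
        if (extension.filter(entry) < 0) { // FILTER_END
            break;
        }
    }
    // => { Versions, CommonPrefixes, IsTruncated, NextKeyMarker?, ... }
    return extension.result();
}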

View File

@ -6,7 +6,4 @@ module.exports = {
DelimiterMaster: require('./delimiterMaster')
.DelimiterMaster,
MPU: require('./MPU').MultipartUploads,
DelimiterCurrent: require('./delimiterCurrent').DelimiterCurrent,
DelimiterNonCurrent: require('./delimiterNonCurrent').DelimiterNonCurrent,
DelimiterOrphanDeleteMarker: require('./delimiterOrphanDeleteMarker').DelimiterOrphanDeleteMarker,
};

View File

@ -52,21 +52,21 @@ class Skip {
assert(this.skipRangeCb);
const filteringResult = this.extension.filter(entry);
const skipTo = this.extension.skipping();
const skippingRange = this.extension.skipping();
if (filteringResult === FILTER_END) {
this.listingEndCb();
} else if (filteringResult === FILTER_SKIP
&& skipTo !== SKIP_NONE) {
&& skippingRange !== SKIP_NONE) {
if (++this.streakLength >= MAX_STREAK_LENGTH) {
let newRange;
if (Array.isArray(skipTo)) {
if (Array.isArray(skippingRange)) {
newRange = [];
for (let i = 0; i < skipTo.length; ++i) {
newRange.push(skipTo[i]);
for (let i = 0; i < skippingRange.length; ++i) {
newRange.push(this._inc(skippingRange[i]));
}
} else {
newRange = skipTo;
newRange = this._inc(skippingRange);
}
/* Avoid looping over the same range again and again. */
if (newRange === this.gteParams) {
@ -79,6 +79,16 @@ class Skip {
this.streakLength = 0;
}
}
_inc(str) {
if (!str) {
return str;
}
const lastCharValue = str.charCodeAt(str.length - 1);
const lastCharNewValue = String.fromCharCode(lastCharValue + 1);
return `${str.slice(0, str.length - 1)}${lastCharNewValue}`;
}
}
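For clarity, the effect of the added _inc() helper on a skip key, following directly from the code above:

// given a Skip instance `skip`:
skip._inc('foo');          // => 'fop' (last character code + 1)
skip._inc('photos/2021/'); // => 'photos/20210'
skip._inc('');             // => ''   (falsy input returned unchanged)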

View File

@ -14,7 +14,7 @@ function vaultSignatureCb(
err: Error | null,
authInfo: { message: { body: any } },
log: Logger,
callback: (err: Error | null, data?: any, results?: any, params?: any, infos?: any) => void,
callback: (err: Error | null, data?: any, results?: any, params?: any) => void,
streamingV4Params?: any
) {
// vaultclient API guarantees that it returns:
@ -38,9 +38,7 @@ function vaultSignatureCb(
}
// @ts-ignore
log.addDefaultFields(auditLog);
return callback(null, userInfo, authorizationResults, streamingV4Params, {
accountQuota: info.accountQuota || {},
});
return callback(null, userInfo, authorizationResults, streamingV4Params);
}
export type AuthV4RequestParams = {
@ -386,19 +384,4 @@ export default class Vault {
return callback(null, respBody);
});
}
report(log: Logger, callback: (err: Error | null, data?: any) => void) {
// call the report function of the client
if (!this.client.report) {
return callback(null, {});
}
// @ts-ignore
return this.client.report(log.getSerializedUids(), (err: Error | null, obj?: any) => {
if (err) {
log.debug(`error from ${this.implName}`, { error: err });
return callback(err);
}
return callback(null, obj);
});
}
}

View File

@ -163,20 +163,6 @@ function doAuth(
return cb(errors.InternalError);
}
/**
* This function will generate a content-md5 header
* It looks at the request path to determine what kind of header encoding is required
*
* @param path - the request path
* @param payload - the request payload to hash
*/
function generateContentMD5Header(
path: string,
payload: string,
) {
const encoding = path && path.startsWith('/_/backbeat/') ? 'hex' : 'base64';
return crypto.createHash('md5').update(payload, 'binary').digest(encoding);
}
/**
* This function will generate a version 4 header
*
@ -189,7 +175,6 @@ function generateContentMD5Header(
* @param [proxyPath] - path that gets proxied by reverse proxy
* @param [sessionToken] - security token if the access/secret keys
* are temporary credentials from STS
* @param [payload] - body of the request if any
*/
function generateV4Headers(
request: any,
@ -197,9 +182,8 @@ function generateV4Headers(
accessKey: string,
secretKeyValue: string,
awsService: string,
proxyPath?: string,
sessionToken?: string,
payload?: string,
proxyPath: string,
sessionToken: string
) {
Object.assign(request, { headers: {} });
const amzDate = convertUTCtoISO8601(Date.now());
@ -212,7 +196,7 @@ function generateV4Headers(
const timestamp = amzDate;
const algorithm = 'AWS4-HMAC-SHA256';
payload = payload || '';
let payload = '';
if (request.method === 'POST') {
payload = queryString.stringify(data, undefined, undefined, {
encodeURIComponent,
@ -223,7 +207,6 @@ function generateV4Headers(
request.setHeader('host', request._headers.host);
request.setHeader('x-amz-date', amzDate);
request.setHeader('x-amz-content-sha256', payloadChecksum);
request.setHeader('content-md5', generateContentMD5Header(request.path, payload));
if (sessionToken) {
request.setHeader('x-amz-security-token', sessionToken);
@ -234,7 +217,6 @@ function generateV4Headers(
.filter(headerName =>
headerName.startsWith('x-amz-')
|| headerName.startsWith('x-scal-')
|| headerName === 'content-md5'
|| headerName === 'host',
).sort().join(';');
const params = { request, signedHeaders, payloadChecksum,

View File

@ -134,7 +134,7 @@ export default class ChainBackend extends BaseBackend {
}
const check = (policy) => {
const key = (policy.arn || '') + (policy.versionId || '') + (policy.action || '');
const key = (policy.arn || '') + (policy.versionId || '');
if (!policyMap[key] || !policyMap[key].isAllowed) {
policyMap[key] = policy;
}
@ -158,12 +158,6 @@ export default class ChainBackend extends BaseBackend {
if (policyMap[key].versionId) {
policyRes.versionId = policyMap[key].versionId;
}
if (policyMap[key].isImplicit !== undefined) {
policyRes.isImplicit = policyMap[key].isImplicit;
}
if (policyMap[key].action) {
policyRes.action = policyMap[key].action;
}
return policyRes;
});
}
@ -212,22 +206,4 @@ export default class ChainBackend extends BaseBackend {
return callback(null, res);
});
}
report(reqUid: string, callback: any) {
this._forEachClient((client, done) =>
client.report(reqUid, done),
(err, res) => {
if (err) {
return callback(err);
}
const mergedRes = res.reduce((acc, val) => {
Object.keys(val).forEach(k => {
acc[k] = val[k];
});
return acc;
}, {});
return callback(null, mergedRes);
});
}
}

View File

@ -161,10 +161,6 @@ class InMemoryBackend extends BaseBackend {
};
return cb(null, vaultReturnObject);
}
report(log: Logger, callback: any) {
return callback(null, {});
}
}

View File

@ -1,569 +0,0 @@
import cluster, { Worker } from 'cluster';
import * as werelogs from 'werelogs';
import { default as errors } from '../../lib/errors';
const rpcLogger = new werelogs.Logger('ClusterRPC');
/**
* Remote procedure calls support between cluster workers.
*
* When using the cluster module, new processes are forked and are
* dispatched workloads, usually HTTP requests. The ClusterRPC module
* implements a RPC system to send commands to all cluster worker
* processes at once from any particular worker, and retrieve their
* individual command results, like a distributed map operation.
*
* The existing nodejs cluster IPC channel is set up from the primary
* to each worker, but not between workers, so commands have to make
* a hop through the primary.
*
* How a command is treated:
*
* - a worker sends a command message to the primary
*
* - the primary then forwards that command to each existing worker
* (including the requestor)
*
* - each worker then executes the command and returns a result or an
* error
*
* - the primary gathers all workers results into an array
*
* - finally, the primary dispatches the results array to the original
* requesting worker
*
*
* Limitations:
*
* - The command payload must be serializable, which means that:
* - it should not contain circular references
* - it should be of a reasonable size to be sent in a single RPC message
*
* - The "toWorkers" parameter of value "*" targets the set of workers
* that are available at the time the command is dispatched. Any new
* worker spawned after the command has been dispatched for
* processing, but before the command completes, don't execute
* the command and hence are not part of the results array.
*
*
* To set it up:
*
* - On the primary:
* if (cluster.isPrimary) {
* setupRPCPrimary();
* }
*
* - On the workers:
* if (!cluster.isPrimary) {
* setupRPCWorker({
* handler1: (payload: object, uids: string, callback: HandlerCallback) => void,
* handler2: ...
* });
* }
* Handler functions will be passed the command payload, request
* serialized uids, and must call the callback when the worker is done
* processing the command:
* callback(error: Error | null | undefined, result?: any)
*
* When this setup is done, any worker can start sending commands by calling
* the async function sendWorkerCommand().
*/
// exported types
export type ResultObject = {
error: Error | null;
result: any;
};
/**
* saved Promise for sendWorkerCommand
*/
export type CommandPromise = {
resolve: (results?: ResultObject[]) => void;
reject: (error: Error) => void;
timeout: NodeJS.Timeout | null;
};
export type HandlerCallback = (error: (Error & { code?: number }) | null | undefined, result?: any) => void;
export type HandlerFunction = (payload: object, uids: string, callback: HandlerCallback) => void;
export type HandlersMap = {
[index: string]: HandlerFunction;
};
export type PrimaryHandlerFunction = (worker: Worker, payload: object, uids: string, callback: HandlerCallback) => void;
export type PrimaryHandlersMap = Record<string, PrimaryHandlerFunction>;
// private types
type RPCMessage<T extends string, P> = {
type: T;
uids: string;
payload: P;
};
type RPCCommandMessage = RPCMessage<'cluster-rpc:command', any> & {
toWorkers: string;
toHandler: string;
};
type MarshalledResultObject = {
error: string | null;
errorCode?: number;
result: any;
};
type RPCCommandResultMessage = RPCMessage<'cluster-rpc:commandResult', MarshalledResultObject>;
type RPCCommandResultsMessage = RPCMessage<'cluster-rpc:commandResults', {
results: MarshalledResultObject[];
}>;
type RPCCommandErrorMessage = RPCMessage<'cluster-rpc:commandError', {
error: string;
}>;
interface RPCSetupOptions {
/**
* As werelogs is not a peerDependency, arsenal and a parent project
* might have their own separate versions duplicated in dependencies.
* The config is therefore not shared.
* Use this to propagate werelogs config to arsenal's ClusterRPC.
*/
werelogsConfig?: Parameters<typeof werelogs.configure>[0];
};
/**
* In primary: store worker IDs that are waiting to be dispatched
* their command's results, as a mapping.
*/
const uidsToWorkerId: {
[index: string]: number;
} = {};
/**
* In primary: store worker responses for commands in progress as a
* mapping.
*
* Result objects are 'null' while the worker is still processing the
* command. When a worker finishes processing it stores the result as:
* {
* error: string | null,
* result: any
* }
*/
const uidsToCommandResults: {
[index: string]: {
[index: number]: MarshalledResultObject | null;
};
} = {};
/**
* In workers: store promise callbacks for commands waiting to be
* dispatched, as a mapping.
*/
const uidsToCommandPromise: {
[index: string]: CommandPromise;
} = {};
function _isRpcMessage(message) {
return (message !== null &&
typeof message === 'object' &&
typeof message.type === 'string' &&
message.type.startsWith('cluster-rpc:'));
}
/**
* Setup cluster RPC system on the primary
*
* @param {object} [handlers] - mapping of handler names to handler functions
* handler function:
* `handler({Worker} worker, {object} payload, {string} uids, {function} callback)`
* handler callback must be called when worker is done with the command:
* `callback({Error|null} error, {any} [result])`
* @return {undefined}
*/
export function setupRPCPrimary(handlers?: PrimaryHandlersMap, options?: RPCSetupOptions) {
if (options?.werelogsConfig) {
werelogs.configure(options.werelogsConfig);
}
cluster.on('message', (worker, message) => {
if (_isRpcMessage(message)) {
_handlePrimaryMessage(worker, message, handlers);
}
});
}
/**
* Setup RPCs on a cluster worker process
*
* @param {object} handlers - mapping of handler names to handler functions
* handler function:
* handler({object} payload, {string} uids, {function} callback)
* handler callback must be called when worker is done with the command:
* callback({Error|null} error, {any} [result])
* @return {undefined}
*/
export function setupRPCWorker(handlers: HandlersMap, options?: RPCSetupOptions) {
if (!process.send) {
throw new Error('fatal: cannot setup cluster RPC: "process.send" is not available');
}
if (options?.werelogsConfig) {
werelogs.configure(options.werelogsConfig);
}
process.on('message', (message: RPCCommandMessage | RPCCommandResultsMessage) => {
if (_isRpcMessage(message)) {
_handleWorkerMessage(message, handlers);
}
});
}
/**
* Send a command for workers to execute in parallel, and wait for results
*
* @param {string} toWorkers - which workers should execute the command
* Currently the supported values are:
* - "*", meaning all workers will execute the command
* - "PRIMARY", meaning primary process will execute the command
* @param {string} toHandler - name of handler that will execute the
* command in workers, as declared in setupRPCWorker() parameter object
* @param {string} uids - unique identifier of the command, must be
* unique across all commands in progress
* @param {object} payload - message payload, sent as-is to the handler
* @param {number} [timeoutMs=60000] - timeout the command with a
* "RequestTimeout" error after this number of milliseconds - set to 0
* to disable timeouts (the command may then hang forever)
* @returns {Promise}
*/
export async function sendWorkerCommand(
toWorkers: string,
toHandler: string,
uids: string,
payload: object,
timeoutMs: number = 60000
) {
if (typeof uids !== 'string') {
rpcLogger.error('missing or invalid "uids" field', { uids });
throw errors.MissingParameter;
}
if (uidsToCommandPromise[uids] !== undefined) {
rpcLogger.error('a command is already in progress with same uids', { uids });
throw errors.OperationAborted;
}
rpcLogger.info('sending command', { toWorkers, toHandler, uids, payload });
return new Promise((resolve, reject) => {
let timeout: NodeJS.Timeout | null = null;
if (timeoutMs) {
timeout = setTimeout(() => {
delete uidsToCommandPromise[uids];
reject(errors.RequestTimeout);
}, timeoutMs);
}
uidsToCommandPromise[uids] = { resolve, reject, timeout };
const message: RPCCommandMessage = {
type: 'cluster-rpc:command',
toWorkers,
toHandler,
uids,
payload,
};
return process.send?.(message);
});
}
/**
* Get the number of commands in flight
* @returns {number}
*/
export function getPendingCommandsCount() {
return Object.keys(uidsToCommandPromise).length;
}
function _dispatchCommandResultsToWorker(
worker: Worker,
uids: string,
resultsArray: MarshalledResultObject[]
): void {
const message: RPCCommandResultsMessage = {
type: 'cluster-rpc:commandResults',
uids,
payload: {
results: resultsArray,
},
};
worker.send(message);
}
function _dispatchCommandErrorToWorker(
worker: Worker,
uids: string,
error: Error,
): void {
const message: RPCCommandErrorMessage = {
type: 'cluster-rpc:commandError',
uids,
payload: {
error: error.message,
},
};
worker.send(message);
}
function _sendPrimaryCommandResult(
worker: Worker,
uids: string,
error: (Error & { code?: number }) | null | undefined,
result?: any
): void {
const message: RPCCommandResultsMessage = {
type: 'cluster-rpc:commandResults',
uids,
payload: {
results: [{ error: error?.message || null, errorCode: error?.code, result }],
},
};
worker.send?.(message);
}
function _handlePrimaryCommandMessage(
fromWorker: Worker,
logger: any,
message: RPCCommandMessage,
handlers?: PrimaryHandlersMap
): void {
const { toWorkers, toHandler, uids, payload } = message;
if (toWorkers === '*') {
if (uidsToWorkerId[uids] !== undefined) {
logger.warn('new command already has a waiting worker with same uids', {
uids, workerId: uidsToWorkerId[uids],
});
return undefined;
}
const commandResults = {};
for (const workerId of Object.keys(cluster.workers || {})) {
commandResults[workerId] = null;
}
uidsToWorkerId[uids] = fromWorker?.id;
uidsToCommandResults[uids] = commandResults;
for (const [workerId, worker] of Object.entries(cluster.workers || {})) {
logger.debug('sending command message to worker', {
workerId, toHandler, payload,
});
if (worker) {
worker.send(message);
}
}
} else if (toWorkers === 'PRIMARY') {
const { toHandler, uids, payload } = message;
const cb: HandlerCallback = (err, result) => _sendPrimaryCommandResult(fromWorker, uids, err, result);
if (toHandler in (handlers || {})) {
return handlers![toHandler](fromWorker, payload, uids, cb);
}
logger.error('no such handler in "toHandler" field from worker command message', {
toHandler,
});
return cb(errors.NotImplemented);
} else {
logger.error('unsupported "toWorkers" field from worker command message', {
toWorkers,
});
if (fromWorker) {
_dispatchCommandErrorToWorker(fromWorker, uids, errors.NotImplemented);
}
}
}
function _handlePrimaryCommandResultMessage(
fromWorkerId: number,
logger: any,
message: RPCCommandResultMessage
): void {
const { uids, payload } = message;
const commandResults = uidsToCommandResults[uids];
if (!commandResults) {
logger.warn('received command response message from worker for command not in flight', {
workerId: fromWorkerId,
uids,
});
return undefined;
}
if (commandResults[fromWorkerId] === undefined) {
logger.warn('received command response message with unexpected worker ID', {
workerId: fromWorkerId,
uids,
});
return undefined;
}
if (commandResults[fromWorkerId] !== null) {
logger.warn('ignoring duplicate command response from worker', {
workerId: fromWorkerId,
uids,
});
return undefined;
}
commandResults[fromWorkerId] = payload;
const commandResultsArray = Object.values(commandResults);
if (commandResultsArray.every(response => response !== null)) {
logger.debug('all workers responded to command', { uids });
const completeCommandResultsArray = <MarshalledResultObject[]> commandResultsArray;
const toWorkerId = uidsToWorkerId[uids];
const toWorker = cluster.workers?.[toWorkerId];
delete uidsToCommandResults[uids];
delete uidsToWorkerId[uids];
if (!toWorker) {
logger.warn('worker shut down while its command was executing', {
workerId: toWorkerId, uids,
});
return undefined;
}
// send back response to original worker
_dispatchCommandResultsToWorker(toWorker, uids, completeCommandResultsArray);
}
}
function _handlePrimaryMessage(
fromWorker: Worker,
message: RPCCommandMessage | RPCCommandResultMessage,
handlers?: PrimaryHandlersMap
): void {
const { type: messageType, uids } = message;
const logger = rpcLogger.newRequestLoggerFromSerializedUids(uids);
logger.debug('primary received message from worker', {
workerId: fromWorker?.id, rpcMessage: message,
});
if (messageType === 'cluster-rpc:command') {
return _handlePrimaryCommandMessage(fromWorker, logger, message, handlers);
}
if (messageType === 'cluster-rpc:commandResult') {
return _handlePrimaryCommandResultMessage(fromWorker?.id, logger, message);
}
logger.error('unsupported message type', {
workerId: fromWorker?.id, messageType, uids,
});
return undefined;
}
function _sendWorkerCommandResult(
uids: string,
error: Error | null | undefined,
result?: any
): void {
const message: RPCCommandResultMessage = {
type: 'cluster-rpc:commandResult',
uids,
payload: {
error: error ? error.message : null,
result,
},
};
process.send?.(message);
}
function _handleWorkerCommandMessage(
logger: any,
message: RPCCommandMessage,
handlers: HandlersMap
): void {
const { toHandler, uids, payload } = message;
const cb: HandlerCallback = (err, result) => _sendWorkerCommandResult(uids, err, result);
if (toHandler in handlers) {
return handlers[toHandler](payload, uids, cb);
}
logger.error('no such handler in "toHandler" field from worker command message', {
toHandler,
});
return cb(errors.NotImplemented);
}
function _handleWorkerCommandResultsMessage(
logger: any,
message: RPCCommandResultsMessage,
): void {
const { uids, payload } = message;
const { results } = payload;
const commandPromise: CommandPromise = uidsToCommandPromise[uids];
if (commandPromise === undefined) {
logger.error('missing promise for command results', { uids, payload });
return undefined;
}
if (commandPromise.timeout) {
clearTimeout(commandPromise.timeout);
}
delete uidsToCommandPromise[uids];
const unmarshalledResults = results.map(workerResult => {
let workerError: Error | null = null;
if (workerResult.error) {
if (workerResult.error in errors) {
workerError = errors[workerResult.error];
} else {
workerError = new Error(workerResult.error);
}
}
if (workerError && workerResult.errorCode) {
(workerError as Error & { code: number }).code = workerResult.errorCode;
}
const unmarshalledResult: ResultObject = {
error: workerError,
result: workerResult.result,
};
return unmarshalledResult;
});
return commandPromise.resolve(unmarshalledResults);
}
function _handleWorkerCommandErrorMessage(
logger: any,
message: RPCCommandErrorMessage,
): void {
const { uids, payload } = message;
const { error } = payload;
const commandPromise: CommandPromise = uidsToCommandPromise[uids];
if (commandPromise === undefined) {
logger.error('missing promise for command results', { uids, payload });
return undefined;
}
if (commandPromise.timeout) {
clearTimeout(commandPromise.timeout);
}
delete uidsToCommandPromise[uids];
let commandError: Error | null = null;
if (error in errors) {
commandError = errors[error];
} else {
commandError = new Error(error);
}
return commandPromise.reject(<Error> commandError);
}
function _handleWorkerMessage(
message: RPCCommandMessage | RPCCommandResultsMessage | RPCCommandErrorMessage,
handlers: HandlersMap
): void {
const { type: messageType, uids } = message;
const workerId = cluster.worker?.id;
const logger = rpcLogger.newRequestLoggerFromSerializedUids(uids);
logger.debug('worker received message from primary', {
workerId, rpcMessage: message,
});
if (messageType === 'cluster-rpc:command') {
return _handleWorkerCommandMessage(logger, message, handlers);
}
if (messageType === 'cluster-rpc:commandResults') {
return _handleWorkerCommandResultsMessage(logger, message);
}
if (messageType === 'cluster-rpc:commandError') {
return _handleWorkerCommandErrorMessage(logger, message);
}
logger.error('unsupported message type', {
workerId, messageType,
});
return undefined;
}
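To make the setup described in the module comment concrete, a minimal usage sketch of this (now removed) module; the handler name, uids, and require path are hypothetical, and startup races between fork and dispatch are ignored:

const cluster = require('cluster');
const { setupRPCPrimary, setupRPCWorker, sendWorkerCommand } =
    require('./ClusterRPC');

if (cluster.isPrimary) {
    setupRPCPrimary();
    cluster.fork();
    cluster.fork();
} else {
    setupRPCWorker({
        // hypothetical handler: each worker reports its PID
        getPid: (payload, uids, callback) => callback(null, process.pid),
    });
    if (cluster.worker.id === 1) {
        // one worker queries all workers and gathers the results
        sendWorkerCommand('*', 'getPid', 'uid-1', {})
            .then(results => results.forEach(r => console.log(r.result)));
    }
}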

View File

@ -141,10 +141,6 @@ export const supportedNotificationEvents = new Set([
's3:ObjectRestore:Post',
's3:ObjectRestore:Completed',
's3:ObjectRestore:Delete',
's3:LifecycleTransition',
's3:LifecycleExpiration:*',
's3:LifecycleExpiration:DeleteMarkerCreated',
's3:LifecycleExpiration:Delete',
]);
export const notificationArnPrefix = 'arn:scality:bucketnotif';
// HTTP server keep-alive timeout is set to a higher value than
@ -171,7 +167,3 @@ export const maxCachedBuckets = process.env.METADATA_MAX_CACHED_BUCKETS ?
Number(process.env.METADATA_MAX_CACHED_BUCKETS) : 1000;
export const validRestoreObjectTiers = new Set(['Expedited', 'Standard', 'Bulk']);
export const maxBatchingConcurrentOperations = 5;
/** For the policy resource ARN check, we allow an empty account ID to avoid breaking compatibility */
export const policyArnAllowedEmptyAccountId = ['utapi', 'scuba'];

View File

@ -148,7 +148,7 @@ export class IndexTransaction {
'missing condition for conditional put'
);
}
if (typeof condition.notExists !== 'string' && typeof condition.exists !== 'string') {
if (typeof condition.notExists !== 'string') {
throw propError(
'unsupportedConditionalOperation',
'missing key or supported condition'

View File

@ -690,11 +690,6 @@ export const ReportNotPresent: ErrorFormat = {
'The request was rejected because the credential report does not exist. To generate a credential report, use GenerateCredentialReport.',
};
export const Found: ErrorFormat = {
code: 302,
description: 'Resource Found'
};
// ------------- Special non-AWS S3 errors -------------
export const MPUinProgress: ErrorFormat = {
@ -1042,15 +1037,3 @@ export const AuthMethodNotImplemented: ErrorFormat = {
description: 'AuthMethodNotImplemented',
code: 501,
};
// --------------------- quotaErrors ---------------------
export const NoSuchQuota: ErrorFormat = {
code: 404,
description: 'The specified resource does not have a quota.',
};
export const QuotaExceeded: ErrorFormat = {
code: 429,
description: 'The quota set for the resource is exceeded.',
};

View File

@ -1,19 +1,26 @@
import promClient from 'prom-client';
const collectDefaultMetricsIntervalMs =
process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS !== undefined ?
Number.parseInt(process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS, 10) :
10000;
promClient.collectDefaultMetrics({ timeout: collectDefaultMetricsIntervalMs });
export default class ZenkoMetrics {
static createCounter(params: promClient.CounterConfiguration<string>) {
static createCounter(params: promClient.CounterConfiguration) {
return new promClient.Counter(params);
}
static createGauge(params: promClient.GaugeConfiguration<string>) {
static createGauge(params: promClient.GaugeConfiguration) {
return new promClient.Gauge(params);
}
static createHistogram(params: promClient.HistogramConfiguration<string>) {
static createHistogram(params: promClient.HistogramConfiguration) {
return new promClient.Histogram(params);
}
static createSummary(params: promClient.SummaryConfiguration<string>) {
static createSummary(params: promClient.SummaryConfiguration) {
return new promClient.Summary(params);
}
@ -21,15 +28,11 @@ export default class ZenkoMetrics {
return promClient.register.getSingleMetric(name);
}
static async asPrometheus() {
static asPrometheus() {
return promClient.register.metrics();
}
static asPrometheusContentType() {
return promClient.register.contentType;
}
static collectDefaultMetrics() {
return promClient.collectDefaultMetrics();
}
}

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import { legacyLocations } from '../constants';
import escapeForXml from '../s3middleware/escapeForXml';

View File

@ -101,7 +101,6 @@ export default class BucketInfo {
_azureInfo: any | null;
_ingestion: { status: 'enabled' | 'disabled' } | null;
_capabilities?: Capabilities;
_quotaMax: number | 0;
/**
* Represents all bucket information.
@ -158,7 +157,6 @@ export default class BucketInfo {
* @param [notificationConfiguration] - bucket notification configuration
* @param [tags] - bucket tag set
* @param [capabilities] - capabilities for the bucket
* @param quotaMax - bucket quota
*/
constructor(
name: string,
@ -187,7 +185,6 @@ export default class BucketInfo {
notificationConfiguration?: any,
tags?: Array<BucketTag> | [],
capabilities?: Capabilities,
quotaMax?: number | 0,
) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof owner, 'string');
@ -288,10 +285,6 @@ export default class BucketInfo {
tags = [] as BucketTag[];
}
assert.strictEqual(areTagsValid(tags), true);
if (quotaMax) {
assert.strictEqual(typeof quotaMax, 'number');
assert(quotaMax >= 0, 'Quota cannot be negative');
}
// IF UPDATING PROPERTIES, INCREMENT MODELVERSION NUMBER ABOVE
this._acl = aclInstance;
@ -320,7 +313,6 @@ export default class BucketInfo {
this._notificationConfiguration = notificationConfiguration || null;
this._tags = tags;
this._capabilities = capabilities || undefined;
this._quotaMax = quotaMax || 0;
return this;
}
@ -356,7 +348,6 @@ export default class BucketInfo {
notificationConfiguration: this._notificationConfiguration,
tags: this._tags,
capabilities: this._capabilities,
quotaMax: this._quotaMax,
};
const final = this._websiteConfiguration
? {
@ -383,7 +374,7 @@ export default class BucketInfo {
obj.bucketPolicy, obj.uid, obj.readLocationConstraint, obj.isNFS,
obj.ingestion, obj.azureInfo, obj.objectLockEnabled,
obj.objectLockConfiguration, obj.notificationConfiguration, obj.tags,
obj.capabilities, obj.quotaMax);
obj.capabilities);
}
/**
@ -410,8 +401,7 @@ export default class BucketInfo {
data._bucketPolicy, data._uid, data._readLocationConstraint,
data._isNFS, data._ingestion, data._azureInfo,
data._objectLockEnabled, data._objectLockConfiguration,
data._notificationConfiguration, data._tags, data._capabilities,
data._quotaMax);
data._notificationConfiguration, data._tags, data._capabilities);
}
/**
@ -949,22 +939,4 @@ export default class BucketInfo {
this._capabilities = capabilities;
return this;
}
/**
* Get the bucket quota information
* @return quotaMax
*/
getQuota() {
return this._quotaMax;
}
/**
* Set bucket quota
* @param quota - quota to be set
* @return - bucket quota info
*/
setQuota(quota: number) {
this._quotaMax = quota || 0;
return this;
}
}

View File

@ -666,38 +666,13 @@ export default class LifecycleConfiguration {
* @return Returns an error object or `null`
*/
_checkDate(date: string) {
const isoRegex = new RegExp(
"^(-?(?:[1-9][0-9]*)?[0-9]{4})" + // Year
"-(1[0-2]|0[1-9])" + // Month
"-(3[01]|0[1-9]|[12][0-9])" + // Day
"T(2[0-3]|[01][0-9])" + // Hour
":([0-5][0-9])" + // Minute
":([0-5][0-9])" + // Second
"(\\.[0-9]+)?" + // Fractional second
"(Z|[+-][01][0-9]:[0-5][0-9])?$", // Timezone
"g"
);
const matches = [...date.matchAll(isoRegex)];
if (matches.length !== 1) {
const isoRegex = new RegExp('^(-?(?:[1-9][0-9]*)?[0-9]{4})-' +
'(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9])' +
':([0-5][0-9]):([0-5][0-9])(.[0-9]+)?(Z)?$');
if (!isoRegex.test(date)) {
const msg = 'Date must be in ISO 8601 format';
return errors.InvalidArgument.customizeDescription(msg);
}
// Check for a timezone in the last match group. If none, add a Z to indicate UTC.
if (!matches[0][matches[0].length-1]) {
date += 'Z';
}
const dateObj = new Date(date);
if (Number.isNaN(dateObj.getTime())) {
const msg = 'Date is not a valid date';
return errors.InvalidArgument.customizeDescription(msg);
}
if (dateObj.getUTCHours() !== 0
|| dateObj.getUTCMinutes() !== 0
|| dateObj.getUTCSeconds() !== 0
|| dateObj.getUTCMilliseconds() !== 0) {
const msg = '\'Date\' must be at midnight GMT';
return errors.InvalidArgument.customizeDescription(msg);
}
return null;
}
@ -869,7 +844,6 @@ export default class LifecycleConfiguration {
* days: <value>,
* date: <value>,
* deleteMarker: <value>
* newerNoncurrentVersions: <value>,
* },
* ],
* }
@ -882,8 +856,7 @@ export default class LifecycleConfiguration {
actionName: string;
days?: number;
date?: number;
deleteMarker?: boolean;
newerNoncurrentVersions?: number
deleteMarker?: boolean
}[];
} = {
propName: 'actions',
@ -912,14 +885,8 @@ export default class LifecycleConfiguration {
if (action.error) {
actionsObj.error = action.error;
} else {
const actionTimes = [
'days',
'date',
'deleteMarker',
'transition',
'nonCurrentVersionTransition',
'newerNoncurrentVersions'
];
const actionTimes = ['days', 'date', 'deleteMarker',
'transition', 'nonCurrentVersionTransition'];
actionTimes.forEach(t => {
if (action[t]) {
// eslint-disable-next-line no-param-reassign
@ -1065,7 +1032,6 @@ export default class LifecycleConfiguration {
* nvExpObj = {
* error: <error>,
* days: <value>,
* newerNoncurrentVersions: <value>,
* }
*/
_parseNoncurrentVersionExpiration(rule: any) {
@ -1076,41 +1042,14 @@ export default class LifecycleConfiguration {
'NoncurrentDays');
return { error };
}
const actionParams: {
error?: ArsenalError;
days: number;
newerNoncurrentVersions: number;
} = {
days: 0,
newerNoncurrentVersions: 0,
};
const daysInt = parseInt(subNVExp.NoncurrentDays[0], 10);
if (daysInt < 1) {
const msg = 'NoncurrentDays is not a positive integer';
const error = errors.InvalidArgument.customizeDescription(msg);
return { error };
} else {
actionParams.days = daysInt;
return { days: daysInt };
}
if (subNVExp.NewerNoncurrentVersions) {
const newerVersionsInt = parseInt(subNVExp.NewerNoncurrentVersions[0], 10);
if (Number.isNaN(newerVersionsInt) || newerVersionsInt < 1) {
const msg = 'NewerNoncurrentVersions is not a positive integer';
const error = errors.InvalidArgument.customizeDescription(msg);
return { error };
}
actionParams.newerNoncurrentVersions = newerVersionsInt;
} else {
actionParams.newerNoncurrentVersions = 0;
}
return actionParams;
}
/**
@ -1173,10 +1112,6 @@ export default class LifecycleConfiguration {
assert.strictEqual(typeof t.storageClass, 'string');
});
}
if (a.newerNoncurrentVersions) {
assert.strictEqual(typeof a.newerNoncurrentVersions, 'number');
}
});
});
}
@ -1226,24 +1161,15 @@ export default class LifecycleConfiguration {
}
const Actions = actions.map(action => {
const {
actionName,
days,
date,
deleteMarker,
nonCurrentVersionTransition,
transition,
newerNoncurrentVersions,
} = action;
const { actionName, days, date, deleteMarker,
nonCurrentVersionTransition, transition } = action;
let Action: any;
if (actionName === 'AbortIncompleteMultipartUpload') {
Action = `<${actionName}><DaysAfterInitiation>${days}` +
`</DaysAfterInitiation></${actionName}>`;
} else if (actionName === 'NoncurrentVersionExpiration') {
const Days = `<NoncurrentDays>${days}</NoncurrentDays>`;
const NewerVersions = newerNoncurrentVersions ?
`<NewerNoncurrentVersions>${newerNoncurrentVersions}</NewerNoncurrentVersions>` : '';
Action = `<${actionName}>${Days}${NewerVersions}</${actionName}>`;
Action = `<${actionName}><NoncurrentDays>${days}` +
`</NoncurrentDays></${actionName}>`;
} else if (actionName === 'Expiration') {
const Days = days ? `<Days>${days}</Days>` : '';
const Date = date ? `<Date>${date}</Date>` : '';
@ -1320,18 +1246,13 @@ export default class LifecycleConfiguration {
}
actions.forEach(action => {
const { actionName, days, date, deleteMarker, newerNoncurrentVersions } = action;
const { actionName, days, date, deleteMarker } = action;
if (actionName === 'AbortIncompleteMultipartUpload') {
entry.addAbortMPU(days!);
return;
}
if (actionName === 'NoncurrentVersionExpiration') {
entry.addNCVExpiration('NoncurrentDays', days!);
if (newerNoncurrentVersions) {
entry.addNCVExpiration('NewerNoncurrentVersions', newerNoncurrentVersions!);
}
entry.addNCVExpiration(days!);
return;
}
if (actionName === 'Expiration') {
@ -1368,7 +1289,6 @@ export type Rule = {
days?: number;
date?: number;
deleteMarker?: boolean;
newerNoncurrentVersions?: number;
nonCurrentVersionTransition?: {
noncurrentDays: number;
storageClass: string;

View File

@ -10,10 +10,6 @@ export type Expiration = {
Date?: number | boolean;
Days?: number | boolean;
};
export type NoncurrentExpiration = {
NoncurrentDays: number | null;
NewerNoncurrentVersions: number | null;
};
/**
* @class LifecycleRule
@ -25,7 +21,7 @@ export default class LifecycleRule {
status: Status;
tags: Tags;
expiration?: Expiration;
ncvExpiration?: NoncurrentExpiration;
ncvExpiration?: { NoncurrentDays: number };
abortMPU?: { DaysAfterInitiation: number };
transitions?: any[];
ncvTransitions?: any[];
@ -43,7 +39,7 @@ export default class LifecycleRule {
ID: string;
Status: Status;
Expiration?: Expiration;
NoncurrentVersionExpiration?: NoncurrentExpiration;
NoncurrentVersionExpiration?: { NoncurrentDays: number };
AbortIncompleteMultipartUpload?: { DaysAfterInitiation: number };
Transitions?: any[];
NoncurrentVersionTransitions?: any[];
@ -55,7 +51,7 @@ export default class LifecycleRule {
rule.Expiration = this.expiration;
}
if (this.ncvExpiration) {
rule.NoncurrentVersionExpiration = this.ncvExpiration
rule.NoncurrentVersionExpiration = this.ncvExpiration;
}
if (this.abortMPU) {
rule.AbortIncompleteMultipartUpload = this.abortMPU;
@ -145,24 +141,15 @@ export default class LifecycleRule {
/**
* NoncurrentVersionExpiration
* @param prop - Property must be defined in `validProps`
* @param value - integer for `NoncurrentDays` and `NewerNoncurrentVersions`
* @param days - NoncurrentDays
*/
addNCVExpiration(prop: 'NoncurrentDays' | 'NewerNoncurrentVersions', value: number): this;
addNCVExpiration(prop: string, value: number) {
const validProps = ['NoncurrentDays', 'NewerNoncurrentVersions'];
if (validProps.includes(prop)) {
this.ncvExpiration = this.ncvExpiration || {
NoncurrentDays: null,
NewerNoncurrentVersions: null,
};
this.ncvExpiration[prop] = value;
}
addNCVExpiration(days: number) {
this.ncvExpiration = { NoncurrentDays: days };
return this;
}
/**
* abortincompletemultipartupload
* AbortIncompleteMultipartUpload
* @param days - DaysAfterInitiation
*/
addAbortMPU(days: number) {

View File

@ -0,0 +1,166 @@
type status = {
paused: boolean,
scheduledResume: string|null,
};
type serviceStatus = {
crr: status|null,
ingestion: status|null,
lifecycle: status|null,
};
/**
* Class to manage a location's pause/resume state on its
* different services.
* Currently supported services are: crr, ingestion, lifecycle
*/
export default class LocationStatus {
_data: serviceStatus;
/**
* @constructor
* @param services services to init
* @param locationStatus initial location status values
*/
constructor(services: string[], locationStatus?: LocationStatus | serviceStatus) {
this._data = this._initStatus(services);
if (locationStatus) {
const data = locationStatus instanceof LocationStatus ?
locationStatus._data : locationStatus;
Object.keys(this._data).forEach(svc => {
this._data[svc] = {
paused: data[svc]?.paused || false,
scheduledResume: data[svc]?.scheduledResume || null,
};
});
}
}
/**
* Initializes the status of all services.
* The default status of a service is unpaused
* @returns {serviceStatus}
*/
_initStatus(servicesToInit: string[]): serviceStatus {
// build a fresh status object per service, so the services do
// not share (and accidentally mutate) a single reference
const initStatus = (): status => ({
paused: false,
scheduledResume: null,
});
return {
crr: servicesToInit.includes('crr') ? initStatus() : null,
ingestion: servicesToInit.includes('ingestion') ? initStatus() : null,
lifecycle: servicesToInit.includes('lifecycle') ? initStatus() : null,
};
}
/**
* initializes a service status
* @param service service name
*/
_initService(service: string) {
this._data[service] = {
paused: false,
scheduledResume: null,
};
}
/**
* @param service service name
* @param paused true if paused
*/
setServicePauseStatus(service: string, paused: boolean) {
if (Object.keys(this._data).includes(service)) {
if (!this._data[service]) {
this._initService(service);
}
this._data[service].paused = paused;
}
}
/**
* @param service service name
* @returns true if paused
*/
getServicePauseStatus(service: string): boolean | null {
if (!this._data[service]) {
return null;
}
return this._data[service].paused;
}
/**
* @param service service name
* @param date scheduled resume date, or null to clear the schedule
*/
setServiceResumeSchedule(service: string, date: Date | null) {
if (this._data[service]) {
if (date !== null) {
this._data[service].scheduledResume = date.toString();
} else {
this._data[service].scheduledResume = null;
}
}
}
/**
* @param service service name
* @returns scheduled resume date
*/
getServiceResumeSchedule(service: string): Date | null {
const schedule = this._data[service]?.scheduledResume;
if (!schedule) {
return null;
}
return new Date(schedule);
}
/**
* @param service service(s) name
*/
pauseLocation(service: string | string[]) {
const servicesList = Array.isArray(service) ?
service : [service];
servicesList.forEach(svc => {
if (!this.getServicePauseStatus(svc)) {
this.setServicePauseStatus(svc, true);
}
});
}
/**
* @param service service(s) name
* @param schedule date to resume service(s)
*/
resumeLocation(service: string | string[], schedule?: Date) {
const servicesList = Array.isArray(service) ?
service : [service];
servicesList.forEach(svc => {
if (!this.getServicePauseStatus(svc)) {
return;
}
// stay paused if a resume is scheduled for later,
// resume immediately otherwise
const shouldPause = Boolean(schedule);
this.setServicePauseStatus(svc, shouldPause);
this.setServiceResumeSchedule(svc, schedule || null);
});
}
/**
* @return location status object
*/
getValue(): serviceStatus {
return this._data;
}
/**
* @returns serialized location status data
*/
getSerialized(): string {
return JSON.stringify(this.getValue());
}
}
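A short usage sketch of the new model, following the services listed in the class comment:

const status = new LocationStatus(['crr', 'ingestion', 'lifecycle']);
status.pauseLocation('crr');
status.getServicePauseStatus('crr');    // => true
// schedule a resume one hour from now instead of resuming immediately
status.resumeLocation('crr', new Date(Date.now() + 60 * 60 * 1000));
status.getServicePauseStatus('crr');    // => true (still paused)
status.getServiceResumeSchedule('crr'); // => the scheduled Date
status.resumeLocation('crr');           // resume now
status.getSerialized();                 // JSON string of all service statuses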

View File

@ -58,15 +58,13 @@ export type ObjectMDData = {
'x-amz-server-side-encryption-customer-algorithm': string;
'x-amz-website-redirect-location': string;
'x-amz-scal-transition-in-progress'?: boolean;
'x-amz-scal-transition-time'?: string;
azureInfo?: any;
acl: ACL;
key: string;
location: null | Location[];
// versionId, isNull, isNull2, nullVersionId and isDeleteMarker
// versionId, isNull, nullVersionId and isDeleteMarker
// should be undefined when not set explicitly
isNull?: boolean;
isNull2?: boolean;
nullVersionId?: string;
nullUploadId?: string;
isDeleteMarker?: boolean;
@ -211,7 +209,6 @@ export default class ObjectMD {
// versionId, isNull, nullVersionId and isDeleteMarker
// should be undefined when not set explicitly
isNull: undefined,
isNull2: undefined,
nullVersionId: undefined,
nullUploadId: undefined,
isDeleteMarker: undefined,
@ -650,24 +647,10 @@ export default class ObjectMD {
* Set metadata transition in progress value
*
* @param inProgress - True if transition is in progress, false otherwise
* @param transitionTime - Date when the transition started
* @return itself
*/
setTransitionInProgress(inProgress: false): this
setTransitionInProgress(inProgress: true, transitionTime: Date|string|number): this
setTransitionInProgress(inProgress: boolean, transitionTime?: Date|string|number) {
setTransitionInProgress(inProgress: boolean) {
this._data['x-amz-scal-transition-in-progress'] = inProgress;
if (!inProgress || !transitionTime) {
delete this._data['x-amz-scal-transition-time'];
} else {
if (typeof transitionTime === 'number') {
transitionTime = new Date(transitionTime);
}
if (transitionTime instanceof Date) {
transitionTime = transitionTime.toISOString();
}
this._data['x-amz-scal-transition-time'] = transitionTime;
}
return this;
}
@ -680,14 +663,6 @@ export default class ObjectMD {
return this._data['x-amz-scal-transition-in-progress'];
}
/**
* Gets the transition time of the object.
* @returns The transition time of the object.
*/
getTransitionTime() {
return this._data['x-amz-scal-transition-time'];
}
/**
* Set access control list
*
@ -836,31 +811,6 @@ export default class ObjectMD {
return this._data.isNull || false;
}
/**
* Set metadata isNull2 value
*
* @param isNull2 - Whether new version is null or not AND has
* been put with a Cloudserver handling null keys (i.e. supporting
* S3C-7352)
* @return itself
*/
setIsNull2(isNull2: boolean) {
this._data.isNull2 = isNull2;
return this;
}
/**
* Get metadata isNull2 value
*
* @return isNull2 - Whether new version is null or not AND has
* been put with a Cloudserver handling null keys (i.e. supporting
* S3C-7352)
*/
getIsNull2() {
return this._data.isNull2 || false;
}
/**
* Set metadata nullVersionId value
*

View File

@ -1,8 +1,6 @@
import assert from 'assert';
import UUID from 'uuid';
import { RequestLogger } from 'werelogs';
import escapeForXml from '../s3middleware/escapeForXml';
import errors from '../errors';
import { isValidBucketName } from '../s3routes/routesUtils';

View File

@ -5,6 +5,7 @@ export { default as BucketInfo } from './BucketInfo';
export { default as BucketPolicy } from './BucketPolicy';
export { default as LifecycleConfiguration } from './LifecycleConfiguration';
export { default as LifecycleRule } from './LifecycleRule';
export { default as LocationStatus } from './LocationStatus';
export { default as NotificationConfiguration } from './NotificationConfiguration';
export { default as ObjectLockConfiguration } from './ObjectLockConfiguration';
export { default as ObjectMD } from './ObjectMD';

View File

@ -435,6 +435,7 @@ export default class Server {
this._server.on('connection', sock => {
// Setting no delay of the socket to the value configured
// TODO fix this
// @ts-expect-error
sock.setNoDelay(this.isNoDelay());
sock.on('error', err => this._logger.info(
'socket error - request rejected', { error: err }));

View File

@ -62,7 +62,7 @@ export default class HealthProbeServer extends httpServer {
_onLiveness(
_req: http.IncomingMessage,
res: http.ServerResponse,
log: werelogs.RequestLogger,
log: RequestLogger,
) {
if (this._livenessCheck(log)) {
sendSuccess(res, log);
@ -74,7 +74,7 @@ export default class HealthProbeServer extends httpServer {
_onReadiness(
_req: http.IncomingMessage,
res: http.ServerResponse,
log: werelogs.RequestLogger,
log: RequestLogger,
) {
if (this._readinessCheck(log)) {
sendSuccess(res, log);
@ -84,11 +84,10 @@ export default class HealthProbeServer extends httpServer {
}
// expose metrics to Prometheus
async _onMetrics(_req: http.IncomingMessage, res: http.ServerResponse) {
const metrics = await ZenkoMetrics.asPrometheus();
_onMetrics(_req: http.IncomingMessage, res: http.ServerResponse) {
res.writeHead(200, {
'Content-Type': ZenkoMetrics.asPrometheusContentType(),
});
res.end(metrics);
res.end(ZenkoMetrics.asPrometheus());
}
}

View File

@ -16,7 +16,7 @@ export const DEFAULT_METRICS_ROUTE = '/metrics';
* @param log - Werelogs instance for logging if you choose to log
*/
export type ProbeDelegate = (res: http.ServerResponse, log: werelogs.RequestLogger) => string | void
export type ProbeDelegate = (res: http.ServerResponse, log: RequestLogger) => string | void
export type ProbeServerParams = {
port: number;

View File

@ -1,7 +1,4 @@
import * as http from 'http';
import { RequestLogger } from 'werelogs';
import { ArsenalError } from '../../errors';
/**

View File

@ -119,7 +119,7 @@ export default class RESTClient {
method: string,
headers: http.OutgoingHttpHeaders | null,
key: string | null,
log: werelogs.RequestLogger,
log: RequestLogger,
responseCb: (res: http.IncomingMessage) => void,
) {
const reqHeaders = headers || {};

View File

@ -25,7 +25,7 @@ function setContentRange(
function sendError(
res: http.ServerResponse,
log: werelogs.RequestLogger,
log: RequestLogger,
error: ArsenalError,
optMessage?: string,
) {
@ -141,7 +141,7 @@ export default class RESTServer extends httpServer {
_onPut(
req: http.IncomingMessage,
res: http.ServerResponse,
log: werelogs.RequestLogger,
log: RequestLogger,
) {
let size: number;
try {
@ -183,7 +183,7 @@ export default class RESTServer extends httpServer {
_onGet(
req: http.IncomingMessage,
res: http.ServerResponse,
log: werelogs.RequestLogger,
log: RequestLogger,
) {
let pathInfo: ReturnType<typeof parseURL>;
let rangeSpec: ReturnType<typeof httpUtils.parseRangeSpec> | undefined =
@ -266,7 +266,7 @@ export default class RESTServer extends httpServer {
_onDelete(
req: http.IncomingMessage,
res: http.ServerResponse,
log: werelogs.RequestLogger,
log: RequestLogger,
) {
let pathInfo: ReturnType<typeof parseURL>;
try {

View File

@ -1,6 +1,6 @@
import ioClient from 'socket.io-client';
import * as http from 'http';
import { Server as IOServer } from 'socket.io';
import io from 'socket.io';
import * as sioStream from './sio-stream';
import async from 'async';
import assert from 'assert';
@ -497,7 +497,7 @@ export function RPCServer(params: {
assert(params.logger);
const httpServer = http.createServer();
const server = new IOServer(httpServer, { maxHttpBufferSize: 1e8 });
const server = io(httpServer);
const log = params.logger;
/**
@ -508,7 +508,7 @@ export function RPCServer(params: {
*
* @param {BaseService} serviceList - list of services to register
*/
(server as any).registerServices = function registerServices(...serviceList: any[]) {
server.registerServices = function registerServices(...serviceList: any[]) {
serviceList.forEach(service => {
const sock = this.of(service.namespace);
sock.on('connection', conn => {
@ -536,7 +536,7 @@ export function RPCServer(params: {
});
};
(server as any).listen = function listen(port, bindAddress = undefined) {
server.listen = function listen(port, bindAddress = undefined) {
httpServer.listen(port, bindAddress);
};

View File

@ -38,7 +38,7 @@
},
"principalAWSUserArn": {
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,2017}$"
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,64}$"
},
"principalAWSRoleArn": {
"type": "string",
@ -360,9 +360,6 @@
"type": "string",
"const": "2012-10-17"
},
"Id": {
"type": "string"
},
"Statement": {
"oneOf": [
{

View File

@ -28,7 +28,7 @@
},
"principalAWSUserArn": {
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,2017}$"
"pattern": "^arn:aws:iam::[0-9]{12}:user/(?!\\*)[\\w+=,.@ -/]{1,64}$"
},
"principalAWSRoleArn": {
"type": "string",

View File

@ -12,39 +12,13 @@ import {
actionMapSSO,
actionMapSTS,
actionMapMetadata,
actionMapScuba,
} from './utils/actionMaps';
export const actionNeedQuotaCheck = {
const _actionNeedQuotaCheck = {
objectPut: true,
objectPutVersion: true,
objectPutPart: true,
objectRestore: true,
};
/**
* This variable describes APIs that change the bytes
* stored, requiring quota updates
*/
export const actionWithDataDeletion = {
objectDelete: true,
objectDeleteVersion: true,
multipartDelete: true,
multiObjectDelete: true,
};
/**
* The function returns true if the current API call is a copy object
* and the action requires a quota evaluation logic, post retrieval
* of the object metadata.
* @param {string} action - the action being performed
* @param {string} currentApi - the current API being called
* @return {boolean} - whether the action requires a quota check
*/
export function actionNeedQuotaCheckCopy(action: string, currentApi: string) {
return action === 'objectGet' && (currentApi === 'objectCopy' || currentApi === 'objectPutCopyPart');
}
function _findAction(service: string, method: string) {
switch (service) {
case 's3':
@ -62,8 +36,6 @@ function _findAction(service: string, method: string) {
return actionMapSTS[method];
case 'metadata':
return actionMapMetadata[method];
case 'scuba':
return actionMapScuba[method];
default:
return undefined;
}
@ -133,10 +105,6 @@ function _buildArn(
return `arn:scality:metadata::${requesterInfo!.accountid}:` +
`${generalResource}/`;
}
case 'scuba': {
return `arn:scality:scuba::${requesterInfo!.accountid}:` +
`${generalResource}${specificResource ? '/' + specificResource : ''}`;
}
default:
return undefined;
}
@ -205,7 +173,6 @@ export default class RequestContext {
_needTagEval: boolean;
_foundAction?: string;
_foundResource?: string;
_objectLockRetentionDays?: number | null;
constructor(
headers: { [key: string]: string | string[] },
@ -227,7 +194,6 @@ export default class RequestContext {
requestObjTags?: string,
existingObjTag?: string,
needTagEval?: false,
objectLockRetentionDays?: number,
) {
this._headers = headers;
this._query = query;
@ -256,12 +222,10 @@ export default class RequestContext {
this._securityToken = securityToken;
this._policyArn = policyArn;
this._action = action;
this._needQuota = actionNeedQuotaCheck[apiMethod] === true
|| actionWithDataDeletion[apiMethod] === true;
this._needQuota = _actionNeedQuotaCheck[apiMethod] === true;
this._requestObjTags = requestObjTags || null;
this._existingObjTag = existingObjTag || null;
this._needTagEval = needTagEval || false;
this._objectLockRetentionDays = objectLockRetentionDays || null;
return this;
}
@ -293,7 +257,6 @@ export default class RequestContext {
requestObjTags: this._requestObjTags,
existingObjTag: this._existingObjTag,
needTagEval: this._needTagEval,
objectLockRetentionDays: this._objectLockRetentionDays,
};
return JSON.stringify(requestInfo);
}
@ -334,7 +297,6 @@ export default class RequestContext {
obj.requestObjTags,
obj.existingObjTag,
obj.needTagEval,
obj.objectLockRetentionDays,
);
}
@ -738,24 +700,4 @@ export default class RequestContext {
getNeedTagEval() {
return this._needTagEval;
}
/**
* Get object lock retention days
*
* @returns objectLockRetentionDays - object lock retention days
*/
getObjectLockRetentionDays() {
return this._objectLockRetentionDays;
}
/**
* Set object lock retention days
*
* @param objectLockRetentionDays - object lock retention days
* @returns itself
*/
setObjectLockRetentionDays(objectLockRetentionDays: number) {
this._objectLockRetentionDays = objectLockRetentionDays;
return this;
}
}
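
For illustration, here is how the quota-classification pieces above fit together; `needsQuotaEvaluation` is a hypothetical helper, while the two maps and `actionNeedQuotaCheckCopy` mirror the exports in this file:

```javascript
// Local copies of the exported maps, so this sketch runs standalone.
const actionNeedQuotaCheck = {
    objectPut: true, objectPutVersion: true, objectPutPart: true, objectRestore: true,
};
const actionWithDataDeletion = {
    objectDelete: true, objectDeleteVersion: true, multipartDelete: true, multiObjectDelete: true,
};
function actionNeedQuotaCheckCopy(action, currentApi) {
    return action === 'objectGet' && (currentApi === 'objectCopy' || currentApi === 'objectPutCopyPart');
}

// An API call needs quota evaluation if it adds bytes, deletes bytes, or is the
// metadata-read half of a server-side copy (hypothetical aggregation helper).
function needsQuotaEvaluation(apiMethod, action, currentApi) {
    return actionNeedQuotaCheck[apiMethod] === true
        || actionWithDataDeletion[apiMethod] === true
        || actionNeedQuotaCheckCopy(action, currentApi);
}

console.log(needsQuotaEvaluation('objectPut'));                            // true
console.log(needsQuotaEvaluation('objectGet', 'objectGet', 'objectCopy')); // true
console.log(needsQuotaEvaluation('bucketGet'));                            // false
```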

View File

@ -310,7 +310,6 @@ export function evaluatePolicy(
}
/**
* @deprecated Upgrade to standardEvaluateAllPolicies
* Evaluate whether a request is permitted under a policy.
* @param requestContext - Info necessary to
* evaluate permission
@ -326,16 +325,6 @@ export function evaluateAllPolicies(
allPolicies: any[],
log: Logger,
): string {
return standardEvaluateAllPolicies(requestContext, allPolicies, log).verdict;
}
export function standardEvaluateAllPolicies(
requestContext: RequestContext,
allPolicies: any[],
log: Logger,
): {
verdict: string;
isImplicit: boolean;
} {
log.trace('evaluating all policies');
let allow = false;
let allowWithTagCondition = false;
@ -344,10 +333,7 @@ export function standardEvaluateAllPolicies(
const singlePolicyVerdict = evaluatePolicy(requestContext, allPolicies[i], log);
// If there is any Deny, just return Deny
if (singlePolicyVerdict === 'Deny') {
return {
verdict: 'Deny',
isImplicit: false,
};
return 'Deny';
}
if (singlePolicyVerdict === 'Allow') {
allow = true;
@ -358,7 +344,6 @@ export function standardEvaluateAllPolicies(
} // else 'Neutral'
}
let verdict;
let isImplicit = false;
if (allow) {
if (denyWithTagCondition) {
verdict = 'NeedTagConditionEval';
@ -370,9 +355,8 @@ export function standardEvaluateAllPolicies(
verdict = 'NeedTagConditionEval';
} else {
verdict = 'Deny';
isImplicit = true;
}
}
log.trace('result of evaluating all policies', { verdict, isImplicit });
return { verdict, isImplicit };
log.trace('result of evaluating all policies', { verdict });
return verdict;
}
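
To make the explicit/implicit distinction above concrete, a standalone re-implementation of the aggregation over per-policy verdicts; `aggregateVerdicts` is an illustrative name, and its inputs are the strings `evaluatePolicy` can return:

```javascript
function aggregateVerdicts(verdicts) {
    let allow = false;
    let allowWithTagCondition = false;
    let denyWithTagCondition = false;
    for (const v of verdicts) {
        if (v === 'Deny') {
            // an explicit Deny short-circuits everything
            return { verdict: 'Deny', isImplicit: false };
        }
        if (v === 'Allow') allow = true;
        else if (v === 'AllowWithTagCondition') allowWithTagCondition = true;
        else if (v === 'DenyWithTagCondition') denyWithTagCondition = true;
        // 'Neutral' contributes nothing
    }
    if (allow) {
        // a tag-conditioned Deny still needs evaluation before allowing
        return { verdict: denyWithTagCondition ? 'NeedTagConditionEval' : 'Allow', isImplicit: false };
    }
    if (allowWithTagCondition) {
        return { verdict: 'NeedTagConditionEval', isImplicit: false };
    }
    // nothing matched at all: this Deny is implicit, which callers may treat differently
    return { verdict: 'Deny', isImplicit: true };
}

console.log(aggregateVerdicts(['Neutral', 'Allow'])); // { verdict: 'Allow', isImplicit: false }
console.log(aggregateVerdicts(['Neutral']));          // { verdict: 'Deny', isImplicit: true }
console.log(aggregateVerdicts(['Allow', 'Deny']));    // { verdict: 'Deny', isImplicit: false }
```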

View File

@ -52,12 +52,6 @@ const sharedActionMap = {
objectPutVersion: 's3:PutObjectVersion',
};
const actionMapBucketQuotas = {
bucketGetQuota: 'scality:GetBucketQuota',
bucketUpdateQuota: 'scality:UpdateBucketQuota',
bucketDeleteQuota: 'scality:DeleteBucketQuota',
};
// action map used for request context
const actionMapRQ = {
bucketPut: 's3:CreateBucket',
@ -71,7 +65,6 @@ const actionMapRQ = {
initiateMultipartUpload: 's3:PutObject',
objectDeleteVersion: 's3:DeleteObjectVersion',
objectDeleteTaggingVersion: 's3:DeleteObjectVersionTagging',
objectGetArchiveInfo: 'scality:GetObjectArchiveInfo',
objectGetVersion: 's3:GetObjectVersion',
objectGetACLVersion: 's3:GetObjectVersionAcl',
objectGetTaggingVersion: 's3:GetObjectVersionTagging',
@ -86,11 +79,10 @@ const actionMapRQ = {
objectPutLegalHoldVersion: 's3:PutObjectLegalHold',
listObjectVersions: 's3:ListBucketVersions',
...sharedActionMap,
...actionMapBucketQuotas,
};
// action map used for bucket policies
const actionMapBP = actionMapRQ;
const actionMapBP = { ...sharedActionMap };
// action map for all relevant s3 actions
const actionMapS3 = {
@ -159,15 +151,6 @@ const actionMonitoringMapS3 = {
objectPutTagging: 'PutObjectTagging',
objectRestore: 'RestoreObject',
serviceGet: 'ListBuckets',
bucketGetQuota: 'GetBucketQuota',
bucketUpdateQuota: 'UpdateBucketQuota',
bucketDeleteQuota: 'DeleteBucketQuota',
};
const actionMapAccountQuotas = {
UpdateAccountQuota : 'scality:UpdateAccountQuota',
DeleteAccountQuota : 'scality:DeleteAccountQuota',
GetAccountQuota : 'scality:GetAccountQuota',
};
const actionMapIAM = {
@ -211,7 +194,6 @@ const actionMapIAM = {
tagUser: 'iam:TagUser',
unTagUser: 'iam:UntagUser',
listUserTags: 'iam:ListUserTags',
...actionMapAccountQuotas,
};
const actionMapSSO = {
@ -227,14 +209,6 @@ const actionMapMetadata = {
default: 'metadata:bucketd',
};
const actionMapScuba = {
GetMetrics: 'scuba:GetMetrics',
AdminStartIngest: 'scuba:AdminStartIngest',
AdminStopIngest: 'scuba:AdminStopIngest',
AdminReadRaftCseq: 'scuba:AdminReadRaftCseq',
AdminTriggerRepair: 'scuba:AdminTriggerRepair',
};
export {
actionMapRQ,
actionMapBP,
@ -244,5 +218,4 @@ export {
actionMapSSO,
actionMapSTS,
actionMapMetadata,
actionMapScuba,
};

View File

@ -1,5 +1,5 @@
import { handleWildcardInResource } from './wildcards';
import { policyArnAllowedEmptyAccountId } from '../../constants';
/**
* Checks whether an ARN from a request matches an ARN in a policy
* to compare against each portion of the ARN from the request
@ -38,10 +38,9 @@ export default function checkArnMatch(
const requestSegment = caseSensitive ? requestArnArr[j] :
requestArnArr[j].toLowerCase();
const policyArnArr = policyArn.split(':');
// We want to allow an empty account ID for utapi and scuba service ARNs to not
// We want to allow an empty account ID for utapi service ARNs to not
// break compatibility.
if (j === 4 && policyArnAllowedEmptyAccountId.includes(policyArnArr[2])
&& policyArnArr[4] === '') {
if (j === 4 && policyArnArr[2] === 'utapi' && policyArnArr[4] === '') {
continue;
} else if (!segmentRegEx.test(requestSegment)) {
return false;
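
To make the empty-account-id exception concrete, a small sketch; the list contents are assumed from the `policyArnAllowedEmptyAccountId` import above:

```javascript
// Assumed contents of the imported constant; only segment 4 (account id) is shown.
const policyArnAllowedEmptyAccountId = ['utapi', 'scuba'];

function accountSegmentMatches(policyArn, requestArn) {
    const p = policyArn.split(':');
    const r = requestArn.split(':');
    // an empty account id in the policy ARN is tolerated for these services
    if (policyArnAllowedEmptyAccountId.includes(p[2]) && p[4] === '') {
        return true;
    }
    return p[4] === r[4];
}

console.log(accountSegmentMatches(
    'arn:scality:utapi:::metrics',
    'arn:scality:utapi::123456789012:metrics')); // true
console.log(accountSegmentMatches(
    'arn:scality:iam:::whatever',
    'arn:scality:iam::123456789012:whatever'));  // false
```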

View File

@ -168,9 +168,6 @@ export function findConditionKey(
return requestContext.getNeedTagEval() && requestContext.getRequestObjTags()
? getTagKeys(requestContext.getRequestObjTags()!)
: undefined;
// The maximum retention period is 100 years.
case 's3:object-lock-remaining-retention-days':
return requestContext.getObjectLockRetentionDays() || undefined;
default:
return undefined;
}

View File

@ -2,9 +2,6 @@ import assert from 'assert';
import * as crypto from 'crypto';
import * as stream from 'stream';
import azure from '@azure/storage-blob';
import { RequestLogger } from 'werelogs';
import ResultsCollector from './ResultsCollector';
import SubStreamInterface from './SubStreamInterface';
import * as objectUtils from '../objectUtils';

View File

@ -1,25 +1,19 @@
import { scaleMsPerDay } from '../objectUtils';
const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
export default class LifecycleDateTime {
_transitionOneDayEarlier?: boolean;
_expireOneDayEarlier?: boolean;
_timeProgressionFactor?: number;
_scaledMsPerDay: number;
constructor(params?: {
transitionOneDayEarlier: boolean;
expireOneDayEarlier: boolean;
timeProgressionFactor: number;
}) {
this._transitionOneDayEarlier = params?.transitionOneDayEarlier;
this._expireOneDayEarlier = params?.expireOneDayEarlier;
this._timeProgressionFactor = params?.timeProgressionFactor || 1;
this._scaledMsPerDay = scaleMsPerDay(this._timeProgressionFactor);
}
getCurrentDate() {
const timeTravel = this._expireOneDayEarlier ? msInOneDay : 0;
const timeTravel = this._expireOneDayEarlier ? oneDay : 0;
return Date.now() + timeTravel;
}
@ -31,7 +25,7 @@ export default class LifecycleDateTime {
findDaysSince(date: Date) {
const now = this.getCurrentDate();
const diff = now - date.getTime();
return Math.floor(diff / this._scaledMsPerDay);
return Math.floor(diff / (1000 * 60 * 60 * 24));
}
/**
@ -58,8 +52,8 @@ export default class LifecycleDateTime {
}
if (transition.Days !== undefined) {
const lastModifiedTime = this.getTimestamp(lastModified);
const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
return lastModifiedTime + (transition.Days * this._scaledMsPerDay) + timeTravel;
const timeTravel = this._transitionOneDayEarlier ? -oneDay : 0;
return lastModifiedTime + (transition.Days * oneDay) + timeTravel;
}
}
@ -75,8 +69,8 @@ export default class LifecycleDateTime {
) {
if (transition.NoncurrentDays !== undefined) {
const lastModifiedTime = this.getTimestamp(lastModified);
const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
return lastModifiedTime + (transition.NoncurrentDays * this._scaledMsPerDay) + timeTravel;
const timeTravel = this._transitionOneDayEarlier ? -oneDay : 0;
return lastModifiedTime + (transition.NoncurrentDays * oneDay) + timeTravel;
}
}
}
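
The `timeProgressionFactor` introduced above compresses lifecycle time for testing; a short sketch of the arithmetic, mirroring the `scaleMsPerDay` helper from the objectUtils hunk further down (values are illustrative):

```javascript
const msInOneDay = 24 * 60 * 60 * 1000;
const scaleMsPerDay = timeProgressionFactor =>
    Math.round(msInOneDay / (timeProgressionFactor || 1)) || 1;

// With timeProgressionFactor = 24, one lifecycle "day" passes every real hour.
const scaledMsPerDay = scaleMsPerDay(24);
console.log(scaledMsPerDay); // 3600000

// findDaysSince() then divides by the scaled day length instead of a real day:
const lastModified = new Date(Date.now() - 2 * scaledMsPerDay);
console.log(Math.floor((Date.now() - lastModified.getTime()) / scaledMsPerDay)); // 2
```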

View File

@ -284,7 +284,6 @@ export default class LifecycleUtils {
// Names are long, so obscuring a bit
const ncve = 'NoncurrentVersionExpiration';
const ncd = 'NoncurrentDays';
const nncv = 'NewerNoncurrentVersions';
if (!store[ncve]) {
store[ncve] = {};
@ -292,7 +291,6 @@ export default class LifecycleUtils {
if (!store[ncve][ncd] || rule[ncve][ncd] < store[ncve][ncd]) {
store[ncve].ID = rule.ID;
store[ncve][ncd] = rule[ncve][ncd];
store[ncve][nncv] = rule[ncve][nncv];
}
}
if (rule.AbortIncompleteMultipartUpload

View File

@ -1,5 +1,3 @@
const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
export const getMD5Buffer = (base64MD5: WithImplicitCoercion<string> | Uint8Array) =>
base64MD5 instanceof Uint8Array ? base64MD5 : Buffer.from(base64MD5, 'base64')
@ -8,14 +6,3 @@ export const getHexMD5 = (base64MD5: WithImplicitCoercion<string> | Uint8Array)
export const getBase64MD5 = (hexMD5: WithImplicitCoercion<string>) =>
Buffer.from(hexMD5, 'hex').toString('base64');
/**
* Calculates the number of scaled milliseconds per day based on the given time progression factor.
* This function is intended for testing and simulation purposes only.
* @param {number} timeProgressionFactor - The desired time progression factor for scaling.
* @returns {number} The number of scaled milliseconds per day.
* If the result is 0, the minimum value of 1 millisecond is returned.
*/
export const scaleMsPerDay = (timeProgressionFactor: number): number =>
Math.round(msInOneDay / (timeProgressionFactor || 1)) || 1;

View File

@ -1,7 +1,4 @@
import assert from 'assert';
import { RequestLogger } from 'werelogs';
import errors from '../errors';
import routeGET from './routes/routeGET';
import routePUT from './routes/routePUT';

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import * as routesUtils from '../routesUtils';
import errors from '../../errors';
import StatsClient from '../../metrics/StatsClient';
@ -43,8 +41,6 @@ export default function routeDELETE(
return call('bucketDeleteEncryption');
} else if (query?.tagging !== undefined) {
return call('bucketDeleteTagging');
} else if (query?.quota !== undefined) {
return call('bucketDeleteQuota');
}
call('bucketDelete');
} else {

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import * as routesUtils from '../routesUtils';
import errors from '../../errors';
import * as http from 'http';
@ -60,8 +58,6 @@ export default function routerGET(
call('bucketGetEncryption');
} else if (query.search !== undefined) {
call('metadataSearch')
} else if (query.quota !== undefined) {
call('bucketGetQuota');
} else {
// GET bucket
call('bucketGet');

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import * as routesUtils from '../routesUtils';
import errors from '../../errors';
import StatsClient from '../../metrics/StatsClient';

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import * as routesUtils from '../routesUtils';
import errors from '../../errors';
import * as http from 'http';

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import * as routesUtils from '../routesUtils';
import errors from '../../errors';
import * as http from 'http';

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import * as routesUtils from '../routesUtils';
import errors from '../../errors';
import * as http from 'http';
@ -105,13 +103,6 @@ export default function routePUT(
return routesUtils.responseNoBody(err, corsHeaders,
response, 200, log);
});
} else if (query.quota !== undefined) {
api.callApiMethod('bucketUpdateQuota', request, response,
log, (err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders, response,
200, log);
});
} else {
// PUT bucket
return api.callApiMethod('bucketPut', request, response, log,

View File

@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import * as routesUtils from '../routesUtils';
import errors from '../../errors';
import * as http from 'http';
@ -29,11 +27,6 @@ export default function routerWebsite(
routesUtils.statsReport500(err, statsClient);
// request being redirected
if (redirectInfo) {
if (err && redirectInfo.withError) {
return routesUtils.redirectRequestOnError(err,
'GET', redirectInfo, dataGetInfo, dataRetrievalParams,
response, resMetaHeaders, log)
}
// note that key might have been modified in websiteGet
// api to add index document
return routesUtils.redirectRequest(redirectInfo,
@ -64,11 +57,6 @@ export default function routerWebsite(
(err, resMetaHeaders, redirectInfo, key) => {
routesUtils.statsReport500(err, statsClient);
if (redirectInfo) {
if (err && redirectInfo.withError) {
return routesUtils.redirectRequestOnError(err,
'HEAD', redirectInfo, null, dataRetrievalParams,
response, resMetaHeaders, log)
}
return routesUtils.redirectRequest(redirectInfo,
// TODO ARSN-217 encrypted does not exists in request.connection
// @ts-ignore

View File

@ -1,13 +1,10 @@
import * as url from 'url';
import * as http from 'http';
import { eachSeries } from 'async';
import { RequestLogger } from 'werelogs';
import * as ipCheck from '../ipCheck';
import errors, { ArsenalError } from '../errors';
import * as constants from '../constants';
import { eachSeries } from 'async';
import DataWrapper from '../storage/data/DataWrapper';
import * as http from 'http';
import StatsClient from '../metrics/StatsClient';
import { objectKeyByteLimit } from '../constants';
const jsutil = require('../jsutil');
@ -694,8 +691,6 @@ export function streamUserErrorPage(
log: RequestLogger,
) {
setCommonResponseHeaders(corsHeaders, response, log);
response.setHeader('x-amz-error-code', err.message);
response.setHeader('x-amz-error-message', err.description);
response.writeHead(err.code, { 'Content-type': 'text/html' });
response.on('finish', () => {
// TODO ARSN-216 Fix logger
@ -878,7 +873,7 @@ export function redirectRequest(
}
let redirectLocation = justPath ? `/${redirectKey}` :
`${redirectProtocol}://${redirectHostName}/${redirectKey}`;
if (!redirectKey && redirectLocationHeader && redirectLocation !== '/') {
if (!redirectKey && redirectLocationHeader) {
// remove hanging slash
redirectLocation = redirectLocation.slice(0, -1);
}
@ -895,52 +890,6 @@ export function redirectRequest(
return undefined;
}
/**
* redirectRequestOnError - redirect with an error body
* @param err - arsenal error object
* @param method - HTTP method
* @param routingInfo - info for routing
* @param [routingInfo.withError] - flag to differentiate from routing rules
* @param [routingInfo.location] - location header
* @param dataLocations --
* - array of locations to get streams from backend
* @param retrieveDataParams - params to create instance of
* data retrieval function
* @param response - response object
* @param corsHeaders - CORS-related response headers
* @param log - Werelogs instance
*/
export function redirectRequestOnError(
err: ArsenalError,
method: 'HEAD' | 'GET',
routingInfo: {
withError: true;
location: string;
},
dataLocations: { size: string | number }[] | null,
retrieveDataParams: any,
response: http.ServerResponse,
corsHeaders: { [key: string]: string },
log: RequestLogger,
) {
response.setHeader('Location', routingInfo.location);
if (!dataLocations && err.is.Found) {
if (method === 'HEAD') {
return errorHeaderResponse(err, response, corsHeaders, log);
}
response.setHeader('x-amz-error-code', err.message);
response.setHeader('x-amz-error-message', err.description);
return errorHtmlResponse(err, false, '', response, corsHeaders, log);
}
// This is reached only for website error document (GET only)
const overrideErrorCode = err.flatten();
overrideErrorCode.code = 301;
return streamUserErrorPage(ArsenalError.unflatten(overrideErrorCode)!,
dataLocations || [], retrieveDataParams, response, corsHeaders, log);
}
/**
* Get bucket name and object name from the request
* @param request - http request object

View File

@ -2,8 +2,6 @@ const async = require('async');
const PassThrough = require('stream').PassThrough;
const assert = require('assert');
const { Logger } = require('werelogs');
const errors = require('../../errors').default;
const MD5Sum = require('../../s3middleware/MD5Sum').default;
const NullStream = require('../../s3middleware/nullStream').default;
@ -29,7 +27,6 @@ class DataWrapper {
this.metadata = metadata;
this.locStorageCheckFn = locStorageCheckFn;
this.vault = vault;
this.logger = new Logger('DataWrapper');
}
put(cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) {
@ -130,7 +127,7 @@ class DataWrapper {
}
delete(objectGetInfo, log, cb) {
const callback = cb || (() => {});
const callback = cb || log.end;
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
const isRequiredStringKey =
constants.clientsRequireStringKey[this.implName];
@ -179,9 +176,7 @@ class DataWrapper {
newObjDataStoreName)) {
return process.nextTick(cb);
}
const delLog = this.logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
delLog.trace('initiating batch delete', {
log.trace('initiating batch delete', {
keys: locations,
implName: this.implName,
method: 'batchDelete',
@ -207,21 +202,21 @@ class DataWrapper {
return false;
});
if (shouldBatchDelete && keys.length > 1) {
return this.client.batchDelete(backendName, { keys }, delLog, cb);
return this.client.batchDelete(backendName, { keys }, log, cb);
}
return async.eachLimit(locations, 5, (loc, next) => {
process.nextTick(() => this.delete(loc, delLog, next));
process.nextTick(() => this.delete(loc, log, next));
},
err => {
if (err) {
delLog.end().error('batch delete failed', { error: err });
log.end().error('batch delete failed', { error: err });
// deletion of non-existing objects results in 204
if (err.code === 404) {
return cb();
}
return cb(err);
}
delLog.end().trace('batch delete successfully completed');
log.end().trace('batch delete successfully completed');
return cb();
});
}
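
A sketch of the logger change above, assuming the werelogs calls used in the hunk: batch delete now logs on a fresh request logger derived from the caller's UID chain, so ending it cannot clash with the caller's own `log.end()`:

```javascript
const { Logger } = require('werelogs');

const logger = new Logger('DataWrapper');
const reqLog = logger.newRequestLogger();       // caller's per-request logger
const delLog = logger.newRequestLoggerFromSerializedUids(
    reqLog.getSerializedUids());                // same UID chain, own lifecycle

delLog.trace('initiating batch delete', { method: 'batchDelete' });
delLog.end().trace('batch delete successfully completed');
```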

View File

@ -1,10 +1,10 @@
const { http, https } = require('httpagent');
const url = require('url');
const AWS = require('aws-sdk');
const Sproxy = require('sproxydclient');
const Hyperdrive = require('hdclient');
const HttpsProxyAgent = require('https-proxy-agent');
require("aws-sdk/lib/maintenance_mode_message").suppress = true;
const constants = require('../../constants');
const DataFileBackend = require('./file/DataFileInterface');
const inMemory = require('./in_memory/datastore').backend;
@ -25,13 +25,8 @@ function parseLC(config, vault) {
if (locationObj.type === 'file') {
clients[location] = new DataFileBackend(config);
}
if (locationObj.type === 'vitastor') {
const VitastorBackend = require('./vitastor/VitastorBackend');
clients[location] = new VitastorBackend(location, locationObj.details);
}
if (locationObj.type === 'scality') {
if (locationObj.details.connector.sproxyd) {
const Sproxy = require('sproxydclient');
clients[location] = new Sproxy({
bootstrap: locationObj.details.connector
.sproxyd.bootstrap,
@ -46,7 +41,6 @@ function parseLC(config, vault) {
});
clients[location].clientType = 'scality';
} else if (locationObj.details.connector.hdclient) {
const Hyperdrive = require('hdclient');
clients[location] = new Hyperdrive.hdcontroller.HDProxydClient(
locationObj.details.connector.hdclient);
clients[location].clientType = 'scality';

View File

@ -5,7 +5,6 @@ const { parseTagFromQuery } = require('../../s3middleware/tagging');
const { externalBackendHealthCheckInterval } = require('../../constants');
const DataFileBackend = require('./file/DataFileInterface');
const { createLogger, checkExternalBackend } = require('./external/utils');
const jsutil = require('../../jsutil');
class MultipleBackendGateway {
constructor(clients, metadata, locStorageCheckFn) {
@ -200,12 +199,11 @@ class MultipleBackendGateway {
uploadPart(request, streamingV4Params, stream, size, location, key,
uploadId, partNumber, bucketName, log, cb) {
const client = this.clients[location];
const cbOnce = jsutil.once(cb);
if (client.uploadPart) {
return this.locStorageCheckFn(location, size, log, err => {
if (err) {
return cbOnce(err);
return cb(err);
}
return client.uploadPart(request, streamingV4Params, stream,
size, key, uploadId, partNumber, bucketName, log,
@ -219,14 +217,14 @@ class MultipleBackendGateway {
'metric following object PUT failure',
{ error: error.message });
}
return cbOnce(err);
return cb(err);
});
}
return cbOnce(null, partInfo);
return cb(null, partInfo);
});
});
}
return cbOnce();
return cb();
}
listParts(key, uploadId, location, bucketName, partNumberMarker, maxParts,

View File

@ -8,7 +8,6 @@ const getMetaHeaders =
const { prepareStream } = require('../../../s3middleware/prepareStream');
const { createLogger, logHelper, removeQuotes, trimXMetaPrefix } =
require('./utils');
const jsutil = require('../../../jsutil');
const missingVerIdInternalError = errors.InternalError.customizeDescription(
'Invalid state. Please ensure versioning is enabled ' +
@ -318,11 +317,9 @@ class AwsClient {
uploadPart(request, streamingV4Params, stream, size, key, uploadId,
partNumber, bucketName, log, callback) {
let hashedStream = stream;
const cbOnce = jsutil.once(callback);
if (request) {
const partStream = prepareStream(request, streamingV4Params,
this._vault, log, cbOnce);
this._vault, log, callback);
hashedStream = new MD5Sum();
partStream.pipe(hashedStream);
}
@ -336,7 +333,7 @@ class AwsClient {
if (err) {
logHelper(log, 'error', 'err from data backend ' +
'on uploadPart', err, this._dataStoreName, this.clientType);
return cbOnce(errors.ServiceUnavailable
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
`${this.type}: ${err.message}`),
);
@ -350,7 +347,7 @@ class AwsClient {
dataStoreName: this._dataStoreName,
dataStoreETag: noQuotesETag,
};
return cbOnce(null, dataRetrievalInfo);
return callback(null, dataRetrievalInfo);
});
}
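
The `jsutil.once` wrapper introduced in the two hunks above guards against a callback firing twice, for instance when a stream errors after a success path has already run. A minimal equivalent (the `once` helper later in this diff behaves the same way):

```javascript
function once(fn) {
    let called = false;
    return function (...args) {
        if (!called) {
            called = true;
            fn(...args);
        }
    };
}

const cbOnce = once((err, res) => console.log(err, res));
cbOnce(null, 'first');       // logs: null first
cbOnce(new Error('late'));   // ignored: the callback has already been invoked
```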

View File

@ -1,696 +0,0 @@
// Zenko CloudServer Vitastor data storage backend adapter
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const stream = require('stream');
const vitastor = require('vitastor');
const VOLUME_MAGIC = 'VstS3Vol';
const OBJECT_MAGIC = 'VstS3Obj';
const FLAG_DELETED = 2n;
type Volume = {
id: number,
partial_sectors: {
[key: string]: {
buffer: Buffer,
refs: number,
},
},
header: {
location: string,
bucket: string,
max_size: number,
create_ts: number,
used_ts: number,
size: number,
objects: number,
removed_objects: number,
object_bytes: number,
removed_bytes: number,
},
};
type ObjectHeader = {
size: number,
key: string,
part_num?: number,
};
class VitastorBackend
{
locationName: string;
config: {
pool_id: number,
metadata_image: string,
metadata_pool_id: number,
metadata_inode_num: number,
size_buckets: number[],
size_bucket_mul: number,
id_batch_size: number,
sector_size: number,
write_chunk_size: number,
read_chunk_size: number,
pack_objects: boolean,
// and also other parameters for vitastor itself
};
next_id: number;
alloc_id: number;
opened: boolean;
on_open: ((...args: any[]) => void)[] | null;
open_error: Error | null;
cli: any;
kv: any;
volumes: {
[bucket: string]: {
[max_size: string]: Volume,
},
};
volumes_by_id: {
[id: string]: Volume,
};
volume_delete_stats: {
[id: string]: {
count: number,
bytes: number,
},
};
constructor(locationName, config)
{
this.locationName = locationName;
this.config = config;
// validate config
this.config.pool_id = Number(this.config.pool_id) || 0;
if (!this.config.pool_id)
throw new Error('pool_id is required for Vitastor');
if (!this.config.metadata_image && (!this.config.metadata_pool_id || !this.config.metadata_inode_num))
throw new Error('metadata_image or metadata_pool_id/metadata_inode_num is required for Vitastor');
if (!this.config.size_buckets || !this.config.size_buckets.length)
this.config.size_buckets = [ 32*1024, 128*1024, 512*1024, 2*1024*1024, 8*1024*1024 ];
this.config.size_bucket_mul = Number(this.config.size_bucket_mul) || 2;
this.config.id_batch_size = Number(this.config.id_batch_size) || 100;
this.config.sector_size = Number(this.config.sector_size) || 0;
if (this.config.sector_size < 4096)
this.config.sector_size = 4096;
this.config.write_chunk_size = Number(this.config.write_chunk_size) || 0;
if (this.config.write_chunk_size < this.config.sector_size)
this.config.write_chunk_size = 4*1024*1024; // 4 MB
this.config.read_chunk_size = Number(this.config.read_chunk_size) || 0;
if (this.config.read_chunk_size < this.config.sector_size)
this.config.read_chunk_size = 4*1024*1024; // 4 MB
this.config.pack_objects = !!this.config.pack_objects;
// state
this.next_id = 1;
this.alloc_id = 0;
this.opened = false;
this.on_open = null;
this.open_error = null;
this.cli = new vitastor.Client(config);
this.kv = new vitastor.KV(this.cli);
// we group objects into volumes by bucket and size
this.volumes = {};
this.volumes_by_id = {};
this.volume_delete_stats = {};
}
async _makeVolumeId()
{
if (this.next_id <= this.alloc_id)
{
return this.next_id++;
}
const id_key = 'id'+this.config.pool_id;
const [ err, prev ] = await new Promise<[ any, string ]>(ok => this.kv.get(id_key, (err, value) => ok([ err, value ])));
if (err && err != vitastor.ENOENT)
{
throw new Error(err);
}
const new_id = (parseInt(prev) || 0) + 1;
this.next_id = new_id;
this.alloc_id = this.next_id + this.config.id_batch_size - 1;
await new Promise((ok, no) => this.kv.set(id_key, this.alloc_id, err => (err ? no(new Error(err)) : ok(null)), cas_old => cas_old === prev));
return this.next_id;
}
async _getVolume(bucketName, size)
{
if (!this.opened)
{
if (this.on_open)
{
await new Promise(ok => this.on_open!.push(ok));
}
else
{
this.on_open = [];
if (this.config.metadata_image)
{
const img = new vitastor.Image(this.cli, this.config.metadata_image);
const info = await new Promise<{ pool_id: number, inode_num: number }>(ok => img.get_info(ok));
this.config.metadata_pool_id = info.pool_id;
this.config.metadata_inode_num = info.inode_num;
}
const kv_config = {};
for (const key in this.config)
{
if (key.substr(0, 3) === 'kv_')
kv_config[key] = this.config[key];
}
this.open_error = await new Promise(ok => this.kv.open(
this.config.metadata_pool_id, this.config.metadata_inode_num,
kv_config, err => ok(err ? new Error(err) : null)
));
this.opened = true;
this.on_open.map(cb => setImmediate(cb));
this.on_open = null;
}
}
if (this.open_error)
{
throw this.open_error;
}
let i;
for (i = 0; i < this.config.size_buckets.length && size >= this.config.size_buckets[i]; i++) {}
let s;
if (i < this.config.size_buckets.length)
s = this.config.size_buckets[i];
else if (this.config.size_bucket_mul > 1)
{
// grow from the largest configured bucket until the object fits
s = this.config.size_buckets[this.config.size_buckets.length-1];
while (size >= s)
s = Math.floor(this.config.size_bucket_mul * s);
}
if (!this.volumes[bucketName])
{
this.volumes[bucketName] = {};
}
if (this.volumes[bucketName][s])
{
return this.volumes[bucketName][s];
}
const new_id = await this._makeVolumeId();
const new_vol = this.volumes[bucketName][s] = {
id: new_id,
// FIXME: partial_sectors should be written with CAS because otherwise we may lose quick deletes
partial_sectors: {},
header: {
location: this.locationName,
bucket: bucketName,
max_size: s,
create_ts: Date.now(),
used_ts: Date.now(),
size: this.config.sector_size, // initial position is right after header
objects: 0,
removed_objects: 0,
object_bytes: 0,
removed_bytes: 0,
},
};
this.volumes_by_id[new_id] = new_vol;
const header_text = JSON.stringify(this.volumes[bucketName][s].header);
const buf = Buffer.alloc(this.config.sector_size);
buf.write(VOLUME_MAGIC + header_text, 0);
await new Promise((ok, no) => this.cli.write(
this.config.pool_id, new_id, 0, buf, err => (err ? no(new Error(err)) : ok(null))
));
await new Promise((ok, no) => this.kv.set(
'vol_'+this.config.pool_id+'_'+new_id, header_text, err => (err ? no(new Error(err)) : ok(null)), cas_old => !cas_old
));
return new_vol;
}
toObjectGetInfo(objectKey, bucketName, storageLocation)
{
return null;
}
_bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs)
{
if ((cur_pos % this.config.sector_size) ||
Math.floor((cur_pos + cur_size) / this.config.sector_size) == Math.floor(cur_pos / this.config.sector_size))
{
const sect_pos = Math.floor(cur_pos / this.config.sector_size) * this.config.sector_size;
const sect = vol.partial_sectors[sect_pos]
? vol.partial_sectors[sect_pos].buffer
: Buffer.alloc(this.config.sector_size);
if (this.config.pack_objects)
{
// Save only if <pack_objects>
if (!vol.partial_sectors[sect_pos])
vol.partial_sectors[sect_pos] = { buffer: sect, refs: 0 };
vol.partial_sectors[sect_pos].refs++;
sector_refs.push(sect_pos);
}
let off = cur_pos % this.config.sector_size;
let i = 0;
for (; i < cur_chunks.length; i++)
{
let copy_len = this.config.sector_size - off;
copy_len = copy_len > cur_chunks[i].length ? cur_chunks[i].length : copy_len;
cur_chunks[i].copy(sect, off, 0, copy_len);
off += copy_len;
if (copy_len < cur_chunks[i].length)
{
cur_chunks[i] = cur_chunks[i].slice(copy_len);
cur_size -= copy_len;
break;
}
else
cur_size -= cur_chunks[i].length;
}
cur_chunks.splice(0, i, sect);
cur_size += this.config.sector_size;
cur_pos = sect_pos;
}
return [ cur_pos, cur_size ];
}
_bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, write_all)
{
const write_pos = cur_pos;
const write_chunks = cur_chunks;
let write_size = cur_size;
cur_chunks = [];
cur_pos += cur_size;
cur_size = 0;
let remain = (cur_pos % this.config.sector_size);
if (remain > 0)
{
cur_pos -= remain;
let last_sect = null;
if (write_all)
{
last_sect = vol.partial_sectors[cur_pos]
? vol.partial_sectors[cur_pos].buffer
: Buffer.alloc(this.config.sector_size);
if (this.config.pack_objects)
{
// Save only if <pack_objects>
if (!vol.partial_sectors[cur_pos])
vol.partial_sectors[cur_pos] = { buffer: last_sect, refs: 0 };
vol.partial_sectors[cur_pos].refs++;
sector_refs.push(cur_pos);
}
}
write_size -= remain;
if (write_size < 0)
write_size = 0;
for (let i = write_chunks.length-1; i >= 0 && remain > 0; i--)
{
if (write_chunks[i].length <= remain)
{
remain -= write_chunks[i].length;
if (write_all)
write_chunks[i].copy(last_sect, remain);
else
cur_chunks.unshift(write_chunks[i]);
write_chunks.pop();
}
else
{
if (write_all)
write_chunks[i].copy(last_sect, 0, write_chunks[i].length - remain);
else
cur_chunks.unshift(write_chunks[i].slice(write_chunks[i].length - remain));
write_chunks[i] = write_chunks[i].slice(0, write_chunks[i].length - remain);
remain = 0;
i++;
}
}
if (write_all)
{
write_chunks.push(last_sect);
write_size += this.config.sector_size;
}
}
for (const chunk of cur_chunks)
{
cur_size += chunk.length;
}
return [ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ];
}
/**
* reqUids: string, // request-ids for log, usually joined by ':'
* keyContext: {
* // essentially all of the object's metadata
* bucketName,
* objectKey,
* owner?,
* namespace?,
* partNumber?,
* uploadId?,
* metaHeaders?,
* isDeleteMarker?,
* tagging?,
* contentType?,
* cacheControl?,
* contentDisposition?,
* contentEncoding?,
* },
* callback: (error, objectGetInfo: any) => void,
*/
put(stream, size, keyContext, reqUids, callback)
{
callback = once(callback);
this._getVolume(keyContext.bucketName, size)
.then(vol => this._put(vol, stream, size, keyContext, reqUids, callback))
.catch(callback);
}
_put(vol, stream, size, keyContext, reqUids, callback)
{
const object_header: ObjectHeader = {
size,
key: keyContext.objectKey,
};
if (keyContext.partNumber)
{
object_header.part_num = keyContext.partNumber;
}
// header is: <8 bytes magic> <8 bytes flags> <8 bytes json length> <json>
const hdr_begin_buf = Buffer.alloc(24);
const hdr_json_buf = Buffer.from(JSON.stringify(object_header), 'utf-8');
hdr_begin_buf.write(OBJECT_MAGIC);
hdr_begin_buf.writeBigInt64LE(BigInt(hdr_json_buf.length), 16);
const object_header_buf = Buffer.concat([ hdr_begin_buf, hdr_json_buf ]);
const object_pos = vol.header.size;
const object_get_info = { volume: vol.id, offset: object_pos, hdrlen: object_header_buf.length, size };
let cur_pos = object_pos;
let cur_chunks = [ object_header_buf ];
let cur_size = object_header_buf.length;
let err: Error|null = null;
let waiting = 1; // 1 for end or error, 1 for each write request
vol.header.size += object_header_buf.length + size;
if (!this.config.pack_objects && (vol.header.size % this.config.sector_size))
{
vol.header.size += this.config.sector_size - (vol.header.size % this.config.sector_size);
}
const writeChunk = (last) =>
{
const sector_refs = [];
// Handle partial beginning
[ cur_pos, cur_size ] = this._bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs);
// Handle partial end
let write_pos, write_chunks, write_size;
[ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ] = this._bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, last);
waiting++;
// FIXME: pool_id: maybe it should be stored in volume metadata to allow to migrate volumes?
this.cli.write(this.config.pool_id, vol.id, write_pos, write_chunks, (res) =>
{
for (const sect of sector_refs)
{
vol.partial_sectors[sect].refs--;
if (!vol.partial_sectors[sect].refs &&
vol.header.size >= sect+this.config.sector_size)
{
// Forget partial data when it's not needed anymore
delete(vol.partial_sectors[sect]);
}
}
waiting--;
if (res)
{
err = new Error(res);
waiting--;
}
if (!waiting)
{
callback(err, err ? null : object_get_info);
}
});
};
// Stream data
stream.on('error', (e) =>
{
err = e;
waiting--;
if (!waiting)
{
callback(err, null);
}
});
stream.on('end', () =>
{
if (err)
{
return;
}
waiting--;
if (cur_size)
{
// write last chunk
writeChunk(true);
}
if (!waiting)
{
callback(null, object_get_info);
}
});
stream.on('data', (chunk) =>
{
if (err)
{
return;
}
cur_chunks.push(chunk);
cur_size += chunk.length;
if (cur_size >= this.config.write_chunk_size)
{
// got a complete chunk, write it out
writeChunk(false);
}
});
}
/**
* objectGetInfo: {
* key: { volume, offset, hdrlen, size }, // from put
* size,
* start,
* dataStoreName,
* dataStoreETag,
* range,
* response: ServerResponse,
* },
* range?: [ start, end ], // like in HTTP - first byte index, last byte index
* callback: (error, readStream) => void,
*/
get(objectGetInfo, range, reqUids, callback)
{
if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
!(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
!objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
{
throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
}
const [ start, end ] = range || [];
if (start < 0 || end < 0 || end != null && start != null && end < start || start >= objectGetInfo.key.size)
{
throw new Error('Invalid range: '+start+'-'+end);
}
let offset = objectGetInfo.key.offset + objectGetInfo.key.hdrlen + (start || 0);
let len = objectGetInfo.key.size - (start || 0);
if (end)
{
const len2 = end - (start || 0) + 1;
if (len2 < len)
len = len2;
}
callback(null, new VitastorReadStream(this.cli, objectGetInfo.key.volume, offset, len, this.config));
}
/**
* objectGetInfo: {
* key: { volume, offset, hdrlen, size }, // from put
* size,
* start,
* dataStoreName,
* dataStoreETag,
* range,
* response: ServerResponse,
* },
* callback: (error) => void,
*/
delete(objectGetInfo, reqUids, callback)
{
callback = once(callback);
this._delete(objectGetInfo, reqUids)
.then(callback)
.catch(callback);
}
async _delete(objectGetInfo, reqUids)
{
if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
!(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
!objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
{
throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
}
const in_sect_pos = (objectGetInfo.key.offset % this.config.sector_size);
const sect_pos = objectGetInfo.key.offset - in_sect_pos;
const vol = this.volumes_by_id[objectGetInfo.key.volume];
if (vol && vol.partial_sectors[sect_pos])
{
// The sector may still be written to in corner cases
const sect = vol.partial_sectors[sect_pos];
const flags = sect.buffer.readBigInt64LE(in_sect_pos + 8);
if (!(flags & FLAG_DELETED))
{
const del_stat = this.volume_delete_stats[vol.id] = (this.volume_delete_stats[vol.id] || { count: 0, bytes: 0 });
del_stat.count++;
del_stat.bytes += objectGetInfo.key.size;
sect.buffer.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
sect.refs++;
const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, sect.buffer, ok));
sect.refs--;
if (err)
{
sect.buffer.writeBigInt64LE(0n, in_sect_pos + 8);
throw new Error(err);
}
}
}
else
{
// RMW with CAS
const [ err, buf, version ] = await new Promise<[ any, Buffer, bigint ]>(ok => this.cli.read(
this.config.pool_id, objectGetInfo.key.volume, sect_pos, this.config.sector_size,
(err, buf, version) => ok([ err, buf, version ])
));
if (err)
{
throw new Error(err);
}
// FIXME What if JSON crosses sector boundary? Prevent it if we want to pack objects
const magic = buf.slice(in_sect_pos, in_sect_pos+8).toString();
const flags = buf.readBigInt64LE(in_sect_pos+8);
const json_len = Number(buf.readBigInt64LE(in_sect_pos+16));
let json_hdr;
if (in_sect_pos+24+json_len <= buf.length)
{
try
{
json_hdr = JSON.parse(buf.slice(in_sect_pos+24, in_sect_pos+24+json_len).toString());
}
catch (e)
{
}
}
if (magic !== OBJECT_MAGIC || !json_hdr || json_hdr.size !== objectGetInfo.key.size)
{
throw new Error(
'header of object with size '+objectGetInfo.key.size+
' bytes not found in volume '+objectGetInfo.key.volume+' at '+objectGetInfo.key.offset
);
}
else if (!(flags & FLAG_DELETED))
{
buf.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, buf, { version: version+1n }, ok));
if (err == vitastor.EINTR)
{
// Retry
await this._delete(objectGetInfo, reqUids);
}
else if (err)
{
throw new Error(err);
}
else
{
// FIXME: Write deletion statistics to volumes
// FIXME: Implement defragmentation
const del_stat = this.volume_delete_stats[objectGetInfo.key.volume] = (this.volume_delete_stats[objectGetInfo.key.volume] || { count: 0, bytes: 0 });
del_stat.count++;
del_stat.bytes += objectGetInfo.key.size;
}
}
}
}
/**
* config: full zenko server config,
* callback: (error, stats) => void, // stats is the returned statistics in arbitrary format
*/
getDiskUsage(config, reqUids, callback)
{
// FIXME: Iterate all volumes and return its sizes and deletion statistics, or maybe just sizes
callback(null, {});
}
}
class VitastorReadStream extends stream.Readable
{
constructor(cli, volume_id, offset, len, config, options = undefined)
{
super(options);
this.cli = cli;
this.volume_id = volume_id;
this.offset = offset;
this.end = offset + len;
this.pos = offset;
this.config = config;
this._reading = false;
}
_read(n)
{
if (this._reading)
{
return;
}
// FIXME: Validate object header
const chunk_size = n && this.config.read_chunk_size < n ? n : this.config.read_chunk_size;
const read_offset = this.pos;
const round_offset = read_offset - (read_offset % this.config.sector_size);
let read_end = this.end <= read_offset+chunk_size ? this.end : read_offset+chunk_size;
const round_end = (read_end % this.config.sector_size)
? read_end + this.config.sector_size - (read_end % this.config.sector_size)
: read_end;
if (round_end <= this.end)
read_end = round_end;
this.pos = read_end;
if (read_end <= read_offset)
{
// EOF
this.push(null);
return;
}
this._reading = true;
this.cli.read(this.config.pool_id, this.volume_id, round_offset, round_end-round_offset, (err, buf, version) =>
{
this._reading = false;
if (err)
{
this.destroy(new Error(err));
return;
}
if (read_offset != round_offset || round_end != read_end)
{
buf = buf.subarray(read_offset-round_offset, buf.length-(round_end-read_end));
}
if (this.push(buf))
{
this._read(n);
}
});
}
}
function once(callback)
{
let called = false;
return function()
{
if (!called)
{
called = true;
callback.apply(null, arguments);
}
};
}
module.exports = VitastorBackend;
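
A hypothetical location entry for this backend, matching the `vitastor` branch added to `parseLC` earlier in this diff; every value here is illustrative, not a documented default:

```javascript
const locationConfig = {
    type: 'vitastor',
    details: {
        pool_id: 3,                          // data pool holding the volumes
        metadata_image: 'zenko-s3-metadata', // or metadata_pool_id + metadata_inode_num
        size_buckets: [32 * 1024, 128 * 1024, 512 * 1024, 2 * 1024 * 1024, 8 * 1024 * 1024],
        size_bucket_mul: 2,                  // growth factor past the largest bucket
        pack_objects: false,                 // pack small objects into shared sectors
    },
};

// put() appends '<8B magic><8B flags><8B json length><json header>' plus the data
// to a per-bucket, per-size-class volume, and returns an objectGetInfo of the form
// { volume, offset, hdrlen, size }, which get()/delete() then address directly.
console.log(locationConfig.details.size_buckets.length); // 5 size classes
```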

View File

@ -51,36 +51,6 @@ function _parseListEntries(entries) {
});
}
/** _parseLifecycleListEntries - parse the values returned in a lifecycle listing by metadata
* @param {object[]} entries - Version or Content entries in a metadata listing
* @param {string} entries[].key - metadata key
* @param {string} entries[].value - stringified object metadata
* @return {object} - mapped array with parsed value or JSON parsing err
*/
function _parseLifecycleListEntries(entries) {
return entries.map(entry => {
const tmp = JSON.parse(entry.value);
return {
key: entry.key,
value: {
Size: tmp['content-length'],
ETag: tmp['content-md5'],
VersionId: tmp.versionId,
IsNull: tmp.isNull,
LastModified: tmp['last-modified'],
Owner: {
DisplayName: tmp['owner-display-name'],
ID: tmp['owner-id'],
},
StorageClass: tmp['x-amz-storage-class'],
tags: tmp.tags,
staleDate: tmp.staleDate,
dataStoreName: tmp.dataStoreName,
},
};
});
}
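
One raw metadata entry through the mapping above, for illustration; the field values are made up, but the stored-JSON keys are the ones the function reads:

```javascript
const entry = {
    key: 'photos/cat.jpg',
    value: JSON.stringify({
        'content-length': 1024,
        'content-md5': 'd41d8cd98f00b204e9800998ecf8427e',
        versionId: '98765',
        'last-modified': '2023-02-14T10:00:00.000Z',
        'x-amz-storage-class': 'STANDARD',
        dataStoreName: 'us-east-1',
    }),
};

// the same field extraction _parseLifecycleListEntries performs
const tmp = JSON.parse(entry.value);
console.log({
    key: entry.key,
    value: {
        Size: tmp['content-length'],
        ETag: tmp['content-md5'],
        VersionId: tmp.versionId,
        LastModified: tmp['last-modified'],
        StorageClass: tmp['x-amz-storage-class'],
        dataStoreName: tmp.dataStoreName,
    },
});
```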
/** parseListEntries - parse the values returned in a listing by metadata
* @param {object[]} entries - Version or Content entries in a metadata listing
* @param {string} entries[].key - metadata key
@ -177,42 +147,6 @@ class MetadataWrapper {
});
}
updateBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, capability, log, cb) {
log.debug('updating bucket capabilities in metadata');
// When concurrency update is not supported, we update the whole bucket metadata
if (!this.client.putBucketAttributesCapabilities) {
return this.updateBucket(bucketName, bucketMD, log, cb);
}
return this.client.putBucketAttributesCapabilities(bucketName, capabilityName, capacityField, capability,
log, err => {
if (err) {
log.debug('error from metadata', { implName: this.implName,
error: err });
return cb(err);
}
log.trace('bucket capabilities updated in metadata');
return cb(err);
});
}
deleteBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, log, cb) {
log.debug('deleting bucket capabilities in metadata');
// When concurrency update is not supported, we update the whole bucket metadata
if (!this.client.deleteBucketAttributesCapability) {
return this.updateBucket(bucketName, bucketMD, log, cb);
}
return this.client.deleteBucketAttributesCapability(bucketName, capabilityName, capacityField,
log, err => {
if (err) {
log.debug('error from metadata', { implName: this.implName,
error: err });
return cb(err);
}
log.trace('bucket capabilities deleted in metadata');
return cb(err);
});
}
getBucket(bucketName, log, cb) {
log.debug('getting bucket from metadata');
this.client.getBucketAttributes(bucketName, log, (err, data) => {
@ -226,19 +160,6 @@ class MetadataWrapper {
});
}
getBucketQuota(bucketName, log, cb) {
log.debug('getting bucket quota from metadata');
this.client.getBucketAttributes(bucketName, log, (err, data) => {
if (err) {
log.debug('error from metadata', { implName: this.implName,
error: err });
return cb(err);
}
const bucketInfo = BucketInfo.fromObj(data);
return cb(err, { quota: bucketInfo.getQuota() });
});
}
deleteBucket(bucketName, log, cb) {
log.debug('deleting bucket from metadata');
this.client.deleteBucket(bucketName, log, err => {
@ -292,25 +213,6 @@ class MetadataWrapper {
});
}
getObjectsMD(bucketName, objNamesWithParams, log, cb) {
if (typeof this.client.getObjects !== 'function') {
log.debug('backend does not support get object metadata with batching', {
implName: this.implName,
});
return cb(errors.NotImplemented);
}
log.debug('getting objects from metadata', { objects: objNamesWithParams });
return this.client.getObjects(bucketName, objNamesWithParams, log, (err, data) => {
if (err) {
log.debug('error getting objects from metadata', { implName: this.implName, objects: objNamesWithParams,
err });
return cb(err);
}
log.debug('objects retrieved from metadata', { objects: objNamesWithParams });
return cb(err, data);
});
}
getObjectMD(bucketName, objName, params, log, cb) {
log.debug('getting object from metadata');
this.client.getObject(bucketName, objName, params, log, (err, data) => {
@ -324,7 +226,7 @@ class MetadataWrapper {
});
}
deleteObjectMD(bucketName, objName, params, log, cb, originOp = 's3:ObjectRemoved:Delete') {
deleteObjectMD(bucketName, objName, params, log, cb) {
log.debug('deleting object from metadata');
this.client.deleteObject(bucketName, objName, params, log, err => {
if (err) {
@ -334,7 +236,7 @@ class MetadataWrapper {
}
log.debug('object deleted from metadata');
return cb(err);
}, originOp);
});
}
listObject(bucketName, listingParams, log, cb) {
@ -377,29 +279,6 @@ class MetadataWrapper {
});
}
listLifecycleObject(bucketName, listingParams, log, cb) {
log.debug('getting object listing for lifecycle from metadata');
this.client.listLifecycleObject(bucketName, listingParams, log, (err, data) => {
if (err) {
log.error('error from metadata', { implName: this.implName,
err });
return cb(err);
}
log.debug('object listing for lifecycle retrieved from metadata');
// eslint-disable-next-line no-param-reassign
data.Contents = parseListEntries(data.Contents, _parseLifecycleListEntries);
if (data.Contents instanceof Error) {
log.error('error parsing metadata listing for lifecycle', {
error: data.Contents,
listingType: listingParams.listingType,
method: 'listLifecycleObject',
});
return cb(errors.InternalError);
}
return cb(null, data);
});
}
listMultipartUploads(bucketName, listingParams, log, cb) {
this.client.listMultipartUploads(bucketName, listingParams, log,
(err, data) => {
@ -548,139 +427,6 @@ class MetadataWrapper {
return cb();
});
}
/**
* Put bucket indexes
*
* indexSpec format:
* [
* { key:[ { key: "", order: 1 } ... ], name: <id 1>, ... , < backend options> },
* ...
* { key:[ { key: "", order: 1 } ... ], name: <id n>, ... },
* ]
*
*
* @param {String} bucketName bucket name
* @param {Array<Object>} indexSpecs index specification
* @param {Object} log logger
* @param {Function} cb callback
* @return {undefined}
*/
putBucketIndexes(bucketName, indexSpecs, log, cb) {
log.debug('put bucket indexes');
if (typeof this.client.putBucketIndexes !== 'function') {
log.error('error from metadata', {
method: 'putBucketIndexes',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.putBucketIndexes(bucketName, indexSpecs, log, err => {
if (err) {
log.debug('error from metadata', {
method: 'putBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null);
});
}
/**
* Delete bucket indexes
*
* indexSpec format:
* [
* { key:[ { key: "", order: 1 } ... ], name: <id 1>, ... , < backend options> },
* ...
* { key:[ { key: "", order: 1 } ... ], name: <id n>, ... },
* ]
*
*
* @param {String} bucketName bucket name
* @param {Array<Object>} indexSpecs index specification
* @param {Object} log logger
* @param {Function} cb callback
* @return {undefined}
*/
deleteBucketIndexes(bucketName, indexSpecs, log, cb) {
log.debug('delete bucket indexes');
if (typeof this.client.deleteBucketIndexes !== 'function') {
log.error('error from metadata', {
method: 'deleteBucketIndexes',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.deleteBucketIndexes(bucketName, indexSpecs, log, err => {
if (err) {
log.error('error from metadata', {
method: 'deleteBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null);
});
}
getBucketIndexes(bucketName, log, cb) {
log.debug('get bucket indexes');
if (typeof this.client.getBucketIndexes !== 'function') {
log.debug('error from metadata', {
method: 'getBucketIndexes',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.getBucketIndexes(bucketName, log, (err, res) => {
if (err) {
log.debug('error from metadata', {
method: 'getBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null, res);
});
}
getIndexingJobs(log, cb) {
if (typeof this.client.getIndexingJobs !== 'function') {
log.debug('error from metadata', {
method: 'getIndexingJobs',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.getIndexingJobs(log, (err, res) => {
if (err) {
log.debug('error from metadata', {
method: 'getBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null, res);
});
}
}
module.exports = MetadataWrapper;
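
The wrapper methods above share one feature-detection pattern: probe the backend client for the optional method and fail with NotImplemented otherwise. A minimal standalone sketch, with stub names that are illustrative only:

```javascript
// Stub illustration of the capability probe used by getObjectsMD,
// putBucketIndexes, getBucketIndexes, and getIndexingJobs above.
function callOptional(client, method, args, cb) {
    if (typeof client[method] !== 'function') {
        return cb(new Error('NotImplemented')); // errors.NotImplemented in the real code
    }
    return client[method](...args, cb);
}

const backendWithIndexes = {
    putBucketIndexes: (bucket, specs, log, cb) => cb(null),
};
const stubLog = { debug: () => {} };

callOptional(backendWithIndexes, 'putBucketIndexes',
    ['example-bucket', [{ name: 'byTags', keys: [{ key: 'value.tags', order: 1 }] }], stubLog],
    err => console.log(err ? err.message : 'ok'));                          // ok
callOptional({}, 'putBucketIndexes', [], err => console.log(err.message)); // NotImplemented
```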

View File

@ -110,17 +110,6 @@ class BucketClientInterface {
return null;
}
listLifecycleObject(bucketName, params, log, cb) {
this.client.listObject(bucketName, log.getSerializedUids(), params,
(err, data) => {
if (err) {
return cb(err);
}
return cb(null, JSON.parse(data));
});
return null;
}
listMultipartUploads(bucketName, params, log, cb) {
this.client.listObject(bucketName, log.getSerializedUids(), params,
(err, data) => {

View File

@ -325,10 +325,6 @@ class BucketFileInterface {
return this.internalListObject(bucketName, params, log, cb);
}
listLifecycleObject(bucketName, params, log, cb) {
return this.internalListObject(bucketName, params, log, cb);
}
listMultipartUploads(bucketName, params, log, cb) {
return this.internalListObject(bucketName, params, log, cb);
}

View File

@ -318,10 +318,6 @@ const metastore = {
});
},
listLifecycleObject(bucketName, params, log, cb) {
return process.nextTick(cb, errors.NotImplemented);
},
listMultipartUploads(bucketName, listingParams, log, cb) {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {

File diff suppressed because it is too large

View File

@ -55,22 +55,6 @@ class MongoReadStream extends Readable {
}
}
if (options.lastModified) {
query['value.last-modified'] = {};
if (options.lastModified.lt) {
query['value.last-modified'].$lt = options.lastModified.lt;
}
}
if (options.dataStoreName) {
query['value.dataStoreName'] = {};
if (options.dataStoreName.ne) {
query['value.dataStoreName'].$ne = options.dataStoreName.ne;
}
}
if (!Object.keys(query._id).length) {
delete query._id;
}
@ -85,8 +69,7 @@ class MongoReadStream extends Readable {
Object.assign(query, searchOptions);
}
const projection = { 'value.location': 0 };
this._cursor = c.find(query, { projection }).sort({
this._cursor = c.find(query).sort({
_id: options.reverse ? -1 : 1,
});
if (options.limit && options.limit !== -1) {
@ -102,10 +85,15 @@ class MongoReadStream extends Readable {
return;
}
this._cursor.next().then(doc => {
this._cursor.next((err, doc) => {
if (this._destroyed) {
return;
}
if (err) {
this.emit('error', err);
return;
}
let key = undefined;
let value = undefined;
@ -129,12 +117,6 @@ class MongoReadStream extends Readable {
value,
});
}
}).catch(err => {
if (this._destroyed) {
return;
}
this.emit('error', err);
return;
});
}
@ -144,7 +126,7 @@ class MongoReadStream extends Readable {
}
this._destroyed = true;
this._cursor.close().catch(err => {
this._cursor.close(err => {
if (err) {
this.emit('error', err);
return;

View File

@ -185,48 +185,6 @@ function formatVersionKey(key, versionId, vFormat) {
return formatVersionKeyV0(key, versionId);
}
function indexFormatMongoArrayToObject(mongoIndexArray) {
const indexObj = [];
for (const idx of mongoIndexArray) {
const keys = [];
let entries = [];
if (idx.key instanceof Map) {
entries = idx.key.entries();
} else {
entries = Object.entries(idx.key);
}
for (const k of entries) {
keys.push({ key: k[0], order: k[1] });
}
indexObj.push({ name: idx.name, keys });
}
return indexObj;
}
function indexFormatObjectToMongoArray(indexObj) {
const mongoIndexArray = [];
for (const idx of indexObj) {
const key = new Map();
for (const k of idx.keys) {
key.set(k.key, k.order);
}
// copy all field except keys from idx
// eslint-disable-next-line
const { keys: _, ...toCopy } = idx;
mongoIndexArray.push(Object.assign(toCopy, { name: idx.name, key }));
}
return mongoIndexArray;
}
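
A round-trip through the two converters above, as self-contained local copies, to show the two index formats side by side:

```javascript
function mongoArrayToObject(mongoIndexArray) {
    return mongoIndexArray.map(idx => {
        const entries = idx.key instanceof Map ? idx.key.entries() : Object.entries(idx.key);
        const keys = [];
        for (const [key, order] of entries) {
            keys.push({ key, order });
        }
        return { name: idx.name, keys };
    });
}

function objectToMongoArray(indexObj) {
    return indexObj.map(idx => {
        const key = new Map(idx.keys.map(k => [k.key, k.order]));
        const { keys: _, ...rest } = idx; // copy all fields except keys
        return Object.assign(rest, { name: idx.name, key });
    });
}

const obj = mongoArrayToObject([{ name: 'byTags', key: { 'value.tags': 1 } }]);
console.log(obj); // [ { name: 'byTags', keys: [ { key: 'value.tags', order: 1 } ] } ]
console.log(objectToMongoArray(obj)[0].key.get('value.tags')); // 1
```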
module.exports = {
credPrefix,
@ -237,6 +195,4 @@ module.exports = {
translateConditions,
formatMasterKey,
formatVersionKey,
indexFormatMongoArrayToObject,
indexFormatObjectToMongoArray,
};

View File

@ -10,21 +10,21 @@ function trySetDirSyncFlag(path) {
const GETFLAGS = 2148034049;
const SETFLAGS = 1074292226;
const FS_DIRSYNC_FL = 65536n;
const FS_DIRSYNC_FL = 65536;
const buffer = Buffer.alloc(8, 0);
const pathFD = fs.openSync(path, 'r');
const status = ioctl(pathFD, GETFLAGS, buffer);
assert.strictEqual(status, 0);
const currentFlags = buffer.readBigInt64LE(0);
const currentFlags = buffer.readUIntLE(0, 8);
const flags = currentFlags | FS_DIRSYNC_FL;
buffer.writeBigInt64LE(flags, 0);
buffer.writeUIntLE(flags, 0, 8);
const status2 = ioctl(pathFD, SETFLAGS, buffer);
assert.strictEqual(status2, 0);
fs.closeSync(pathFD);
const pathFD2 = fs.openSync(path, 'r');
const confirmBuffer = Buffer.alloc(8, 0);
ioctl(pathFD2, GETFLAGS, confirmBuffer);
assert.strictEqual(confirmBuffer.readBigInt64LE(0),
assert.strictEqual(confirmBuffer.readUIntLE(0, 8),
currentFlags | FS_DIRSYNC_FL, 'FS_DIRSYNC_FL not set');
fs.closeSync(pathFD2);
}
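
A short illustration of why the switch to the BigInt buffer APIs above matters: inode flags are a 64-bit field, and Node's `readUIntLE` supports at most 6 bytes, so an 8-byte read would throw:

```javascript
const buffer = Buffer.alloc(8, 0);
const FS_DIRSYNC_FL = 65536n;

buffer.writeBigInt64LE(123n, 0);
const current = buffer.readBigInt64LE(0);        // 123n
buffer.writeBigInt64LE(current | FS_DIRSYNC_FL, 0);
console.log(buffer.readBigInt64LE(0));           // 65659n

// buffer.readUIntLE(0, 8) would throw: byteLength must be <= 6
```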

View File

@ -3,7 +3,7 @@ import { VersioningConstants } from './constants';
const VID_SEP = VersioningConstants.VersionId.Separator;
/**
* Class for manipulating an object version.
* The format of a version: { isNull, isNull2, isDeleteMarker, versionId, otherInfo }
* The format of a version: { isNull, isDeleteMarker, versionId, otherInfo }
*
* @note Some of these functions are optimized based on string search
* prior to a full JSON parse/stringify. (Vinh: 18K op/s are achieved
@ -13,31 +13,24 @@ const VID_SEP = VersioningConstants.VersionId.Separator;
export class Version {
version: {
isNull?: boolean;
isNull2?: boolean;
isDeleteMarker?: boolean;
versionId?: string;
isPHD?: boolean;
nullVersionId?: string;
};
/**
* Create a new version instantiation from its data object.
* @param version - the data object to instantiate
* @param version.isNull - is a null version
* @param version.isNull2 - whether the version is a null version AND has
* been put with a Cloudserver handling null keys (i.e. supporting
* S3C-7352)
* @param version.isDeleteMarker - is a delete marker
* @param version.versionId - the version id
* @constructor
*/
constructor(version?: {
isNull?: boolean;
isNull2?: boolean;
isDeleteMarker?: boolean;
versionId?: string;
isPHD?: boolean;
nullVersionId?: string;
}) {
this.version = version || {};
}
@ -90,33 +83,6 @@ export class Version {
return `{ "isPHD": true, "versionId": "${versionId}" }`;
}
/**
* Appends a key-value pair to a JSON object represented as a string. It adds
* a comma if the object is not empty (i.e., not just '{}'). It assumes the input
* string is formatted as a JSON object.
*
* @param {string} stringifiedObject The JSON object as a string to which the key-value pair will be appended.
* @param {string} key The key to append to the JSON object.
* @param {string} value The value associated with the key to append to the JSON object.
* @returns {string} The updated JSON object as a string with the new key-value pair appended.
* @example
* _jsonAppend('{"existingKey":"existingValue"}', 'newKey', 'newValue');
* // returns '{"existingKey":"existingValue","newKey":"newValue"}'
*/
static _jsonAppend(stringifiedObject: string, key: string, value: string): string {
// stringifiedObject value has the format of '{...}'
let index = stringifiedObject.length - 2;
while (stringifiedObject.charAt(index) === ' ') {
index -= 1;
}
const needComma = stringifiedObject.charAt(index) !== '{';
return (
`${stringifiedObject.slice(0, stringifiedObject.length - 1)}` +
(needComma ? ',' : '') +
`"${key}":"${value}"}`
);
}
/**
* Put versionId into an object in the (cheap) way of string manipulation,
* instead of the more expensive alternative of parsing and stringifying it.
@ -127,32 +93,14 @@ export class Version {
*/
static appendVersionId(value: string, versionId: string): string {
// assuming value has the format of '{...}'
return Version._jsonAppend(value, 'versionId', versionId);
}
/**
* Updates or appends a `nullVersionId` property to a JSON-formatted string.
* This function first checks if the `nullVersionId` property already exists within the input string.
* If it exists, the function updates the `nullVersionId` with the new value provided.
* If it does not exist, the function appends a `nullVersionId` property with the provided value.
*
* @static
* @param {string} value - The JSON-formatted string that may already contain a `nullVersionId` property.
* @param {string} nullVersionId - The new value for the `nullVersionId` property to be updated or appended.
* @returns {string} The updated JSON-formatted string with the new `nullVersionId` value.
*/
static updateOrAppendNullVersionId(value: string, nullVersionId: string): string {
// Check if "nullVersionId" already exists in the string
const nullVersionIdPattern = /"nullVersionId":"[^"]*"/;
const nullVersionIdExists = nullVersionIdPattern.test(value);
if (nullVersionIdExists) {
// Replace the existing nullVersionId with the new one
return value.replace(nullVersionIdPattern, `"nullVersionId":"${nullVersionId}"`);
} else {
// Append nullVersionId
return Version._jsonAppend(value, 'nullVersionId', nullVersionId);
}
let index = value.length - 2;
while (value.charAt(index--) === ' ');
const comma = value.charAt(index + 1) !== '{';
return (
`${value.slice(0, value.length - 1)}` + // eslint-disable-line
(comma ? ',' : '') +
`"versionId":"${versionId}"}`
);
}
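// A minimal sketch of both helpers (hypothetical values, not taken from the
// test suite): appendVersionId() splices the property into the serialized
// object without a parse/stringify round-trip, and
// updateOrAppendNullVersionId() updates the property in place when it is
// already present:
//
//   Version.appendVersionId('{"foo":"bar"}', 'v1')
//   // => '{"foo":"bar","versionId":"v1"}'
//   Version.updateOrAppendNullVersionId('{"nullVersionId":"old"}', 'new')
//   // => '{"nullVersionId":"new"}'
//   Version.updateOrAppendNullVersionId('{"isNull":true}', 'new')
//   // => '{"isNull":true,"nullVersionId":"new"}'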
/**
@ -173,19 +121,6 @@ export class Version {
return this.version.isNull ?? false;
}
/**
* Check if a version is a null version and has
* been put with a Cloudserver handling null keys (i.e. supporting
* S3C-7352).
*
* @return - stating if the value is a null version and has
* been put with a Cloudserver handling null keys (i.e. supporting
* S3C-7352).
*/
isNull2Version(): boolean {
return this.version.isNull2 ?? false;
}
/**
* Check if a stringified object is a delete marker.
*
@ -255,19 +190,6 @@ export class Version {
return this;
}
/**
* Mark that the null version has been put with a Cloudserver handling null keys (i.e. supporting S3C-7352)
*
* If `isNull2` is set, `isNull` is also set to maintain consistency.
* Explicitly setting both avoids misunderstandings and mistakes in future updates or fixes.
* @return - the updated version
*/
setNull2Version() {
this.version.isNull2 = true;
this.version.isNull = true;
return this;
}
/**
* Serialize the version.
*


@ -1,8 +1,6 @@
import { RequestLogger } from 'werelogs';
import errors, { ArsenalError } from '../errors';
import { Version } from './Version';
import { generateVersionId as genVID, getInfVid } from './VersionID';
import { generateVersionId as genVID } from './VersionID';
import WriteCache from './WriteCache';
import WriteGatheringManager from './WriteGatheringManager';
@ -24,11 +22,11 @@ function getPrefixUpperBoundary(prefix: string): string {
return prefix;
}
function formatVersionKey(key: string, versionId: string): string {
function formatVersionKey(key: string, versionId: string) {
return `${key}${VID_SEP}${versionId}`;
}
function formatCacheKey(db: string, key: string): string {
function formatCacheKey(db: string, key: string) {
// using double VID_SEP to make sure the cache key is unique
return `${db}${VID_SEP}${VID_SEP}${key}`;
}
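// For illustration, with VID_SEP rendered as '\0' (its value in
// VersioningConstants) and hypothetical inputs:
//   formatVersionKey('obj', 'v1')     => 'obj\0v1'
//   formatVersionKey('obj', '')       => 'obj\0'      (the "null key")
//   formatCacheKey('mybucket', 'obj') => 'mybucket\0\0obj'
// The doubled separator keeps cache keys distinct from any version key.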
@ -91,10 +89,8 @@ export default class VersioningRequestProcessor {
callback: (error: ArsenalError | null, data?: any) => void,
) {
const { db, key, options } = request;
logger.addDefaultFields({ bucket: db, key, options });
if (options && options.versionId) {
const keyVersionId = options.versionId === 'null' ? '' : options.versionId;
const versionKey = formatVersionKey(key, keyVersionId);
const versionKey = formatVersionKey(key, options.versionId);
return this.wgm.get({ db, key: versionKey }, logger, callback);
}
return this.wgm.get(request, logger, (err, data) => {
@ -105,82 +101,13 @@ export default class VersioningRequestProcessor {
if (!Version.isPHD(data)) {
return callback(null, data);
}
logger.debug('master version is a PHD, getting the latest version');
logger.debug('master version is a PHD, getting the latest version',
{ db, key });
// otherwise, need to search for the latest version
return this.getByListing(request, logger, callback);
});
}
/**
* Helper that lists version keys for a certain object key,
* sorted by version ID. If a null key exists for this object, it is
* sorted at the appropriate position by its internal version ID, and
* its internal version ID is appended to its key.
*
* @param {string} db - bucket name
* @param {string} key - object key
* @param {object} [options] - options object
* @param {number} [options.limit] - max version keys returned
* (returns all object version keys if not specified)
* @param {object} logger - logger of the request
* @param {function} callback - callback(err, {object|null} master, {array} versions)
* master: { key, value }
* versions: [{ key, value }, ...]
* @return {undefined}
*/
listVersionKeys(db, key, options, logger, callback) {
const { limit } = options || {};
const listingParams: any = {};
let nullKeyLength;
// include master key in v0 listing
listingParams.gte = key;
listingParams.lt = `${key}${VID_SEPPLUS}`;
if (limit !== undefined) {
// may have to skip master + null key, so 2 extra to list in the worst case
listingParams.limit = limit + 2;
}
nullKeyLength = key.length + 1;
return this.wgm.list({
db,
params: listingParams,
}, logger, (err, rawVersions) => {
if (err) {
return callback(err);
}
if (rawVersions.length === 0) {
// object does not have any version key
return callback(null, null, []);
}
let versions = rawVersions;
let master;
// in v0 there is always a master key before versions
master = versions.shift();
if (versions.length === 0) {
return callback(null, master, []);
}
const firstItem = versions[0];
if (firstItem.key.length === nullKeyLength) {
// first version is the null key
const nullVersion = Version.from(firstItem.value);
const nullVersionKey = formatVersionKey(key, <string> nullVersion.getVersionId());
// find null key's natural versioning order in the list
let nullPos = versions.findIndex(item => item.key > nullVersionKey);
if (nullPos === -1) {
nullPos = versions.length;
}
// move null key at the correct position and append its real version ID to the key
versions = versions.slice(1, nullPos)
.concat([{ key: nullVersionKey, value: firstItem.value, isNullKey: true }])
.concat(versions.slice(nullPos));
}
if (limit !== undefined) {
// truncate versions to 'limit' entries
versions.splice(limit);
}
return callback(null, master, versions);
});
}
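// A sketch of the expected callback output for an object with two regular
// versions plus a null key (hypothetical IDs; versions are ordered by their
// internal version IDs, and the null key is re-sorted into place with its
// real version ID appended to its key):
//   master:   { key: 'obj', value: '{...}' }
//   versions: [
//     { key: 'obj\0v1', value: '{...}' },
//     { key: 'obj\0v2', value: '{...}', isNullKey: true },
//   ]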
/**
* Get the latest version of an object when the master version is a place
* holder for deletion. For any given pair of db and key, only a
@ -205,39 +132,39 @@ export default class VersioningRequestProcessor {
if (!this.enqueueGet(request, logger, callback)) {
return null;
}
logger.info('start listing latest versions');
logger.info('start listing latest versions', { request });
// otherwise, search for the latest version
const cacheKey = formatCacheKey(request.db, request.key);
clearTimeout(this.repairing[cacheKey]);
delete this.repairing[cacheKey];
return this.listVersionKeys(request.db, request.key, {
limit: 1,
}, logger, (err, master, versions) => {
logger.info('listing latest versions done', { err, master, versions });
const req = { db: request.db, params: {
gte: request.key, lt: `${request.key}${VID_SEPPLUS}`, limit: 2 } };
return this.wgm.list(req, logger, (err, list) => {
logger.info('listing latest versions done', { err, list });
if (err) {
return this.dequeueGet(request, err);
}
if (!master) {
// the complete list of versions is always: mst, v1, v2, ...
if (list.length === 0) {
return this.dequeueGet(request, errors.ObjNotFound);
}
if (!Version.isPHD(master.value)) {
return this.dequeueGet(request, null, master.value);
if (!Version.isPHD(list[0].value)) {
return this.dequeueGet(request, null, list[0].value);
}
if (versions.length === 0) {
logger.info('no other versions');
if (list.length === 1) {
logger.info('no other versions', { request });
this.dequeueGet(request, errors.ObjNotFound);
return this.repairMaster(request, logger,
{ type: 'del', value: master.value });
{ type: 'del',
value: list[0].value });
}
// need repair
logger.info('update master by the latest version');
const next = {
value: versions[0].value,
isNullKey: versions[0].isNullKey,
};
this.dequeueGet(request, null, next.value);
logger.info('update master by the latest version', { request });
const nextValue = list[1].value;
this.dequeueGet(request, null, nextValue);
return this.repairMaster(request, logger,
{ type: 'put', value: master.value, next });
{ type: 'put', value: list[0].value,
nextValue });
});
}
@ -300,60 +227,42 @@ export default class VersioningRequestProcessor {
* RepdConnection format { db, key
* [, value][, type], method, options }
* @param logger - logger
* @param {object} data - storing repairing hints
* @param {string} data.value - existing value of the master version (PHD)
* @param {object} data.next - the suggested latest version
* @param {string} data.next.value - the suggested latest version value
* @param {boolean} data.next.isNullKey - whether the suggested
* latest version is a null key
* @param hints - storing repairing hints
* @param hints.type - type of repair operation ('put' or 'del')
* @param hints.value - existing value of the master version (PHD)
* @param hints.nextValue - the suggested latest version (for 'put')
* @return - to finish the call
*/
repairMaster(request: any, logger: RequestLogger, data: {
repairMaster(request: any, logger: RequestLogger, hints: {
type: 'put' | 'del';
value: string;
next?: {
value: string;
isNullKey: boolean;
};
nextValue?: string;
}) {
const { db, key } = request;
logger.info('start repair process');
logger.info('start repair process', { request });
this.writeCache.get({ db, key }, logger, (err, value) => {
// error or the new version is not a place holder for deletion
if (err) {
if (err.is.ObjNotFound) {
return logger.debug('did not repair master: PHD was deleted');
} else {
return logger.error('error repairing', { error: err });
}
return logger.info('error repairing', { request, error: err });
}
if (!Version.isPHD(value)) {
return logger.debug('master is updated already');
return logger.debug('master is updated already', { request });
}
// the latest version is the same place holder for deletion
if (data.value === value) {
if (hints.value === value) {
// update the latest version with the next version
const ops: any = [];
if (data.next) {
ops.push({ key, value: data.next.value });
// cleanup the null key if it is the new master
if (data.next.isNullKey) {
ops.push({ key: formatVersionKey(key, ''), type: 'del' });
}
} else {
ops.push({ key, type: 'del' });
}
const repairRequest = {
db,
array: ops,
};
array: [
{ type: hints.type, key, value: hints.nextValue },
] };
logger.info('replicate repair request', { repairRequest });
return this.writeCache.batch(repairRequest, logger, () => {});
}
// The latest version is an updated place holder for deletion,
// repeat the repair process from listing for latest versions.
// The queue will ensure single repair process at any moment.
logger.info('latest version is an updated PHD');
return this.getByListing(request, logger, () => {});
});
}
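// A sketch of the repair batch written when the PHD master must be replaced
// by the suggested latest version (hypothetical keys): the newer code also
// deletes the null key when it becomes the new master, while the older code
// issues a single put or del on the master key:
//   { db, array: [{ key: 'obj', value: next.value },
//                 { key: 'obj\0', type: 'del' }] }   // next.isNullKey case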
@ -375,7 +284,6 @@ export default class VersioningRequestProcessor {
callback: (error: ArsenalError | null, data?: any) => void,
) {
const { db, key, value, options } = request;
logger.addDefaultFields({ bucket: db, key, options });
// valid combinations of versioning options:
// - !versioning && !versionId: normal non-versioning put
// - versioning && !versionId: create a new version
@ -429,7 +337,6 @@ export default class VersioningRequestProcessor {
versionId: string,
) => void,
) {
logger.info('process new version put');
// making a new versionId and a new version key
const versionId = this.generateVersionId();
const versionKey = formatVersionKey(request.key, versionId);
@ -458,22 +365,12 @@ export default class VersioningRequestProcessor {
logger: RequestLogger,
callback: (err: ArsenalError | null, data?: any, versionId?: string) => void,
) {
logger.info('process version specific put');
const { db, key } = request;
// versionId is empty: update the master version
if (request.options.versionId === '') {
const versionId = this.generateVersionId();
const value = Version.appendVersionId(request.value, versionId);
const ops: any = [{ key, value }];
if (request.options.deleteNullKey) {
const nullKey = formatVersionKey(key, '');
ops.push({ key: nullKey, type: 'del' });
}
return callback(null, ops, versionId);
}
if (request.options.versionId === 'null') {
const nullKey = formatVersionKey(key, '');
return callback(null, [{ key: nullKey, value: request.value }], 'null');
return callback(null, [{ key, value }], versionId);
}
// need to get the master version to check if this is the master version
this.writeCache.get({ db, key }, logger, (err, data) => {
@ -481,115 +378,14 @@ export default class VersioningRequestProcessor {
return callback(err);
}
const versionId = request.options.versionId;
const versionKey = formatVersionKey(key, versionId);
const ops: any = [];
const masterVersion = data !== undefined &&
Version.from(data);
// push a version key if we're not updating the null
// version (or in legacy Cloudservers not sending the
// 'isNull' parameter, but this has an issue, see S3C-7526)
if (request.options.isNull !== true) {
const versionOp = { key: versionKey, value: request.value };
ops.push(versionOp);
}
if (masterVersion) {
// master key exists
// note that older versions have a greater version ID
const versionIdFromMaster = masterVersion.getVersionId();
if (versionIdFromMaster === undefined ||
versionIdFromMaster >= versionId) {
let value = request.value;
logger.debug('version to put is not older than master');
// Delete the deprecated, null key for backward compatibility
// to avoid storing both deprecated and new null keys.
// If master null version was put with an older Cloudserver (or in compat mode),
// there is a possibility that it also has a null versioned key
// associated, so we need to delete it as we write the null key.
// Deprecated null key gets deleted when the new CloudServer:
// - updates metadata of a null master (options.isNull=true)
// - puts metadata on top of a master null key (options.isNull=false)
if (request.options.isNull !== undefined && // new null key behavior when isNull is defined.
masterVersion.isNullVersion() && // master is null
!masterVersion.isNull2Version()) { // master does not support the new null key behavior yet.
const masterNullVersionId = masterVersion.getVersionId();
// The deprecated null key is referenced in the "versionId" property of the master key.
if (masterNullVersionId) {
const oldNullVersionKey = formatVersionKey(key, masterNullVersionId);
ops.push({ key: oldNullVersionKey, type: 'del' });
}
}
// new behavior when isNull is defined is to only
// update the master key if it is the latest
// version, old behavior needs to copy master to
// the null version because older Cloudservers
// rely on version-specific PUT to copy master
// contents to a new null version key (newer ones
// use special versionId="null" requests for this
// purpose).
if (versionIdFromMaster !== versionId ||
request.options.isNull === undefined) {
// master key is strictly older than the put version
let masterVersionId;
if (masterVersion.isNullVersion() && versionIdFromMaster) {
logger.debug('master key is a null version');
masterVersionId = versionIdFromMaster;
} else if (versionIdFromMaster === undefined) {
logger.debug('master key is nonversioned');
// master key does not have a versionID
// => create one with the "infinite" version ID
masterVersionId = getInfVid(this.replicationGroupId);
masterVersion.setVersionId(masterVersionId);
} else {
logger.debug('master key is a regular version');
}
if (request.options.isNull === true) {
if (!masterVersionId) {
// master is a regular version: delete the null key that
// may exist (older null version)
logger.debug('delete null key');
const nullKey = formatVersionKey(key, '');
ops.push({ key: nullKey, type: 'del' });
}
} else if (masterVersionId) {
logger.debug('create version key from master version');
// isNull === false means Cloudserver supports null keys,
// so create a null key in this case, and a version key otherwise
const masterKeyVersionId = request.options.isNull === false ?
'' : masterVersionId;
const masterVersionKey = formatVersionKey(key, masterKeyVersionId);
masterVersion.setNullVersion();
// isNull === false means Cloudserver supports null keys,
// so create a null key with the isNull2 flag
if (request.options.isNull === false) {
masterVersion.setNull2Version();
// else isNull === undefined means Cloudserver does not support null keys,
// and versionIdFromMaster !== versionId means that a version is PUT on top of a null version
// hence set/update the new master nullVersionId for backward compatibility
} else if (versionIdFromMaster !== versionId) {
// => set the nullVersionId to the master version if put version on top of null version.
value = Version.updateOrAppendNullVersionId(request.value, masterVersionId);
}
ops.push({ key: masterVersionKey,
value: masterVersion.toString() });
}
} else {
logger.debug('version to put is the master');
}
ops.push({ key, value: value });
} else {
logger.debug('version to put is older than master');
if (request.options.isNull === true && !masterVersion.isNullVersion()) {
logger.debug('create or update null key');
const nullKey = formatVersionKey(key, '');
const nullKeyOp = { key: nullKey, value: request.value };
ops.push(nullKeyOp);
// for backward compatibility: remove null version key
ops.push({ key: versionKey, type: 'del' });
}
}
} else {
// master key does not exist: create it
ops.push({ key, value: request.value });
const versionKey = formatVersionKey(request.key, versionId);
const ops = [{ key: versionKey, value: request.value }];
if (data === undefined ||
(Version.from(data).getVersionId() ?? '') >= versionId) {
// master does not exist or is not newer than put
// version and needs to be updated as well.
// Note that older versions have a greater version ID.
ops.push({ key: request.key, value: request.value });
}
return callback(null, ops, versionId);
});
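// In the older code path just above, a version-specific put where the master
// is missing or not newer yields two operations (hypothetical key and ID):
//   ops = [
//     { key: 'obj\0v1', value: request.value },  // the version key itself
//     { key: 'obj',     value: request.value },  // master updated alongside
//   ]
// The newer code path builds a richer batch to maintain null keys and the
// nullVersionId back-reference, as the branches above show.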
@ -603,10 +399,8 @@ export default class VersioningRequestProcessor {
callback: (err: ArsenalError | null, data?: any) => void,
) {
const { db, key, options } = request;
logger.addDefaultFields({ bucket: db, key, options });
// no versioning or versioning configuration off
if (!(options && options.versionId)) {
logger.info('process non-versioned delete');
return this.writeCache.batch({ db,
array: [{ key, type: 'del' }] },
logger, callback);
@ -644,12 +438,7 @@ export default class VersioningRequestProcessor {
versionId?: string,
) => void,
) {
logger.info('process version specific delete');
const { db, key, options } = request;
if (options.versionId === 'null') {
const nullKey = formatVersionKey(key, '');
return callback(null, [{ key: nullKey, type: 'del' }], 'null');
}
// deleting a specific version
this.writeCache.get({ db, key }, logger, (err, data) => {
if (err && !err.is.ObjNotFound) {
@ -657,8 +446,7 @@ export default class VersioningRequestProcessor {
}
// delete the specific version
const versionId = options.versionId;
const keyVersionId = options.isNull ? '' : versionId;
const versionKey = formatVersionKey(key, keyVersionId);
const versionKey = formatVersionKey(key, versionId);
const ops: any = [{ key: versionKey, type: 'del' }];
// update the master version as PHD if it is the deleting version
if (Version.isPHD(data) ||


@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import errors, { ArsenalError } from '../errors';
import WriteGatheringManager from './WriteGatheringManager';


@ -1,5 +1,3 @@
import { RequestLogger } from 'werelogs';
import { ArsenalError } from '../errors';
const WG_TIMEOUT = 5; // batching period in milliseconds


@ -3,7 +3,7 @@
"engines": {
"node": ">=16"
},
"version": "8.1.134",
"version": "8.1.82",
"description": "Common utilities for the S3 project components",
"main": "build/index.js",
"repository": {
@ -19,38 +19,39 @@
"dependencies": {
"@azure/identity": "^3.1.1",
"@azure/storage-blob": "^12.12.0",
"@js-sdsl/ordered-set": "^4.4.2",
"@swc/cli": "^0.4.0",
"@swc/core": "^1.7.4",
"@types/async": "^3.2.12",
"@types/utf8": "^3.0.1",
"JSONStream": "^1.0.0",
"agentkeepalive": "^4.1.3",
"ajv": "^6.12.3",
"async": "^2.6.4",
"ajv": "6.12.3",
"async": "~2.6.4",
"aws-sdk": "^2.1005.0",
"backo": "^1.1.0",
"base-x": "^3.0.8",
"base62": "^2.0.1",
"bson": "^4.0.0",
"debug": "^4.1.0",
"base-x": "3.0.8",
"base62": "2.0.1",
"bson": "4.0.0",
"debug": "~4.1.0",
"diskusage": "^1.1.1",
"fcntl": "git+https://git.yourcmc.ru/vitalif/zenko-fcntl.git",
"httpagent": "git+https://git.yourcmc.ru/vitalif/zenko-httpagent.git#development/1.0",
"fcntl": "github:scality/node-fcntl#0.2.0",
"hdclient": "scality/hdclient#1.1.5",
"httpagent": "scality/httpagent#1.0.6",
"https-proxy-agent": "^2.2.0",
"ioredis": "^4.28.5",
"ipaddr.js": "^1.9.1",
"ipaddr.js": "1.9.1",
"joi": "^17.6.0",
"JSONStream": "^1.0.0",
"level": "^5.0.1",
"level-sublevel": "^6.6.5",
"mongodb": "^5.2.0",
"level": "~5.0.1",
"level-sublevel": "~6.6.5",
"mongodb": "^3.0.1",
"node-forge": "^1.3.0",
"prom-client": "^14.2.0",
"prom-client": "10.2.3",
"simple-glob": "^0.2.0",
"socket.io": "^4.6.1",
"socket.io-client": "^4.6.1",
"utf8": "^3.0.0",
"socket.io": "2.4.1",
"socket.io-client": "2.4.0",
"sproxydclient": "scality/sproxydclient#8.0.7",
"utf8": "3.0.0",
"uuid": "^3.0.1",
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
"xml2js": "^0.4.23"
"werelogs": "scality/werelogs#8.1.2",
"xml2js": "~0.4.23"
},
"optionalDependencies": {
"ioctl": "^2.0.2"
@ -59,24 +60,22 @@
"@babel/preset-env": "^7.16.11",
"@babel/preset-typescript": "^7.16.7",
"@sinonjs/fake-timers": "^6.0.1",
"@types/async": "^3.2.12",
"@types/utf8": "^3.0.1",
"@types/ioredis": "^4.28.10",
"@types/jest": "^27.4.1",
"@types/node": "^18.19.41",
"@types/node": "^17.0.21",
"@types/xml2js": "^0.4.11",
"eslint": "^8.14.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
"eslint": "^8.12.0",
"eslint-config-airbnb": "6.2.0",
"eslint-config-scality": "scality/Guidelines#ec33dfb",
"eslint-plugin-react": "^4.3.0",
"jest": "^27.5.1",
"mongodb-memory-server": "^8.12.2",
"mongodb-memory-server": "^6.0.2",
"nyc": "^15.1.0",
"sinon": "^9.0.2",
"temp": "^0.9.1",
"temp": "0.9.1",
"ts-jest": "^27.1.3",
"ts-node": "^10.6.0",
"typescript": "^4.9.5"
"typescript": "^4.6.2"
},
"scripts": {
"lint": "eslint $(git ls-files '*.js')",
@ -84,11 +83,10 @@
"lint_yml": "yamllint $(git ls-files '*.yml')",
"test": "jest tests/unit",
"build": "tsc",
"prepack": "tsc",
"postinstall": "[ -d build ] || swc -d build --copy-files package.json index.ts lib",
"prepare": "yarn build",
"ft_test": "jest tests/functional --testTimeout=120000 --forceExit",
"coverage": "nyc --clean jest tests --coverage --testTimeout=120000 --forceExit",
"build_doc": "cd documentation/listingAlgos/pics; dot -Tsvg delimiterStateChart.dot > delimiterStateChart.svg; dot -Tsvg delimiterMasterV0StateChart.dot > delimiterMasterV0StateChart.svg; dot -Tsvg delimiterVersionsStateChart.dot > delimiterVersionsStateChart.svg"
"build_doc": "cd documentation/listingAlgos/pics; dot -Tsvg delimiterStateChart.dot > delimiterStateChart.svg; dot -Tsvg delimiterMasterV0StateChart.dot > delimiterMasterV0StateChart.svg"
},
"private": true,
"jest": {


@ -1,356 +0,0 @@
const async = require('async');
const assert = require('assert');
const cluster = require('cluster');
const http = require('http');
const errors = require('../../../build/lib/errors').default;
const {
setupRPCPrimary,
setupRPCWorker,
sendWorkerCommand,
getPendingCommandsCount,
} = require('../../../build/lib/clustering/ClusterRPC');
/* eslint-disable prefer-const */
let SERVER_PORT;
let N_WORKERS;
/* eslint-enable prefer-const */
/* eslint-disable no-console */
function genUIDS() {
return Math.trunc(Math.random() * 0x10000).toString(16);
}
// for testing robustness: regularly pollute the message channel with
// unrelated IPC messages
function sendPollutionMessage(message) {
if (cluster.isPrimary) {
// cluster.workers is an object keyed by worker id, so pick a random id
const workerIds = Object.keys(cluster.workers);
const randomWorkerId = workerIds[Math.trunc(Math.random() * workerIds.length)];
const worker = cluster.workers[randomWorkerId];
if (worker) {
worker.send(message);
}
} else {
process.send(message);
}
}
const ipcPolluterIntervals = [
setInterval(
() => sendPollutionMessage('string pollution'), 1500),
setInterval(
() => sendPollutionMessage({ pollution: 'bar' }), 2321),
setInterval(
() => sendPollutionMessage({ type: 'pollution', foo: { bar: 'baz' } }), 2777),
];
function someTestHandlerFunc(payload, uids, callback) {
setTimeout(() => callback(null, { someResponsePayload: 'bar' }), 10);
}
function testHandlerWithFailureFunc(payload, uids, callback) {
setTimeout(() => {
// exactly one of the workers fails to execute this command
if (cluster.worker.id === 1) {
callback(errors.ServiceFailure);
} else {
callback(null, { someResponsePayload: 'bar' });
}
}, 10);
}
const rpcHandlers = {
SomeTestHandler: someTestHandlerFunc,
TestHandlerWithFailure: testHandlerWithFailureFunc,
TestHandlerWithNoResponse: () => {},
};
const primaryHandlers = {
echoHandler: (worker, payload, uids, callback) => {
callback(null, { workerId: worker.id, payload, uids });
},
errorWithHttpCodeHandler: (_worker, _payload, _uids, callback) => {
callback({ name: 'ErrorMock', code: 418, message: 'An error message from primary' });
},
};
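// For orientation (signatures as used in this file): a worker-side handler
// receives (payload, uids, callback), a primary-side handler receives
// (worker, payload, uids, callback), and the tests below drive them with
// e.g.:
//   sendWorkerCommand('*', 'SomeTestHandler', genUIDS(), {});
//   sendWorkerCommand('PRIMARY', 'echoHandler', uids, payload);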
function respondOnTestFailure(message, error, results) {
console.error('After sendWorkerCommand() resolve/reject: ' +
`${message}, error=${error}, results=${JSON.stringify(results)}`);
console.trace();
throw errors.InternalError;
}
async function successfulCommandTestGeneric(nWorkers) {
try {
const results = await sendWorkerCommand('*', 'SomeTestHandler', genUIDS(), {});
if (results.length !== nWorkers) {
return respondOnTestFailure(
`expected ${nWorkers} worker results, got ${results.length}`,
null, results);
}
for (const result of results) {
if (typeof result !== 'object' || result === null) {
return respondOnTestFailure('not all results are objects', null, results);
}
if (result.error !== null) {
return respondOnTestFailure(
'one or more workers had an unexpected error',
null, results);
}
if (typeof result.result !== 'object' || result.result === null) {
return respondOnTestFailure(
'one or more workers did not return a result object',
null, results);
}
if (result.result.someResponsePayload !== 'bar') {
return respondOnTestFailure(
'one or more workers did not return the expected payload',
null, results);
}
}
return undefined;
} catch (err) {
return respondOnTestFailure(`returned unexpected error ${err}`, err, null);
}
}
async function successfulCommandTest() {
return successfulCommandTestGeneric(N_WORKERS);
}
async function successfulCommandWithExtraWorkerTest() {
return successfulCommandTestGeneric(N_WORKERS + 1);
}
async function unsupportedToWorkersTest() {
try {
const results = await sendWorkerCommand('badToWorkers', 'SomeTestHandler', genUIDS(), {});
return respondOnTestFailure('expected an error', null, results);
} catch (err) {
if (!err.is.NotImplemented) {
return respondOnTestFailure('expected a NotImplemented error', err, null);
}
return undefined;
}
}
async function unsupportedHandlerTest() {
try {
const results = await sendWorkerCommand('*', 'AWrongTestHandler', genUIDS(), {});
if (results.length !== N_WORKERS) {
return respondOnTestFailure(
`expected ${N_WORKERS} worker results, got ${results.length}`,
null, results);
}
for (const result of results) {
if (typeof result !== 'object' || result === null) {
return respondOnTestFailure('not all results are objects', null, results);
}
if (result.error === null || !result.error.is.NotImplemented) {
return respondOnTestFailure(
'one or more workers did not return the expected NotImplemented error',
null, results);
}
}
return undefined;
} catch (err) {
return respondOnTestFailure(`returned unexpected error ${err}`, err, null);
}
}
async function missingUidsTest() {
try {
const results = await sendWorkerCommand('*', 'SomeTestHandler', undefined, {});
return respondOnTestFailure('expected an error', null, results);
} catch (err) {
if (!err.is.MissingParameter) {
return respondOnTestFailure('expected a MissingParameter error', err, null);
}
return undefined;
}
}
async function duplicateUidsTest() {
const dupUIDS = genUIDS();
const promises = [
sendWorkerCommand('*', 'SomeTestHandler', dupUIDS, {}),
sendWorkerCommand('*', 'SomeTestHandler', dupUIDS, {}),
];
const results = await Promise.allSettled(promises);
if (results[1].status !== 'rejected') {
return respondOnTestFailure('expected an error from the second call', null, null);
}
if (!results[1].reason.is.OperationAborted) {
return respondOnTestFailure(
'expected a OperationAborted error', results[1].reason, null);
}
return undefined;
}
async function unsuccessfulWorkerTest() {
try {
const results = await sendWorkerCommand('*', 'TestHandlerWithFailure', genUIDS(), {});
if (results.length !== N_WORKERS) {
return respondOnTestFailure(
`expected ${N_WORKERS} worker results, got ${results.length}`,
null, results);
}
const nServiceFailures = results.filter(result => (
result.error && result.error.is.ServiceFailure
)).length;
if (nServiceFailures !== 1) {
return respondOnTestFailure(
'expected exactly one worker result to be ServiceFailure error',
null, results);
}
return undefined;
} catch (err) {
return respondOnTestFailure(`returned unexpected error ${err}`, err, null);
}
}
async function workerTimeoutTest() {
try {
const results = await sendWorkerCommand(
'*', 'TestHandlerWithNoResponse', genUIDS(), {}, 1000);
return respondOnTestFailure('expected an error', null, results);
} catch (err) {
if (!err.is.RequestTimeout) {
return respondOnTestFailure('expected a RequestTimeout error', err, null);
}
return undefined;
}
}
async function workerToPrimaryEcho() {
const uids = genUIDS();
const payload = { testing: true };
const expected = { workerId: cluster.worker.id, payload, uids };
const results = await sendWorkerCommand('PRIMARY', 'echoHandler', uids, payload);
assert.strictEqual(results.length, 1, 'There is 1 and only 1 primary');
assert.ifError(results[0].error);
assert.deepStrictEqual(results[0].result, expected);
}
async function workerToPrimaryErrorWithHttpCode() {
const uids = genUIDS();
const payload = { testing: true };
const results = await sendWorkerCommand('PRIMARY', 'errorWithHttpCodeHandler', uids, payload);
assert.strictEqual(results.length, 1, 'There is 1 and only 1 primary');
assert.ok(results[0].error);
assert.strictEqual(results[0].error.message, 'An error message from primary');
assert.strictEqual(results[0].error.code, 418);
}
const TEST_URLS = {
'/successful-command': successfulCommandTest,
'/successful-command-with-extra-worker': successfulCommandWithExtraWorkerTest,
'/unsupported-to-workers': unsupportedToWorkersTest,
'/unsupported-handler': unsupportedHandlerTest,
'/missing-uids': missingUidsTest,
'/duplicate-uids': duplicateUidsTest,
'/unsuccessful-worker': unsuccessfulWorkerTest,
'/worker-timeout': workerTimeoutTest,
'/worker-to-primary/echo': workerToPrimaryEcho,
'/worker-to-primary/error-with-http-code': workerToPrimaryErrorWithHttpCode,
};
if (process.argv.length !== 4) {
console.error('ClusterRPC test server: GET requests on test URLs trigger test runs\n\n' +
'Usage: node ClusterRPC-test-server.js <port> <nb-workers>\n\n' +
'Available test URLs:');
console.error(`${Object.keys(TEST_URLS).map(url => `- ${url}\n`).join('')}`);
process.exit(2);
}
/* eslint-disable prefer-const */
[
SERVER_PORT,
N_WORKERS,
] = process.argv.slice(2, 4).map(value => Number.parseInt(value, 10));
/* eslint-enable prefer-const */
let server;
if (cluster.isPrimary) {
async.timesSeries(
N_WORKERS,
(i, wcb) => cluster.fork().on('online', wcb),
() => {
setupRPCPrimary(primaryHandlers);
},
);
} else {
// in worker
server = http.createServer((req, res) => {
if (req.url in TEST_URLS) {
return TEST_URLS[req.url]().then(() => {
if (getPendingCommandsCount() !== 0) {
console.error(`There are still ${getPendingCommandsCount()} pending ` +
`RPC commands after test ${req.url} completed`);
throw errors.InternalError;
}
res.writeHead(200);
res.end();
}).catch(err => {
// serialize AssertionError to be displayed nicely in jest
if (err instanceof assert.AssertionError) {
const serializedErr = JSON.stringify({
code: err.code,
message: err.message,
stack: err.stack,
actual: err.actual,
expected: err.expected,
operator: err.operator,
});
res.writeHead(500);
res.end(serializedErr);
} else {
res.writeHead(err.code || 500);
res.end(err.message);
}
});
}
console.error(`Invalid test URL ${req.url}`);
res.writeHead(400);
res.end();
return undefined;
});
server.listen(SERVER_PORT);
server.on('listening', () => {
console.log('Worker is listening');
});
setupRPCWorker(rpcHandlers);
}
function stop(signal) {
if (cluster.isPrimary) {
console.log(`Handling signal ${signal}`);
for (const worker of Object.values(cluster.workers)) {
worker.kill(signal);
worker.on('exit', () => {
console.log(`Worker ${worker.id} exited`);
});
}
}
for (const interval of ipcPolluterIntervals) {
clearInterval(interval);
}
}
process.on('SIGTERM', stop);
process.on('SIGINT', stop);
process.on('SIGPIPE', () => {});
// for testing: spawn a new worker each time SIGUSR1 is received
function spawnNewWorker() {
if (cluster.isPrimary) {
cluster.fork();
}
}
process.on('SIGUSR1', spawnNewWorker);


@ -1,151 +0,0 @@
'use strict'; // eslint-disable-line
const assert = require('assert');
const http = require('http');
const readline = require('readline');
const spawn = require('child_process').spawn;
const TEST_SERVER_PORT = 8800;
const NB_WORKERS = 4;
let testServer = null;
/*
* jest tests don't correctly support cluster mode with child forked
* processes; instead, we use an external test server that launches
* each test based on the provided URL, and returns either 200 for
* success or 500 for failure. A crash would also cause a failure
* from the client side.
*/
function startTestServer(done) {
testServer = spawn('node', [
`${__dirname}/ClusterRPC-test-server.js`,
TEST_SERVER_PORT,
NB_WORKERS,
]);
// gather server stderr to display test failures info
testServer.stdout.pipe(process.stdout);
testServer.stderr.pipe(process.stderr);
const rl = readline.createInterface({
input: testServer.stdout,
});
let nbListeningWorkers = 0;
rl.on('line', line => {
if (line === 'Worker is listening') {
nbListeningWorkers++;
if (nbListeningWorkers === NB_WORKERS) {
rl.close();
done();
}
}
});
}
function stopTestServer(done) {
testServer.kill('SIGTERM');
testServer.on('close', done);
}
/**
* Try to deserialize and recreate AssertionError with stackTrace from spawned server
* @param {string} responseBody possibly a serialized AssertionError
* @throws {assert.AssertionError}
* @returns {undefined}
*/
function handleAssertionError(responseBody) {
let parsed;
try {
parsed = JSON.parse(responseBody);
} catch (_) {
return;
}
if (parsed && parsed.code === 'ERR_ASSERTION') {
const err = new assert.AssertionError(parsed);
err.stack = parsed.stack;
throw err;
}
}
function runTest(testUrl, cb) {
const req = http.request(`http://localhost:${TEST_SERVER_PORT}/${testUrl}`, res => {
let responseBody = '';
res
.on('data', (chunk) => {
responseBody += chunk;
})
.on('end', () => {
try {
handleAssertionError(responseBody);
expect(res.statusCode).toEqual(200);
} catch (err) {
if (!(err instanceof assert.AssertionError)) {
err.message += `\n\nBody:\n${responseBody}`;
}
return cb(err);
}
return cb();
})
.on('error', err => cb(err));
});
req
.end()
.on('error', err => cb(err));
}
describe('ClusterRPC', () => {
beforeAll(done => startTestServer(done));
afterAll(done => stopTestServer(done));
it('should send a successful command to all workers', done => {
runTest('successful-command', done);
});
it('should error if "toWorkers" field is not "*"', done => {
runTest('unsupported-to-workers', done);
});
it('should error if handler name is not known', done => {
runTest('unsupported-handler', done);
});
it('should error if "uids" field is not passed', done => {
runTest('missing-uids', done);
});
it('should error if two simultaneous commands with same "uids" field are sent', done => {
runTest('duplicate-uids', done);
});
it('should timeout if one or more workers don\'t respond in allocated time', done => {
runTest('worker-timeout', done);
});
it('should return worker errors in results array', done => {
runTest('unsuccessful-worker', done);
});
it('should send a successful command to all workers after an extra worker is spawned', done => {
const rl = readline.createInterface({
input: testServer.stdout,
});
rl.on('line', line => {
if (line === 'Worker is listening') {
rl.close();
runTest('successful-command-with-extra-worker', done);
}
});
// The test server spawns a new worker when it receives SIGUSR1
testServer.kill('SIGUSR1');
});
describe('worker to primary', () => {
it('should succeed and return a result', done => {
runTest('worker-to-primary/echo', done);
});
it('should return an error with a code', done => {
runTest('worker-to-primary/error-with-http-code', done);
});
});
});


@ -44,37 +44,41 @@ describe('MongoClientInterface::metadata.deleteObjectMD', () => {
let collection;
function getObjectCount(cb) {
collection.countDocuments()
.then(count => cb(null, count))
.catch(err => cb(err));
collection.countDocuments((err, count) => {
if (err) {
    return cb(err);
}
return cb(null, count);
});
}
function getObject(key, cb) {
collection.findOne({
_id: key,
}, {}).then(doc => {
}, {}, (err, doc) => {
if (err) {
return cb(err);
}
if (!doc) {
return cb(errors.NoSuchKey);
}
return cb(null, doc.value);
}).catch(err => cb(err));
});
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27018',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27018',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
@ -454,48 +458,6 @@ describe('MongoClientInterface::metadata.deleteObjectMD', () => {
},
], done);
});
it('should delete the object directly if params.doesNotNeedOpogUpdate is true', done => {
const objName = 'object-to-delete';
const objVal = {
key: 'object-to-delete',
versionId: 'null',
};
const versionParams = {
versioning: false,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
metadata.deleteObjectMD(BUCKET_NAME, objName, { doesNotNeedOpogUpdate: true }, logger, next);
},
next => {
metadata.getObjectMD(BUCKET_NAME, objName, null, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 0);
return next();
});
},
], done);
});
it('should throw an error if params.doesNotNeedOpogUpdate is true and object does not exist', done => {
const objName = 'non-existent-object';
metadata.deleteObjectMD(BUCKET_NAME, objName, { doesNotNeedOpogUpdate: true }, logger, err => {
assert.deepStrictEqual(err, errors.InternalError);
return done();
});
});
});
});
});


@ -74,7 +74,13 @@ describe('MongoClientInterface::metadata.getObjectMD', () => {
{
$set: { _id: mKey, value: objVal },
},
{ upsert: true }).then(() => cb(null)).catch(err => cb(err));
{ upsert: true },
err => {
if (err) {
return cb(err);
}
return cb(null);
});
}
/**
@ -87,24 +93,22 @@ describe('MongoClientInterface::metadata.getObjectMD', () => {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false }).then(() => cb()).catch(err => cb(err));
{ upsert: false }, cb);
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27019',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27019',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});


@ -1,331 +0,0 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = versioning.VersionID.generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { formatMasterKey, formatVersionKey } = require('../../../../lib/storage/metadata/mongoclient/utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket-batching';
const replicationGroupId = 'RG001';
const N = 10;
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27019 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
let uidCounter = 0;
function generateVersionId() {
return genVID(`${process.pid}.${uidCounter++}`,
replicationGroupId);
}
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0, versioning: false },
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0, versioning: true },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1, versioning: false },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1, versioning: true },
];
describe('MongoClientInterface::metadata.getObjectsMD', () => {
let metadata;
let collection;
let versionId2;
const params = {
key: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
};
function updateMasterObject(objName, versionId, objVal, vFormat, cb) {
const mKey = formatMasterKey(objName, vFormat);
collection.updateOne(
{
_id: mKey,
$or: [{
'value.versionId': {
$exists: false,
},
},
{
'value.versionId': {
$gt: versionId,
},
},
],
},
{
$set: { _id: mKey, value: objVal },
},
{ upsert: true }).then(() => cb(null)).catch(err => cb(err));
}
/**
* Sets the "deleted" property to true
* @param {string} key object name
* @param {Function} cb callback
* @return {undefined}
*/
function flagObjectForDeletion(key, cb) {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false }).then(() => cb()).catch(err => cb(err));
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27019',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' && variation.versioning ? it : it.skip;
describe(`vFormat : ${variation.vFormat}, versioning: ${variation.versioning}`, () => {
let paramsArr = [];
beforeEach(done => {
// reset params
paramsArr = Array.from({ length: N }, (_, i) => ({
key: `pfx1-test-object${i + 1}`,
objVal: {
key: `pfx1-test-object${i + 1}`,
versionId: 'null',
},
}));
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
const versionParams = {
versioning: variation.versioning,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
async.eachSeries(paramsArr, (params, eachCb) => {
metadata.putObjectMD(BUCKET_NAME, params.key, params.objVal,
versionParams, logger, (err, res) => {
if (err) {
return eachCb(err);
}
if (variation.versioning) {
// eslint-disable-next-line no-param-reassign
params.versionId = JSON.parse(res).versionId;
}
return eachCb(null);
});
}, next);
},
next => {
metadata.putObjectMD(BUCKET_NAME, paramsArr[N - 1].key, paramsArr[N - 1].objVal,
versionParams, logger, (err, res) => {
if (err) {
return next(err);
}
if (variation.versioning) {
versionId2 = JSON.parse(res).versionId;
} else {
versionId2 = 'null';
}
return next(null);
});
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`should get ${N} objects${variation.versioning ? '' : ' master'} versions using batching`, done => {
const request = paramsArr.map(({ key, objVal }) => ({
key,
params: {
versionId: variation.versioning ? objVal.versionId : null,
},
}));
metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
assert.strictEqual(err, null);
assert.strictEqual(objects.length, N);
objects.forEach((obj, i) => {
assert.strictEqual(obj.doc.key, paramsArr[i].key);
if (variation.versioning) {
assert.strictEqual(obj.doc.versionId, paramsArr[i].objVal.versionId);
}
});
return done();
});
});
it('should not throw an error if object or version is nonexistent and return null doc', done => {
const request = [{
key: 'nonexistent',
params: {
versionId: variation.versioning ? 'nonexistent' : null,
},
}];
metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
assert.strictEqual(err, null);
assert.strictEqual(objects.length, 1);
assert.strictEqual(objects[0].doc, null);
done();
});
});
it(`should return latest version when master is PHD ${variation.it}`, done => {
if (!variation.versioning) {
return done();
}
const request = paramsArr.map(({ key, objVal }) => ({
key,
params: {
versionId: variation.versioning ? objVal.versionId : null,
},
}));
return async.series([
next => {
let objectName = null;
if (variation.versioning) {
objectName =
formatVersionKey(paramsArr[N - 1].key, paramsArr[N - 1].versionId, variation.vFormat);
} else {
objectName = formatMasterKey(paramsArr[N - 1].key, variation.vFormat);
}
// adding isPHD flag to master
const phdVersionId = generateVersionId();
paramsArr[N - 1].objVal.versionId = phdVersionId;
paramsArr[N - 1].objVal.isPHD = true;
updateMasterObject(objectName, phdVersionId, paramsArr[N - 1].objVal,
variation.vFormat, next);
},
// Should return latest object version
next => metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
assert.deepStrictEqual(err, null);
objects.forEach((obj, i) => {
assert.strictEqual(obj.doc.key, paramsArr[i].objVal.key);
if (variation.versioning && i === N - 1) {
assert.strictEqual(obj.doc.versionId, versionId2);
} else {
assert.strictEqual(obj.doc.versionId, paramsArr[i].objVal.versionId);
}
});
delete paramsArr[N - 1].objVal.isPHD;
return next();
}),
], done);
});
it('should fail to get an object tagged for deletion', done => {
const key = paramsArr[0].key;
flagObjectForDeletion(key, err => {
assert.ifError(err);
metadata.getObjectsMD(BUCKET_NAME, [{ key }], logger, (err, object) => {
assert.strictEqual(err, null);
assert.strictEqual(object[0].doc, null);
done();
});
});
});
itOnlyInV1(`Should return last version when master deleted ${variation.vFormat}`, done => {
const versioningParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
// putting a delete marker as last version
next => {
paramsArr[0].versionId = null;
paramsArr[0].objVal.isDeleteMarker = true;
return metadata.putObjectMD(BUCKET_NAME, paramsArr[0].key, paramsArr[0].objVal,
versioningParams, logger, next);
},
next => metadata.getObjectsMD(BUCKET_NAME, [{ key: paramsArr[0].key }], logger, (err, objects) => {
assert.strictEqual(err, null);
assert.strictEqual(objects[0].doc.key, paramsArr[0].key);
assert.strictEqual(objects[0].doc.isDeleteMarker, true);
paramsArr[0].objVal.isDeleteMarker = null;
return next();
}),
], done);
});
});
});
});


@ -1,744 +0,0 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { assertContents, flagObjectForDeletion, makeBucketMD, putBulkObjectVersions } = require('./utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-current-bucket';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface::metadata.listLifecycleObject::current', () => {
let metadata;
let collection;
const expectedVersionIds = {};
const location1 = 'loc1';
const location2 = 'loc2';
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
[BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
describe(`bucket format version: ${v}`, () => {
beforeEach(done => {
const bucketMD = makeBucketMD(BUCKET_NAME);
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
metadata.client.defaultBucketKeyFormat = v;
async.series([
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
const objName = 'pfx1-test-object';
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
dataStoreName: location1,
};
const nbVersions = 5;
const timestamp = 0;
putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
nbVersions, timestamp, logger, (err, data) => {
expectedVersionIds[objName] = data.lastVersionId;
return next(err);
});
/* eslint-disable max-len */
// The following versions are created:
// { "_id" : "Mpfx1-test-object", "value" : { "key" : "pfx1-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:00.005Z" } }
// { "_id" : "Vpfx1-test-object{sep}id4", "value" : { "key" : "pfx1-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:00.005Z" } }
// { "_id" : "Vpfx1-test-object{sep}id3", "value" : { "key" : "pfx1-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:00.004Z" } }
// { "_id" : "Vpfx1-test-object{sep}id2", "value" : { "key" : "pfx1-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:00.003Z" } }
// { "_id" : "Vpfx1-test-object{sep}id1", "value" : { "key" : "pfx1-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:00.002Z" } }
// { "_id" : "Vpfx1-test-object{sep}id0", "value" : { "key" : "pfx1-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:00.001Z" } }
/* eslint-enable max-len */
},
next => {
const objName = 'pfx2-test-object';
const objVal = {
key: 'pfx2-test-object',
versionId: 'null',
dataStoreName: location2,
};
const nbVersions = 5;
const timestamp = 2000;
putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
nbVersions, timestamp, logger, (err, data) => {
expectedVersionIds[objName] = data.lastVersionId;
return next(err);
});
/* eslint-disable max-len */
// The following versions are created:
// { "_id" : "Mpfx2-test-object", "value" : { "key" : "pfx2-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:02.005Z" } }
// { "_id" : "Vpfx2-test-object{sep}id4", "value" : { "key" : "pfx2-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:02.005Z" } }
// { "_id" : "Vpfx2-test-object{sep}id3", "value" : { "key" : "pfx2-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:02.004Z" } }
// { "_id" : "Vpfx2-test-object{sep}id2", "value" : { "key" : "pfx2-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:02.003Z" } }
// { "_id" : "Vpfx2-test-object{sep}id1", "value" : { "key" : "pfx2-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:02.002Z" } }
// { "_id" : "Vpfx1-test-object{sep}id0", "value" : { "key" : "pfx2-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:02.001Z" } }
/* eslint-enable max-len */
},
next => {
const objName = 'pfx3-test-object';
const objVal = {
key: 'pfx3-test-object',
versionId: 'null',
dataStoreName: location1,
};
const nbVersions = 5;
const timestamp = 1000;
putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
nbVersions, timestamp, logger, (err, data) => {
expectedVersionIds[objName] = data.lastVersionId;
return next(err);
});
/* eslint-disable max-len */
// The following versions are created:
// { "_id" : "Mpfx3-test-object", "value" : { "key" : "pfx3-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:01.005Z" } }
// { "_id" : "Vpfx3-test-object{sep}id4", "value" : { "key" : "pfx3-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:01.005Z" } }
// { "_id" : "Vpfx3-test-object{sep}id3", "value" : { "key" : "pfx3-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:01.004Z" } }
// { "_id" : "Vpfx3-test-object{sep}id2", "value" : { "key" : "pfx3-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:01.003Z" } }
// { "_id" : "Vpfx3-test-object{sep}id1", "value" : { "key" : "pfx3-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:01.002Z" } }
// { "_id" : "Vpfx3-test-object{sep}id0", "value" : { "key" : "pfx3-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:01.001Z" } }
/* eslint-enable max-len */
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it('Should list current versions of objects', done => {
const params = {
listingType: 'DelimiterCurrent',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions of objects excluding keys stored in location2', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location2,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 2);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions of objects excluding keys stored in location1', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions of objects with prefix and excluding keys stored in location2', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location2,
prefix: 'pfx3',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return truncated list of current versions excluding keys stored in location2', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location2,
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.NextMarker, 'pfx1-test-object');
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
params.marker = 'pfx1-test-object';
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
});
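// A minimal sketch of walking a truncated DelimiterCurrent listing to the
// end, following the marker/NextMarker contract exercised above (the helper
// name is hypothetical):
// function listAllCurrent(params, acc, cb) {
//     metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
//         if (err) {
//             return cb(err);
//         }
//         acc.push(...data.Contents);
//         if (data.IsTruncated) {
//             return listAllCurrent({ ...params, marker: data.NextMarker }, acc, cb);
//         }
//         return cb(null, acc);
//     });
// }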
it('Should return an empty list when beforeDate precedes all object creation dates', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:00.000Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it('Should return the current version modified before 1970-01-01T00:00:00.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:00.010Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return the current versions modified before 1970-01-01T00:00:01.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:01.010Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 2);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return the current versions modified before 1970-01-01T00:00:02.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:02.010Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should truncate the list of current versions modified before 1970-01-01T00:00:01.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:01.010Z',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.NextMarker, 'pfx1-test-object');
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
params.marker = 'pfx1-test-object';
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
});
it('Should truncate list of current versions of objects', done => {
const params = {
listingType: 'DelimiterCurrent',
maxKeys: 2,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx2-test-object');
assert.strictEqual(data.Contents.length, 2);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list the following current versions of objects', done => {
const params = {
listingType: 'DelimiterCurrent',
marker: 'pfx2-test-object',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions that start with prefix', done => {
const params = {
listingType: 'DelimiterCurrent',
prefix: 'pfx2',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return the list of current versions modified before 1970-01-01T00:00:01.010Z with prefix pfx1',
done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:01.010Z',
maxKeys: 1,
prefix: 'pfx1',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should not list deleted version', done => {
const objVal = {
'key': 'pfx4-test-object',
'last-modified': new Date(0).toISOString(),
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
};
let deletedVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
deletedVersionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.deleteObjectMD(BUCKET_NAME, objVal.key,
{ versionId: deletedVersionId }, logger, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return next();
}),
], done);
});
it('Should not list object with delete marker', done => {
const objVal = {
'key': 'pfx4-test-object',
'last-modified': new Date(0).toISOString(),
};
const dmObjVal = { ...objVal, isDeleteMarker: true };
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams, logger, next),
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, dmObjVal, versionParams, logger, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return next();
}),
], done);
});
it('Should not list phd master key when listing current versions', done => {
const objVal = {
'key': 'pfx4-test-object',
'versionId': 'null',
'last-modified': new Date(0).toISOString(),
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
prefix: 'pfx4',
};
let versionId;
let lastVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, 'pfx4-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
versionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx4-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx4-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents[0].value.VersionId, versionId);
return next();
}),
], done);
});
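// For context on the test above: deleting the current version leaves a PHD
// (placeholder) master key until a repair job promotes the newest remaining
// version. A sketch of its shape (field names assumed from this suite's
// conventions):
// { "_id": "Mpfx4-test-object", "value": { "isPHD": true, "versionId": "<id>" } }
// Listings must resolve it to the latest surviving version, as asserted above.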
it('Should not list the current version tagged for deletion', done => {
const objVal = {
'key': 'pfx4-test-object',
'last-modified': new Date(0).toISOString(),
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(collection, objVal.key, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return next();
}),
], done);
});
});
});
});

View File

@ -1,215 +0,0 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { makeBucketMD } = require('./utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-bucket-null';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface::metadata.listLifecycleObject::nullVersion', () => {
let metadata;
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
[BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
describe(`bucket format version: ${v}`, () => {
beforeEach(done => {
const bucketMD = makeBucketMD(BUCKET_NAME);
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
metadata.client.defaultBucketKeyFormat = v;
async.series([
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
next => {
const objName = 'key0';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'versionId': 'null',
'isNull': true,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
const objName = 'key1';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'versionId': 'null',
'isNull': true,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
const objName = 'key1';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
// key2 simulates a scenario where:
// 1) bucket is versioned
// 2) put object key2
// 3) bucket versioning gets suspended
// 4) put object key2
// result:
// {
// "_id" : "Mkey0",
// "value" : {
// "key" : "key2",
// "isNull" : true,
// "versionId" : "<VersionId2>",
// "last-modified" : "2023-07-11T14:16:00.151Z",
// }
// },
// {
// "_id" : "Vkey0\u0000<VersionId1>",
// "value" : {
// "key" : "key2",
// "versionId" : "<VersionId1>",
// "tags" : {
// },
// "last-modified" : "2023-07-11T14:15:36.713Z",
// }
// },
next => {
const objName = 'key2';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
const objName = 'key2';
const timestamp = 0;
const params = {
versionId: '',
};
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'last-modified': lastModified,
'isNull': true,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, params, logger, next);
},
], done);
});
afterEach(done => metadata.deleteBucket(BUCKET_NAME, logger, done));
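// Summary of the fixture written above: key0 has a lone null version, so its
// current version is null; key1's null version was overwritten and is now
// non-current under a regular current version; key2 has a regular
// non-current version under a null current version (versioning suspended).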
it('Should list the null current version and set IsNull to true', done => {
const params = {
listingType: 'DelimiterCurrent',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 3);
// check that key0 has a null current version
const firstKey = data.Contents[0];
assert.strictEqual(firstKey.key, 'key0');
assert.strictEqual(firstKey.value.IsNull, true);
// check that key1 has no null current version
const secondKey = data.Contents[1];
assert.strictEqual(secondKey.key, 'key1');
assert(!secondKey.value.IsNull);
// check that key2 has a null current version
const thirdKey = data.Contents[2];
assert.strictEqual(thirdKey.key, 'key2');
assert.strictEqual(thirdKey.value.IsNull, true);
return done();
});
});
it('Should list the null non-current version and set IsNull to true', done => {
const params = {
listingType: 'DelimiterNonCurrent',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 2);
// check that key1 has a null non-current version
const firstKey = data.Contents[0];
assert.strictEqual(firstKey.key, 'key1');
assert.strictEqual(firstKey.value.IsNull, true);
// check that key2 has no null non-current version
const secondKey = data.Contents[1];
assert.strictEqual(secondKey.key, 'key2');
assert(!secondKey.value.IsNull);
return done();
});
});
});
});
});

View File

@ -1,455 +0,0 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { makeBucketMD, putBulkObjectVersions } = require('./utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-orphan-bucket';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface::metadata.listLifecycleObject::orphan', () => {
let metadata;
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
[BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
describe(`bucket format version: ${v}`, () => {
beforeEach(done => {
const bucketMD = makeBucketMD(BUCKET_NAME);
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
metadata.client.defaultBucketKeyFormat = v;
async.series([
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
next => {
const keyName = 'pfx0-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(0).toISOString(), // 1970-01-01T00:00:00.000Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
nbVersions: 1,
};
const timestamp = 0;
putBulkObjectVersions(metadata, BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, timestamp, logger, next);
},
next => {
const params = {
objName: 'pfx2-test-object',
objVal: {
key: 'pfx2-test-object',
versionId: 'null',
},
nbVersions: 1,
};
const timestamp = 0;
putBulkObjectVersions(metadata, BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, timestamp, logger, next);
},
next => {
const keyName = 'pfx2-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(2).toISOString(), // 1970-01-01T00:00:00.002Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const keyName = 'pfx3-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(0).toISOString(), // 1970-01-01T00:00:00.000Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const keyName = 'pfx4-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(5).toISOString(), // 1970-01-01T00:00:00.005Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const keyName = 'pfx4-test-object2';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(6).toISOString(), // 1970-01-01T00:00:00.006Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
], done);
});
/* eslint-disable max-len */
// { "_id" : "Mpfx1-test-object", "value" : { "key" : "pfx1-test-object", "versionId" : "v1", "last-modified" : "1970-01-01T00:00:00.001Z" } }
// { "_id" : "Vpfx0-test-object{sep}v0", "value" : { "key" : "pfx0-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.000Z", "versionId" : "v0" } }
// { "_id" : "Vpfx1-test-object{sep}v1", "value" : { "key" : "pfx1-test-object", "versionId" : "v1", "last-modified" : "1970-01-01T00:00:00.001Z" } }
// { "_id" : "Vpfx2-test-object{sep}v3", "value" : { "key" : "pfx2-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.002Z", "versionId" : "v3" } }
// { "_id" : "Vpfx2-test-object{sep}v2", "value" : { "key" : "pfx2-test-object", "versionId" : "v2", "last-modified" : "1970-01-01T00:00:00.001Z" } }
// { "_id" : "Vpfx3-test-object{sep}v4", "value" : { "key" : "pfx3-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.000Z", "versionId" : "v4" } }
// { "_id" : "Vpfx4-test-object{sep}v5", "value" : { "key" : "pfx4-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.005Z", "versionId" : "v5" } }
// { "_id" : "Vpfx4-test-object2{sep}v6", "value" : { "key" : "pfx4-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.006Z", "versionId" : "v6" } }
/* eslint-enable max-len */
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
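// An orphan delete marker, as exercised below, is a delete marker with no
// other version left underneath it. In the layout above, pfx0, pfx3,
// pfx4-test-object and pfx4-test-object2 qualify; pfx2's delete marker does
// not, because version v2 still exists below it.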
it('Should list orphan delete markers', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 4);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx4-test-object');
assert.strictEqual(data.Contents[3].key, 'pfx4-test-object2');
return done();
});
});
it('Should return an empty list when no orphan delete marker was modified before beforeDate', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
beforeDate: '1970-01-01T00:00:00.000Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it('Should list orphan delete markers older than 1970-01-01T00:00:00.003Z', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
beforeDate: '1970-01-01T00:00:00.003Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
return done();
});
});
it('Should return the first part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx0-test-object');
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
return done();
});
});
it('Should return the second part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx0-test-object',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx3-test-object');
return done();
});
});
it('Should return the third part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx3-test-object',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx4-test-object');
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
return done();
});
});
it('Should return the fourth part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx4-test-object',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object2');
return done();
});
});
it('Should list the first two orphan delete markers', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
return done();
});
});
it('Should list the first four orphan delete markers', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 4,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 4);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx4-test-object');
assert.strictEqual(data.Contents[3].key, 'pfx4-test-object2');
return done();
});
});
it('Should return an empty list if no orphan delete marker starts with prefix pfx2', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
prefix: 'pfx2',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it('Should list orphan delete markers that start with prefix pfx4', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
prefix: 'pfx4',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx4-test-object2');
return done();
});
});
it('Should return the first orphan delete marker version that starts with prefix', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
prefix: 'pfx4',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.NextMarker, 'pfx4-test-object');
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
return done();
});
});
it('Should return the following orphan delete marker version that starts with prefix', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx4-test-object',
prefix: 'pfx4',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object2');
return done();
});
});
it('Should return the truncated list of orphan delete markers older than 1970-01-01T00:00:00.006Z',
done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
beforeDate: '1970-01-01T00:00:00.006Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
return done();
});
});
it('Should return the following list of orphan delete markers older than 1970-01-01T00:00:00.006Z',
done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
beforeDate: '1970-01-01T00:00:00.006Z',
marker: 'pfx3-test-object',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
return done();
});
});
it('Should return the truncated list of orphan delete markers older than 1970-01-01T00:00:00.001Z',
done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
beforeDate: '1970-01-01T00:00:00.001Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
return done();
});
});
});
});
});

View File

@ -1,104 +0,0 @@
const async = require('async');
const BucketInfo = require('../../../../../lib/models/BucketInfo').default;
const assert = require('assert');
/**
* Puts multiple versions of an object
* @param {Object} metadata - metadata client
* @param {String} bucketName - bucket name
* @param {String} objName - object key
* @param {Object} objVal - object metadata
* @param {Object} params - versioning parameters
* @param {number} versionNb - number of versions to put
* @param {number} timestamp - used for last-modified
* @param {Object} logger - a Logger instance
* @param {Function} cb - callback
* @returns {undefined}
*/
function putBulkObjectVersions(metadata, bucketName, objName, objVal, params, versionNb, timestamp, logger, cb) {
let count = 0;
const versionIds = [];
return async.whilst(
() => count < versionNb,
cbIterator => {
count++;
const lastModified = new Date(timestamp + count).toISOString();
const finalObjectVal = Object.assign({}, objVal, { 'last-modified': lastModified });
return metadata.putObjectMD(bucketName, objName, finalObjectVal, params, logger, (err, data) => {
if (err) {
return cbIterator(err);
}
versionIds.push(JSON.parse(data).versionId);
return cbIterator(null, versionIds);
});
}, (err, expectedVersionIds) => {
// The last version is removed since it represents the current version.
const lastVersionId = expectedVersionIds.pop();
// The array is reversed to align with the version order (latest to oldest).
expectedVersionIds.reverse();
return cb(err, { lastVersionId, expectedVersionIds });
});
}
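// Usage sketch (values are illustrative): put 5 versions of 'my-key' with
// last-modified timestamps 1001..1005ms, then receive the current version id
// and the non-current ids ordered latest to oldest.
// putBulkObjectVersions(metadata, 'my-bucket', 'my-key', { key: 'my-key' },
//     { versioning: true }, 5, 1000, logger,
//     (err, { lastVersionId, expectedVersionIds }) => { /* ... */ });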
function makeBucketMD(bucketName) {
return BucketInfo.fromObj({
_name: bucketName,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
}
function assertContents(contents, expected) {
assert.strictEqual(contents.length, expected.length);
contents.forEach((c, i) => {
assert.strictEqual(c.key, expected[i].key);
assert.strictEqual(c.value.LastModified, expected[i].LastModified);
assert.strictEqual(c.value.staleDate, expected[i].staleDate);
assert.strictEqual(c.value.dataStoreName, expected[i].dataStoreName);
if (expected[i].VersionId) {
assert.strictEqual(c.value.VersionId, expected[i].VersionId);
}
});
}
/**
* Sets the "deleted" property to true
* @param {Object} collection - collection to be updated
* @param {string} key - object name
* @param {Function} cb - callback
* @return {undefined}
*/
function flagObjectForDeletion(collection, key, cb) {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false })
.then(() => cb())
.catch(err => cb(err));
}
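// Usage sketch: flagObjectForDeletion(collection, 'pfx4-test-object', cb)
// sets value.deleted = true on every version of the key; the listing
// algorithms treat such entries as pending deletion and skip them.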
module.exports = {
putBulkObjectVersions,
makeBucketMD,
assertContents,
flagObjectForDeletion,
};

View File

@ -5,11 +5,9 @@ const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
require('../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const sinon = require('sinon');
const MongoReadStream = require('../../../../lib/storage/metadata/mongoclient/readStream');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
@ -69,31 +67,22 @@ describe('MongoClientInterface::metadata.listObject', () => {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false }).then(() => cb()).catch(err => cb(err));
}
function customListingParser(entries) {
return entries.map(entry => {
const tmp = JSON.parse(entry.value);
return tmp;
});
{ upsert: false }, cb);
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
@ -107,466 +96,383 @@ describe('MongoClientInterface::metadata.listObject', () => {
});
variations.forEach(variation => {
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
const params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx2-test-object',
objVal: {
key: 'pfx2-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx3-test-object',
objVal: {
key: 'pfx3-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should list master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 3);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx3-test-object');
return done();
});
});
it(`Should truncate list of master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 2,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
return done();
});
});
it(`Should list master versions of objects that start with prefix ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
prefix: 'pfx2',
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx2-test-object');
return done();
});
});
it(`Should return empty results when bucket non existent (master) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it(`Should list all versions of objects ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterVersions',
maxKeys: 1000,
};
const versionsPerKey = {};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 15);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
return done();
});
});
it(`Should truncate list of versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 5,
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
return done();
});
});
it(`Should list versions of objects that start with prefix ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
prefix: 'pfx2',
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
return done();
});
});
it(`Should return empty results when bucket not existing (version) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Versions.length, 0);
return done();
});
});
it(`Should check entire list with pagination (version) ${variation.it}`, done => {
const versionsPerKey = {};
const bucketName = BUCKET_NAME;
const get = (maxKeys, keyMarker, versionIdMarker, cb) => metadata.listObject(bucketName, {
listingType: 'DelimiterVersions',
maxKeys,
keyMarker,
versionIdMarker,
}, logger, (err, res) => {
if (err) {
return cb(err);
}
res.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
if (res.IsTruncated) {
return get(maxKeys, res.NextKeyMarker, res.NextVersionIdMarker, cb);
}
return cb(null);
});
return get(3, null, null, err => {
assert.deepStrictEqual(err, null);
assert.strictEqual(Object.keys(versionsPerKey).length, 3);
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
done();
});
});
it(`Should not list phd master key when listing masters ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterMaster',
prefix: 'pfx1',
};
let versionId;
let lastVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
versionId = JSON.parse(res).versionId;
return next(null);
}),
next => {
const params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
location: [{
start: 0,
size: 150,
dataStoreETag: 'etag',
dataStoreVersionId: 'versionId',
}],
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx2-test-object',
objVal: {
key: 'pfx2-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx3-test-object',
objVal: {
key: 'pfx3-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should list master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 3);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx3-test-object');
return done();
});
});
it(`Should truncate list of master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 2,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
return done();
});
});
it(`Should list master versions of objects that start with prefix ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
prefix: 'pfx2',
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx2-test-object');
return done();
});
});
it(`Should return empty results when bucket non existent (master) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it(`Should list all versions of objects ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterVersions',
maxKeys: 1000,
};
const versionsPerKey = {};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 15);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
return done();
});
});
it(`Should truncate list of versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 5,
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
return done();
});
});
it(`Should list versions of objects that start with prefix ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
prefix: 'pfx2',
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
return done();
});
});
it(`Should return empty results when bucket not existing (version) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Versions.length, 0);
return done();
});
});
it(`Should check entire list with pagination (version) ${variation.it}`, done => {
const versionsPerKey = {};
const bucketName = BUCKET_NAME;
const get = (maxKeys, keyMarker, versionIdMarker, cb) => metadata.listObject(bucketName, {
listingType: 'DelimiterVersions',
maxKeys,
keyMarker,
versionIdMarker,
}, logger, (err, res) => {
if (err) {
return cb(err);
}
res.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
if (res.IsTruncated) {
return get(maxKeys, res.NextKeyMarker, res.NextVersionIdMarker, cb);
}
return cb(null);
});
return get(3, null, null, err => {
assert.deepStrictEqual(err, null);
assert.strictEqual(Object.keys(versionsPerKey).length, 3);
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
done();
});
});
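// Note on the contract exercised above: DelimiterVersions paginates with the
// (NextKeyMarker, NextVersionIdMarker) pair rather than a single marker,
// since several versions can share one key.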
it(`Should not list phd master key when listing masters ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterMaster',
prefix: 'pfx1',
};
let versionId;
let lastVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
versionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents[0].value.VersionId, versionId);
return next();
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
], done);
});
it(`Should not list phd master key when listing versions ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterVersions',
prefix: 'pfx1',
};
let lastVersionId;
let versionIds;
async.series([
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Versions.length, 5);
versionIds = data.Versions.map(version => version.VersionId);
return next();
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
const newVersionIds = data.Versions.map(version => version.VersionId);
assert.strictEqual(data.Versions.length, 5);
assert(versionIds.every(version => newVersionIds.includes(version)));
return next();
}),
], done);
});
it('Should not list objects tagged for deletion (master keys)', done => {
const objVal = {
key: 'pfx4-test-object',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterMaster',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(objVal.key, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const listedObjectNames = data.Contents.map(x => x.key);
assert(!listedObjectNames.includes(objVal.key));
return next();
}),
], done);
});
it('Should not list objects tagged for deletion (version keys)', done => {
const objVal = {
key: 'pfx4-test-object',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterVersions',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(objVal.key, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Versions.length, 15);
const listedObjectNames = data.Versions.map(x => x.key);
assert(!listedObjectNames.includes(objVal.key));
return next();
}),
], done);
});
it('Should properly destroy the MongoReadStream', done => {
// eslint-disable-next-line func-names
const destroyStub = sinon.stub(MongoReadStream.prototype, 'destroy').callsFake(function (...args) {
// You can add extra logic here if needed
MongoReadStream.prototype.destroy.wrappedMethod.apply(this, args);
});
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, err => {
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert(destroyStub.called, 'Destroy should have been called on MongoReadStream');
// Restore original destroy method
destroyStub.restore();
return done();
});
});
assert.strictEqual(data.Contents[0].value.VersionId, versionId);
return next();
}),
], done);
});
it(`Should not list phd master key when listing versions ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterVersions',
prefix: 'pfx1',
};
let lastVersionId;
let versionIds;
async.series([
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Versions.length, 5);
versionIds = data.Versions.map(version => version.VersionId);
return next();
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
const newVersionIds = data.Versions.map(version => version.VersionId);
assert.strictEqual(data.Versions.length, 5);
assert(versionIds.every(version => newVersionIds.includes(version)));
return next();
}),
], done);
});
it('Should properly destroy the MongoDBReadStream on error', done => {
// eslint-disable-next-line func-names
const destroyStub = sinon.stub(MongoReadStream.prototype, 'destroy').callsFake(function (...args) {
// You can add extra logic here if needed
MongoReadStream.prototype.destroy.wrappedMethod.apply(this, args);
});
// stub the cursor creation to emit an error
// eslint-disable-next-line func-names
const readStub = sinon.stub(MongoReadStream.prototype, '_read').callsFake(function () {
this.emit('error', new Error('error'));
});
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, err => {
assert(err, 'Expected an error');
assert(destroyStub.called, 'Destroy should have been called on MongoReadStream');
destroyStub.restore();
readStub.restore();
return done();
});
});
it('Should not list objects tagged for deletion (master keys)', done => {
const objVal = {
key: 'pfx4-test-object',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterMaster',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(objVal.key, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const listedObjectNames = data.Contents.map(x => x.key);
assert(!listedObjectNames.includes(objVal.key));
return next();
}),
], done);
});
it('Should not include location in listing result and use custom listing parser', done => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
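// customListingParser is the parser helper defined earlier in this spec; it is assumed to
// drop the location field from each listed entry, which the assertions below verify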
customListingParser,
};
const parserSpy = sinon.spy(opts, 'customListingParser');
const md = new MetadataWrapper(IMPL_NAME, opts, null, logger);
md.setup(() => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return md.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[0].location, undefined);
assert(parserSpy.called);
return done();
});
});
});
it('Should not list objects tagged for deletion (version keys)', done => {
const objVal = {
key: 'pfx4-test-object',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterVersions',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(objVal.key, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Versions.length, 15);
const listedObjectNames = data.Versions.map(x => x.key);
assert(!listedObjectNames.includes(objVal.key));
return next();
}),
], done);
});
});
});

View File

@ -40,35 +40,39 @@ describe('MongoClientInterface:metadata.putObjectMD', () => {
function getObject(key, cb) {
collection.findOne({
_id: key,
}, {}).then(doc => {
if (!doc) {
return cb(errors.NoSuchKey);
}
return cb(null, doc.value);
}).catch(err => cb(err));
collection.findOne({
_id: key,
}, {}, (err, doc) => {
if (err) {
return cb(err);
}
if (!doc) {
return cb(errors.NoSuchKey);
}
return cb(null, doc.value);
});
}
function getObjectCount(cb) {
collection.countDocuments()
.then(count => cb(null, count))
.catch(err => cb(err));
collection.countDocuments((err, count) => {
if (err) {
return cb(err);
}
return cb(null, count);
});
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27021',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27021',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});

View File

@ -35,20 +35,18 @@ describe('MongoClientInterface:withCond', () => {
];
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27022',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMP_NAME, opts, null, logger);
metadata.setup(done);
});
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27022',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMP_NAME, opts, null, logger);
metadata.setup(done);
});
});
@ -220,10 +218,6 @@ describe('MongoClientInterface:withCond', () => {
});
describe('::deleteObjectWithCond', () => {
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
const tests = [
[
`should return no such key if the object does not exist ${variation.it}`,

Some files were not shown because too many files have changed in this diff.