Compare commits: developmen...improvemen (3 commits)

| Author | SHA1 | Date |
|---|---|---|
| Maha Benzekri | f06969889d | |
| Maha Benzekri | ac58d8f0db | |
| Maha Benzekri | 0ae73f4936 | |
@@ -16,7 +16,7 @@ runs:
       run: |-
         set -exu;
         mkdir -p /tmp/artifacts/${JOB_NAME}/;
-    - uses: actions/setup-node@v4
+    - uses: actions/setup-node@v2
       with:
         node-version: '16'
         cache: 'yarn'
@@ -40,11 +40,6 @@ services:
       - DEFAULT_BUCKET_KEY_FORMAT
       - METADATA_MAX_CACHED_BUCKETS
       - ENABLE_NULL_VERSION_COMPAT_MODE
-      - SCUBA_HOST
-      - SCUBA_PORT
-      - SCUBA_HEALTHCHECK_FREQUENCY
-      - S3QUOTA
-      - QUOTA_ENABLE_INFLIGHTS
     env_file:
       - creds.env
     depends_on:
@@ -72,7 +67,7 @@ services:
   pykmip:
     network_mode: "host"
     profiles: ['pykmip']
-    image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
+    image: registry.scality.com/cloudserver-dev/pykmip
     volumes:
       - /tmp/artifacts/${JOB_NAME}:/artifacts
   mongo:
@@ -1,4 +1,4 @@
-FROM mongo:5.0.21
+FROM mongo:6.0.13

 ENV USER=scality \
     HOME_DIR=/home/scality \
@@ -3,7 +3,7 @@ set -exo pipefail

 init_RS() {
   sleep 5
-  mongo --port 27018 /conf/initReplicaSet.js
+  mongosh --port 27018 /conf/initReplicaSet.js
 }
 init_RS &

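A note on the two MongoDB hunks above: the shell change follows from the image change. MongoDB 6.x images no longer ship the legacy `mongo` shell, so once the CI image moves from `mongo:5.0.21` to `mongo:6.0.13`, the replica-set init script has to call its replacement, `mongosh`.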
@@ -1,3 +1,3 @@
-FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
+FROM registry.scality.com/federation/sproxyd:7.10.6.8
 ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
 RUN chown root:root /conf/sproxyd0.conf
@@ -20,16 +20,13 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: Render and test ${{ matrix.tests.name }}
-        uses: scality/action-prom-render-test@1.0.3
+        uses: scality/action-prom-render-test@1.0.1
         with:
           alert_file_path: monitoring/alerts.yaml
           test_file_path: ${{ matrix.tests.file }}
-          alert_inputs: |
-            namespace=zenko
-            service=artesca-data-connector-s3api-metrics
-            reportJob=artesca-data-ops-report-handler
-            replicas=3
+          alert_inputs: >-
+            namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
           github_token: ${{ secrets.GITHUB_TOKEN }}
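Worth noting about the `alert_inputs` change above: `|` and `>-` are different YAML block scalars, so the value's shape changes along with the style. A minimal sketch (the keys here are illustrative, not from the workflow):

```yaml
# '|' (literal) keeps newlines: value is "namespace=zenko\nreplicas=3\n"
literal: |
  namespace=zenko
  replicas=3

# '>-' (folded, strip) joins lines with spaces and drops the trailing
# newline: value is "namespace=zenko replicas=3"
folded: >-
  namespace=zenko
  replicas=3
```

That is presumably why the head branch also collapses the inputs onto a single comma-separated line: with `>-`, separate lines would be folded into one space-joined string anyway.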
@@ -14,12 +14,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
+        uses: github/codeql-action/init@v2
         with:
           languages: javascript, python, ruby

       - name: Build and analyze
-        uses: github/codeql-action/analyze@v3
+        uses: github/codeql-action/analyze@v2
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: 'Dependency Review'
-        uses: actions/dependency-review-action@v4
+        uses: actions/dependency-review-action@v3
@@ -10,69 +10,58 @@ on:
         required: true

 env:
+  REGISTRY_NAME: registry.scality.com
   PROJECT_NAME: ${{ github.event.repository.name }}

 jobs:
   build-federation-image:
-    runs-on: ubuntu-20.04
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to GitHub Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
-      - name: Build and push image for federation
-        uses: docker/build-push-action@v5
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+    secrets: inherit
     with:
       push: true
+      registry: registry.scality.com
+      namespace: ${{ github.event.repository.name }}
+      name: ${{ github.event.repository.name }}
       context: .
       file: images/svc-base/Dockerfile
-      tags: |
-        ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
-      cache-from: type=gha,scope=federation
-      cache-to: type=gha,mode=max,scope=federation
+      tag: ${{ github.event.inputs.tag }}-svc-base

   release:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3

       - name: Set up Docker Buildk
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v2

       - name: Login to Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
         with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          registry: ${{ env.REGISTRY_NAME }}
+          username: ${{ secrets.REGISTRY_LOGIN }}
+          password: ${{ secrets.REGISTRY_PASSWORD }}

       - name: Push dashboards into the production namespace
         run: |
-          oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
+          oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
             dashboard.json:application/grafana-dashboard+json \
             alerts.yaml:application/prometheus-alerts+yaml
         working-directory: monitoring

       - name: Build and push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v4
         with:
           context: .
           push: true
-          tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
+          tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }}
           cache-from: type=gha
           cache-to: type=gha,mode=max

       - name: Create Release
-        uses: softprops/action-gh-release@v2
+        uses: softprops/action-gh-release@v1
         env:
-          GITHUB_TOKEN: ${{ github.token }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           name: Release ${{ github.event.inputs.tag }}
           tag_name: ${{ github.event.inputs.tag }}
@@ -67,24 +67,23 @@ env:
   ENABLE_LOCAL_CACHE: "true"
   REPORT_TOKEN: "report-token-1"
   REMOTE_MANAGEMENT_DISABLE: "1"
-  # https://github.com/git-lfs/git-lfs/issues/5749
-  GIT_CLONE_PROTECTION_ACTIVE: 'false'
 jobs:
   linting-coverage:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v2
        with:
          node-version: '16'
          cache: yarn
       - name: install dependencies
        run: yarn install --frozen-lockfile --network-concurrency 1
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
-      - uses: actions/cache@v4
+      - uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip
@@ -117,7 +116,7 @@ jobs:
           find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
         if: always()
       - name: Upload files to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v2
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -133,54 +132,46 @@ jobs:
       packages: write
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v2
       - name: Login to GitHub Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Login to Registry
+        uses: docker/login-action@v2
+        with:
+          registry: registry.scality.com
+          username: ${{ secrets.REGISTRY_LOGIN }}
+          password: ${{ secrets.REGISTRY_PASSWORD }}
       - name: Build and push cloudserver image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v4
         with:
           push: true
           context: .
           provenance: false
           tags: |
-            ghcr.io/${{ github.repository }}:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
+            ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
+            registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
           cache-from: type=gha,scope=cloudserver
           cache-to: type=gha,mode=max,scope=cloudserver
-      - name: Build and push pykmip image
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          context: .github/pykmip
-          tags: |
-            ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
-          cache-from: type=gha,scope=pykmip
-          cache-to: type=gha,mode=max,scope=pykmip
       - name: Build and push MongoDB
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v4
         with:
           push: true
           context: .github/docker/mongodb
           tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-          cache-from: type=gha,scope=mongodb
-          cache-to: type=gha,mode=max,scope=mongodb
+          cache-from: type=gha
+          cache-to: type=gha,mode=max

   multiple-backend:
     runs-on: ubuntu-latest
     needs: build
     env:
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
       S3BACKEND: mem
       S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
@@ -188,13 +179,13 @@ jobs:
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Login to Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
         with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          registry: registry.scality.com
+          username: ${{ secrets.REGISTRY_LOGIN }}
+          password: ${{ secrets.REGISTRY_PASSWORD }}
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup CI services
@@ -209,7 +200,7 @@ jobs:
         env:
           S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -229,11 +220,11 @@ jobs:
       S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
       DEFAULT_BUCKET_KEY_FORMAT: v0
       MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup CI services
@@ -247,7 +238,7 @@ jobs:
         env:
           S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -268,11 +259,11 @@ jobs:
       DEFAULT_BUCKET_KEY_FORMAT: v1
       METADATA_MAX_CACHED_BUCKETS: 1
       MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup CI services
@@ -287,7 +278,7 @@ jobs:
         env:
           S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -307,13 +298,13 @@ jobs:
     env:
       S3BACKEND: file
       S3VAULT: mem
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
       MPU_TESTING: "yes"
       JOB_NAME: ${{ matrix.job-name }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup matrix job artifacts directory
@@ -330,7 +321,7 @@ jobs:
           bash wait_for_local_port.bash 8000 40
           yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -346,12 +337,12 @@ jobs:
       ENABLE_UTAPI_V2: t
       S3BACKEND: mem
       BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup CI services
@@ -363,51 +354,7 @@ jobs:
           bash wait_for_local_port.bash 8000 40
           yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
-        with:
-          method: upload
-          url: https://artifacts.scality.net
-          user: ${{ secrets.ARTIFACTS_USER }}
-          password: ${{ secrets.ARTIFACTS_PASSWORD }}
-          source: /tmp/artifacts
-        if: always()
-
-  quota-tests:
-    runs-on: ubuntu-latest
-    needs: build
-    strategy:
-      matrix:
-        inflights:
-          - name: "With Inflights"
-            value: "true"
-          - name: "Without Inflights"
-            value: "false"
-    env:
-      S3METADATA: mongodb
-      S3BACKEND: mem
-      S3QUOTA: scuba
-      QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
-      SCUBA_HOST: localhost
-      SCUBA_PORT: 8100
-      SCUBA_HEALTHCHECK_FREQUENCY: 100
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      JOB_NAME: ${{ github.job }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Setup CI environment
-        uses: ./.github/actions/setup-ci
-      - name: Setup CI services
-        run: docker compose --profile mongo up -d
-        working-directory: .github/docker
-      - name: Run quota tests
-        run: |-
-          set -ex -o pipefail;
-          bash wait_for_local_port.bash 8000 40
-          yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
-      - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
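For readers tracking the quota feature removed above: the deleted `quota-tests` job wired the scuba quota backend entirely through environment variables, which line up with the `SCUBA_*` entries dropped from the docker-compose hunk near the top of this compare and with the `process.env.SCUBA_HOST`/`SCUBA_PORT` handling removed from `lib/Config.js` further down. A condensed sketch of that wiring, with values taken from the deleted job:

```yaml
env:
  S3BACKEND: mem
  S3METADATA: mongodb
  S3QUOTA: scuba                    # select the scuba quota backend
  QUOTA_ENABLE_INFLIGHTS: "true"    # the matrix also ran with "false"
  SCUBA_HOST: localhost
  SCUBA_PORT: 8100
  SCUBA_HEALTHCHECK_FREQUENCY: 100
```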
@@ -423,13 +370,12 @@ jobs:
       S3BACKEND: file
       S3VAULT: mem
       MPU_TESTING: "yes"
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Copy KMIP certs
|
||||||
bash wait_for_local_port.bash 5696 40
|
bash wait_for_local_port.bash 5696 40
|
||||||
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@@ -465,17 +411,17 @@ jobs:
       MPU_TESTING: "yes"
       S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
       MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Login to GitHub Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          password: ${{ secrets.GITHUB_TOKEN }}
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - uses: ruby/setup-ruby@v1
@@ -483,7 +429,7 @@ jobs:
           ruby-version: '2.5.9'
       - name: Install Ruby dependencies
         run: |
-          gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
+          gem install nokogiri:1.12.5 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
       - name: Install Java dependencies
         run: |
           sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
@@ -523,7 +469,7 @@ jobs:
             S3VAULT: mem
             S3METADATA: mongodb
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -23,7 +23,6 @@ RUN apt-get update \

 ENV PYTHON=python3
 COPY package.json yarn.lock /usr/src/app/
-RUN npm install typescript -g
 RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1

 ################################################################################
README.md (175 changed lines)

@@ -1,7 +1,10 @@
-# Zenko CloudServer with Vitastor Backend
+# Zenko CloudServer

 ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)

+[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
+[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
+
 ## Overview

 CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@@ -11,71 +14,137 @@ Scality’s Open Source Multi-Cloud Data Controller.
 CloudServer provides a single AWS S3 API interface to access multiple
 backend data storage both on-premise or public in the cloud.

-This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
-backend support.
+CloudServer is useful for Developers, either to run as part of a
+continous integration test environment to emulate the AWS S3 service locally
+or as an abstraction layer to develop object storage enabled
+application on the go.

-## Quick Start with Vitastor
+## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)

-Vitastor Backend is in experimental status, however you can already try to
-run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
-it works too 😊.
+## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)

-Installation instructions:
+## Docker

-### Install Vitastor
+[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)

-Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
+## Contributing

-### Install Zenko with Vitastor Backend
+In order to contribute, please follow the
+[Contributing Guidelines](
+https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).

-- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
-- Install dependencies: `npm install --omit dev` or just `npm install`
-- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
-- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
-  You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
-- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
+## Installation

-### Install and Configure MongoDB
+### Dependencies

-Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
+Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
+. Up-to-date versions can be found at
+[Nodesource](https://github.com/nodesource/distributions).

-### Setup Zenko
+### Clone source code

-- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
-- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
-- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
-- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
-- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
-  and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
-  access keys, but it's not published, so let's use a file for now.
-- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
-  You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
-  in this file.
-
-Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
-instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
-
-### Start Zenko
-
-Start the S3 server with: `node index.js`
-
-If you use default settings, Zenko CloudServer starts on port 8000.
-The default access key is `accessKey1` with a secret key of `verySecretKey1`.
-
-Now you can access your S3 with `s3cmd` or `geesefs`:
-
-```
-s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
-```
-
-```
-AWS_ACCESS_KEY_ID=accessKey1 \
-AWS_SECRET_ACCESS_KEY=verySecretKey1 \
-geesefs --endpoint http://localhost:8000 testbucket mountdir
-```
+```shell
+git clone https://github.com/scality/S3.git
+```

-# Author & License
+### Install js dependencies

-- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
-- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
-  (a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
+Go to the ./S3 folder,
+
+```shell
+yarn install --frozen-lockfile
+```
+
+If you get an error regarding installation of the diskUsage module,
+please install g++.
+
+If you get an error regarding level-down bindings, try clearing your yarn cache:
+
+```shell
+yarn cache clean
+```
+
+## Run it with a file backend
+
+```shell
+yarn start
+```
+
+This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
+9991 are also open locally for internal transfer of metadata and data,
+respectively.
+
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+By default the metadata files will be saved in the
+localMetadata directory and the data files will be saved
+in the localData directory within the ./S3 directory on your
+machine. These directories have been pre-created within the
+repository. If you would like to save the data or metadata in
+different locations of your choice, you must specify them with absolute paths.
+So, when starting the server:
+
+```shell
+mkdir -m 700 $(pwd)/myFavoriteDataPath
+mkdir -m 700 $(pwd)/myFavoriteMetadataPath
+export S3DATAPATH="$(pwd)/myFavoriteDataPath"
+export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
+yarn start
+```
+
+## Run it with multiple data backends
+
+```shell
+export S3DATA='multiple'
+yarn start
+```
+
+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+With multiple backends, you have the ability to
+choose where each object will be saved by setting
+the following header with a locationConstraint on
+a PUT request:
+
+```shell
+'x-amz-meta-scal-location-constraint':'myLocationConstraint'
+```
+
+If no header is sent with a PUT object request, the
+location constraint of the bucket will determine
+where the data is saved. If the bucket has no location
+constraint, the endpoint of the PUT request will be
+used to determine location.
+
+See the Configuration section in our documentation
+[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
+to learn how to set location constraints.
+
+## Run it with an in-memory backend
+
+```shell
+yarn run mem_backend
+```
+
+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+## Run it with Vault user management
+
+Note: Vault is proprietary and must be accessed separately.
+
+```shell
+export S3VAULT=vault
+yarn start
+```
+
+This starts a Zenko CloudServer using Vault for user management.
+
+[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
+[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
+[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
+[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
@@ -0,0 +1,46 @@
+#!/usr/bin/env node
+'use strict'; // eslint-disable-line strict
+
+const {
+    startWSManagementClient,
+    startPushConnectionHealthCheckServer,
+} = require('../lib/management/push');
+
+const logger = require('../lib/utilities/logger');
+
+const {
+    PUSH_ENDPOINT: pushEndpoint,
+    INSTANCE_ID: instanceId,
+    MANAGEMENT_TOKEN: managementToken,
+} = process.env;
+
+if (!pushEndpoint) {
+    logger.error('missing push endpoint env var');
+    process.exit(1);
+}
+
+if (!instanceId) {
+    logger.error('missing instance id env var');
+    process.exit(1);
+}
+
+if (!managementToken) {
+    logger.error('missing management token env var');
+    process.exit(1);
+}
+
+startPushConnectionHealthCheckServer(err => {
+    if (err) {
+        logger.error('could not start healthcheck server', { error: err });
+        process.exit(1);
+    }
+    const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
+    startWSManagementClient(url, managementToken, err => {
+        if (err) {
+            logger.error('connection failed, exiting', { error: err });
+            process.exit(1);
+        }
+        logger.info('no more connection, exiting');
+        process.exit(0);
+    });
+});
@@ -0,0 +1,46 @@
+#!/usr/bin/env node
+'use strict'; // eslint-disable-line strict
+
+const {
+    startWSManagementClient,
+    startPushConnectionHealthCheckServer,
+} = require('../lib/management/push');
+
+const logger = require('../lib/utilities/logger');
+
+const {
+    PUSH_ENDPOINT: pushEndpoint,
+    INSTANCE_ID: instanceId,
+    MANAGEMENT_TOKEN: managementToken,
+} = process.env;
+
+if (!pushEndpoint) {
+    logger.error('missing push endpoint env var');
+    process.exit(1);
+}
+
+if (!instanceId) {
+    logger.error('missing instance id env var');
+    process.exit(1);
+}
+
+if (!managementToken) {
+    logger.error('missing management token env var');
+    process.exit(1);
+}
+
+startPushConnectionHealthCheckServer(err => {
+    if (err) {
+        logger.error('could not start healthcheck server', { error: err });
+        process.exit(1);
+    }
+    const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
+    startWSManagementClient(url, managementToken, err => {
+        if (err) {
+            logger.error('connection failed, exiting', { error: err });
+            process.exit(1);
+        }
+        logger.info('no more connection, exiting');
+        process.exit(0);
+    });
+});
@@ -4,7 +4,6 @@
     "metricsPort": 8002,
     "metricsListenOn": [],
     "replicationGroupId": "RG001",
-    "workers": 4,
     "restEndpoints": {
         "localhost": "us-east-1",
         "127.0.0.1": "us-east-1",
@@ -102,14 +101,6 @@
         "readPreference": "primary",
         "database": "metadata"
     },
-    "authdata": "authdata.json",
-    "backends": {
-        "auth": "file",
-        "data": "file",
-        "metadata": "mongodb",
-        "kms": "file",
-        "quota": "none"
-    },
     "externalBackends": {
         "aws_s3": {
             "httpAgent": {
@@ -1,71 +0,0 @@
-{
-    "port": 8000,
-    "listenOn": [],
-    "metricsPort": 8002,
-    "metricsListenOn": [],
-    "replicationGroupId": "RG001",
-    "restEndpoints": {
-        "localhost": "STANDARD",
-        "127.0.0.1": "STANDARD",
-        "yourhostname.ru": "STANDARD"
-    },
-    "websiteEndpoints": [
-        "static.yourhostname.ru"
-    ],
-    "replicationEndpoints": [ {
-        "site": "zenko",
-        "servers": ["127.0.0.1:8000"],
-        "default": true
-    } ],
-    "log": {
-        "logLevel": "info",
-        "dumpLevel": "error"
-    },
-    "healthChecks": {
-        "allowFrom": ["127.0.0.1/8", "::1"]
-    },
-    "backends": {
-        "metadata": "mongodb"
-    },
-    "mongodb": {
-        "replicaSetHosts": "127.0.0.1:27017",
-        "writeConcern": "majority",
-        "replicaSet": "rs0",
-        "readPreference": "primary",
-        "database": "s3",
-        "authCredentials": {
-            "username": "s3",
-            "password": ""
-        }
-    },
-    "externalBackends": {
-        "aws_s3": {
-            "httpAgent": {
-                "keepAlive": false,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        },
-        "gcp": {
-            "httpAgent": {
-                "keepAlive": true,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        }
-    },
-    "requests": {
-        "viaProxy": false,
-        "trustedProxyCIDRs": [],
-        "extractClientIPFromHeader": ""
-    },
-    "bucketNotificationDestinations": [
-        {
-            "resource": "target1",
-            "type": "dummy",
-            "host": "localhost:6000"
-        }
-    ]
-}
@@ -116,7 +116,7 @@ const constants = {
     ],

     // user metadata header to set object locationConstraint
-    objectLocationConstraintHeader: 'x-amz-storage-class',
+    objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
     lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
     legacyLocations: ['sproxyd', 'legacy'],
     // declare here all existing service accounts and their properties
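A cross-reference worth making explicit: this header swap matches the README rewrite earlier in the compare. The restored upstream README documents `'x-amz-meta-scal-location-constraint'` as the header that sets a location constraint on a PUT request, whereas the Vitastor fork's base branch had repurposed the standard `x-amz-storage-class` header, consistent with its note that its locations behave like storage classes (STANDARD, COLD, etc.).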
@@ -205,6 +205,9 @@ const constants = {
     ],
     allowedUtapiEventFilterStates: ['allow', 'deny'],
     allowedRestoreObjectRequestTierValues: ['Standard'],
+    validStorageClasses: [
+        'STANDARD',
+    ],
     lifecycleListing: {
         CURRENT_TYPE: 'current',
         NON_CURRENT_TYPE: 'noncurrent',
|
||||||
'objectPutPart',
|
'objectPutPart',
|
||||||
'completeMultipartUpload',
|
'completeMultipartUpload',
|
||||||
],
|
],
|
||||||
// if requester is not bucket owner, bucket policy actions should be denied with
|
|
||||||
// MethodNotAllowed error
|
|
||||||
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
|
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = constants;
|
module.exports = constants;
|
||||||
|
|
|
@@ -2,12 +2,11 @@

 ## Docker Image Generation

-Docker images are hosted on [ghcri.io](https://github.com/orgs/scality/packages).
-CloudServer has a few images there:
+Docker images are hosted on [registry.scality.com](registry.scality.com).
+CloudServer has two namespaces there:

-* Cloudserver container image: ghcr.io/scality/cloudserver
-* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
-* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
+* Production Namespace: registry.scality.com/cloudserver
+* Dev Namespace: registry.scality.com/cloudserver-dev

 With every CI build, the CI will push images, tagging the
 content with the developer branch's short SHA-1 commit hash.
|
||||||
## How to Pull Docker Images
|
## How to Pull Docker Images
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker pull ghcr.io/scality/cloudserver:<commit hash>
|
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
|
||||||
docker pull ghcr.io/scality/cloudserver:<tag>
|
docker pull registry.scality.com/cloudserver/cloudserver:<tag>
|
||||||
```
|
```
|
||||||
|
|
||||||
## Release Process
|
## Release Process
|
||||||
|
|
|
@@ -1,4 +1,4 @@
-FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
+FROM registry.scality.com/federation/nodesvc-base:7.10.6.0

 ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
 ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
|
||||||
git config --global --add safe.directory . && \
|
git config --global --add safe.directory . && \
|
||||||
git lfs install && \
|
git lfs install && \
|
||||||
GIT_LFS_SKIP_SMUDGE=1 && \
|
GIT_LFS_SKIP_SMUDGE=1 && \
|
||||||
yarn global add typescript && \
|
|
||||||
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
||||||
yarn cache clean --all && \
|
yarn cache clean --all
|
||||||
yarn global remove typescript
|
|
||||||
|
|
||||||
# run symlinking separately to avoid yarn installation errors
|
# run symlinking separately to avoid yarn installation errors
|
||||||
# we might have to check if the symlinking is really needed!
|
# we might have to check if the symlinking is really needed!
|
||||||
|
|
index.js (12 changed lines)

@@ -1,10 +1,10 @@
 'use strict'; // eslint-disable-line strict

-require('werelogs').stderrUtils.catchAndTimestampStderr(
-    undefined,
-    // Do not exit as workers have their own listener that will exit
-    // But primary don't have another listener
-    require('cluster').isPrimary ? 1 : null,
-);
+/**
+ * Catch uncaught exceptions and add timestamp to aid debugging
+ */
+process.on('uncaughtException', err => {
+    process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
+});

 require('./lib/server.js')();
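Both variants in the index.js hunk above address the same need, timestamping uncaught exceptions on stderr: the base branch delegates to werelogs' `catchAndTimestampStderr` helper (which, per its comments, also decides whether to exit depending on whether the process is the cluster primary), while the head branch installs a plain `process.on('uncaughtException')` handler. Note that installing such a handler suppresses Node.js's default behavior of exiting after an uncaught exception.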
lib/Config.js (288 changed lines)

@@ -107,47 +107,6 @@ function parseSproxydConfig(configSproxyd) {
     return joi.attempt(configSproxyd, joiSchema, 'bad config');
 }

-function parseRedisConfig(redisConfig) {
-    const joiSchema = joi.object({
-        password: joi.string().allow(''),
-        host: joi.string(),
-        port: joi.number(),
-        retry: joi.object({
-            connectBackoff: joi.object({
-                min: joi.number().required(),
-                max: joi.number().required(),
-                jitter: joi.number().required(),
-                factor: joi.number().required(),
-                deadline: joi.number().required(),
-            }),
-        }),
-        // sentinel config
-        sentinels: joi.alternatives().try(
-            joi.string()
-                .pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
-                .custom(hosts => hosts.split(',').map(item => {
-                    const [host, port] = item.split(':');
-                    return { host, port: Number.parseInt(port, 10) };
-                })),
-            joi.array().items(
-                joi.object({
-                    host: joi.string().required(),
-                    port: joi.number().required(),
-                })
-            ).min(1),
-        ),
-        name: joi.string(),
-        sentinelPassword: joi.string().allow(''),
-    })
-    .and('host', 'port')
-    .and('sentinels', 'name')
-    .xor('host', 'sentinels')
-    .without('sentinels', ['host', 'port'])
-    .without('host', ['sentinels', 'sentinelPassword']);
-
-    return joi.attempt(redisConfig, joiSchema, 'bad config');
-}
-
 function restEndpointsAssert(restEndpoints, locationConstraints) {
     assert(typeof restEndpoints === 'object',
         'bad config: restEndpoints must be an object of endpoints');
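As a reference for the `parseRedisConfig` function removed above: its joi schema accepted sentinels either as a comma-separated `host:port` string or as a non-empty array of `{ host, port }` objects, always paired with a master `name`. A sketch of the two equivalent shapes (host values are illustrative; shown as YAML for brevity, the real config is JSON):

```yaml
# string form: parsed by splitting on ',' then ':'
redis:
  name: mymaster
  sentinels: "10.0.0.1:16379,10.0.0.2:16379"
---
# array form: validated as joi.array().items({ host, port }).min(1)
redis:
  name: mymaster
  sentinels:
    - host: 10.0.0.1
      port: 16379
    - host: 10.0.0.2
      port: 16379
```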
@@ -377,7 +336,7 @@ function dmfLocationConstraintAssert(locationObj) {
 function locationConstraintAssert(locationConstraints) {
     const supportedBackends =
         ['mem', 'file', 'scality',
-        'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
+        'mongodb', 'dmf', 'azure_archive'].concat(Object.keys(validExternalBackends));
     assert(typeof locationConstraints === 'object',
         'bad config: locationConstraints must be an object');
     Object.keys(locationConstraints).forEach(l => {
@@ -502,23 +461,27 @@ function locationConstraintAssert(locationConstraints) {
                 locationConstraints[l].details.connector.hdclient);
         }
     });
+    assert(Object.keys(locationConstraints)
+        .includes('us-east-1'), 'bad locationConfig: must ' +
+        'include us-east-1 as a locationConstraint');
 }

 function parseUtapiReindex(config) {
     const {
         enabled,
         schedule,
-        redis,
+        sentinel,
         bucketd,
         onlyCountLatestWhenObjectLocked,
     } = config;
     assert(typeof enabled === 'boolean',
         'bad config: utapi.reindex.enabled must be a boolean');
-    const parsedRedis = parseRedisConfig(redis);
-    assert(Array.isArray(parsedRedis.sentinels),
-        'bad config: utapi reindex redis config requires a list of sentinels');
+    assert(typeof sentinel === 'object',
+        'bad config: utapi.reindex.sentinel must be an object');
+    assert(typeof sentinel.port === 'number',
+        'bad config: utapi.reindex.sentinel.port must be a number');
+    assert(typeof sentinel.name === 'string',
+        'bad config: utapi.reindex.sentinel.name must be a string');
     assert(typeof bucketd === 'object',
         'bad config: utapi.reindex.bucketd must be an object');
     assert(typeof bucketd.port === 'number',
@@ -630,6 +586,7 @@ class Config extends EventEmitter {
         // Read config automatically
         this._getLocationConfig();
         this._getConfig();
+        this._configureBackends();
     }

     _getLocationConfig() {
@@ -841,11 +798,11 @@ class Config extends EventEmitter {
             this.websiteEndpoints = config.websiteEndpoints;
         }

-        this.workers = false;
-        if (config.workers !== undefined) {
-            assert(Number.isInteger(config.workers) && config.workers > 0,
-                'bad config: workers must be a positive integer');
-            this.workers = config.workers;
+        this.clusters = false;
+        if (config.clusters !== undefined) {
+            assert(Number.isInteger(config.clusters) && config.clusters > 0,
+                'bad config: clusters must be a positive integer');
+            this.clusters = config.clusters;
         }

         if (config.usEastBehavior !== undefined) {
@@ -1083,7 +1040,8 @@ class Config extends EventEmitter {
         assert(typeof config.localCache.port === 'number',
             'config: bad port for localCache. port must be a number');
         if (config.localCache.password !== undefined) {
-            assert(typeof config.localCache.password === 'string',
+            assert(
+                this._verifyRedisPassword(config.localCache.password),
                 'config: vad password for localCache. password must' +
                 ' be a string');
         }
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.redis) {
|
if (config.redis) {
|
||||||
this.redis = parseRedisConfig(config.redis);
|
if (config.redis.sentinels) {
|
||||||
|
this.redis = { sentinels: [], name: null };
|
||||||
|
|
||||||
|
assert(typeof config.redis.name === 'string',
|
||||||
|
'bad config: redis sentinel name must be a string');
|
||||||
|
this.redis.name = config.redis.name;
|
||||||
|
assert(Array.isArray(config.redis.sentinels) ||
|
||||||
|
typeof config.redis.sentinels === 'string',
|
||||||
|
'bad config: redis sentinels must be an array or string');
|
||||||
|
|
||||||
|
if (typeof config.redis.sentinels === 'string') {
|
||||||
|
config.redis.sentinels.split(',').forEach(item => {
|
||||||
|
const [host, port] = item.split(':');
|
||||||
|
this.redis.sentinels.push({ host,
|
||||||
|
port: Number.parseInt(port, 10) });
|
||||||
|
});
|
||||||
|
} else if (Array.isArray(config.redis.sentinels)) {
|
||||||
|
config.redis.sentinels.forEach(item => {
|
||||||
|
const { host, port } = item;
|
||||||
|
assert(typeof host === 'string',
|
||||||
|
'bad config: redis sentinel host must be a string');
|
||||||
|
assert(typeof port === 'number',
|
||||||
|
'bad config: redis sentinel port must be a number');
|
||||||
|
this.redis.sentinels.push({ host, port });
|
||||||
|
});
|
||||||
}
|
}
|
||||||
if (config.scuba) {
|
|
||||||
this.scuba = {};
|
if (config.redis.sentinelPassword !== undefined) {
|
||||||
if (config.scuba.host) {
|
assert(
|
||||||
assert(typeof config.scuba.host === 'string',
|
this._verifyRedisPassword(config.redis.sentinelPassword));
|
||||||
'bad config: scuba host must be a string');
|
this.redis.sentinelPassword = config.redis.sentinelPassword;
|
||||||
this.scuba.host = config.scuba.host;
|
|
||||||
}
|
}
|
||||||
if (config.scuba.port) {
|
} else {
|
||||||
assert(Number.isInteger(config.scuba.port)
|
// check for standalone configuration
|
||||||
&& config.scuba.port > 0,
|
this.redis = {};
|
||||||
'bad config: scuba port must be a positive integer');
|
assert(typeof config.redis.host === 'string',
|
||||||
this.scuba.port = config.scuba.port;
|
'bad config: redis.host must be a string');
|
||||||
|
assert(typeof config.redis.port === 'number',
|
||||||
|
'bad config: redis.port must be a number');
|
||||||
|
this.redis.host = config.redis.host;
|
||||||
|
this.redis.port = config.redis.port;
|
||||||
|
}
|
||||||
|
if (config.redis.password !== undefined) {
|
||||||
|
assert(
|
||||||
|
this._verifyRedisPassword(config.redis.password),
|
||||||
|
'bad config: invalid password for redis. password must ' +
|
||||||
|
'be a string');
|
||||||
|
this.redis.password = config.redis.password;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
|
|
||||||
assert(typeof process.env.SCUBA_HOST === 'string',
|
|
||||||
'bad config: scuba host must be a string');
|
|
||||||
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
|
|
||||||
&& Number(process.env.SCUBA_PORT) > 0,
|
|
||||||
'bad config: scuba port must be a positive integer');
|
|
||||||
this.scuba = {
|
|
||||||
host: process.env.SCUBA_HOST,
|
|
||||||
port: Number(process.env.SCUBA_PORT),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if (this.scuba) {
|
|
||||||
this.quotaEnabled = true;
|
|
||||||
}
|
|
||||||
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
|
|
||||||
config.quota?.maxStatenessMS ||
|
|
||||||
24 * 60 * 60 * 1000;
|
|
||||||
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
|
|
||||||
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
|
|
||||||
config.quota?.enableInflights || false;
|
|
||||||
this.quota = {
|
|
||||||
maxStaleness,
|
|
||||||
enableInflights,
|
|
||||||
};
|
|
||||||
if (config.utapi) {
|
if (config.utapi) {
|
||||||
this.utapi = { component: 's3' };
|
this.utapi = { component: 's3' };
|
||||||
if (config.utapi.host) {
|
if (config.utapi.host) {
|
||||||
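The hunk above replaces Arsenal's `parseRedisConfig` with hand-rolled validation that accepts either a sentinel or a standalone layout. A minimal sketch of the two `config.redis` shapes that pass those asserts; hostnames, ports, and passwords here are illustrative, not taken from any shipped config:

```js
// Sentinel layout: a group name plus sentinels given either as an array of
// { host, port } objects or as a comma-separated 'host:port' string.
const sentinelRedis = {
    name: 'mymaster',                              // assumed group name
    sentinels: 'localhost:16379,localhost:16380',  // or [{ host, port }, ...]
    sentinelPassword: 's3cret',                    // optional, must be a string
};

// Standalone layout: a single host/port pair, password optional.
const standaloneRedis = {
    host: 'localhost',
    port: 6379,
    password: 's3cret',
};
```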
@@ -1177,8 +1145,50 @@ class Config extends EventEmitter {
             assert(config.redis, 'missing required property of utapi ' +
                 'configuration: redis');
             if (config.utapi.redis) {
-                this.utapi.redis = parseRedisConfig(config.utapi.redis);
-                if (this.utapi.redis.retry === undefined) {
+                if (config.utapi.redis.sentinels) {
+                    this.utapi.redis = { sentinels: [], name: null };
+
+                    assert(typeof config.utapi.redis.name === 'string',
+                        'bad config: redis sentinel name must be a string');
+                    this.utapi.redis.name = config.utapi.redis.name;
+
+                    assert(Array.isArray(config.utapi.redis.sentinels),
+                        'bad config: redis sentinels must be an array');
+                    config.utapi.redis.sentinels.forEach(item => {
+                        const { host, port } = item;
+                        assert(typeof host === 'string',
+                            'bad config: redis sentinel host must be a string');
+                        assert(typeof port === 'number',
+                            'bad config: redis sentinel port must be a number');
+                        this.utapi.redis.sentinels.push({ host, port });
+                    });
+                } else {
+                    // check for standalone configuration
+                    this.utapi.redis = {};
+                    assert(typeof config.utapi.redis.host === 'string',
+                        'bad config: redis.host must be a string');
+                    assert(typeof config.utapi.redis.port === 'number',
+                        'bad config: redis.port must be a number');
+                    this.utapi.redis.host = config.utapi.redis.host;
+                    this.utapi.redis.port = config.utapi.redis.port;
+                }
+                if (config.utapi.redis.retry !== undefined) {
+                    if (config.utapi.redis.retry.connectBackoff !== undefined) {
+                        const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
+                        assert.strictEqual(typeof min, 'number',
+                            'utapi.redis.retry.connectBackoff: min must be a number');
+                        assert.strictEqual(typeof max, 'number',
+                            'utapi.redis.retry.connectBackoff: max must be a number');
+                        assert.strictEqual(typeof jitter, 'number',
+                            'utapi.redis.retry.connectBackoff: jitter must be a number');
+                        assert.strictEqual(typeof factor, 'number',
+                            'utapi.redis.retry.connectBackoff: factor must be a number');
+                        assert.strictEqual(typeof deadline, 'number',
+                            'utapi.redis.retry.connectBackoff: deadline must be a number');
+                    }
+
+                    this.utapi.redis.retry = config.utapi.redis.retry;
+                } else {
                     this.utapi.redis.retry = {
                         connectBackoff: {
                             min: 10,
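Per the asserts introduced above, an explicit `utapi.redis.retry.connectBackoff` must carry five numeric fields before it is copied onto `this.utapi.redis.retry`; otherwise a default backoff starting at `min: 10` is used (the remaining default values are cut off by the hunk boundary). A hedged sketch of a valid override, with made-up values:

```js
// All five fields must be numbers when connectBackoff is supplied; the
// exact semantics (milliseconds, ratios) are assumptions, not from the diff.
const utapiRedisRetry = {
    connectBackoff: {
        min: 10,        // shortest delay between attempts
        max: 1000,      // longest delay between attempts
        jitter: 0.1,    // randomization applied to each delay
        factor: 1.5,    // growth factor between attempts
        deadline: 10000 // overall give-up threshold
    },
};
```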
@@ -1189,6 +1199,22 @@ class Config extends EventEmitter {
                         },
                     };
                 }
+                if (config.utapi.redis.password !== undefined) {
+                    assert(
+                        this._verifyRedisPassword(config.utapi.redis.password),
+                        'config: invalid password for utapi redis. password' +
+                        ' must be a string');
+                    this.utapi.redis.password = config.utapi.redis.password;
+                }
+                if (config.utapi.redis.sentinelPassword !== undefined) {
+                    assert(
+                        this._verifyRedisPassword(
+                            config.utapi.redis.sentinelPassword),
+                        'config: invalid password for utapi redis. password' +
+                        ' must be a string');
+                    this.utapi.redis.sentinelPassword =
+                        config.utapi.redis.sentinelPassword;
+                }
             }
             if (config.utapi.metrics) {
                 this.utapi.metrics = config.utapi.metrics;
@@ -1258,7 +1284,8 @@ class Config extends EventEmitter {
         }

         if (config.utapi && config.utapi.reindex) {
-            this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
+            parseUtapiReindex(config.utapi.reindex);
+            this.utapi.reindex = config.utapi.reindex;
         }
     }

@@ -1303,8 +1330,6 @@ class Config extends EventEmitter {
             }
         }

-        this.authdata = config.authdata || 'authdata.json';
-
         this.kms = {};
         if (config.kms) {
             assert(typeof config.kms.userName === 'string');
@@ -1524,6 +1549,25 @@ class Config extends EventEmitter {
             this.outboundProxy.certs = certObj.certs;
         }

+        this.managementAgent = {};
+        this.managementAgent.port = 8010;
+        this.managementAgent.host = 'localhost';
+        if (config.managementAgent !== undefined) {
+            if (config.managementAgent.port !== undefined) {
+                assert(Number.isInteger(config.managementAgent.port)
+                    && config.managementAgent.port > 0,
+                    'bad config: managementAgent port must be a positive ' +
+                    'integer');
+                this.managementAgent.port = config.managementAgent.port;
+            }
+            if (config.managementAgent.host !== undefined) {
+                assert.strictEqual(typeof config.managementAgent.host, 'string',
+                    'bad config: management agent host must ' +
+                    'be a string');
+                this.managementAgent.host = config.managementAgent.host;
+            }
+        }
+
         // Ephemeral token to protect the reporting endpoint:
         // try inherited from parent first, then hardcoded in conf file,
         // then create a fresh one as last resort.
@@ -1613,8 +1657,6 @@ class Config extends EventEmitter {
                 'bad config: maxScannedLifecycleListingEntries must be greater than 2');
             this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
         }
-
-        this._configureBackends(config);
     }

     _setTimeOptions() {
@@ -1653,37 +1695,34 @@ class Config extends EventEmitter {
     }

     _getAuthData() {
-        return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
+        return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
     }

-    _configureBackends(config) {
-        const backends = config.backends || {};
+    _configureBackends() {
         /**
          * Configure the backends for Authentication, Data and Metadata.
          */
-        let auth = backends.auth || 'mem';
-        let data = backends.data || 'multiple';
-        let metadata = backends.metadata || 'file';
-        let kms = backends.kms || 'file';
-        let quota = backends.quota || 'none';
+        let auth = 'mem';
+        let data = 'multiple';
+        let metadata = 'file';
+        let kms = 'file';
         if (process.env.S3BACKEND) {
             const validBackends = ['mem', 'file', 'scality', 'cdmi'];
             assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
                 'bad environment variable: S3BACKEND environment variable ' +
                 'should be one of mem/file/scality/cdmi'
             );
-            auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
+            auth = process.env.S3BACKEND;
             data = process.env.S3BACKEND;
             metadata = process.env.S3BACKEND;
             kms = process.env.S3BACKEND;
         }
         if (process.env.S3VAULT) {
             auth = process.env.S3VAULT;
-            auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
         }

         if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
             // Auth only checks for 'mem' since mem === file
+            auth = 'mem';
             let authData;
             if (process.env.SCALITY_ACCESS_KEY_ID &&
                 process.env.SCALITY_SECRET_ACCESS_KEY) {
@@ -1712,10 +1751,10 @@ class Config extends EventEmitter {
                 'should be one of mem/file/scality/multiple'
             );
             data = process.env.S3DATA;
+        }
         if (data === 'scality' || data === 'multiple') {
             data = 'multiple';
         }
-        }
         assert(this.locationConstraints !== undefined &&
             this.restEndpoints !== undefined,
             'bad config: locationConstraints and restEndpoints must be set'
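Condensing the `_configureBackends` logic on the target side of this hunk: defaults are fixed, `S3BACKEND` overrides all four backends at once, and `S3VAULT` then overrides auth, with file/mem/cdmi collapsing to the in-memory auth backend. A sketch of that selection in isolation (not a drop-in replacement for the method; validation asserts omitted):

```js
// Backend selection as reverted above.
let auth = 'mem';
let data = 'multiple';
let metadata = 'file';
let kms = 'file';
if (process.env.S3BACKEND) {        // one of mem/file/scality/cdmi
    auth = process.env.S3BACKEND;
    data = process.env.S3BACKEND;
    metadata = process.env.S3BACKEND;
    kms = process.env.S3BACKEND;
}
if (process.env.S3VAULT) {
    auth = process.env.S3VAULT;
}
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
    auth = 'mem';                   // auth only checks for 'mem'; mem === file
}
```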
@@ -1727,18 +1766,18 @@ class Config extends EventEmitter {
         if (process.env.S3KMS) {
             kms = process.env.S3KMS;
         }
-        if (process.env.S3QUOTA) {
-            quota = process.env.S3QUOTA;
-        }
         this.backends = {
             auth,
             data,
             metadata,
             kms,
-            quota,
         };
     }

+    _verifyRedisPassword(password) {
+        return typeof password === 'string';
+    }
+
     setAuthDataAccounts(accounts) {
         this.authData.accounts = accounts;
         this.emit('authdata-update');
@@ -1861,19 +1900,10 @@ class Config extends EventEmitter {
             .update(instanceId)
             .digest('hex');
     }

-    isQuotaEnabled() {
-        return !!this.quotaEnabled;
-    }
-
-    isQuotaInflightEnabled() {
-        return this.quota.enableInflights;
-    }
-
 }

 module.exports = {
     parseSproxydConfig,
-    parseRedisConfig,
     locationConstraintAssert,
     ConfigObject: Config,
     config: new Config(),
@@ -7,7 +7,6 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
 const bucketDeleteWebsite = require('./bucketDeleteWebsite');
 const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
 const bucketDeletePolicy = require('./bucketDeletePolicy');
-const bucketDeleteQuota = require('./bucketDeleteQuota');
 const { bucketGet } = require('./bucketGet');
 const bucketGetACL = require('./bucketGetACL');
 const bucketGetCors = require('./bucketGetCors');
@@ -18,7 +17,6 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
 const bucketGetNotification = require('./bucketGetNotification');
 const bucketGetObjectLock = require('./bucketGetObjectLock');
 const bucketGetPolicy = require('./bucketGetPolicy');
-const bucketGetQuota = require('./bucketGetQuota');
 const bucketGetEncryption = require('./bucketGetEncryption');
 const bucketHead = require('./bucketHead');
 const { bucketPut } = require('./bucketPut');
@@ -35,7 +33,6 @@ const bucketPutNotification = require('./bucketPutNotification');
 const bucketPutEncryption = require('./bucketPutEncryption');
 const bucketPutPolicy = require('./bucketPutPolicy');
 const bucketPutObjectLock = require('./bucketPutObjectLock');
-const bucketUpdateQuota = require('./bucketUpdateQuota');
 const bucketGetReplication = require('./bucketGetReplication');
 const bucketDeleteReplication = require('./bucketDeleteReplication');
 const corsPreflight = require('./corsPreflight');
@@ -85,10 +82,6 @@ const api = {
         // Attach the apiMethod method to the request, so it can used by monitoring in the server
         // eslint-disable-next-line no-param-reassign
         request.apiMethod = apiMethod;
-        // Array of end of API callbacks, used to perform some logic
-        // at the end of an API.
-        // eslint-disable-next-line no-param-reassign
-        request.finalizerHooks = [];

         const actionLog = monitoringMap[apiMethod];
         if (!actionLog &&
@@ -197,17 +190,14 @@ const api = {

         return async.waterfall([
             next => auth.server.doAuth(
-                request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
+                request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
                     if (err) {
-                        // VaultClient returns standard errors, but the route requires
-                        // Arsenal errors
-                        const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
                         log.trace('authentication error', { error: err });
-                        return next(arsenalError);
+                        return next(err);
                     }
-                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
+                    return next(null, userInfo, authorizationResults, streamingV4Params);
                 }, 's3', requestContexts),
-            (userInfo, authorizationResults, streamingV4Params, infos, next) => {
+            (userInfo, authorizationResults, streamingV4Params, next) => {
                 const authNames = { accountName: userInfo.getAccountDisplayName() };
                 if (userInfo.isRequesterAnIAMUser()) {
                     authNames.userName = userInfo.getIAMdisplayName();
@@ -217,7 +207,7 @@ const api = {
                 }
                 log.addDefaultFields(authNames);
                 if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
-                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
+                    return next(null, userInfo, authorizationResults, streamingV4Params);
                 }
                 // issue 100 Continue to the client
                 writeContinue(request, response);
@@ -248,12 +238,12 @@ const api = {
                     }
                     // Convert array of post buffers into one string
                     request.post = Buffer.concat(post, postLength).toString();
-                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
+                    return next(null, userInfo, authorizationResults, streamingV4Params);
                 });
                 return undefined;
             },
             // Tag condition keys require information from CloudServer for evaluation
-            (userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
+            (userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
                 authorizationResults,
                 request,
                 requestContexts,
@@ -264,14 +254,13 @@ const api = {
                     log.trace('tag authentication error', { error: err });
                     return next(err);
                 }
-                return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
+                return next(null, userInfo, authResultsWithTags, streamingV4Params);
             },
             ),
-        ], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
+        ], (err, userInfo, authorizationResults, streamingV4Params) => {
             if (err) {
                 return callback(err);
             }
-            request.accountQuotas = infos?.accountQuota;
             if (authorizationResults) {
                 const checkedResults = checkAuthResults(authorizationResults);
                 if (checkedResults instanceof Error) {
@@ -288,23 +277,19 @@ const api = {
                 return acc;
             }, {});
             }
-            const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
-                (hook, done) => hook(err, done),
-                () => callback(err, ...results));
-
             if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
                 request._response = response;
                 return this[apiMethod](userInfo, request, streamingV4Params,
-                    log, methodCallback, authorizationResults);
+                    log, callback, authorizationResults);
             }
             if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
                 return this[apiMethod](userInfo, request, sourceBucket,
-                    sourceObject, sourceVersionId, log, methodCallback);
+                    sourceObject, sourceVersionId, log, callback);
             }
             if (apiMethod === 'objectGet') {
                 return this[apiMethod](userInfo, request, returnTagCount, log, callback);
             }
-            return this[apiMethod](userInfo, request, log, methodCallback);
+            return this[apiMethod](userInfo, request, log, callback);
         });
     },
     bucketDelete,
@@ -331,14 +316,11 @@ const api = {
     bucketPutReplication,
     bucketGetReplication,
     bucketDeleteReplication,
-    bucketDeleteQuota,
     bucketPutLifecycle,
-    bucketUpdateQuota,
     bucketGetLifecycle,
     bucketDeleteLifecycle,
     bucketPutPolicy,
     bucketGetPolicy,
-    bucketGetQuota,
     bucketDeletePolicy,
     bucketPutObjectLock,
     bucketPutNotification,
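The removals above take out the per-request finalizer hooks the development branch used for post-API bookkeeping (the deleted quota utilities later in this comparison push cleanup work into `request.finalizerHooks`). A sketch of that pattern in isolation, reconstructed from the removed lines:

```js
const async = require('async');

// Hooks registered while the request is handled are drained (at most five
// at a time) after the API method finishes, then the real callback runs
// with the original error and results.
function makeMethodCallback(request, callback) {
    return (err, ...results) => async.forEachLimit(
        request.finalizerHooks, 5,
        (hook, done) => hook(err, done),
        () => callback(err, ...results),
    );
}

// Usage sketch:
// request.finalizerHooks = [];
// request.finalizerHooks.push((errFromAPI, done) => { /* cleanup */ done(); });
// this[apiMethod](userInfo, request, log, makeMethodCallback(request, callback));
```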
@@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
             apiMethod, 's3');
     }

-    if (apiMethod === 'bucketPut') {
+    if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
         return null;
     }

@@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,

     const requestContexts = [];

-    if (apiMethod === 'multiObjectDelete') {
-        // MultiObjectDelete does not require any authorization when evaluating
-        // the API. Instead, we authorize each object passed.
-        // But in order to get any relevant information from the authorization service
-        // for example, the account quota, we must send a request context object
-        // with no `specificResource`. We expect the result to be an implicit deny.
-        // In the API, we then ignore these authorization results, and we can use
-        // any information returned, e.g., the quota.
-        const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
-        requestContexts.push(requestContextMultiObjectDelete);
-    } else if (apiMethodAfterVersionCheck === 'objectCopy'
+    if (apiMethodAfterVersionCheck === 'objectCopy'
         || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
         const objectGetAction = sourceVersionId ? 'objectGetVersion' :
             'objectGet';
@@ -7,8 +7,6 @@ const errors = require('arsenal').errors;
 const { config } = require('../../../Config');
 const { locationConstraints } = config;

-const { scaledMsPerDay } = config.getTimeOptions();
-
 /**
  * Get response header "x-amz-restore"
  * Be called by objectHead.js
@@ -147,7 +145,7 @@ function _updateObjectExpirationDate(objectMD, log) {
     });
     if (isObjectAlreadyRestored) {
         const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
-        expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
+        expiryDate.setDate(expiryDate.getDate() + objectMD.archive.restoreRequestedDays);

         /* eslint-disable no-param-reassign */
         objectMD.archive.restoreWillExpireAt = expiryDate;
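The expiry change above swaps millisecond arithmetic scaled by `scaledMsPerDay` for plain calendar-day arithmetic. With an unscaled day the two agree; a quick worked comparison (dates made up):

```js
// Both forms applied to a 2-day restore requested at 2024-01-01T00:00:00Z.
const msPerDay = 24 * 60 * 60 * 1000; // scaledMsPerDay when time is unscaled
const requestedAt = '2024-01-01T00:00:00Z';
const requestedDays = 2;

const byMs = new Date(requestedAt);
byMs.setTime(byMs.getTime() + (requestedDays * msPerDay));

const byDate = new Date(requestedAt);
byDate.setDate(byDate.getDate() + requestedDays);

// In a UTC runtime both print 2024-01-03T00:00:00.000Z; setDate works on the
// local calendar, and the setTime form diverges when the config's time
// options scale scaledMsPerDay away from a real day.
console.log(byMs.toISOString(), byDate.toISOString());
```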
@@ -5,6 +5,7 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
 const constants = require('../../../../constants');
 const { data } = require('../../../data/wrapper');
 const services = require('../../../services');
+const logger = require('../../../utilities/logger');
 const { dataStore } = require('./storeObject');
 const locationConstraintCheck = require('./locationConstraintCheck');
 const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
@@ -20,7 +21,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
     'a versioned object to a location-constraint of type Azure or GCP.';

 function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
-    metadataStoreParams, dataToDelete, log, requestMethod, callback) {
+    metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
     services.metadataStoreObject(bucketName, dataGetInfo,
         cipherBundle, metadataStoreParams, (err, result) => {
             if (err) {
@@ -30,7 +31,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
                 const newDataStoreName = Array.isArray(dataGetInfo) ?
                     dataGetInfo[0].dataStoreName : null;
                 return data.batchDelete(dataToDelete, requestMethod,
-                    newDataStoreName, log, err => callback(err, result));
+                    newDataStoreName, deleteLog, err => callback(err, result));
             }
             return callback(null, result);
         });
@@ -197,9 +198,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
     const dontSkipBackend = externalBackends;
     /* eslint-enable camelcase */

+    const requestLogger =
+        logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
     const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
     const mdOnlySize = request.headers['x-amz-meta-size'];

     return async.waterfall([
         function storeData(next) {
             if (size === 0) {
@@ -294,7 +296,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
             }
             return _storeInMDandDeleteData(bucketName, infoArr,
                 cipherBundle, metadataStoreParams,
-                options.dataToDelete, log, requestMethod, next);
+                options.dataToDelete, requestLogger, requestMethod, next);
         },
     ], callback);
 }
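Both call sites above stop handing the live request logger to `data.batchDelete` and instead pass a dedicated logger rebuilt from the request's serialized UIDs, presumably so the asynchronous batch delete keeps a usable log context of its own; the diff itself does not state the motivation. The shape of that helper call, in isolation:

```js
// Assumes the same utilities/logger module required in the hunk above.
const logger = require('../../../utilities/logger');

function makeDeleteLog(log) {
    // New request logger carrying over the UID chain of the original request.
    return logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
}

// Usage sketch:
// data.batchDelete(dataToDelete, requestMethod, newDataStoreName,
//     makeDeleteLog(log), err => { /* ... */ });
```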
@@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
 const { decodeVersionId } = require('./versioning');
 const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
 const { parseRestoreRequestXml } = s3middleware.objectRestore;
-const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');

 /**
  * Check if tier is supported
@@ -59,14 +59,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
         objectKey,
         versionId: decodedVidResult,
         requestType: request.apiMethods || 'restoreObject',
-        /**
-         * Restoring an object might not cause any impact on
-         * the storage, if the object is already restored: in
-         * this case, the duration is extended. We disable the
-         * quota evaluation and trigger it manually.
-         */
-        checkQuota: false,
-        request,
     };

     return async.waterfall([
@@ -124,16 +116,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
                 return next(err, bucketMD, objectMD);
             });
         },
-        function evaluateQuotas(bucketMD, objectMD, next) {
-            if (isObjectRestored) {
-                return next(null, bucketMD, objectMD);
-            }
-            const actions = Array.isArray(mdValueParams.requestType) ?
-                mdValueParams.requestType : [mdValueParams.requestType];
-            const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
-            return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
-                false, log, err => next(err, bucketMD, objectMD));
-        },
         function updateObjectMD(bucketMD, objectMD, next) {
             const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
             metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
@@ -1,314 +0,0 @@
-const async = require('async');
-const { errors } = require('arsenal');
-const monitoring = require('../../../utilities/monitoringHandler');
-const {
-    actionNeedQuotaCheckCopy,
-    actionNeedQuotaCheck,
-    actionWithDataDeletion,
-} = require('arsenal').policies;
-const { config } = require('../../../Config');
-const QuotaService = require('../../../quotas/quotas');
-
-/**
- * Process the bytes to write based on the request and object metadata
- * @param {string} apiMethod - api method
- * @param {BucketInfo} bucket - bucket info
- * @param {string} versionId - version id of the object
- * @param {number} contentLength - content length of the object
- * @param {object} objMD - object metadata
- * @param {object} destObjMD - destination object metadata
- * @return {number} processed content length
- */
-function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
-    let bytes = contentLength;
-    if (apiMethod === 'objectRestore') {
-        // object is being restored
-        bytes = Number.parseInt(objMD['content-length'], 10);
-    } else if (!bytes && objMD?.['content-length']) {
-        if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
-            if (!destObjMD || bucket.isVersioningEnabled()) {
-                // object is being copied
-                bytes = Number.parseInt(objMD['content-length'], 10);
-            } else if (!bucket.isVersioningEnabled()) {
-                // object is being copied and replaces the target
-                bytes = Number.parseInt(objMD['content-length'], 10) -
-                    Number.parseInt(destObjMD['content-length'], 10);
-            }
-        } else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
-            // object is being deleted
-            bytes = -Number.parseInt(objMD['content-length'], 10);
-        }
-    } else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
-        // object is being replaced: store the diff, if the bucket is not versioned
-        bytes = bytes - Number.parseInt(objMD['content-length'], 10);
-    }
-    return bytes || 0;
-}
-
-/**
- * Checks if a metric is stale based on the provided parameters.
- *
- * @param {Object} metric - The metric object to check.
- * @param {string} resourceType - The type of the resource.
- * @param {string} resourceName - The name of the resource.
- * @param {string} action - The action being performed.
- * @param {number} inflight - The number of inflight requests.
- * @param {Object} log - The logger object.
- * @returns {boolean} Returns true if the metric is stale, false otherwise.
- */
-function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
-    if (metric.date && Date.now() - new Date(metric.date).getTime() >
-        QuotaService.maxStaleness) {
-        log.warn('Stale metrics from the quota service, allowing the request', {
-            resourceType,
-            resourceName,
-            action,
-            inflight,
-        });
-        monitoring.requestWithQuotaMetricsUnavailable.inc();
-        return true;
-    }
-    return false;
-}
-
-/**
- * Evaluates quotas for a bucket and an account and update inflight count.
- *
- * @param {number} bucketQuota - The quota limit for the bucket.
- * @param {number} accountQuota - The quota limit for the account.
- * @param {object} bucket - The bucket object.
- * @param {object} account - The account object.
- * @param {number} inflight - The number of inflight requests.
- * @param {number} inflightForCheck - The number of inflight requests for checking quotas.
- * @param {string} action - The action being performed.
- * @param {object} log - The logger object.
- * @param {function} callback - The callback function to be called when evaluation is complete.
- * @returns {object} - The result of the evaluation.
- */
-function _evaluateQuotas(
-    bucketQuota,
-    accountQuota,
-    bucket,
-    account,
-    inflight,
-    inflightForCheck,
-    action,
-    log,
-    callback,
-) {
-    let bucketQuotaExceeded = false;
-    let accountQuotaExceeded = false;
-    const creationDate = new Date(bucket.getCreationDate()).getTime();
-    return async.parallel({
-        bucketQuota: parallelDone => {
-            if (bucketQuota > 0) {
-                return QuotaService.getUtilizationMetrics('bucket',
-                    `${bucket.getName()}_${creationDate}`, null, {
-                        action,
-                        inflight,
-                    }, (err, bucketMetrics) => {
-                        if (err || inflight < 0) {
-                            return parallelDone(err);
-                        }
-                        if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
-                            bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
-                            log.debug('Bucket quota exceeded', {
-                                bucket: bucket.getName(),
-                                action,
-                                inflight,
-                                quota: bucketQuota,
-                                bytesTotal: bucketMetrics.bytesTotal,
-                            });
-                            bucketQuotaExceeded = true;
-                        }
-                        return parallelDone();
-                    });
-            }
-            return parallelDone();
-        },
-        accountQuota: parallelDone => {
-            if (accountQuota > 0 && account?.account) {
-                return QuotaService.getUtilizationMetrics('account',
-                    account.account, null, {
-                        action,
-                        inflight,
-                    }, (err, accountMetrics) => {
-                        if (err || inflight < 0) {
-                            return parallelDone(err);
-                        }
-                        if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
-                            accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
-                            log.debug('Account quota exceeded', {
-                                accountId: account.account,
-                                action,
-                                inflight,
-                                quota: accountQuota,
-                                bytesTotal: accountMetrics.bytesTotal,
-                            });
-                            accountQuotaExceeded = true;
-                        }
-                        return parallelDone();
-                    });
-            }
-            return parallelDone();
-        },
-    }, err => {
-        if (err) {
-            log.warn('Error evaluating quotas', {
-                error: err.name,
-                description: err.message,
-                isInflightDeletion: inflight < 0,
-            });
-        }
-        return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
-    });
-}
-
-/**
- * Monitors the duration of quota evaluation for a specific API method.
- *
- * @param {string} apiMethod - The name of the API method being monitored.
- * @param {string} type - The type of quota being evaluated.
- * @param {string} code - The code associated with the quota being evaluated.
- * @param {number} duration - The duration of the quota evaluation in nanoseconds.
- * @returns {undefined} - Returns nothing.
- */
-function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
-    monitoring.quotaEvaluationDuration.labels({
-        action: apiMethod,
-        type,
-        code,
-    }).observe(duration / 1e9);
-}
-
-/**
- *
- * @param {Request} request - request object
- * @param {BucketInfo} bucket - bucket object
- * @param {Account} account - account object
- * @param {array} apiNames - action names: operations to authorize
- * @param {string} apiMethod - the main API call
- * @param {number} inflight - inflight bytes
- * @param {boolean} isStorageReserved - Flag to check if the current quota, minus
- * the incoming bytes, are under the limit.
- * @param {Logger} log - logger
- * @param {function} callback - callback function
- * @returns {boolean} - true if the quota is valid, false otherwise
- */
-function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
-    if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
-        return callback(null);
-    }
-    let type;
-    let bucketQuotaExceeded = false;
-    let accountQuotaExceeded = false;
-    let quotaEvaluationDuration;
-    const requestStartTime = process.hrtime.bigint();
-    const bucketQuota = bucket.getQuota();
-    const accountQuota = account?.quota || 0;
-    const shouldSendInflights = config.isQuotaInflightEnabled();
-
-    if (bucketQuota && accountQuota) {
-        type = 'bucket+account';
-    } else if (bucketQuota) {
-        type = 'bucket';
-    } else {
-        type = 'account';
-    }
-
-    if (actionWithDataDeletion[apiMethod]) {
-        type = 'delete';
-    }
-
-    if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
-        if (bucketQuota > 0 || accountQuota > 0) {
-            log.warn('quota is set for a bucket, but the quota service is disabled', {
-                bucketName: bucket.getName(),
-            });
-            monitoring.requestWithQuotaMetricsUnavailable.inc();
-        }
-        return callback(null);
-    }
-
-    if (isStorageReserved) {
-        // eslint-disable-next-line no-param-reassign
-        inflight = 0;
-    }
-
-    return async.forEach(apiNames, (apiName, done) => {
-        // Object copy operations first check the target object,
-        // meaning the source object, containing the current bytes,
-        // is checked second. This logic handles these APIs calls by
-        // ensuring the bytes are positives (i.e., not an object
-        // replacement).
-        if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
-            // eslint-disable-next-line no-param-reassign
-            inflight = Math.abs(inflight);
-        } else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
-            return done();
-        }
-        // When inflights are disabled, the sum of the current utilization metrics
-        // and the current bytes are compared with the quota. The current bytes
-        // are not sent to the utilization service. When inflights are enabled,
-        // the sum of the current utilization metrics only are compared with the
-        // quota. They include the current inflight bytes sent in the request.
-        let _inflights = shouldSendInflights ? inflight : undefined;
-        const inflightForCheck = shouldSendInflights ? 0 : inflight;
-        return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
-            inflightForCheck, apiName, log,
-            (err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
-                if (err) {
-                    return done(err);
-                }
-
-                bucketQuotaExceeded = _bucketQuotaExceeded;
-                accountQuotaExceeded = _accountQuotaExceeded;
-
-                // Inflights are inverted: in case of cleanup, we just re-issue
-                // the same API call.
-                if (_inflights) {
-                    _inflights = -_inflights;
-                }
-
-                request.finalizerHooks.push((errorFromAPI, _done) => {
-                    const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
-                    const quotaCleanUpStartTime = process.hrtime.bigint();
-                    // Quotas are cleaned only in case of error in the API
-                    async.waterfall([
-                        cb => {
-                            if (errorFromAPI) {
-                                return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
-                                    null, apiName, log, cb);
-                            }
-                            return cb();
-                        },
-                    ], () => {
-                        monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
-                            Number(process.hrtime.bigint() - quotaCleanUpStartTime));
-                        return _done();
-                    });
-                });
-
-                return done();
-            });
-    }, err => {
-        quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
-        if (err) {
-            log.warn('Error getting metrics from the quota service, allowing the request', {
-                error: err.name,
-                description: err.message,
-            });
-        }
-        if (!actionWithDataDeletion[apiMethod] &&
-            (bucketQuotaExceeded || accountQuotaExceeded)) {
-            return callback(errors.QuotaExceeded);
-        }
-        return callback();
-    });
-}
-
-module.exports = {
-    processBytesToWrite,
-    isMetricStale,
-    validateQuotas,
-};
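Worked examples for the deleted `processBytesToWrite` helper, run against the function above with a stub bucket; sizes are made up:

```js
// Minimal stub: a non-versioned bucket.
const bucket = { isVersioningEnabled: () => false };

// Restore: counts the full size of the restored object.
processBytesToWrite('objectRestore', bucket, undefined, 0,
    { 'content-length': '100' });                              // -> 100
// Delete of an existing object: counts negatively.
processBytesToWrite('objectDelete', bucket, undefined, 0,
    { 'content-length': '100' });                              // -> -100
// Overwriting PUT (150 over an existing 100): only the difference counts.
processBytesToWrite('objectPut', bucket, undefined, 150,
    { 'content-length': '100' });                              // -> 50
// Copy over an existing target: source size minus destination size.
processBytesToWrite('objectCopy', bucket, undefined, 0,
    { 'content-length': '100' }, { 'content-length': '40' });  // -> 60
```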
@@ -1,58 +0,0 @@
-const { waterfall } = require('async');
-const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
-const metadata = require('../metadata/wrapper');
-const { pushMetric } = require('../utapi/utilities');
-const monitoring = require('../utilities/monitoringHandler');
-
-const requestType = 'bucketDeleteQuota';
-
-/**
- * Bucket Update Quota - Update bucket quota
- * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
- * @param {object} request - http request object
- * @param {object} log - Werelogs logger
- * @param {function} callback - callback to server
- * @return {undefined}
- */
-function bucketDeleteQuota(authInfo, request, log, callback) {
-    log.debug('processing request', { method: 'bucketDeleteQuota' });
-
-    const { bucketName } = request;
-    const metadataValParams = {
-        authInfo,
-        bucketName,
-        requestType: request.apiMethods || requestType,
-        request,
-    };
-    return waterfall([
-        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
-            (err, bucket) => next(err, bucket)),
-        (bucket, next) => {
-            bucket.setQuota(0);
-            metadata.updateBucket(bucket.getName(), bucket, log, err =>
-                next(err, bucket));
-        },
-    ], (err, bucket) => {
-        const corsHeaders = collectCorsHeaders(request.headers.origin,
-            request.method, bucket);
-        if (err) {
-            log.debug('error processing request', {
-                error: err,
-                method: 'bucketDeleteQuota'
-            });
-            monitoring.promMetrics('DELETE', bucketName, err.code,
-                'bucketDeleteQuota');
-            return callback(err, err.code, corsHeaders);
-        }
-        monitoring.promMetrics(
-            'DELETE', bucketName, '204', 'bucketDeleteQuota');
-        pushMetric('bucketDeleteQuota', log, {
-            authInfo,
-            bucket: bucketName,
-        });
-        return callback(null, 204, corsHeaders);
-    });
-}
-
-module.exports = bucketDeleteQuota;
@@ -1,58 +0,0 @@
-const { errors } = require('arsenal');
-const { pushMetric } = require('../utapi/utilities');
-const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
-const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-
-/**
- * bucketGetQuota - Get the bucket quota
- * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
- * @param {object} request - http request object
- * @param {object} log - Werelogs logger
- * @param {function} callback - callback to server
- * @return {undefined}
- */
-function bucketGetQuota(authInfo, request, log, callback) {
-    log.debug('processing request', { method: 'bucketGetQuota' });
-    const { bucketName, headers, method } = request;
-    const metadataValParams = {
-        authInfo,
-        bucketName,
-        requestType: request.apiMethods || 'bucketGetQuota',
-        request,
-    };
-    const xml = [];
-
-    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
-        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
-        if (err) {
-            log.debug('error processing request', {
-                error: err,
-                method: 'bucketGetQuota',
-            });
-            return callback(err, null, corsHeaders);
-        }
-        xml.push(
-            '<?xml version="1.0" encoding="UTF-8"?>',
-            '<GetBucketQuota>',
-            '<Name>', bucket.getName(), '</Name>',
-        );
-        const bucketQuota = bucket.getQuota();
-        if (!bucketQuota) {
-            log.debug('bucket has no quota', {
-                method: 'bucketGetQuota',
-            });
-            return callback(errors.NoSuchQuota, null,
-                corsHeaders);
-        }
-        xml.push('<Quota>', bucketQuota, '</Quota>',
-            '</GetBucketQuota>');
-
-        pushMetric('getBucketQuota', log, {
-            authInfo,
-            bucket: bucketName,
-        });
-        return callback(null, xml.join(''), corsHeaders);
-    });
-}
-
-module.exports = bucketGetQuota;
@@ -45,8 +45,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
     } else if (parsedHost && restEndpoints[parsedHost]) {
         locationConstraintChecked = restEndpoints[parsedHost];
     } else {
-        locationConstraintChecked = Object.keys(locationConstrains)[0];
-        log.trace('no location constraint provided on bucket put; setting '+locationConstraintChecked);
+        log.trace('no location constraint provided on bucket put;' +
+            'setting us-east-1');
+        locationConstraintChecked = 'us-east-1';
     }

     if (!locationConstraints[locationConstraintChecked]) {
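The fallback change above also touches a naming detail: the development side reads `Object.keys(locationConstrains)[0]`, seemingly a typo for `locationConstraints`, while the target side pins the fallback to `us-east-1`. Side by side, with a hypothetical two-entry location map:

```js
// Hypothetical location map; entries are illustrative.
const locationConstraints = { 'scality-local': {}, 'us-east-1': {} };

// development branch: the first configured location wins (insertion order).
const devFallback = Object.keys(locationConstraints)[0]; // 'scality-local'

// this branch: fixed default, regardless of configuration.
const revertedFallback = 'us-east-1';
```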
@@ -1,85 +0,0 @@
-const { waterfall } = require('async');
-const { errors } = require('arsenal');
-const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
-const metadata = require('../metadata/wrapper');
-const { pushMetric } = require('../utapi/utilities');
-const monitoring = require('../utilities/monitoringHandler');
-const { parseString } = require('xml2js');
-
-function validateBucketQuotaProperty(requestBody, next) {
-    const quota = requestBody.quota;
-    const quotaValue = parseInt(quota, 10);
-    if (Number.isNaN(quotaValue)) {
-        return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
-    }
-    if (quotaValue <= 0) {
-        return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
-    }
-    return next(null, quotaValue);
-}
-
-function parseRequestBody(requestBody, next) {
-    try {
-        const jsonData = JSON.parse(requestBody);
-        if (typeof jsonData !== 'object') {
-            throw new Error('Invalid JSON');
-        }
-        return next(null, jsonData);
-    } catch (jsonError) {
-        return parseString(requestBody, (xmlError, xmlData) => {
-            if (xmlError) {
-                return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
-            }
-            return next(null, xmlData);
-        });
-    }
-}
-
-function bucketUpdateQuota(authInfo, request, log, callback) {
-    log.debug('processing request', { method: 'bucketUpdateQuota' });
-
-    const { bucketName } = request;
-    const metadataValParams = {
-        authInfo,
-        bucketName,
-        requestType: request.apiMethods || 'bucketUpdateQuota',
-        request,
-    };
-    let bucket = null;
-    return waterfall([
-        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
-            (err, b) => {
-                bucket = b;
-                return next(err, bucket);
-            }),
-        (bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
-        (bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
-            next(err, bucket, quotaValue)),
-        (bucket, quotaValue, next) => {
-            bucket.setQuota(quotaValue);
-            return metadata.updateBucket(bucket.getName(), bucket, log, next);
-        },
-    ], (err, bucket) => {
-        const corsHeaders = collectCorsHeaders(request.headers.origin,
-            request.method, bucket);
-        if (err) {
-            log.debug('error processing request', {
-                error: err,
-                method: 'bucketUpdateQuota'
-            });
-            monitoring.promMetrics('PUT', bucketName, err.code,
-                'updateBucketQuota');
-            return callback(err, err.code, corsHeaders);
-        }
-        monitoring.promMetrics(
-            'PUT', bucketName, '200', 'updateBucketQuota');
-        pushMetric('updateBucketQuota', log, {
-            authInfo,
-            bucket: bucketName,
-        });
-        return callback(null, corsHeaders);
-    });
-}
-
-module.exports = bucketUpdateQuota;
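The deleted `parseRequestBody` above tries JSON first and falls back to XML via xml2js, and `validateBucketQuotaProperty` then reads a lowercase `quota` field. Two bodies that would have passed, values arbitrary:

```js
// JSON body: parsed by JSON.parse, quota read as requestBody.quota.
const jsonBody = JSON.stringify({ quota: 10000 });

// XML body: xml2js.parseString turns this into { quota: '10000' }, which
// parseInt then accepts. The element name is assumed lowercase here to
// match the property the validator reads.
const xmlBody = '<quota>10000</quota>';
```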
@@ -21,6 +21,8 @@ const { validateAndFilterMpuParts, generateMpuPartStorageInfo } =
 const locationKeysHaveChanged
     = require('./apiUtils/object/locationKeysHaveChanged');
 const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
+const logger = require('../utilities/logger');
 const { validatePutVersionId } = require('./apiUtils/object/coldStorage');

 const versionIdUtils = versioning.VersionID;
@@ -474,9 +476,12 @@ function completeMultipartUpload(authInfo, request, log, callback) {
             const newDataStoreName =
                 Array.isArray(dataLocations) && dataLocations[0] ?
                 dataLocations[0].dataStoreName : null;
+            const delLog =
+                logger.newRequestLoggerFromSerializedUids(log
+                    .getSerializedUids());
             return data.batchDelete(dataToDelete,
                 request.method,
-                newDataStoreName, log, err => {
+                newDataStoreName, delLog, err => {
                     if (err) {
                         return next(err);
                     }
@@ -499,8 +504,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
     function batchDeleteExtraParts(extraPartLocations, destinationBucket,
         aggregateETag, generatedVersionId, next) {
         if (extraPartLocations && extraPartLocations.length > 0) {
+            const delLog = logger.newRequestLoggerFromSerializedUids(
+                log.getSerializedUids());
             return data.batchDelete(extraPartLocations, request.method,
-                null, log, err => {
+                null, delLog, err => {
                     if (err) {
                         return next(err);
                     }
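The recurring change in these hunks: background batch deletions previously received the live request logger; they now get one derived from its serialized UIDs, presumably so the asynchronous deletion keeps the request's UID trail in its logs. A minimal sketch of the pattern, using only names visible above:

const delLog = logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
data.batchDelete(dataToDelete, request.method, newDataStoreName, delLog, err => {
    // handle err as in the hunks above
});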
@@ -6,7 +6,6 @@ const convertToXml = s3middleware.convertToXml;
 const { pushMetric } = require('../utapi/utilities');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const { hasNonPrintables } = require('../utilities/stringChecks');
-const { config } = require('../Config');
 const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
 const constants = require('../../constants');
 const services = require('../services');
@@ -66,7 +65,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
     const websiteRedirectHeader =
         request.headers['x-amz-website-redirect-location'];
     if (request.headers['x-amz-storage-class'] &&
-        !config.locationConstraints[request.headers['x-amz-storage-class']]) {
+        !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
         log.trace('invalid storage-class header');
         monitoring.promMetrics('PUT', bucketName,
             errors.InvalidStorageClass.code, 'initiateMultipartUpload');
@@ -31,7 +31,6 @@ const { overheadField } = require('../../constants');
 const versionIdUtils = versioning.VersionID;
 const { data } = require('../data/wrapper');
 const logger = require('../utilities/logger');
-const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');

 /*
 Format of xml request:
@@ -333,9 +332,6 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
             return callback(null, objMD, versionId);
         },
-        (objMD, versionId, callback) => validateQuotas(
-            request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-            -objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
         (objMD, versionId, callback) => {
             const options = preprocessingVersioningDelete(
                 bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
@@ -508,9 +504,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
             if (bucketShield(bucketMD, 'objectDelete')) {
                 return next(errors.NoSuchBucket);
             }
-            // The implicit deny flag is ignored in the DeleteObjects API, as authorization only
-            // affects the objects.
-            if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
+            if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request,
+                request.actionImplicitDenies)) {
                 log.trace("access denied due to bucket acl's");
                 // if access denied at the bucket level, no access for
                 // any of the objects so all results will be error results
@@ -12,6 +12,7 @@ const { checkQueryVersionId, versioningPreprocessing }
     = require('./apiUtils/object/versioning');
 const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
 const { data } = require('../data/wrapper');
+const logger = require('../utilities/logger');
 const services = require('../services');
 const { pushMetric } = require('../utapi/utilities');
 const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
@@ -220,14 +221,6 @@ function objectCopy(authInfo, request, sourceBucket,
         versionId: sourceVersionId,
         getDeleteMarker: true,
         requestType: 'objectGet',
-        /**
-         * Authorization will first check the target object, with an objectPut
-         * action. But in this context, the source object metadata is still
-         * unknown. In the context of quotas, to know the number of bytes that
-         * are being written, we explicitly enable the quota evaluation logic
-         * during the objectGet action instead.
-         */
-        checkQuota: true,
         request,
     };
     const valPutParams = {
@@ -235,7 +228,6 @@ function objectCopy(authInfo, request, sourceBucket,
         bucketName: destBucketName,
         objectKey: destObjectKey,
         requestType: 'objectPut',
-        checkQuota: false,
         request,
     };
     const dataStoreContext = {
@@ -249,7 +241,7 @@ function objectCopy(authInfo, request, sourceBucket,
     const responseHeaders = {};

     if (request.headers['x-amz-storage-class'] &&
-        !config.locationConstraints[request.headers['x-amz-storage-class']]) {
+        !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
         log.trace('invalid storage-class header');
         monitoring.promMetrics('PUT', destBucketName,
             errors.InvalidStorageClass.code, 'copyObject');
@@ -287,10 +279,7 @@ function objectCopy(authInfo, request, sourceBucket,
             });
         },
         function checkSourceAuthorization(destBucketMD, destObjMD, next) {
-            return standardMetadataValidateBucketAndObj({
-                ...valGetParams,
-                destObjMD,
-            }, request.actionImplicitDenies, log,
+            return standardMetadataValidateBucketAndObj(valGetParams, request.actionImplicitDenies, log,
                 (err, sourceBucketMD, sourceObjMD) => {
                     if (err) {
                         log.debug('error validating get part of request',
@@ -544,8 +533,10 @@ function objectCopy(authInfo, request, sourceBucket,
             // the same as the destination
             if (!sourceIsDestination && dataToDelete) {
                 const newDataStoreName = storeMetadataParams.dataStoreName;
+                const delLog = logger.newRequestLoggerFromSerializedUids(
+                    log.getSerializedUids());
                 return data.batchDelete(dataToDelete, request.method,
-                    newDataStoreName, log, err => {
+                    newDataStoreName, delLog, err => {
                         if (err) {
                             // if error, log the error and move on as it is not
                             // relevant to the client as the client's
@@ -3,7 +3,6 @@ const { errors, versioning } = require('arsenal');

 const constants = require('../../constants');
 const aclUtils = require('../utilities/aclUtils');
-const { config } = require('../Config');
 const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
 const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -72,7 +71,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
         query,
     } = request;
     if (headers['x-amz-storage-class'] &&
-        !config.locationConstraints[headers['x-amz-storage-class']]) {
+        !constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
         log.trace('invalid storage-class header');
         monitoring.promMetrics('PUT', request.bucketName,
             errors.InvalidStorageClass.code, 'putObject');
@@ -99,7 +98,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
             'The encryption method specified is not supported');
     const requestType = request.apiMethods || 'objectPut';
     const valParams = { authInfo, bucketName, objectKey, versionId,
-        requestType, request, withVersionId: isPutVersion };
+        requestType, request };
     const canonicalID = authInfo.getCanonicalID();

     if (hasNonPrintables(objectKey)) {
@@ -9,12 +9,12 @@ const locationConstraintCheck =
     require('./apiUtils/object/locationConstraintCheck');
 const metadata = require('../metadata/wrapper');
 const { pushMetric } = require('../utapi/utilities');
+const logger = require('../utilities/logger');
 const services = require('../services');
 const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
 const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const monitoring = require('../utilities/monitoringHandler');
 const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
-const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');

 const versionIdUtils = versioning.VersionID;

@@ -46,14 +46,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
         versionId: reqVersionId,
         getDeleteMarker: true,
         requestType: 'objectGet',
-        /**
-         * Authorization will first check the target object, with an objectPut
-         * action. But in this context, the source object metadata is still
-         * unknown. In the context of quotas, to know the number of bytes that
-         * are being written, we explicitly enable the quota evaluation logic
-         * during the objectGet action instead.
-         */
-        checkQuota: true,
         request,
     };

@@ -77,7 +69,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
         bucketName: destBucketName,
         objectKey: destObjectKey,
         requestType: 'objectPutPart',
-        checkQuota: false,
         request,
     };

@@ -98,7 +89,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
         objectKey: destObjectKey,
         partNumber: paddedPartNumber,
         uploadId,
-        enableQuota: true,
     };

     return async.waterfall([
@@ -192,16 +182,9 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                 }
                 return next(null, copyLocator.dataLocator, destBucketMD,
                     copyLocator.copyObjectSize, sourceVerId,
-                    sourceLocationConstraintName, sourceObjMD);
+                    sourceLocationConstraintName);
             });
         },
-        function _validateQuotas(dataLocator, destBucketMD,
-            copyObjectSize, sourceVerId,
-            sourceLocationConstraintName, sourceObjMD, next) {
-            return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType,
-                request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err =>
-                next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName));
-        },
         // get MPU shadow bucket to get splitter based on MD version
         function getMpuShadowBucket(dataLocator, destBucketMD,
             copyObjectSize, sourceVerId,
@@ -399,8 +382,10 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
             // Clean up the old data now that new metadata (with new
             // data locations) has been stored
             if (oldLocationsToDelete) {
+                const delLog = logger.newRequestLoggerFromSerializedUids(
+                    log.getSerializedUids());
                 return data.batchDelete(oldLocationsToDelete, request.method, null,
-                    log, err => {
+                    delLog, err => {
                         if (err) {
                             // if error, log the error and move on as it is not
                             // relevant to the client as the client's
@@ -11,6 +11,7 @@ const { isBucketAuthorized } =
 const kms = require('../kms/wrapper');
 const metadata = require('../metadata/wrapper');
 const { pushMetric } = require('../utapi/utilities');
+const logger = require('../utilities/logger');
 const services = require('../services');
 const locationConstraintCheck
     = require('./apiUtils/object/locationConstraintCheck');
@@ -21,7 +22,6 @@ const { BackendInfo } = models;
 const writeContinue = require('../utilities/writeContinue');
 const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
 const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
-const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');

 const skipError = new Error('skip');

@@ -61,9 +61,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
     log.debug('processing request', { method: 'objectPutPart' });
     const size = request.parsedContentLength;

-    const putVersionId = request.headers['x-scal-s3-version-id'];
-    const isPutVersion = putVersionId || putVersionId === '';
-
     if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
         log.debug('put part size too large', { size });
         monitoring.promMetrics('PUT', request.bucketName, 400,
@@ -107,9 +104,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
     const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
     const { objectKey } = request;
     const originalIdentityAuthzResults = request.actionImplicitDenies;
-    // For validating the request at the destinationBucket level the
-    // `requestType` is the general 'objectPut'.
-    const requestType = request.apiMethods || 'objectPutPart';

     return async.waterfall([
         // Get the destination bucket.
@@ -129,6 +123,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
             }),
         // Check the bucket authorization.
         (destinationBucket, next) => {
+            // For validating the request at the destinationBucket level the
+            // `requestType` is the general 'objectPut'.
+            const requestType = request.apiMethods || 'objectPutPart';
             if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo,
                 log, request, request.actionImplicitDenies)) {
                 log.debug('access denied for user on bucket', { requestType });
@@ -136,8 +133,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
             }
             return next(null, destinationBucket);
         },
-        (destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas,
-            requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)),
         // Get bucket server-side encryption, if it exists.
         (destinationBucket, next) => getObjectSSEConfiguration(
             request.headers, destinationBucket, log,
@@ -385,8 +380,10 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
             prevObjectSize, next) => {
             if (oldLocationsToDelete) {
                 log.trace('overwriting mpu part, deleting data');
+                const delLog = logger.newRequestLoggerFromSerializedUids(
+                    log.getSerializedUids());
                 return data.batchDelete(oldLocationsToDelete, request.method,
-                    objectLocationConstraint, log, err => {
+                    objectLocationConstraint, delLog, err => {
                         if (err) {
                             // if error, log the error and move on as it is not
                             // relevant to the client as the client's
@@ -1,3 +1,4 @@
+const vaultclient = require('vaultclient');
 const { auth } = require('arsenal');

 const { config } = require('../Config');
@@ -20,7 +21,6 @@ function getVaultClient(config) {
             port,
             https: true,
         });
-        const vaultclient = require('vaultclient');
         vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
     } else {
         logger.info('vaultclient configuration', {
@@ -28,7 +28,6 @@ function getVaultClient(config) {
             port,
             https: false,
         });
-        const vaultclient = require('vaultclient');
         vaultClient = new vaultclient.Client(host, port);
     }

@@ -50,6 +49,10 @@ function getMemBackend(config) {
 }

 switch (config.backends.auth) {
+case 'mem':
+    implName = 'vaultMem';
+    client = getMemBackend(config);
+    break;
 case 'multiple':
     implName = 'vaultChain';
     client = new ChainBackend('s3', [
@@ -57,14 +60,9 @@ case 'multiple':
         getVaultClient(config),
     ]);
     break;
-case 'vault':
+default: // vault
     implName = 'vault';
     client = getVaultClient(config);
-    break;
-default: // mem
-    implName = 'vaultMem';
-    client = getMemBackend(config);
-    break;
 }

 module.exports = new Vault(client, implName);
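After this hunk the switch resolves as follows (a sketch of the mapping, assuming the usual backends.auth values from config.json):

// config.backends.auth === 'mem'      -> implName 'vaultMem', in-memory backend
// config.backends.auth === 'multiple' -> implName 'vaultChain', ChainBackend over mem + vaultclient
// 'vault' or any other value          -> implName 'vault', vaultclient (now the fallthrough default)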
@@ -8,6 +8,20 @@ const inMemory = require('./in_memory/backend').backend;
 const file = require('./file/backend');
 const KMIPClient = require('arsenal').network.kmipClient;
 const Common = require('./common');
+let scalityKMS;
+let scalityKMSImpl;
+try {
+    // eslint-disable-next-line import/no-unresolved
+    const ScalityKMS = require('scality-kms');
+    scalityKMS = new ScalityKMS(config.kms);
+    scalityKMSImpl = 'scalityKms';
+} catch (error) {
+    logger.warn('scality kms unavailable. ' +
+    'Using file kms backend unless mem specified.',
+    { error });
+    scalityKMS = file;
+    scalityKMSImpl = 'fileKms';
+}

 let client;
 let implName;
@@ -19,9 +33,8 @@ if (config.backends.kms === 'mem') {
     client = file;
     implName = 'fileKms';
 } else if (config.backends.kms === 'scality') {
-    const ScalityKMS = require('scality-kms');
-    client = new ScalityKMS(config.kms);
-    implName = 'scalityKms';
+    client = scalityKMS;
+    implName = scalityKMSImpl;
 } else if (config.backends.kms === 'kmip') {
     const kmipConfig = { kmip: config.kmip };
     if (!kmipConfig.kmip) {
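Net effect of the hunk above, as a sketch: the optional scality-kms dependency is now probed once at module load, so a deployment without that private package degrades to the file backend instead of throwing inside the config branch:

// with scality-kms installed:  client is a ScalityKMS instance, implName === 'scalityKms'
// without it (require throws): client === file,                 implName === 'fileKms'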
@@ -0,0 +1,131 @@
/**
 * Type of message carried on the channel
 * @readonly
 * @enum {number}
 */
const MessageType = {
    /** Message that contains a configuration overlay */
    CONFIG_OVERLAY_MESSAGE: 1,
    /** Message that requests a metrics report */
    METRICS_REQUEST_MESSAGE: 2,
    /** Message that contains a metrics report */
    METRICS_REPORT_MESSAGE: 3,
    /** Close the virtual TCP socket associated to the channel */
    CHANNEL_CLOSE_MESSAGE: 4,
    /** Write data to the virtual TCP socket associated to the channel */
    CHANNEL_PAYLOAD_MESSAGE: 5,
};

/**
 * Target service that should handle a message
 * @readonly
 * @enum {number}
 */
const TargetType = {
    /** Let the dispatcher choose the most appropriate message */
    TARGET_ANY: 0,
};

const headerSize = 3;

class ChannelMessageV0 {
    /**
     * @param {Buffer} buffer Message bytes
     */
    constructor(buffer) {
        this.messageType = buffer.readUInt8(0);
        this.channelNumber = buffer.readUInt8(1);
        this.target = buffer.readUInt8(2);
        this.payload = buffer.slice(headerSize);
    }

    /**
     * @returns {number} Message type
     */
    getType() {
        return this.messageType;
    }

    /**
     * @returns {number} Channel number if applicable
     */
    getChannelNumber() {
        return this.channelNumber;
    }

    /**
     * @returns {number} Target service, or 0 to choose automatically
     */
    getTarget() {
        return this.target;
    }

    /**
     * @returns {Buffer} Message payload if applicable
     */
    getPayload() {
        return this.payload;
    }

    /**
     * Creates a wire representation of a channel close message
     *
     * @param {number} channelId Channel number
     *
     * @returns {Buffer} wire representation
     */
    static encodeChannelCloseMessage(channelId) {
        const buf = Buffer.alloc(headerSize);
        buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
        buf.writeUInt8(channelId, 1);
        buf.writeUInt8(TargetType.TARGET_ANY, 2);
        return buf;
    }

    /**
     * Creates a wire representation of a channel data message
     *
     * @param {number} channelId Channel number
     * @param {Buffer} data Payload
     *
     * @returns {Buffer} wire representation
     */
    static encodeChannelDataMessage(channelId, data) {
        const buf = Buffer.alloc(data.length + headerSize);
        buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
        buf.writeUInt8(channelId, 1);
        buf.writeUInt8(TargetType.TARGET_ANY, 2);
        data.copy(buf, headerSize);
        return buf;
    }

    /**
     * Creates a wire representation of a metrics message
     *
     * @param {object} body Metrics report
     *
     * @returns {Buffer} wire representation
     */
    static encodeMetricsReportMessage(body) {
        const report = JSON.stringify(body);
        const buf = Buffer.alloc(report.length + headerSize);
        buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
        buf.writeUInt8(0, 1);
        buf.writeUInt8(TargetType.TARGET_ANY, 2);
        buf.write(report, headerSize);
        return buf;
    }

    /**
     * Protocol name used for subprotocol negotiation
     */
    static get protocolName() {
        return 'zenko-secure-channel-v0';
    }
}

module.exports = {
    ChannelMessageV0,
    MessageType,
    TargetType,
};
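A minimal round-trip sketch for the class above (the require path is an assumption; the file name is not shown in the diff):

const { ChannelMessageV0, MessageType } = require('./ChannelMessageV0');

// encode a payload message for channel 7, then parse it back
const wire = ChannelMessageV0.encodeChannelDataMessage(7, Buffer.from('hello'));
const msg = new ChannelMessageV0(wire);
// msg.getType() === MessageType.CHANNEL_PAYLOAD_MESSAGE
// msg.getChannelNumber() === 7
// msg.getPayload().toString() === 'hello'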
@@ -0,0 +1,94 @@
const WebSocket = require('ws');
const arsenal = require('arsenal');

const logger = require('../utilities/logger');
const _config = require('../Config').config;
const { patchConfiguration } = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;


const managementAgentMessageType = {
    /** Message that contains the loaded overlay */
    NEW_OVERLAY: 1,
};

const CONNECTION_RETRY_TIMEOUT_MS = 5000;


function initManagementClient() {
    const { host, port } = _config.managementAgent;

    const ws = new WebSocket(`ws://${host}:${port}/watch`);

    ws.on('open', () => {
        logger.info('connected with management agent');
    });

    ws.on('close', (code, reason) => {
        logger.info('disconnected from management agent', { reason });
        setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
    });

    ws.on('error', error => {
        logger.error('error on connection with management agent', { error });
    });

    ws.on('message', data => {
        const method = 'initManagementclient::onMessage';
        const log = logger.newRequestLogger();
        let msg;

        if (!data) {
            log.error('message without data', { method });
            return;
        }
        try {
            msg = JSON.parse(data);
        } catch (err) {
            log.error('data is an invalid json', { method, err, data });
            return;
        }

        if (msg.payload === undefined) {
            log.error('message without payload', { method });
            return;
        }
        if (typeof msg.messageType !== 'number') {
            log.error('messageType is not an integer', {
                type: typeof msg.messageType,
                method,
            });
            return;
        }

        switch (msg.messageType) {
        case managementAgentMessageType.NEW_OVERLAY:
            patchConfiguration(msg.payload, log, err => {
                if (err) {
                    log.error('failed to patch overlay', {
                        error: reshapeExceptionError(err),
                        method,
                    });
                }
            });
            return;
        default:
            log.error('new overlay message with unmanaged message type', {
                method,
                type: msg.messageType,
            });
            return;
        }
    });
}

function isManagementAgentUsed() {
    return process.env.MANAGEMENT_USE_AGENT === '1';
}


module.exports = {
    managementAgentMessageType,
    initManagementClient,
    isManagementAgentUsed,
};
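For reference, the only message shape the handler above fully processes (field values illustrative):

// { "messageType": 1, "payload": { /* overlay, e.g. { "version": 3, ... } */ } }
// A missing payload or a non-numeric messageType is logged and dropped;
// any other messageType hits the default branch and is also dropped.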
@@ -0,0 +1,240 @@
const arsenal = require('arsenal');

const { buildAuthDataAccount } = require('../auth/in_memory/builder');
const _config = require('../Config').config;
const metadata = require('../metadata/wrapper');

const { getStoredCredentials } = require('./credentials');

const latestOverlayVersionKey = 'configuration/overlay-version';
const managementDatabaseName = 'PENSIEVE';
const replicatorEndpoint = 'zenko-cloudserver-replicator';
const { decryptSecret } = arsenal.pensieve.credentialUtils;
const { patchLocations } = arsenal.patches.locationConstraints;
const { reshapeExceptionError } = arsenal.errorUtils;
const { replicationBackends } = require('arsenal').constants;

function overlayHasVersion(overlay) {
    return overlay && overlay.version !== undefined;
}

function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
    return (overlayHasVersion(remoteOverlay) &&
            (!overlayHasVersion(cachedOverlay) ||
             remoteOverlay.version > cachedOverlay.version));
}

/**
 * Updates the live {Config} object with the new overlay configuration.
 *
 * No-op if this version was already applied to the live {Config}.
 *
 * @param {object} newConf Overlay configuration to apply
 * @param {werelogs~Logger} log Request-scoped logger
 * @param {function} cb Function to call with (error, newConf)
 *
 * @returns {undefined}
 */
function patchConfiguration(newConf, log, cb) {
    if (newConf.version === undefined) {
        log.debug('no remote configuration created yet');
        return process.nextTick(cb, null, newConf);
    }

    if (_config.overlayVersion !== undefined &&
        newConf.version <= _config.overlayVersion) {
        log.debug('configuration version already applied',
            { configurationVersion: newConf.version });
        return process.nextTick(cb, null, newConf);
    }
    return getStoredCredentials(log, (err, creds) => {
        if (err) {
            return cb(err);
        }
        const accounts = [];
        if (newConf.users) {
            newConf.users.forEach(u => {
                if (u.secretKey && u.secretKey.length > 0) {
                    const secretKey = decryptSecret(creds, u.secretKey);
                    // accountType will be service-replication or service-clueso
                    let serviceName;
                    if (u.accountType && u.accountType.startsWith('service-')) {
                        serviceName = u.accountType.split('-')[1];
                    }
                    const newAccount = buildAuthDataAccount(
                        u.accessKey, secretKey, u.canonicalId, serviceName,
                        u.userName);
                    accounts.push(newAccount.accounts[0]);
                }
            });
        }

        const restEndpoints = Object.assign({}, _config.restEndpoints);
        if (newConf.endpoints) {
            newConf.endpoints.forEach(e => {
                restEndpoints[e.hostname] = e.locationName;
            });
        }

        if (!restEndpoints[replicatorEndpoint]) {
            restEndpoints[replicatorEndpoint] = 'us-east-1';
        }

        const locations = patchLocations(newConf.locations, creds, log);
        if (Object.keys(locations).length !== 0) {
            try {
                _config.setLocationConstraints(locations);
            } catch (error) {
                const exceptionError = reshapeExceptionError(error);
                log.error('could not apply configuration version location ' +
                    'constraints', { error: exceptionError,
                    method: 'getStoredCredentials' });
                return cb(exceptionError);
            }
            try {
                const locationsWithReplicationBackend = Object.keys(locations)
                // NOTE: In Orbit, we don't need to have Scality location in our
                // replication endpoint config, since we do not replicate to
                // any Scality instance yet.
                    .filter(key => replicationBackends[locations[key].type])
                    .reduce((obj, key) => {
                        /* eslint no-param-reassign:0 */
                        obj[key] = locations[key];
                        return obj;
                    }, {});
                _config.setReplicationEndpoints(
                    locationsWithReplicationBackend);
            } catch (error) {
                const exceptionError = reshapeExceptionError(error);
                log.error('could not apply replication endpoints',
                    { error: exceptionError, method: 'getStoredCredentials' });
                return cb(exceptionError);
            }
        }

        _config.setAuthDataAccounts(accounts);
        _config.setRestEndpoints(restEndpoints);
        _config.setPublicInstanceId(newConf.instanceId);

        if (newConf.browserAccess) {
            if (Boolean(_config.browserAccessEnabled) !==
                Boolean(newConf.browserAccess.enabled)) {
                _config.browserAccessEnabled =
                    Boolean(newConf.browserAccess.enabled);
                _config.emit('browser-access-enabled-change');
            }
        }

        _config.overlayVersion = newConf.version;

        log.info('applied configuration version',
            { configurationVersion: _config.overlayVersion });

        return cb(null, newConf);
    });
}

/**
 * Writes configuration version to the management database
 *
 * @param {object} cachedOverlay Latest stored configuration version
 *  for freshness comparison purposes
 * @param {object} remoteOverlay New configuration version
 * @param {werelogs~Logger} log Request-scoped logger
 * @param {function} cb Function to call with (error, remoteOverlay)
 *
 * @returns {undefined}
 */
function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
    if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
        const objName = `configuration/overlay/${remoteOverlay.version}`;
        metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
            {}, log, error => {
                if (error) {
                    const exceptionError = reshapeExceptionError(error);
                    log.error('could not save configuration',
                        { error: exceptionError,
                          method: 'saveConfigurationVersion',
                          configurationVersion: remoteOverlay.version });
                    cb(exceptionError);
                    return;
                }
                metadata.putObjectMD(managementDatabaseName,
                    latestOverlayVersionKey, remoteOverlay.version, {}, log,
                    error => {
                        if (error) {
                            log.error('could not save configuration version', {
                                configurationVersion: remoteOverlay.version,
                            });
                        }
                        cb(error, remoteOverlay);
                    });
            });
    } else {
        log.debug('no remote configuration to cache yet');
        process.nextTick(cb, null, remoteOverlay);
    }
}

/**
 * Loads the latest cached configuration overlay from the management
 * database, without contacting the Orbit API.
 *
 * @param {werelogs~Logger} log Request-scoped logger
 * @param {function} callback Function called with (error, cachedOverlay)
 *
 * @returns {undefined}
 */
function loadCachedOverlay(log, callback) {
    return metadata.getObjectMD(managementDatabaseName,
        latestOverlayVersionKey, {}, log, (err, version) => {
            if (err) {
                if (err.is.NoSuchKey) {
                    return process.nextTick(callback, null, {});
                }
                return callback(err);
            }
            return metadata.getObjectMD(managementDatabaseName,
                `configuration/overlay/${version}`, {}, log, (err, conf) => {
                    if (err) {
                        if (err.is.NoSuchKey) {
                            return process.nextTick(callback, null, {});
                        }
                        return callback(err);
                    }
                    return callback(null, conf);
                });
        });
}

function applyAndSaveOverlay(overlay, log) {
    patchConfiguration(overlay, log, err => {
        if (err) {
            log.error('could not apply pushed overlay', {
                error: reshapeExceptionError(err),
                method: 'applyAndSaveOverlay',
            });
            return;
        }
        saveConfigurationVersion(null, overlay, log, err => {
            if (err) {
                log.error('could not cache overlay version', {
                    error: reshapeExceptionError(err),
                    method: 'applyAndSaveOverlay',
                });
                return;
            }
            log.info('overlay push processed');
        });
    });
}

module.exports = {
    loadCachedOverlay,
    managementDatabaseName,
    patchConfiguration,
    saveConfigurationVersion,
    remoteOverlayIsNewer,
    applyAndSaveOverlay,
};
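The freshness rule in remoteOverlayIsNewer above, spelled out on made-up versions:

remoteOverlayIsNewer({ version: 2 }, { version: 3 }); // true: remote is ahead
remoteOverlayIsNewer({ version: 3 }, { version: 3 }); // false: same version
remoteOverlayIsNewer({}, { version: 1 });             // true: nothing cached yet
remoteOverlayIsNewer({ version: 1 }, {});             // false: remote has no version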
@@ -0,0 +1,145 @@
const arsenal = require('arsenal');
const forge = require('node-forge');
const request = require('../utilities/request');

const metadata = require('../metadata/wrapper');

const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
const { reshapeExceptionError } = arsenal.errorUtils;

/**
 * Retrieves Orbit API token from the management database.
 *
 * The token is used to authenticate stat posting and
 *
 * @param {werelogs~Logger} log Request-scoped logger to be able to trace
 *  initialization process
 * @param {function} callback Function called with (error, result)
 *
 * @returns {undefined}
 */
function getStoredCredentials(log, callback) {
    metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
        log, callback);
}

function issueCredentials(managementEndpoint, instanceId, log, callback) {
    log.info('registering with API to get token');

    const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
    const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
    const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);

    const postData = {
        publicKey,
    };

    request.post(`${managementEndpoint}/${instanceId}/register`,
        { body: postData, json: true }, (error, response, body) => {
            if (error) {
                return callback(error);
            }
            if (response.statusCode !== 201) {
                log.error('could not register instance', {
                    statusCode: response.statusCode,
                });
                return callback(arsenal.errors.InternalError);
            }
            /* eslint-disable no-param-reassign */
            body.privateKey = privateKey;
            /* eslint-enable no-param-reassign */
            return callback(null, body);
        });
}

function confirmInstanceCredentials(
    managementEndpoint, instanceId, creds, log, callback) {
    const postData = {
        serial: creds.serial || 0,
        publicKey: creds.publicKey,
    };

    const opts = {
        headers: {
            'x-instance-authentication-token': creds.token,
        },
        body: postData,
    };

    request.post(`${managementEndpoint}/${instanceId}/confirm`,
        opts, (error, response) => {
            if (error) {
                return callback(error);
            }
            if (response.statusCode === 200) {
                return callback(null, instanceId, creds.token);
            }
            return callback(arsenal.errors.InternalError);
        });
}

/**
 * Initializes credentials and PKI in the management database.
 *
 * In case the management database is new and empty, the instance
 * is registered as new against the Orbit API with newly-generated
 * RSA key pair.
 *
 * @param {string} managementEndpoint API endpoint
 * @param {string} instanceId UUID of this deployment
 * @param {werelogs~Logger} log Request-scoped logger to be able to trace
 *  initialization process
 * @param {function} callback Function called with (error, result)
 *
 * @returns {undefined}
 */
function initManagementCredentials(
    managementEndpoint, instanceId, log, callback) {
    getStoredCredentials(log, (error, value) => {
        if (error) {
            if (error.is.NoSuchKey) {
                return issueCredentials(managementEndpoint, instanceId, log,
                    (error, value) => {
                        if (error) {
                            log.error('could not issue token',
                                { error: reshapeExceptionError(error),
                                  method: 'initManagementCredentials' });
                            return callback(error);
                        }
                        log.debug('saving token');
                        return metadata.putObjectMD(managementDatabaseName,
                            tokenConfigurationKey, value, {}, log, error => {
                                if (error) {
                                    log.error('could not save token',
                                        { error: reshapeExceptionError(error),
                                          method: 'initManagementCredentials',
                                        });
                                    return callback(error);
                                }
                                log.info('saved token locally, ' +
                                    'confirming instance');
                                return confirmInstanceCredentials(
                                    managementEndpoint, instanceId, value, log,
                                    callback);
                            });
                    });
            }
            log.debug('could not get token', { error });
            return callback(error);
        }

        log.info('returning existing token');
        if (Date.now() - value.issueDate > tokenRotationDelay) {
            log.warn('management API token is too old, should re-issue');
        }

        return callback(null, instanceId, value.token);
    });
}

module.exports = {
    getStoredCredentials,
    initManagementCredentials,
};
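The rotation check near the end of initManagementCredentials compares token age against tokenRotationDelay; a worked example with assumed dates:

const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 604800000 ms, i.e. 7 days
const issueDate = Date.now() - 8 * 24 * 3600 * 1000; // assume: issued 8 days ago
Date.now() - issueDate > tokenRotationDelay; // true, so the re-issue warning is logged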
@@ -0,0 +1,138 @@
const arsenal = require('arsenal');
const async = require('async');

const metadata = require('../metadata/wrapper');
const logger = require('../utilities/logger');

const {
    loadCachedOverlay,
    managementDatabaseName,
    patchConfiguration,
} = require('./configuration');
const { initManagementCredentials } = require('./credentials');
const { startWSManagementClient } = require('./push');
const { startPollingManagementClient } = require('./poll');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');

const initRemoteManagementRetryDelay = 10000;

const managementEndpointRoot =
    process.env.MANAGEMENT_ENDPOINT ||
    'https://api.zenko.io';
const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;

const pushEndpointRoot =
    process.env.PUSH_ENDPOINT ||
    'https://push.api.zenko.io';
const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;

function initManagementDatabase(log, callback) {
    // XXX choose proper owner names
    const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
        'owner display name', new Date().toJSON());

    metadata.createBucket(managementDatabaseName, md, log, error => {
        if (error) {
            if (error.is.BucketAlreadyExists) {
                log.info('created management database');
                return callback();
            }
            log.error('could not initialize management database',
                { error: reshapeExceptionError(error),
                  method: 'initManagementDatabase' });
            return callback(error);
        }
        log.info('initialized management database');
        return callback();
    });
}

function startManagementListeners(instanceId, token) {
    const mode = process.env.MANAGEMENT_MODE || 'push';
    if (mode === 'push') {
        const url = `${pushEndpoint}/${instanceId}/ws`;
        startWSManagementClient(url, token);
    } else {
        startPollingManagementClient(managementEndpoint, instanceId, token);
    }
}

/**
 * Initializes Orbit-based management by:
 * - creating the management database in metadata
 * - generating a key pair for credentials encryption
 * - generating an instance-unique ID
 * - getting an authentication token for the API
 * - loading and applying the latest cached overlay configuration
 * - starting a configuration update and metrics push background task
 *
 * @param {werelogs~Logger} log Request-scoped logger to be able to trace
 *  initialization process
 * @param {function} callback Function to call once the overlay is loaded
 *  (overlay)
 *
 * @returns {undefined}
 */
function initManagement(log, callback) {
    if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
        process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
        || process.env.S3BACKEND === 'mem') {
        log.info('remote management disabled');
        return;
    }

    /* Temporary check before fully moving to the process management agent. */
    if (isManagementAgentUsed() ^ typeof callback === 'function') {
        let msg = 'misuse of initManagement function: ';
        msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
        msg += `, callback type: ${typeof callback}`;
        throw new Error(msg);
    }

    async.waterfall([
        // eslint-disable-next-line arrow-body-style
        cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
        cb => initManagementDatabase(log, cb),
        cb => metadata.getUUID(log, cb),
        (instanceId, cb) => initManagementCredentials(
            managementEndpoint, instanceId, log, cb),
        (instanceId, token, cb) => {
            if (!isManagementAgentUsed()) {
                cb(null, instanceId, token, {});
                return;
            }
            loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
                token, overlay));
        },
        (instanceId, token, overlay, cb) => {
            if (!isManagementAgentUsed()) {
                cb(null, instanceId, token, overlay);
                return;
            }
            patchConfiguration(overlay, log,
                err => cb(err, instanceId, token, overlay));
        },
    ], (error, instanceId, token, overlay) => {
        if (error) {
            log.error('could not initialize remote management, retrying later',
                { error: reshapeExceptionError(error),
                  method: 'initManagement' });
            setTimeout(initManagement,
                initRemoteManagementRetryDelay,
                logger.newRequestLogger());
        } else {
            log.info(`this deployment's Instance ID is ${instanceId}`);
            log.end('management init done');
            startManagementListeners(instanceId, token);
            if (callback) {
                callback(overlay);
            }
        }
    });
}

module.exports = {
    initManagement,
    initManagementDatabase,
};
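Environment variables this initializer reads, with defaults as in the code above (values illustrative):

// REMOTE_MANAGEMENT_DISABLE=1  -> skip remote management (any value except '0')
// S3BACKEND=mem                -> also skips remote management
// MANAGEMENT_USE_AGENT=1       -> agent mode; initManagement then expects a callback
// MANAGEMENT_MODE=poll         -> poll the API instead of the default 'push' websocket
// MANAGEMENT_ENDPOINT / PUSH_ENDPOINT -> override https://api.zenko.io and https://push.api.zenko.io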
@@ -0,0 +1,157 @@
const arsenal = require('arsenal');
const async = require('async');
const request = require('../utilities/request');

const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const {
    loadCachedOverlay,
    patchConfiguration,
    saveConfigurationVersion,
} = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;

const pushReportDelay = 30000;
const pullConfigurationOverlayDelay = 60000;

function loadRemoteOverlay(
    managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
    log.debug('loading remote overlay');
    const opts = {
        headers: {
            'x-instance-authentication-token': remoteToken,
            'x-scal-request-id': log.getSerializedUids(),
        },
        json: true,
    };
    request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
        (error, response, body) => {
            if (error) {
                return cb(error);
            }
            if (response.statusCode === 200) {
                return cb(null, cachedOverlay, body);
            }
            if (response.statusCode === 404) {
                return cb(null, cachedOverlay, {});
            }
            return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
        });
}

// TODO save only after successful patch
function applyConfigurationOverlay(
    managementEndpoint, instanceId, remoteToken, log) {
    async.waterfall([
        wcb => loadCachedOverlay(log, wcb),
        (cachedOverlay, wcb) => patchConfiguration(cachedOverlay,
            log, wcb),
        (cachedOverlay, wcb) =>
            loadRemoteOverlay(managementEndpoint, instanceId, remoteToken,
                cachedOverlay, log, wcb),
        (cachedOverlay, remoteOverlay, wcb) =>
            saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
        (remoteOverlay, wcb) => patchConfiguration(remoteOverlay,
            log, wcb),
    ], error => {
        if (error) {
            log.error('could not apply managed configuration',
                { error: reshapeExceptionError(error),
                  method: 'applyConfigurationOverlay' });
        }
        setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
            managementEndpoint, instanceId, remoteToken,
            logger.newRequestLogger());
    });
}

function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
    const toURL = `${managementEndpoint}/${instanceId}/stats`;
    const toOptions = {
        json: true,
        headers: {
            'content-type': 'application/json',
            'x-instance-authentication-token': remoteToken,
        },
        body: report,
    };
    const toCallback = (err, response, body) => {
        if (err) {
            logger.info('could not post stats', { error: err });
        }
        if (response && response.statusCode !== 201) {
            logger.info('could not post stats', {
                body,
                statusCode: response.statusCode,
            });
        }
        if (next) {
            next(null, instanceId, remoteToken);
        }
    };
    return request.post(toURL, toOptions, toCallback);
}

function getStats(next) {
    const fromURL = `http://localhost:${_config.port}/_/report`;
    const fromOptions = {
        headers: {
            'x-scal-report-token': process.env.REPORT_TOKEN,
        },
    };
    return request.get(fromURL, fromOptions, next);
}

function pushStats(managementEndpoint, instanceId, remoteToken, next) {
    if (process.env.PUSH_STATS === 'false') {
        return;
    }

    getStats((err, res, report) => {
        if (err) {
            logger.info('could not retrieve stats', { error: err });
            return;
        }

        logger.debug('report', { report });
        postStats(
            managementEndpoint,
            instanceId,
            remoteToken,
            report,
            next
        );
        return;
    });

    setTimeout(pushStats, pushReportDelay,
        managementEndpoint, instanceId, remoteToken);
}

/**
 * Starts background task that updates configuration and pushes stats.
 *
 * Periodically polls for configuration updates, and pushes stats at
 * a fixed interval.
 *
 * @param {string} managementEndpoint API endpoint
 * @param {string} instanceId UUID of this deployment
 * @param {string} remoteToken API authentication token
 *
 * @returns {undefined}
 */
function startPollingManagementClient(
    managementEndpoint, instanceId, remoteToken) {
    metadata.notifyBucketChange(() => {
        pushStats(managementEndpoint, instanceId, remoteToken);
    });

    pushStats(managementEndpoint, instanceId, remoteToken);
    applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
        logger.newRequestLogger());
}

module.exports = {
    startPollingManagementClient,
};
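A note on the timing pattern above: applyConfigurationOverlay re-arms itself with setTimeout from the waterfall's final callback, so the next pull starts a fixed delay after the previous one finishes, rather than on a fixed wall-clock interval. A minimal standalone sketch of that pattern (the task and delay below are illustrative, not CloudServer APIs):

// Self-rescheduling poll: the next run is armed only after the current one
// completes, so a slow or failed cycle delays the next tick instead of
// overlapping with it (unlike setInterval).
const POLL_DELAY_MS = 60000;

function pollOnce(task) {
    task(err => {
        if (err) {
            console.error('poll failed, will retry on next tick', err);
        }
        setTimeout(pollOnce, POLL_DELAY_MS, task);
    });
}

// Usage: any error-first callback task works here.
pollOnce(done => setImmediate(done, null));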
@@ -0,0 +1,301 @@
const arsenal = require('arsenal');
const HttpsProxyAgent = require('https-proxy-agent');
const net = require('net');
const request = require('../utilities/request');
const { URL } = require('url');
const WebSocket = require('ws');
const assert = require('assert');
const http = require('http');

const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');

const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const { applyAndSaveOverlay } = require('./configuration');
const {
    ChannelMessageV0,
    MessageType,
} = require('./ChannelMessageV0');

const {
    CONFIG_OVERLAY_MESSAGE,
    METRICS_REQUEST_MESSAGE,
    CHANNEL_CLOSE_MESSAGE,
    CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;

const PING_INTERVAL_MS = 10000;
const subprotocols = [ChannelMessageV0.protocolName];

const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
    || 'localhost';
const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
    || _config.port;

let overlayMessageListener = null;
let connected = false;

// No wildcard nor cidr/mask match for now
function createWSAgent(pushEndpoint, env, log) {
    const url = new URL(pushEndpoint);
    const noProxy = (env.NO_PROXY || env.no_proxy
        || '').split(',');

    if (noProxy.includes(url.hostname)) {
        log.info('push server ws has proxy exclusion', { noProxy });
        return null;
    }

    if (url.protocol === 'https:' || url.protocol === 'wss:') {
        const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
        if (httpsProxy) {
            log.info('push server ws using https proxy', { httpsProxy });
            return new HttpsProxyAgent(httpsProxy);
        }
    } else if (url.protocol === 'http:' || url.protocol === 'ws:') {
        const httpProxy = (env.HTTP_PROXY || env.http_proxy);
        if (httpProxy) {
            log.info('push server ws using http proxy', { httpProxy });
            return new HttpsProxyAgent(httpProxy);
        }
    }

    const allProxy = (env.ALL_PROXY || env.all_proxy);
    if (allProxy) {
        log.info('push server ws using wildcard proxy', { allProxy });
        return new HttpsProxyAgent(allProxy);
    }

    log.info('push server ws not using proxy');
    return null;
}

/**
 * Starts background task that updates configuration and pushes stats.
 *
 * Receives pushed Websocket messages on configuration updates, and
 * sends stat messages in response to API solicitations.
 *
 * @param {string} url API endpoint
 * @param {string} token API authentication token
 * @param {function} cb end-of-connection callback
 *
 * @returns {undefined}
 */
function startWSManagementClient(url, token, cb) {
    logger.info('connecting to push server', { url });
    function _logError(error, errorMessage, method) {
        if (error) {
            logger.error(`management client error: ${errorMessage}`,
                { error: reshapeExceptionError(error), method });
        }
    }

    const socketsByChannelId = [];
    const headers = {
        'x-instance-authentication-token': token,
    };
    const agent = createWSAgent(url, process.env, logger);

    const ws = new WebSocket(url, subprotocols, { headers, agent });
    let pingTimeout = null;

    function sendPing() {
        if (ws.readyState === ws.OPEN) {
            ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
        }
        pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
    }

    function initiatePing() {
        clearTimeout(pingTimeout);
        setTimeout(sendPing, PING_INTERVAL_MS);
    }

    function pushStats(options) {
        if (process.env.PUSH_STATS === 'false') {
            return;
        }
        const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
        const fromOptions = {
            json: true,
            headers: {
                'x-scal-report-token': process.env.REPORT_TOKEN,
                'x-scal-report-skip-cache': Boolean(options && options.noCache),
            },
        };
        request.get(fromURL, fromOptions, (err, response, body) => {
            if (err) {
                _logError(err, 'failed to get metrics report', 'pushStats');
                return;
            }
            ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
                err => _logError(err, 'failed to send metrics report message',
                    'pushStats'));
        });
    }

    function closeChannel(channelId) {
        const socket = socketsByChannelId[channelId];
        if (socket) {
            socket.destroy();
            delete socketsByChannelId[channelId];
        }
    }

    function receiveChannelData(channelId, payload) {
        let socket = socketsByChannelId[channelId];
        if (!socket) {
            socket = net.createConnection(cloudServerPort, cloudServerHost);

            socket.on('data', data => {
                ws.send(ChannelMessageV0.
                    encodeChannelDataMessage(channelId, data), err =>
                    _logError(err, 'failed to send channel data message',
                        'receiveChannelData'));
            });

            socket.on('connect', () => {
            });

            socket.on('drain', () => {
            });

            socket.on('error', error => {
                logger.error('failed to connect to S3', {
                    code: error.code,
                    host: error.address,
                    port: error.port,
                });
            });

            socket.on('end', () => {
                socket.destroy();
                socketsByChannelId[channelId] = null;
                ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
                    err => _logError(err,
                        'failed to send channel close message',
                        'receiveChannelData'));
            });

            socketsByChannelId[channelId] = socket;
        }
        socket.write(payload);
    }

    function browserAccessChangeHandler() {
        if (!_config.browserAccessEnabled) {
            socketsByChannelId.forEach(s => s.close());
        }
    }

    ws.on('open', () => {
        connected = true;
        logger.info('connected to push server');

        metadata.notifyBucketChange(() => {
            pushStats({ noCache: true });
        });
        _config.on('browser-access-enabled-change', browserAccessChangeHandler);

        initiatePing();
    });

    const cbOnce = cb ? arsenal.jsutil.once(cb) : null;

    ws.on('close', () => {
        logger.info('disconnected from push server, reconnecting in 10s');
        metadata.notifyBucketChange(null);
        _config.removeListener('browser-access-enabled-change',
            browserAccessChangeHandler);
        setTimeout(startWSManagementClient, 10000, url, token);
        connected = false;

        if (cbOnce) {
            process.nextTick(cbOnce);
        }
    });

    ws.on('error', err => {
        connected = false;
        logger.error('error from push server connection', {
            error: err,
            errorMessage: err.message,
        });
        if (cbOnce) {
            process.nextTick(cbOnce, err);
        }
    });

    ws.on('ping', () => {
        ws.pong(err => _logError(err, 'failed to send a pong'));
    });

    ws.on('pong', () => {
        initiatePing();
    });

    ws.on('message', data => {
        const log = logger.newRequestLogger();
        const message = new ChannelMessageV0(data);
        switch (message.getType()) {
        case CONFIG_OVERLAY_MESSAGE:
            if (!isManagementAgentUsed()) {
                applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
            } else {
                if (overlayMessageListener) {
                    overlayMessageListener(message.getPayload().toString());
                }
            }
            break;
        case METRICS_REQUEST_MESSAGE:
            pushStats();
            break;
        case CHANNEL_CLOSE_MESSAGE:
            closeChannel(message.getChannelNumber());
            break;
        case CHANNEL_PAYLOAD_MESSAGE:
            // browserAccessEnabled defaults to true unless explicitly false
            if (_config.browserAccessEnabled !== false) {
                receiveChannelData(
                    message.getChannelNumber(), message.getPayload());
            }
            break;
        default:
            logger.error('unknown message type from push server',
                { messageType: message.getType() });
        }
    });
}

function addOverlayMessageListener(callback) {
    assert(typeof callback === 'function');
    overlayMessageListener = callback;
}

function startPushConnectionHealthCheckServer(cb) {
    const server = http.createServer((req, res) => {
        if (req.url !== '/_/healthcheck') {
            res.writeHead(404);
            res.write('Not Found');
        } else if (connected) {
            res.writeHead(200);
            res.write('Connected');
        } else {
            res.writeHead(503);
            res.write('Not Connected');
        }
        res.end();
    });

    server.listen(_config.port, cb);
}

module.exports = {
    createWSAgent,
    startWSManagementClient,
    startPushConnectionHealthCheckServer,
    addOverlayMessageListener,
};
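The ping/pong handlers above form a liveness watchdog: sendPing arms a terminate timer that only a pong (via initiatePing) can cancel, so a silent peer is dropped within one interval. A condensed standalone sketch of the same watchdog, assuming only the ws package (the endpoint is illustrative):

const WebSocket = require('ws');

const PING_INTERVAL_MS = 10000;

// Watchdog sketch: terminate the socket if the peer stops answering pings.
function watchConnection(endpoint) {
    const ws = new WebSocket(endpoint);
    let deadline = null;

    function sendPing() {
        if (ws.readyState === ws.OPEN) {
            ws.ping();
        }
        // If no pong re-arms us before the next interval, drop the socket.
        deadline = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
    }

    ws.on('open', () => setTimeout(sendPing, PING_INTERVAL_MS));
    ws.on('pong', () => {
        clearTimeout(deadline);
        setTimeout(sendPing, PING_INTERVAL_MS);
    });
    return ws;
}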
@@ -6,9 +6,6 @@ const BucketInfo = require('arsenal').models.BucketInfo;
 const { isBucketAuthorized, isObjAuthorized } =
     require('../api/apiUtils/authorization/permissionChecks');
 const bucketShield = require('../api/apiUtils/bucket/bucketShield');
-const { onlyOwnerAllowed } = require('../../constants');
-const { actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal/build/lib/policyEvaluator/RequestContext');
-const { processBytesToWrite, validateQuotas } = require('../api/apiUtils/quotas/quotaUtils');
 
 /** getNullVersionFromMaster - retrieves the null version
  * metadata via retrieving the master key
@@ -155,6 +152,9 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
         });
         return errors.NoSuchBucket;
     }
+    // if requester is not bucket owner, bucket policy actions should be denied with
+    // MethodNotAllowed error
+    const onlyOwnerAllowed = ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'];
     const canonicalID = authInfo.getCanonicalID();
     if (!Array.isArray(requestType)) {
         requestType = [requestType];
@@ -184,7 +184,7 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
  * @return {undefined} - and call callback with params err, bucket md
  */
 function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) {
-    const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request, withVersionId } = params;
+    const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request } = params;
     let requestType = params.requestType;
     if (!Array.isArray(requestType)) {
         requestType = [requestType];
@@ -238,21 +238,6 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
             }
             return next(null, bucket, objMD);
         },
-        (bucket, objMD, next) => {
-            const needQuotaCheck = requestType => requestType.some(type => actionNeedQuotaCheck[type] ||
-                actionWithDataDeletion[type]);
-            const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota;
-            // withVersionId cover cases when an object is being restored with a specific version ID.
-            // In this case, the storage space was already accounted for when the RestoreObject API call
-            // was made, so we don't need to add any inflight, but quota must be evaluated.
-            if (!checkQuota) {
-                return next(null, bucket, objMD);
-            }
-            const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId,
-                request?.parsedContentLength || 0, objMD, params.destObjMD);
-            return validateQuotas(request, bucket, request.accountQuotas, requestType, request.apiMethod,
-                contentLength, withVersionId, log, err => next(err, bucket, objMD));
-        },
     ], (err, bucket, objMD) => {
         if (err) {
             // still return bucket for cors headers
@@ -294,7 +279,6 @@ module.exports = {
     validateBucket,
     metadataGetObject,
     metadataGetObjects,
-    processBytesToWrite,
     standardMetadataValidateBucketAndObj,
     standardMetadataValidateBucket,
 };
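For reference, the waterfall step removed above boiled down to a small predicate: evaluate quotas only when some requested action consumes storage or deletes data, unless the caller forces the decision. A condensed sketch, with the arsenal lookup tables stubbed as plain objects for illustration:

// Illustrative stand-ins for arsenal's policyEvaluator lookup tables
// (actionNeedQuotaCheck / actionWithDataDeletion).
const actionNeedQuotaCheck = { objectPut: true, objectCopy: true };
const actionWithDataDeletion = { objectDelete: true };

// Quota is evaluated only when at least one requested action either consumes
// storage or deletes data; callers may force the decision via an override.
function shouldCheckQuota(requestTypes, checkQuotaOverride) {
    if (checkQuotaOverride !== undefined) {
        return checkQuotaOverride;
    }
    return requestTypes.some(type =>
        actionNeedQuotaCheck[type] || actionWithDataDeletion[type]);
}

console.log(shouldCheckQuota(['objectPut'])); // true
console.log(shouldCheckQuota(['objectGet'])); // false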
@@ -2,9 +2,9 @@ const MetadataWrapper = require('arsenal').storage.metadata.MetadataWrapper;
 const { config } = require('../Config');
 const logger = require('../utilities/logger');
 const constants = require('../../constants');
+const bucketclient = require('bucketclient');
 
 const clientName = config.backends.metadata;
-let bucketclient;
 let params;
 if (clientName === 'mem') {
     params = {};
@@ -21,7 +21,6 @@ if (clientName === 'mem') {
         noDbOpen: null,
     };
 } else if (clientName === 'scality') {
-    bucketclient = require('bucketclient');
     params = {
         bucketdBootstrap: config.bucketd.bootstrap,
         bucketdLog: config.bucketd.log,
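The old code deferred require('bucketclient') until the scality backend was actually selected, so deployments using other backends did not need the module installed; the new top-level require makes it a hard dependency. A sketch of the lazy-loading pattern being dropped (function name illustrative):

// Lazy backend loading: the module is only resolved when its backend is
// selected, so other configurations can run without it installed.
function loadMetadataBackend(clientName) {
    if (clientName === 'scality') {
        // Resolved on first use, not at process start-up.
        // eslint-disable-next-line global-require
        return require('bucketclient');
    }
    return null;
}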
@@ -1,17 +0,0 @@
const { config } = require('../Config');
const { ScubaClientImpl } = require('./scuba/wrapper');

let instance = null;

switch (config.backends.quota) {
case 'scuba':
    instance = new ScubaClientImpl(config);
    break;
default:
    instance = {
        enabled: false,
    };
    break;
}

module.exports = instance;
@@ -1,80 +0,0 @@
const util = require('util');
const { default: ScubaClient } = require('scubaclient');
const { externalBackendHealthCheckInterval } = require('../../../constants');
const monitoring = require('../../utilities/monitoringHandler');

class ScubaClientImpl extends ScubaClient {
    constructor(config) {
        super(config.scuba);
        this.enabled = false;
        this.maxStaleness = config.quota.maxStaleness;
        this._healthCheckTimer = null;
        this._log = null;
        this._getLatestMetricsCallback = util.callbackify(this.getLatestMetrics);

        if (config.scuba) {
            this.enabled = true;
        } else {
            this.enabled = false;
        }
    }

    setup(log) {
        this._log = log;
        if (this.enabled) {
            this.periodicHealthCheck();
        }
    }

    _healthCheck() {
        return this.healthCheck().then(data => {
            if (data?.date) {
                const date = new Date(data.date);
                if (Date.now() - date.getTime() > this.maxStaleness) {
                    throw new Error('Data is stale, disabling quotas');
                }
            }
            if (!this.enabled) {
                this._log.info('Scuba health check passed, enabling quotas');
            }
            monitoring.utilizationServiceAvailable.set(1);
            this.enabled = true;
        }).catch(err => {
            if (this.enabled) {
                this._log.warn('Scuba health check failed, disabling quotas', {
                    err: err.name,
                    description: err.message,
                });
            }
            monitoring.utilizationServiceAvailable.set(0);
            this.enabled = false;
        });
    }

    periodicHealthCheck() {
        if (this._healthCheckTimer) {
            clearInterval(this._healthCheckTimer);
        }
        this._healthCheck();
        this._healthCheckTimer = setInterval(async () => {
            this._healthCheck();
        }, Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY)
            || externalBackendHealthCheckInterval);
    }

    getUtilizationMetrics(metricsClass, resourceName, options, body, callback) {
        const requestStartTime = process.hrtime.bigint();
        return this._getLatestMetricsCallback(metricsClass, resourceName, options, body, (err, data) => {
            const responseTimeInNs = Number(process.hrtime.bigint() - requestStartTime);
            monitoring.utilizationMetricsRetrievalDuration.labels({
                code: err ? (err.statusCode || 500) : 200,
                class: metricsClass,
            }).observe(responseTimeInNs / 1e9);
            return callback(err, data);
        });
    }
}

module.exports = {
    ScubaClientImpl,
};
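The deleted wrapper bridged the promise-based ScubaClient into CloudServer's error-first callback style with util.callbackify. A minimal standalone illustration of that bridge (fetchMetrics is a placeholder, not the real client method):

const util = require('util');

// Promise-based API, standing in for ScubaClient.getLatestMetrics.
async function fetchMetrics(resource) {
    return { resource, bytes: 1024 };
}

// callbackify turns it into an (args..., callback) function, matching the
// error-first callback convention used across CloudServer.
const fetchMetricsCb = util.callbackify(fetchMetrics);

fetchMetricsCb('bucket/example', (err, data) => {
    if (err) {
        throw err;
    }
    console.log(data); // { resource: 'bucket/example', bytes: 1024 }
});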
@@ -52,7 +52,6 @@ const NAMESPACE = 'default';
 const CIPHER = null; // replication/lifecycle does not work on encrypted objects
 
 let { locationConstraints } = config;
-const { nullVersionCompatMode } = config;
 const { implName } = dataWrapper;
 let dataClient = dataWrapper.client;
 config.on('location-constraints-update', () => {
@@ -505,7 +504,9 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) {
     }
 
     if (versionId === 'null') {
+        if (!config.nullVersionCompatMode) {
             isNull = true;
+        }
         // Retrieve the null version id from the object metadata.
         versionId = objMd && objMd.versionId;
         if (!versionId) {
@@ -514,16 +515,6 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) {
         // the flag is needed to allow cloudserver to know that the version
         // is a null version and allow access to it using the "null" versionId.
         omVal.isNull = true;
-        // If the new null keys logic (S3C-7352) is supported (not compatibility mode),
-        // create a null key with the isNull2 flag.
-        if (!nullVersionCompatMode) {
-            omVal.isNull2 = true;
-        }
-        // Delete the version id from the version metadata payload to prevent issues
-        // with creating a non-version object (versioning set to false) that includes a version id.
-        // For example, this version ID might come from a null version of a suspended bucket being
-        // replicated to this bucket.
-        delete omVal.versionId;
         if (versioning) {
             // If the null version does not have a version id, it is a current null version.
             // To update the metadata of a current version, versioning is set to false.
@@ -558,6 +549,7 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) {
     }
 
     const options = {
+        isNull,
         overheadField: constants.overheadField,
     };
 
@@ -578,11 +570,6 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) {
         options.versionId = versionId;
     }
 
-    // If the new null keys logic (S3C-7352) is not supported (compatibility mode), 'isNull' remains undefined.
-    if (!nullVersionCompatMode) {
-        options.isNull = isNull;
-    }
-
     log.trace('putting object version', {
         objectKey: request.objectKey, omVal, options });
     return metadata.putObjectMD(bucketName, objectKey, omVal, options, log,
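The compatibility gating removed above follows one pattern: flag the metadata as a null version, add the new-style isNull2 marker only outside compat mode, and always strip the stray version id. A simplified sketch of that branch (omVal pared down for illustration; the real code mutates the full metadata payload inside putMetadata):

// Simplified sketch of the removed null-version flagging.
function flagNullVersion(omVal, nullVersionCompatMode) {
    omVal.isNull = true;
    if (!nullVersionCompatMode) {
        // New null-key layout (S3C-7352): tag the metadata with isNull2.
        omVal.isNull2 = true;
    }
    // Prevent a non-versioned put from carrying a version id, e.g. one
    // replicated from a suspended bucket's null version.
    delete omVal.versionId;
    return omVal;
}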
@@ -18,9 +18,13 @@ const locationStorageCheck =
     require('./api/apiUtils/object/locationStorageCheck');
 const vault = require('./auth/vault');
 const metadata = require('./metadata/wrapper');
+const { initManagement } = require('./management');
+const {
+    initManagementClient,
+    isManagementAgentUsed,
+} = require('./management/agentClient');
+
 const HttpAgent = require('agentkeepalive');
-const QuotaService = require('./quotas/quotas');
 const routes = arsenal.s3routes.routes;
 const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
 const websiteEndpoints = _config.websiteEndpoints;
@@ -51,6 +55,7 @@ const STATS_INTERVAL = 5; // 5 seconds
 const STATS_EXPIRY = 30; // 30 seconds
 const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
     STATS_EXPIRY);
+const enableRemoteManagement = true;
 
 class S3Server {
     /**
@@ -316,9 +321,16 @@ class S3Server {
             this._startServer(this.routeAdminRequest, _config.metricsPort);
         }
 
-        // Start quota service health checks
-        if (QuotaService.enabled) {
-            QuotaService?.setup(log);
+        // TODO this should wait for metadata healthcheck to be ok
+        // TODO only do this in cluster master
+        if (enableRemoteManagement) {
+            if (!isManagementAgentUsed()) {
+                setTimeout(() => {
+                    initManagement(logger.newRequestLogger());
+                }, 5000);
+            } else {
+                initManagementClient();
+            }
         }
 
         this.started = true;
@@ -327,7 +339,8 @@ class S3Server {
 }
 
 function main() {
-    let workers = _config.workers || 1;
+    // TODO: change config to use workers prop. name for clarity
+    let workers = _config.clusters || 1;
     if (process.env.S3BACKEND === 'mem') {
         workers = 1;
     }
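The start-up branch above selects between two management modes at runtime; a condensed sketch of that dispatch (how MANAGEMENT_USE_AGENT is tested for truthiness is an assumption here, and the two initializers are stubbed):

// Dispatch sketch: choose in-process management init or the agent client.
// Assumption: MANAGEMENT_USE_AGENT set to any value enables the agent.
function isManagementAgentUsed() {
    return Boolean(process.env.MANAGEMENT_USE_AGENT);
}

function startManagement(initInProcess, initAgentClient) {
    if (!isManagementAgentUsed()) {
        // Delayed so the HTTP listeners come up before management init.
        setTimeout(initInProcess, 5000);
    } else {
        initAgentClient();
    }
}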
@@ -11,6 +11,7 @@ const constants = require('../constants');
 const { config } = require('./Config');
 const { data } = require('./data/wrapper');
 const metadata = require('./metadata/wrapper');
+const logger = require('./utilities/logger');
 const { setObjectLockInformation }
     = require('./api/apiUtils/object/objectLockHelpers');
 const removeAWSChunked = require('./api/apiUtils/object/removeAWSChunked');
@@ -349,6 +350,9 @@ const services = {
                 return cb(err, res);
             }
             log.trace('deleteObject: metadata delete OK');
+            const deleteLog =
+                logger.newRequestLoggerFromSerializedUids(
+                    log.getSerializedUids());
             if (objectMD.location === null) {
                 return cb(null, res);
             }
@@ -359,11 +363,11 @@ const services = {
             }
 
             if (!Array.isArray(objectMD.location)) {
-                data.delete(objectMD.location, log);
+                data.delete(objectMD.location, deleteLog);
                 return cb(null, res);
             }
 
-            return data.batchDelete(objectMD.location, null, null, log, err => {
+            return data.batchDelete(objectMD.location, null, null, deleteLog, err => {
                 if (err) {
                     return cb(err);
                 }
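The new deleteLog gives the background data deletion its own request logger, derived from the serialized UIDs of the original request so its entries still correlate after the request log has ended. A sketch of the pattern, assuming werelogs' Logger API as used elsewhere in this repo (log levels and messages are illustrative):

const { Werelogs } = require('werelogs');

const werelogs = new Werelogs({ level: 'info', dump: 'error' });
const logger = new werelogs.Logger('S3');

function handleDelete() {
    const log = logger.newRequestLogger();
    // Background work gets a sibling logger sharing the same UID chain, so
    // it can safely outlive `log` without writing to an ended logger.
    const deleteLog = logger.newRequestLoggerFromSerializedUids(
        log.getSerializedUids());
    setImmediate(() => deleteLog.info('asynchronous data delete done'));
    log.end('request done');
}

handleDelete();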
@@ -1,12 +1,16 @@
-require('werelogs').stderrUtils.catchAndTimestampStderr();
 const _config = require('../Config').config;
 const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
-const vault = require('../auth/vault');
 
 // start utapi server
 if (utapiVersion === 1 && _config.utapi) {
     const fullConfig = Object.assign({}, _config.utapi,
-        { redis: _config.redis, vaultclient: vault });
+        { redis: _config.redis });
+    if (_config.vaultd) {
+        Object.assign(fullConfig, { vaultd: _config.vaultd });
+    }
+    if (_config.https) {
+        Object.assign(fullConfig, { https: _config.https });
+    }
     // copy healthcheck IPs
     if (_config.healthChecks) {
         Object.assign(fullConfig, { healthChecks: _config.healthChecks });
@@ -1,4 +1,3 @@
-require('werelogs').stderrUtils.catchAndTimestampStderr();
 const UtapiReindex = require('utapi').UtapiReindex;
 const { config } = require('../Config');
 
@@ -1,4 +1,3 @@
-require('werelogs').stderrUtils.catchAndTimestampStderr();
 const UtapiReplay = require('utapi').UtapiReplay;
 const _config = require('../Config').config;
 
@@ -1,11 +1,7 @@
-const { configure, Werelogs } = require('werelogs');
+const { Werelogs } = require('werelogs');
 
 const _config = require('../Config.js').config;
 
-configure({
-    level: _config.log.logLevel,
-    dump: _config.log.dumpLevel,
-});
 const werelogs = new Werelogs({
     level: _config.log.logLevel,
     dump: _config.log.dumpLevel,
@@ -1,6 +1,5 @@
 const { errors } = require('arsenal');
 const client = require('prom-client');
-const { config } = require('../Config');
 
 const collectDefaultMetrics = client.collectDefaultMetrics;
 const numberOfBuckets = new client.Gauge({
@@ -65,49 +64,6 @@ const httpResponseSizeBytes = new client.Summary({
     help: 'Cloudserver HTTP response sizes in bytes',
 });
 
-let quotaEvaluationDuration;
-let utilizationMetricsRetrievalDuration;
-let utilizationServiceAvailable;
-let bucketsWithQuota;
-let accountsWithQuota;
-let requestWithQuotaMetricsUnavailable;
-
-if (config.isQuotaEnabled) {
-    quotaEvaluationDuration = new client.Histogram({
-        name: 's3_cloudserver_quota_evaluation_duration_seconds',
-        help: 'Duration of the quota evaluation operation',
-        labelNames: ['action', 'code', 'type'],
-        buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1],
-    });
-
-    utilizationMetricsRetrievalDuration = new client.Histogram({
-        name: 's3_cloudserver_quota_metrics_retrieval_duration_seconds',
-        help: 'Duration of the utilization metrics retrieval operation',
-        labelNames: ['code', 'class'],
-        buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5],
-    });
-
-    utilizationServiceAvailable = new client.Gauge({
-        name: 's3_cloudserver_quota_utilization_service_available',
-        help: 'Availability of the utilization service',
-    });
-
-    bucketsWithQuota = new client.Gauge({
-        name: 's3_cloudserver_quota_buckets_count',
-        help: 'Total number of buckets quota',
-    });
-
-    accountsWithQuota = new client.Gauge({
-        name: 's3_cloudserver_quota_accounts_count',
-        help: 'Total number of account quota',
-    });
-
-    requestWithQuotaMetricsUnavailable = new client.Counter({
-        name: 's3_cloudserver_quota_unavailable_count',
-        help: 'Total number of requests with quota metrics unavailable',
-    });
-}
-
 // Lifecycle duration metric, to track the completion of restore.
 // This metric is used to track the time it takes to complete the lifecycle operation (restore).
 // NOTE : this metric is the same as the one defined in Backbeat, and must keep the same name,
@@ -187,10 +143,6 @@ function crrCacheToProm(crrResults) {
         numberOfBuckets.set(crrResults.getObjectCount.buckets || 0);
         numberOfObjects.set(crrResults.getObjectCount.objects || 0);
     }
-    if (config.isQuotaEnabled) {
-        bucketsWithQuota.set(crrResults?.getObjectCount?.bucketWithQuotaCount || 0);
-        accountsWithQuota.set(crrResults?.getVaultReport?.accountWithQuotaCount || 0);
-    }
     if (crrResults.getDataDiskUsage) {
         dataDiskAvailable.set(crrResults.getDataDiskUsage.available || 0);
         dataDiskFree.set(crrResults.getDataDiskUsage.free || 0);
@@ -268,9 +220,4 @@ module.exports = {
     httpRequestsTotal,
     httpActiveRequests,
     lifecycleDuration,
-    quotaEvaluationDuration,
-    utilizationMetricsRetrievalDuration,
-    utilizationServiceAvailable,
-    bucketsWithQuota,
-    requestWithQuotaMetricsUnavailable,
 };
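The removed block guarded metric registration behind a config flag, leaving the exported names undefined when quotas are off, so every consumer must tolerate a missing metric. A minimal sketch of that conditional-registration pattern with prom-client (the flag and metric name below are illustrative):

const client = require('prom-client');

// Registered only when the feature is on; consumers must handle `undefined`.
let featureLatency;

const featureEnabled = process.env.FEATURE_ENABLED === 'true'; // illustrative

if (featureEnabled) {
    featureLatency = new client.Histogram({
        name: 'app_feature_latency_seconds', // illustrative name
        help: 'Latency of the optional feature',
        labelNames: ['code'],
        buckets: [0.01, 0.05, 0.1, 0.5, 1],
    });
}

function observe(seconds) {
    // Guard every use, since the metric may not exist.
    featureLatency?.labels({ code: 200 }).observe(seconds);
}

observe(0.042);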
@@ -10,7 +10,6 @@ const config = require('../Config').config;
 const { data } = require('../data/wrapper');
 const metadata = require('../metadata/wrapper');
 const monitoring = require('../utilities/monitoringHandler');
-const vault = require('../auth/vault');
 
 const REPORT_MODEL_VERSION = 1;
 const ASYNCLIMIT = 5;
@@ -462,7 +461,6 @@ function reportHandler(clientIP, req, res, log) {
         getCRRMetrics: cb => getCRRMetrics(log, cb),
         getReplicationStates: cb => getReplicationStates(log, cb),
         getIngestionInfo: cb => getIngestionInfo(log, cb),
-        getVaultReport: cb => vault.report(log, cb),
     },
     (err, results) => {
         if (err) {
@@ -490,7 +488,6 @@ function reportHandler(clientIP, req, res, log) {
             capabilities: getCapabilities(),
             ingestStats: results.getIngestionInfo.metrics,
             ingestStatus: results.getIngestionInfo.status,
-            vaultReport: results.getVaultReport,
         };
         monitoring.crrCacheToProm(results);
         res.writeHead(200, { 'Content-Type': 'application/json' });
@@ -1,12 +0,0 @@
{
    "STANDARD": {
        "type": "vitastor",
        "objectId": "std",
        "legacyAwsBehavior": true,
        "details": {
            "config_path": "/etc/vitastor/vitastor.conf",
            "pool_id": 3,
            "metadata_image": "s3-volume-meta"
        }
    }
}
@@ -0,0 +1,179 @@
const Uuid = require('uuid');
const WebSocket = require('ws');

const logger = require('./lib/utilities/logger');
const { initManagement } = require('./lib/management');
const _config = require('./lib/Config').config;
const { managementAgentMessageType } = require('./lib/management/agentClient');
const { addOverlayMessageListener } = require('./lib/management/push');
const { saveConfigurationVersion } = require('./lib/management/configuration');


// TODO: auth?
// TODO: werelogs with a specific name.

const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000;


class ManagementAgentServer {
    constructor() {
        this.port = _config.managementAgent.port || 8010;
        this.wss = null;
        this.loadedOverlay = null;

        this.stop = this.stop.bind(this);
        process.on('SIGINT', this.stop);
        process.on('SIGHUP', this.stop);
        process.on('SIGQUIT', this.stop);
        process.on('SIGTERM', this.stop);
        process.on('SIGPIPE', () => {});
    }

    start(_cb) {
        const cb = _cb || function noop() {};

        /* Define REPORT_TOKEN env variable needed by the management
         * module. */
        process.env.REPORT_TOKEN = process.env.REPORT_TOKEN
            || _config.reportToken
            || Uuid.v4();

        initManagement(logger.newRequestLogger(), overlay => {
            let error = null;

            if (overlay) {
                this.loadedOverlay = overlay;
                this.startServer();
            } else {
                error = new Error('failed to init management');
            }
            return cb(error);
        });
    }

    stop() {
        if (!this.wss) {
            process.exit(0);
            return;
        }
        this.wss.close(() => {
            logger.info('server shutdown');
            process.exit(0);
        });
    }

    startServer() {
        this.wss = new WebSocket.Server({
            port: this.port,
            clientTracking: true,
            path: '/watch',
        });

        this.wss.on('connection', this.onConnection.bind(this));
        this.wss.on('listening', this.onListening.bind(this));
        this.wss.on('error', this.onError.bind(this));

        setInterval(this.checkBrokenConnections.bind(this),
            CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS);

        addOverlayMessageListener(this.onNewOverlay.bind(this));
    }

    onConnection(socket, request) {
        function hearthbeat() {
            this.isAlive = true;
        }
        logger.info('client connected to watch route', {
            ip: request.connection.remoteAddress,
        });

        /* eslint-disable no-param-reassign */
        socket.isAlive = true;
        socket.on('pong', hearthbeat.bind(socket));

        if (socket.readyState !== socket.OPEN) {
            logger.error('client socket not in ready state', {
                state: socket.readyState,
                client: socket._socket._peername,
            });
            return;
        }

        const msg = {
            messageType: managementAgentMessageType.NEW_OVERLAY,
            payload: this.loadedOverlay,
        };
        socket.send(JSON.stringify(msg), error => {
            if (error) {
                logger.error('failed to send remoteOverlay to client', {
                    error,
                    client: socket._socket._peername,
                });
            }
        });
    }

    onListening() {
        logger.info('websocket server listening',
            { port: this.port });
    }

    onError(error) {
        logger.error('websocket server error', { error });
    }

    _sendNewOverlayToClient(client) {
        if (client.readyState !== client.OPEN) {
            logger.error('client socket not in ready state', {
                state: client.readyState,
                client: client._socket._peername,
            });
            return;
        }

        const msg = {
            messageType: managementAgentMessageType.NEW_OVERLAY,
            payload: this.loadedOverlay,
        };
        client.send(JSON.stringify(msg), error => {
            if (error) {
                logger.error(
                    'failed to send remoteOverlay to management agent client', {
                        error, client: client._socket._peername,
                    });
            }
        });
    }

    onNewOverlay(remoteOverlay) {
        const remoteOverlayObj = JSON.parse(remoteOverlay);
        saveConfigurationVersion(
            this.loadedOverlay, remoteOverlayObj, logger, err => {
                if (err) {
                    logger.error('failed to save remote overlay', { err });
                    return;
                }
                this.loadedOverlay = remoteOverlayObj;
                this.wss.clients.forEach(
                    this._sendNewOverlayToClient.bind(this)
                );
            });
    }

    checkBrokenConnections() {
        this.wss.clients.forEach(client => {
            if (!client.isAlive) {
                logger.info('close broken connection', {
                    client: client._socket._peername,
                });
                client.terminate();
                return;
            }
            client.isAlive = false;
            client.ping();
        });
    }
}

const server = new ManagementAgentServer();
server.start();
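For context, a client of this agent subscribes to the /watch route and receives NEW_OVERLAY messages as they arrive. A minimal sketch of such a client (the port matches the server default above; the handler is illustrative):

const WebSocket = require('ws');

// Port 8010 matches the server default above; adjust to your deployment.
const ws = new WebSocket('ws://localhost:8010/watch');

// The server culls clients that miss its pings; the ws library answers
// pings with pongs automatically, so no extra keepalive code is needed.
ws.on('message', data => {
    const msg = JSON.parse(data);
    // messageType mirrors managementAgentMessageType.NEW_OVERLAY.
    console.log('overlay update', msg.messageType);
});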
@@ -192,163 +192,3 @@ tests:
               summary: Very high delete latency
             exp_labels:
               severity: critical
-
-  # QuotaMetricsNotAvailable (case with bucket quota)
-  ##################################################################################################
-  - name: Quota metrics not available (bucket quota)
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 1+1x6 0+0x20 1+1x6
-      - series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-    alert_rule_test:
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 15m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 20m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: critical
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 28m
-        exp_alerts: []
-
-  # QuotaMetricsNotAvailable (case with account quota)
-  ##################################################################################################
-  - name: Quota metrics not available (account quota)
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 1+1x6 0+0x20 1+1x6
-      - series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-    alert_rule_test:
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 15m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 20m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: critical
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 28m
-        exp_alerts: []
-
-  # QuotaMetricsNotAvailable (case with both quotas)
-  ##################################################################################################
-  - name: Quota metrics not available (account quota)
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 1+1x6 0+0x20 1+1x6
-      - series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-      - series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-    alert_rule_test:
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 15m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 20m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: critical
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 28m
-        exp_alerts: []
-
-  # QuotaMetricsNotAvailable (case without quota)
-  ##################################################################################################
-  - name: Utilization service Latency
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 1+1x6 0+0x20 1+1x6
-    alert_rule_test:
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 15m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 20m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 28m
-        exp_alerts: []
-
-  # QuotaUnavailable
-  ##################################################################################################
-  - name: Quota evaluation disabled
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_unavailable_count{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 0+0x6 1+1x20 0+0x6
-    alert_rule_test:
-      - alertname: QuotaUnavailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaUnavailable
-        eval_time: 20m
-        exp_alerts:
-          - exp_annotations:
-              description: Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet.
-              summary: High number of quota requests with metrics unavailable
-            exp_labels:
-              severity: critical
-      - alertname: QuotaUnavailable
-        eval_time: 30m
-        exp_alerts: []
@ -6,9 +6,6 @@ x-inputs:
|
||||||
- name: service
|
- name: service
|
||||||
type: constant
|
type: constant
|
||||||
value: artesca-data-connector-s3api-metrics
|
value: artesca-data-connector-s3api-metrics
|
||||||
- name: reportJob
|
|
||||||
type: constant
|
|
||||||
value: artesca-data-ops-report-handler
|
|
||||||
- name: replicas
|
- name: replicas
|
||||||
type: constant
|
type: constant
|
||||||
- name: systemErrorsWarningThreshold
|
- name: systemErrorsWarningThreshold
|
||||||
|
@ -29,9 +26,6 @@ x-inputs:
|
||||||
- name: deleteLatencyCriticalThreshold
|
- name: deleteLatencyCriticalThreshold
|
||||||
type: config
|
type: config
|
||||||
value: 1.000
|
value: 1.000
|
||||||
- name: quotaUnavailabilityThreshold
|
|
||||||
type: config
|
|
||||||
value: 0.500
|
|
||||||
|
|
||||||
groups:
|
groups:
|
||||||
- name: CloudServer
|
- name: CloudServer
|
||||||
|
@ -138,45 +132,3 @@ groups:
|
||||||
annotations:
|
annotations:
|
||||||
description: "Latency of delete object operations is more than 1s"
|
description: "Latency of delete object operations is more than 1s"
|
||||||
summary: "Very high delete latency"
|
summary: "Very high delete latency"
|
||||||
|
|
||||||
# As a platform admin I want to be alerted (warning) when the utilization metrics service is enabled
|
|
||||||
# but not available for at least half of the S3 services during the last minute
|
|
||||||
- alert: QuotaMetricsNotAvailable
|
|
||||||
expr: |
|
|
||||||
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
|
|
||||||
< ${quotaUnavailabilityThreshold} and
|
|
||||||
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
|
|
||||||
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
|
|
||||||
labels:
|
|
||||||
severity: warning
|
|
||||||
annotations:
|
|
||||||
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
|
|
||||||
summary: "Utilization metrics service not available"
|
|
||||||
|
|
||||||
# As a platform admin I want to be alerted (critical) when the utilization metrics service is enabled
|
|
||||||
# but not available during the last 10 minutes
|
|
||||||
- alert: QuotaMetricsNotAvailable
|
|
||||||
expr: |
|
|
||||||
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
|
|
||||||
< ${quotaUnavailabilityThreshold} and
|
|
||||||
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
|
|
||||||
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
|
|
||||||
for: 10m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
|
|
||||||
summary: "Utilization metrics service not available"
|
|
||||||
|
|
||||||
# As a platform admin I want to be alerted (critical) when quotas were not honored due to metrics
|
|
||||||
# being unavailable
|
|
||||||
- alert: QuotaUnavailable
|
|
||||||
expr: |
|
|
||||||
sum(increase(s3_cloudserver_quota_unavailable_count{namespace="${namespace}",service="${service}"}[2m]))
|
|
||||||
> 0
|
|
||||||
for: 5m
|
|
||||||
labels:
|
|
||||||
severity: critical
|
|
||||||
annotations:
|
|
||||||
description: "Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet."
|
|
||||||
summary: "High number of quota requests with metrics unavailable"
|
|
||||||
|
|
|
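The QuotaMetricsNotAvailable rules in this hunk only fire when the averaged availability gauge drops below quotaUnavailabilityThreshold (0.500) while at least one quota-enabled bucket or account is reported, so clusters that never enable quotas stay silent. A rough way to inspect the availability half of that condition by hand, as a sketch (the Prometheus URL and the substituted label values are assumptions taken from the x-inputs defaults, not from this diff):

    import requests

    PROM_URL = "http://localhost:9090"  # assumed; point at your Prometheus
    EXPR = (
        'avg(s3_cloudserver_quota_utilization_service_available'
        '{namespace="zenko",service="artesca-data-connector-s3api-metrics"})'
    )

    # Prometheus instant-query API; returns a vector of samples.
    resp = requests.get(f"{PROM_URL}/api/v1/query", params={"query": EXPR}, timeout=10)
    resp.raise_for_status()
    result = resp.json()["data"]["result"]
    availability = float(result[0]["value"][1]) if result else None
    # The warning rule becomes eligible when this value is below 0.5
    # and quota-enabled buckets or accounts exist.
    print(availability)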
@ -1931,7 +1931,7 @@
      "targets": [
        {
          "datasource": null,
          "expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
          "expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
          "format": "heatmap",
          "hide": false,
          "instant": false,

@ -1960,7 +1960,7 @@
      },
      "yAxis": {
        "decimals": null,
        "format": "s",
        "format": "dtdurations",
        "label": null,
        "logBase": 1,
        "max": null,

@ -2182,7 +2182,7 @@
      "targets": [
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
          "expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
          "format": "time_series",
          "hide": false,
          "instant": false,

@ -2196,7 +2196,7 @@
        },
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
          "expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
          "format": "time_series",
          "hide": false,
          "instant": false,
@ -2665,865 +2665,6 @@
      "transformations": [],
      "transparent": false,
      "type": "piechart"
    },
    {
      "collapsed": false,
      "editable": true,
      "error": false,
      "fieldConfig": { "defaults": { "thresholds": { "mode": "absolute", "steps": [] } } },
      "gridPos": { "h": 1, "w": 24, "x": 0, "y": 65 },
      "hideTimeOverride": false,
      "id": 34,
      "links": [],
      "maxDataPoints": 100,
      "panels": [],
      "targets": [],
      "title": "Quotas",
      "transformations": [],
      "transparent": false,
      "type": "row"
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "description": "Number of S3 buckets with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "custom": {},
          "decimals": null,
          "mappings": [],
          "noValue": "-",
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "#808080", "index": 0, "line": true, "op": "gt", "value": "null", "yaxis": "left" },
              { "color": "blue", "index": 1, "line": true, "op": "gt", "value": 0.0, "yaxis": "left" }
            ]
          },
          "unit": "short"
        },
        "overrides": []
      },
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 66 },
      "hideTimeOverride": false,
      "id": 35,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        {
          "datasource": null,
          "expr": "max(s3_cloudserver_quota_buckets_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Buckets with quota",
      "transformations": [],
      "transparent": false,
      "type": "stat"
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "description": "Number of accounts with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "custom": {},
          "decimals": null,
          "mappings": [],
          "noValue": "-",
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "#808080", "index": 0, "line": true, "op": "gt", "value": "null", "yaxis": "left" },
              { "color": "blue", "index": 1, "line": true, "op": "gt", "value": 0.0, "yaxis": "left" }
            ]
          },
          "unit": "short"
        },
        "overrides": []
      },
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 70 },
      "hideTimeOverride": false,
      "id": 36,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "colorMode": "value",
        "graphMode": "area",
        "justifyMode": "auto",
        "orientation": "auto",
        "reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false },
        "textMode": "auto"
      },
      "targets": [
        {
          "datasource": null,
          "expr": "max(s3_cloudserver_quota_accounts_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Accounts with quota",
      "transformations": [],
      "transparent": false,
      "type": "stat"
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 30,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "log": 2, "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {},
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [] },
          "unit": "ops"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 6, "x": 6, "y": 66 },
      "hideTimeOverride": false,
      "id": 37,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "legend": { "calcs": [], "displayMode": "hidden", "placement": "bottom" },
        "tooltip": { "mode": "single" }
      },
      "targets": [
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_unavailable_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Operations with unavailable metrics",
      "transformations": [],
      "transparent": false,
      "type": "timeseries"
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "log": 2, "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {},
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [] },
          "unit": "ops"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 66 },
      "hideTimeOverride": false,
      "id": 38,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "legend": { "calcs": ["min", "mean", "max"], "displayMode": "table", "placement": "right" },
        "tooltip": { "mode": "single" }
      },
      "targets": [
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval])) by(action)",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "{{action}}",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Quota evaluaton rate per S3 action",
      "transformations": [],
      "transparent": false,
      "type": "timeseries"
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 30,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "stepAfter",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "log": 2, "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {},
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "index": 0, "line": true, "op": "gt", "value": "null", "yaxis": "left" },
              { "color": "orange", "index": 1, "line": true, "op": "gt", "value": 90.0, "yaxis": "left" },
              { "color": "red", "index": 2, "line": true, "op": "gt", "value": 0.0, "yaxis": "left" }
            ]
          },
          "unit": "percent"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 6, "x": 0, "y": 74 },
      "hideTimeOverride": false,
      "id": 39,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "legend": { "calcs": [], "displayMode": "hidden", "placement": "bottom" },
        "tooltip": { "mode": "single" }
      },
      "targets": [
        {
          "datasource": null,
          "expr": "avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace=\"${namespace}\",job=\"${job}\"}[$__rate_interval])) * 100",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Quota service uptime",
      "transformations": [],
      "transparent": false,
      "type": "timeseries"
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 30,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "log": 2, "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {},
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [] },
          "unit": "ops"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 6, "x": 6, "y": 74 },
      "hideTimeOverride": false,
      "id": 40,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom" },
        "tooltip": { "mode": "single" }
      },
      "targets": [
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=~\"2..\", job=\"${job}\"}[$__rate_interval]))",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "Success",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        },
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=\"429\", job=\"${job}\"}[$__rate_interval]))",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "Quota Exceeded",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Quota evaluation status code over time",
      "transformations": [],
      "transparent": false,
      "type": "timeseries"
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "log": 2, "type": "linear" },
            "showPoints": "auto",
            "spanNulls": 180000,
            "stacking": {},
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [] },
          "unit": "s"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 74 },
      "hideTimeOverride": false,
      "id": 41,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "legend": { "calcs": ["min", "mean", "max"], "displayMode": "table", "placement": "right" },
        "tooltip": { "mode": "single" }
      },
      "targets": [
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "{{ type }} (success)",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        },
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "{{ type }} (exceeded)",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Average quota evaluation latencies",
      "transformations": [],
      "transparent": false,
      "type": "timeseries"
    },
    {
      "cards": { "cardPadding": null, "cardRound": null },
      "color": {
        "cardColor": "#b4ff00",
        "colorScale": "sqrt",
        "colorScheme": "interpolateOranges",
        "exponent": 0.5,
        "max": null,
        "min": null,
        "mode": "opacity"
      },
      "dataFormat": "tsbuckets",
      "datasource": "${DS_PROMETHEUS}",
      "editable": true,
      "error": false,
      "fieldConfig": { "defaults": { "thresholds": { "mode": "absolute", "steps": [] } } },
      "gridPos": { "h": 8, "w": 6, "x": 0, "y": 82 },
      "heatmap": {},
      "hideTimeOverride": false,
      "hideZeroBuckets": false,
      "highlightCards": true,
      "id": 42,
      "legend": { "show": false },
      "links": [],
      "maxDataPoints": 25,
      "reverseYBuckets": false,
      "targets": [
        {
          "datasource": null,
          "expr": "sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
          "format": "heatmap",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "{{ le }}",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Quota evaluation duration",
      "tooltip": { "show": true, "showHistogram": true },
      "transformations": [],
      "transparent": false,
      "type": "heatmap",
      "xAxis": { "mode": "time", "name": null, "show": true, "values": [] },
      "yAxis": {
        "decimals": null,
        "format": "s",
        "label": null,
        "logBase": 1,
        "max": null,
        "min": null,
        "show": true
      }
    },
    {
      "datasource": "${DS_PROMETHEUS}",
      "editable": true,
      "error": false,
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "viz": false },
            "lineInterpolation": "smooth",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "log": 2, "type": "linear" },
            "showPoints": "auto",
            "spanNulls": 180000,
            "stacking": {},
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": { "mode": "absolute", "steps": [] },
          "unit": "s"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 18, "x": 6, "y": 82 },
      "hideTimeOverride": false,
      "id": 43,
      "links": [],
      "maxDataPoints": 100,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom" },
        "tooltip": { "mode": "single" }
      },
      "targets": [
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "{{ class }} (success)",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        },
        {
          "datasource": null,
          "expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)",
          "format": "time_series",
          "hide": false,
          "instant": false,
          "interval": "",
          "intervalFactor": 1,
          "legendFormat": "{{ class }} (error)",
          "metric": "",
          "refId": "",
          "step": 10,
          "target": ""
        }
      ],
      "title": "Average utilization metrics retrieval latencies",
      "transformations": [],
      "transparent": false,
      "type": "timeseries"
    }
  ],
  "refresh": "30s",
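The two "Average ... latencies" panels in this hunk share one PromQL shape: a histogram's _sum rate divided by its _count rate, which yields the mean observed duration over the rate window. In formula form (notation mine, not from the dashboard):

    \[
    \bar{T}_{w} \;=\; \frac{\operatorname{rate}(duration\_seconds\_sum[w])}{\operatorname{rate}(duration\_seconds\_count[w])}
    \;=\; \frac{\Delta(\text{total observed seconds})}{\Delta(\text{number of observations})}
    \]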
@ -3625,5 +2766,5 @@
  "timezone": "",
  "title": "S3 service",
  "uid": null,
  "version": 110
  "version": 31
}
@ -366,28 +366,6 @@ def average_latency_target(title, action="", by=""):
    )


def average_quota_latency_target(code="", by=""):
    # type: (str, str) -> Target
    extra = ', code=' + code if code else ""
    by = " by (" + by + ")" if by else ""
    return "\n".join([
        'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by,  # noqa: E501
        " /",
        'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by,  # noqa: E501
    ])


def average_quota_retrieval_latency(code="", by=""):
    # type: (str, str) -> Target
    extra = ', code=' + code if code else ""
    by = " by (" + by + ")" if by else ""
    return "\n".join([
        'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by,  # noqa: E501
        " /",
        'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by,  # noqa: E501
    ])


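To make the helper's output concrete, this is what it expands to for the success case, derived by hand from the string building above (no additional repo code involved):

    expr = average_quota_latency_target(code='~"2.."', by='type')
    # expr is a single PromQL expression spread over three lines:
    #   sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace="${namespace}", job="${job}", code=~"2.."}[$__rate_interval])) by (type)
    #    /
    #   sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}", code=~"2.."}[$__rate_interval])) by (type)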
averageLatencies = TimeSeries(
    title="Average latencies",
    dataSource="${DS_PROMETHEUS}",

@ -428,10 +406,10 @@ requestTime = Heatmap(
    dataFormat="tsbuckets",
    maxDataPoints=25,
    tooltip=Tooltip(show=True, showHistogram=True),
    yAxis=YAxis(format=UNITS.SECONDS),
    yAxis=YAxis(format=UNITS.DURATION_SECONDS),
    color=HeatmapColor(mode="opacity"),
    targets=[Target(
        expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))',  # noqa: E501
        expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job=~"$job"}[$__rate_interval]))',  # noqa: E501
        format="heatmap",
        legendFormat="{{ le }}",
    )],

@ -455,11 +433,11 @@ bandWidth = TimeSeries(
    unit="binBps",
    targets=[
        Target(
            expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))',  # noqa: E501
            expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))',  # noqa: E501
            legendFormat="Out"
        ),
        Target(
            expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))',  # noqa: E501
            expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))',  # noqa: E501
            legendFormat="In"
        )
    ],

@ -547,174 +525,6 @@ top10Error5xxByBucket = top10_errors_by_bucket(
    title="5xx : Top10 by Bucket", code='~"5.."'
)


quotaHealth = TimeSeries(
    title="Quota service uptime",
    legendDisplayMode="hidden",
    dataSource="${DS_PROMETHEUS}",
    lineInterpolation="stepAfter",
    fillOpacity=30,
    unit=UNITS.PERCENT_FORMAT,
    targets=[Target(
        expr='avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",job="${job}"}[$__rate_interval])) * 100',  # noqa: E501
    )],
    thresholds=[
        Threshold("green", 0, 95.0),
        Threshold("orange", 1, 90.0),
        Threshold("red", 2, 0.0),
    ],
)


quotaStatusCode = TimeSeries(
    title="Quota evaluation status code over time",
    dataSource="${DS_PROMETHEUS}",
    fillOpacity=30,
    lineInterpolation="smooth",
    unit=UNITS.OPS_PER_SEC,
    targets=[Target(
        expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code=~"2..", job="${job}"}[$__rate_interval]))',  # noqa: E501
        legendFormat="Success",
    ), Target(
        expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code="429", job="${job}"}[$__rate_interval]))',  # noqa: E501
        legendFormat="Quota Exceeded",
    )],
)

quotaByAction = TimeSeries(
    title="Quota evaluaton rate per S3 action",
    dataSource="${DS_PROMETHEUS}",
    legendDisplayMode="table",
    legendPlacement="right",
    legendValues=["min", "mean", "max"],
    lineInterpolation="smooth",
    unit=UNITS.OPS_PER_SEC,
    targets=[
        Target(
            expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"}[$__rate_interval])) by(action)',  # noqa: E501
            legendFormat="{{action}}",
        )
    ]
)


averageQuotaDuration = Heatmap(
    title="Quota evaluation duration",
    dataSource="${DS_PROMETHEUS}",
    dataFormat="tsbuckets",
    maxDataPoints=25,
    tooltip=Tooltip(show=True, showHistogram=True),
    yAxis=YAxis(format=UNITS.SECONDS),
    color=HeatmapColor(mode="opacity"),
    targets=[Target(
        expr='sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))',  # noqa: E501
        format="heatmap",
        legendFormat="{{ le }}",
    )],
)


operationsWithUnavailableMetrics = TimeSeries(
    title="Operations with unavailable metrics",
    dataSource="${DS_PROMETHEUS}",
    fillOpacity=30,
    lineInterpolation="smooth",
    unit=UNITS.OPS_PER_SEC,
    legendDisplayMode="hidden",
    targets=[Target(
        expr='sum(rate(s3_cloudserver_quota_unavailable_count{namespace="${namespace}", job="${job}"}[$__rate_interval]))',  # noqa: E501
    )],
)


averageQuotaLatencies = TimeSeries(
    title="Average quota evaluation latencies",
    dataSource="${DS_PROMETHEUS}",
    lineInterpolation="smooth",
    spanNulls=3*60*1000,
    legendDisplayMode="table",
    legendPlacement="right",
    legendValues=["min", "mean", "max"],
    unit=UNITS.SECONDS,
    targets=[
        Target(
            expr=average_quota_latency_target(code='~"2.."', by='type'),
            legendFormat='{{ type }} (success)',
        ),
        Target(
            expr=average_quota_latency_target(code='"429"', by='type'),
            legendFormat='{{ type }} (exceeded)',
        ),
    ],
)


averageMetricsRetrievalLatencies = TimeSeries(
    title="Average utilization metrics retrieval latencies",
    dataSource="${DS_PROMETHEUS}",
    lineInterpolation="smooth",
    spanNulls=3*60*1000,
    unit=UNITS.SECONDS,
    targets=[
        Target(
            expr=average_quota_retrieval_latency(code='~"2.."', by='class'),
            legendFormat='{{ class }} (success)',
        ),
        Target(
            expr=average_quota_retrieval_latency(
                code='~"4..|5.."',
                by='class'
            ),
            legendFormat='{{ class }} (error)',
        ),
    ],
)


bucketQuotaCounter = Stat(
    title="Buckets with quota",
    description=(
        "Number of S3 buckets with quota enabled in the cluster.\n"
        "This value is computed asynchronously, and update "
        "may be delayed up to 1h."
    ),
    dataSource="${DS_PROMETHEUS}",
    colorMode="value",
    format=UNITS.SHORT,
    noValue="-",
    reduceCalc="lastNotNull",
    targets=[Target(
        expr='max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job=~"${reportJob}"})',  # noqa: E501
    )],
    thresholds=[
        Threshold("#808080", 0, 0.0),
        Threshold("blue", 1, 0.0),
    ],
)


accountQuotaCounter = Stat(
    title="Accounts with quota",
    description=(
        "Number of accounts with quota enabled in the cluster.\n"
        "This value is computed asynchronously, and update "
        "may be delayed up to 1h."
    ),
    dataSource="${DS_PROMETHEUS}",
    colorMode="value",
    format=UNITS.SHORT,
    noValue="-",
    reduceCalc="lastNotNull",
    targets=[Target(
        expr='max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job=~"${reportJob}"})',  # noqa: E501
    )],
    thresholds=[
        Threshold("#808080", 0, 0.0),
        Threshold("blue", 1, 0.0),
    ],
)


dashboard = (
    Dashboard(
        title="S3 service",

@ -820,24 +630,6 @@ dashboard = (
            top10Error500ByBucket,
            top10Error5xxByBucket
        ], height=8),
        RowPanel(title="Quotas"),
        layout.row([
            layout.column([
                layout.resize([bucketQuotaCounter], width=6, height=4),
                layout.resize([accountQuotaCounter], width=6, height=4),
            ], height=8),
            layout.resize([operationsWithUnavailableMetrics], width=6),
            quotaByAction,
        ], height=8),
        layout.row([
            layout.resize([quotaHealth], width=6),
            layout.resize([quotaStatusCode], width=6),
            averageQuotaLatencies,
        ], height=8),
        layout.row([
            layout.resize([averageQuotaDuration], width=6),
            averageMetricsRetrievalLatencies,
        ], height=8),
    ]),
)
.auto_panel_ids()
@ -45,8 +45,8 @@ then
    exit 1
fi

REGISTRY=${REGISTRY:-"ghcr.io/scality"}
REGISTRY=${REGISTRY:-"registry.scality.com"}
PROJECT=${PROJECT:-"cloudserver"}
PROJECT=${PROJECT:-"cloudserver-dev"}

set -x
${ORAS} push "${REGISTRY}/${PROJECT}/${NAME_TAG}" "${INPUT_FILE}:${MIME_TYPE}"

package.json
@ -1,6 +1,6 @@
{
  "name": "@zenko/cloudserver",
  "version": "8.8.27",
  "version": "8.8.19",
  "description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
  "main": "index.js",
  "engines": {

@ -21,13 +21,14 @@
  "dependencies": {
    "@azure/storage-blob": "^12.12.0",
    "@hapi/joi": "^17.1.0",
    "arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
    "arsenal": "git+https://github.com/scality/arsenal#8.1.123",
    "async": "^2.5.0",
    "async": "~2.5.0",
    "aws-sdk": "^2.905.0",
    "aws-sdk": "2.905.0",
    "bucketclient": "scality/bucketclient#8.1.9",
    "bufferutil": "^4.0.6",
    "commander": "^2.9.0",
    "cron-parser": "^2.11.0",
    "diskusage": "^1.1.3",
    "diskusage": "1.1.3",
    "google-auto-auth": "^0.9.1",
    "http-proxy": "^1.17.0",
    "http-proxy-agent": "^4.0.1",

@ -37,45 +38,37 @@
    "mongodb": "^5.2.0",
    "node-fetch": "^2.6.0",
    "node-forge": "^0.7.1",
    "npm-run-all": "^4.1.5",
    "npm-run-all": "~4.1.5",
    "prom-client": "14.2.0",
    "request": "^2.81.0",
    "scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
    "sql-where-parser": "~2.2.1",
    "sql-where-parser": "^2.2.1",
    "utapi": "github:scality/utapi#8.1.13",
    "utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
    "utf-8-validate": "^5.0.8",
    "utf8": "^2.1.1",
    "utf8": "~2.1.1",
    "uuid": "^8.3.2",
    "werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
    "vaultclient": "scality/vaultclient#8.3.11",
    "werelogs": "scality/werelogs#8.1.2",
    "ws": "^5.1.0",
    "xml2js": "^0.4.16"
    "xml2js": "~0.4.16"
  },
  "overrides": {
    "ltgt": "^2.2.0"
  },
  "devDependencies": {
    "@babel/core": "^7.25.2",
    "@babel/preset-env": "^7.25.3",
    "babel-loader": "^9.1.3",
    "bluebird": "^3.3.1",
    "eslint": "^8.14.0",
    "eslint-config-airbnb-base": "^15.0.0",
    "eslint-config-airbnb-base": "^13.1.0",
    "eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
    "eslint-config-scality": "scality/Guidelines#8.2.0",
    "eslint-plugin-import": "^2.14.0",
    "eslint-plugin-mocha": "^10.1.0",
    "express": "^4.17.1",
    "ioredis": "^4.9.5",
    "ioredis": "4.9.5",
    "istanbul": "^1.0.0-alpha.2",
    "istanbul": "1.0.0-alpha.2",
    "istanbul-api": "^1.0.0-alpha.13",
    "istanbul-api": "1.0.0-alpha.13",
    "lolex": "^1.4.0",
    "mocha": ">=3.1.2",
    "mocha": "^2.3.4",
    "mocha-junit-reporter": "^1.23.1",
    "mocha-multi-reporters": "^1.1.7",
    "node-mocks-http": "^1.5.2",
    "node-mocks-http": "1.5.2",
    "sinon": "^13.0.1",
    "tv4": "^1.2.7",
    "tv4": "^1.2.7"
    "webpack": "^5.93.0",
    "webpack-cli": "^5.1.4"
  },
  "scripts": {
    "cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",

@ -116,11 +109,10 @@
    "utapi_replay": "node lib/utapi/utapiReplay.js",
    "utapi_reindex": "node lib/utapi/utapiReindex.js",
    "management_agent": "node managementAgent.js",
    "test": "CI=true S3BACKEND=mem S3QUOTA=scuba mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
    "test": "CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
    "test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api",
    "test_legacy_location": "CI=true S3QUOTA=scuba S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
    "test_legacy_location": "CI=true S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
    "test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi",
    "test_quota": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/quota",
    "multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend",
    "unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
    "unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit"
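One reading note on the version pins this file trades back and forth: in npm semver ranges, "^2.5.0" accepts any 2.x.y at or above 2.5.0, "~2.5.0" accepts only 2.5.x patch releases, and a bare "2.905.0" (or a git/branch reference) pins exactly; tighter pins give more reproducible installs at the cost of automatic upgrades.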
@ -1,39 +0,0 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;

const bucket = 'deletequotatestbucket';
const nonExistantBucket = 'deletequotatestnonexistantbucket';

describe('Test delete bucket quota', () => {
    let s3;

    before(() => {
        const config = getConfig('default', { signatureVersion: 'v4' });
        s3 = new S3(config);
        AWS.config.update(config);
    });

    beforeEach(done => s3.createBucket({ Bucket: bucket }, done));

    afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));

    it('should delete the bucket quota', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            assert.ok(true);
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`);
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });
});
@ -1,77 +0,0 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;

const bucket = 'getquotatestbucket';
const quota = { quota: 1000 };

describe('Test get bucket quota', () => {
    let s3;

    before(() => {
        const config = getConfig('default', { signatureVersion: 'v4' });
        s3 = new S3(config);
        AWS.config.update(config);
    });

    beforeEach(done => s3.createBucket({ Bucket: bucket }, done));

    afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));

    it('should return the quota', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
            const data = await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            assert.strictEqual(data.GetBucketQuota.Name[0], bucket);
            assert.strictEqual(data.GetBucketQuota.Quota[0], '1000');
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });

    it('should return no such bucket quota', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            try {
                await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
                assert.fail('Expected NoSuchQuota error');
            } catch (err) {
                assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
            }
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });

    it('should return no such bucket quota', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            try {
                await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
                assert.fail('Expected NoSuchQuota error');
            } catch (err) {
                assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
            }
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });
});
@ -1,70 +0,0 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;

const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;

const bucket = 'updatequotatestbucket';
const nonExistantBucket = 'updatequotatestnonexistantbucket';
const quota = { quota: 2000 };
const negativeQuota = { quota: -1000 };
const wrongquotaFromat = '1000';
const largeQuota = { quota: 1000000000000 };

describe('Test update bucket quota', () => {
    let s3;

    before(() => {
        const config = getConfig('default', { signatureVersion: 'v4' });
        s3 = new S3(config);
        AWS.config.update(config);
    });

    beforeEach(done => s3.createBucket({ Bucket: bucket }, done));

    afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));

    it('should update the quota', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
            assert.ok(true);
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`, JSON.stringify(quota));
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });

    it('should return error when quota is negative', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(negativeQuota));
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
            assert.strictEqual(err.Error.Message[0], 'Quota value must be a positive number');
        }
    });

    it('should return error when quota is not in correct format', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, wrongquotaFromat);
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
            assert.strictEqual(err.Error.Message[0], 'Request body must be a JSON object');
        }
    });

    it('should handle large quota values', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(largeQuota));
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });
});
@ -33,7 +33,7 @@ describe('aws-node-sdk v2auth query tests', function testSuite() {
    let s3;

    before(() => {
        const config = getConfig('default', { signatureVersion: 'v2' });
        const config = getConfig('default');

        s3 = new S3(config);
    });
@ -1,52 +0,0 @@
|
||||||
-const fetch = require('node-fetch');
-const AWS = require('aws-sdk');
-const xml2js = require('xml2js');
-
-const sendRequest = async (method, host, path, body = '', config = null) =>
-    new Promise(async (resolve, reject) => {
-        const service = 's3';
-        const endpoint = new AWS.Endpoint(host);
-
-        const request = new AWS.HttpRequest(endpoint);
-        request.method = method.toUpperCase();
-        request.path = path;
-        request.body = body;
-        request.headers.Host = host;
-        request.headers['X-Amz-Date'] = new Date().toISOString().replace(/[:\-]|\.\d{3}/g, '');
-        const sha256hash = AWS.util.crypto.sha256(request.body || '', 'hex');
-        request.headers['X-Amz-Content-SHA256'] = sha256hash;
-        request.region = 'us-east-1';
-
-        const signer = new AWS.Signers.V4(request, service);
-        const accessKeyId = config?.accessKey || AWS.config.credentials?.accessKeyId;
-        const secretAccessKey = config?.secretKey || AWS.config.credentials?.secretAccessKey;
-        const credentials = new AWS.Credentials(accessKeyId, secretAccessKey);
-        signer.addAuthorization(credentials, new Date());
-
-        const url = `http://${host}${path}`;
-        const options = {
-            method: request.method,
-            headers: request.headers,
-        };
-
-        if (method !== 'GET') {
-            options.body = request.body;
-        }
-
-        try {
-            const response = await fetch(url, options);
-            const text = await response.text();
-            const result = await xml2js.parseStringPromise(text);
-            if (result && result.Error) {
-                reject(result);
-            } else {
-                resolve(result);
-            }
-        } catch (error) {
-            reject(error);
-        }
-    });
-
-module.exports = {
-    sendRequest,
-};
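For reference, this removed helper signs an arbitrary HTTP request with SigV4 and resolves the XML-parsed response (rejecting when the body parses to an `Error` element). A minimal usage sketch under the same assumptions as the deleted quota tests above; the endpoint, bucket name, and credential values here are illustrative only, not taken from the diff:

```js
const { sendRequest } = require('../quota/tooling');

// PUT a bucket quota, signing with explicit credentials via the optional
// `config` argument instead of relying on AWS.config.credentials.
sendRequest('PUT', '127.0.0.1:8000', '/examplebucket/?quota=true',
    JSON.stringify({ quota: 5000 }),
    { accessKey: 'accessKey1', secretKey: 'verySecretKey1' })
    .then(result => console.log('quota updated', result))
    .catch(err => console.error('request rejected', err));
```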
@ -3,8 +3,9 @@ const AWS = require('aws-sdk');
 const async = require('async');
 const crypto = require('crypto');
 const { v4: uuidv4 } = require('uuid');
-const { versioning } = require('arsenal');
+const { models, versioning } = require('arsenal');
 const versionIdUtils = versioning.VersionID;
+const { ObjectMD } = models;
 
 const { makeid } = require('../../unit/helpers');
 const { makeRequest, makeBackbeatRequest } = require('../../functional/raw-node/utils/makeRequest');
@ -146,15 +147,17 @@ function checkVersionData(s3, bucket, objectKey, versionId, dataValue, done) {
 }
 
 function updateStorageClass(data, storageClass) {
-    let result;
+    let parsedBody;
     try {
-        const parsedBody = JSON.parse(JSON.parse(data.body).Body);
-        parsedBody['x-amz-storage-class'] = storageClass;
-        result = JSON.stringify(parsedBody);
+        parsedBody = JSON.parse(data.body);
     } catch (err) {
        return { error: err };
     }
+    const { result, error } = ObjectMD.createFromBlob(parsedBody.Body);
+    if (error) {
+        return { error };
+    }
+    result.setAmzStorageClass(storageClass);
     return { result };
 }
 
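Together with the `ObjectMD` import above, the new `updateStorageClass` round-trips metadata through arsenal's object-metadata model instead of mutating raw JSON keys. A minimal sketch of the GET-modify-PUT flow this enables, assuming (as used in this diff) that `createFromBlob` returns `{ result, error }` and that `getSerialized` produces the blob expected as `requestBody`:

```js
const { models } = require('arsenal');
const { ObjectMD } = models;

// Sketch only: parse a GET-metadata response body, update the storage class
// through the ObjectMD model, and re-serialize it for the PUT-metadata call.
function rewriteStorageClass(getMetadataResponseBody, storageClass) {
    const parsedBody = JSON.parse(getMetadataResponseBody);
    const { result, error } = ObjectMD.createFromBlob(parsedBody.Body);
    if (error) {
        throw error;
    }
    result.setAmzStorageClass(storageClass);
    return result.getSerialized(); // usable as requestBody for the PUT
}
```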
@ -304,11 +307,11 @@ describe('backbeat routes', () => {
 
         it('should update metadata of a current null version', done => {
             let objMD;
-            return async.series({
-                putObject: next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                getMetadata: next => makeBackbeatRequest({
+            return async.series([
+                next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next),
+                next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } },
+                    next),
+                next => makeBackbeatRequest({
                     method: 'GET',
                     resourceType: 'metadata',
                     bucket,
@ -328,7 +331,7 @@ describe('backbeat routes', () => {
                     objMD = result;
                     return next();
                 }),
-                putMetadata: next => makeBackbeatRequest({
+                next => makeBackbeatRequest({
                     method: 'PUT',
                     resourceType: 'metadata',
                     bucket,
@ -337,37 +340,19 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
-                headObject: next => s3.headObject(
-                    { Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
-                getMetadataAfter: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next),
-            }, (err, results) => {
+                next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
+                next => s3.listObjectVersions({ Bucket: bucket }, next),
+            ], (err, data) => {
                 if (err) {
                     return done(err);
                 }
-                const headObjectRes = results.headObject;
+                const headObjectRes = data[4];
                 assert.strictEqual(headObjectRes.VersionId, 'null');
                 assert.strictEqual(headObjectRes.StorageClass, storageClass);
 
-                const getMetadataAfterRes = results.getMetadataAfter;
-                const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body;
-                const expectedMd = JSON.parse(objMD);
-                expectedMd.isNull = true; // TODO remove the line once CLDSRV-509 is fixed
-                assert.deepStrictEqual(JSON.parse(objMDAfter), expectedMd);
-
-                const listObjectVersionsRes = results.listObjectVersions;
+                const listObjectVersionsRes = data[5];
                 const { Versions } = listObjectVersionsRes;
 
                 assert.strictEqual(Versions.length, 1);
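The switch from the keyed to the positional form of `async.series` above changes how step results are addressed in the final callback. A minimal sketch of the two forms (standard behavior of the `async` library, shown with toy tasks):

```js
const async = require('async');

// Array form: results arrive positionally, which is why the updated test
// reads data[4] and data[5].
async.series([
    next => next(null, 'first'),
    next => next(null, 'second'),
], (err, data) => {
    // data[0] === 'first', data[1] === 'second'
});

// Object form: results are keyed by task name, which is what the old test
// relied on with results.headObject and results.listObjectVersions.
async.series({
    first: next => next(null, 'first'),
    second: next => next(null, 'second'),
}, (err, results) => {
    // results.first === 'first', results.second === 'second'
});
```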
@ -381,20 +366,18 @@ describe('backbeat routes', () => {
         it('should update metadata of a non-current null version', done => {
             let objMD;
             let expectedVersionId;
-            return async.series({
-                putObjectInitial: next => s3.putObject(
-                    { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next),
-                enableVersioning: next => s3.putBucketVersioning(
-                    { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectAgain: next => s3.putObject(
-                    { Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => {
+            return async.series([
+                next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next),
+                next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } },
+                    next),
+                next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, (err, data) => {
                     if (err) {
                         return next(err);
                     }
                     expectedVersionId = data.VersionId;
                     return next();
                 }),
-                getMetadata: next => makeBackbeatRequest({
+                next => makeBackbeatRequest({
                     method: 'GET',
                     resourceType: 'metadata',
                     bucket,
@ -414,7 +397,7 @@ describe('backbeat routes', () => {
                     objMD = result;
                     return next();
                 }),
-                putMetadata: next => makeBackbeatRequest({
+                next => makeBackbeatRequest({
                     method: 'PUT',
                     resourceType: 'metadata',
                     bucket,
@ -423,36 +406,23 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
-                headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
-                getMetadataAfter: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next),
-            }, (err, results) => {
+                next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
+                next => s3.listObjectVersions({ Bucket: bucket }, next),
+            ], (err, data) => {
                 if (err) {
                     return done(err);
                 }
-                const headObjectRes = results.headObject;
+                const headObjectRes = data[5];
                 assert.strictEqual(headObjectRes.VersionId, 'null');
                 assert.strictEqual(headObjectRes.StorageClass, storageClass);
 
-                const getMetadataAfterRes = results.getMetadataAfter;
-                const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body;
-                assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD));
-
-                const listObjectVersionsRes = results.listObjectVersions;
+                const listObjectVersionsRes = data[6];
                 const { Versions } = listObjectVersionsRes;
 
                 assert.strictEqual(Versions.length, 2);
 
                 const currentVersion = Versions.find(v => v.IsLatest);
                 assertVersionHasNotBeenUpdated(currentVersion, expectedVersionId);
 
@ -462,160 +432,6 @@ describe('backbeat routes', () => {
             });
         });
 
-        it('should update metadata of a suspended null version', done => {
-            let objMD;
-            return async.series({
-                suspendVersioning: next => s3.putBucketVersioning(
-                    { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, next),
-                putObject: next => s3.putObject(
-                    { Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next),
-                enableVersioning: next => s3.putBucketVersioning(
-                    { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    const { error, result } = updateStorageClass(data, storageClass);
-                    if (error) {
-                        return next(error);
-                    }
-                    objMD = result;
-                    return next();
-                }),
-                putUpdatedMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
-                getMetadataAfter: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-                const headObjectRes = results.headObject;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-                assert.strictEqual(headObjectRes.StorageClass, storageClass);
-
-                const getMetadataAfterRes = results.getMetadataAfter;
-                const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body;
-                assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD));
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 1);
-
-                const [currentVersion] = Versions;
-                assertVersionIsNullAndUpdated(currentVersion);
-                return done();
-            });
-        });
-
-        it('should update metadata of a suspended null version with internal version id', done => {
-            let objMD;
-            return async.series({
-                suspendVersioning: next => s3.putBucketVersioning(
-                    { Bucket: bucket, VersioningConfiguration: { Status: 'Suspended' } }, next),
-                putObject: next => s3.putObject(
-                    { Bucket: bucket, Key: keyName, Body: Buffer.from(testData) }, next),
-                enableVersioning: next => s3.putBucketVersioning(
-                    { Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectTagging: next => s3.putObjectTagging({
-                    Bucket: bucket, Key: keyName, VersionId: 'null',
-                    Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] },
-                }, next),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    const { error, result } = updateStorageClass(data, storageClass);
-                    if (error) {
-                        return next(error);
-                    }
-                    objMD = result;
-                    return next();
-                }),
-                putUpdatedMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObject: next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
-                getMetadataAfter: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucket }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-                const headObjectRes = results.headObject;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-                assert.strictEqual(headObjectRes.StorageClass, storageClass);
-
-                const getMetadataAfterRes = results.getMetadataAfter;
-                const objMDAfter = JSON.parse(getMetadataAfterRes.body).Body;
-                assert.deepStrictEqual(JSON.parse(objMDAfter), JSON.parse(objMD));
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 1);
-
-                const [currentVersion] = Versions;
-                assertVersionIsNullAndUpdated(currentVersion);
-                return done();
-            });
-        });
-
         it('should update metadata of a non-version object', done => {
             let objMD;
             return async.series([
@ -649,7 +465,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -711,7 +527,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -774,7 +590,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -845,7 +661,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -910,7 +726,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -970,7 +786,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -1031,7 +847,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
@ -1094,7 +910,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } },
                     next),
@ -1174,7 +990,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -1249,7 +1065,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
                 next => s3.listObjectVersions({ Bucket: bucket }, next),
@ -1321,7 +1137,7 @@ describe('backbeat routes', () => {
                        versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.putObject({ Bucket: bucket, Key: keyName, Body: new Buffer(testData) }, next),
                 next => s3.headObject({ Bucket: bucket, Key: keyName, VersionId: 'null' }, next),
@ -1395,7 +1211,7 @@ describe('backbeat routes', () => {
                         versionId: 'null',
                     },
                     authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
+                    requestBody: objMD.getSerialized(),
                 }, next),
                 next => s3.putBucketVersioning({ Bucket: bucket, VersioningConfiguration: { Status: 'Enabled' } },
                     next),
@ -6,6 +6,8 @@ const { ObjectMD } = models;
 const { makeBackbeatRequest } = require('../../functional/raw-node/utils/makeRequest');
 const BucketUtility = require('../../functional/aws-node-sdk/lib/utility/bucket-util');
 
+const describeSkipIfAWS = process.env.AWS_ON_AIR ? describe.skip : describe;
+
 const backbeatAuthCredentials = {
     accessKey: 'accessKey1',
     secretKey: 'verySecretKey1',
@ -13,10 +15,7 @@ const backbeatAuthCredentials = {
 
 const testData = 'testkey data';
 
-// Skip since Zenko/Artesca use cloud storage provider clients like AWS SDK
-// to replicate objects/versions rather than using the get and put metadata
-// functions of the Backbeat API.
-describe.skip('backbeat routes for replication', () => {
+describeSkipIfAWS('backbeat routes for replication', () => {
     const bucketUtil = new BucketUtility(
         'default', { signatureVersion: 'v4' });
     const s3 = bucketUtil.s3;
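This hunk replaces an unconditional `describe.skip` with an environment-driven skip: the replication suite now runs everywhere except when `AWS_ON_AIR` is set. A minimal sketch of the same mocha pattern, with hypothetical suite and test names:

```js
// Mocha sketch: choose describe vs describe.skip at load time from an env flag.
const describeSkipIfAWS = process.env.AWS_ON_AIR ? describe.skip : describe;

describeSkipIfAWS('suite that cannot run against real AWS', () => {
    it('runs only when AWS_ON_AIR is unset', () => { /* ... */ });
});
```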
@ -101,67 +100,6 @@ describe.skip('backbeat routes for replication', () => {
             });
         });
 
-        it('should successfully replicate a suspended null version', done => {
-            let objMD;
-
-            async.series({
-                suspendVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Suspended' } }, next),
-                putObject: next => s3.putObject({ Bucket: bucketSource, Key: keyName, Body: new Buffer(testData) }, next),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                enableVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObject: next => s3.headObject({ Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucketDestination }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const headObjectRes = results.headObject;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 1);
-
-                const [currentVersion] = Versions;
-                assert.strictEqual(currentVersion.IsLatest, true);
-                assert.strictEqual(currentVersion.VersionId, 'null');
-
-                return done();
-            });
-        });
-
         it('should successfully replicate a null version and update it', done => {
             let objMD;
 
@ -309,861 +247,4 @@ describe.skip('backbeat routes for replication', () => {
                 return done();
             });
         });
-
-        it('should replicate/put metadata to a destination that has a version', done => {
-            let objMD;
-            let firstVersionId;
-            let secondVersionId;
-
-            async.series({
-                enableVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectDestination: next => s3.putObject(
-                    { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, (err, data) => {
-                        if (err) {
-                            return next(err);
-                        }
-                        firstVersionId = data.VersionId;
-                        return next();
-                    }),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectSource: next => s3.putObject(
-                    { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => {
-                        if (err) {
-                            return next(err);
-                        }
-                        secondVersionId = data.VersionId;
-                        return next();
-                    }),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: secondVersionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: secondVersionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObjectFirstVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: firstVersionId }, next),
-                headObjectSecondVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: secondVersionId }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucketDestination }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const firstHeadObjectRes = results.headObjectFirstVersion;
-                assert.strictEqual(firstHeadObjectRes.VersionId, firstVersionId);
-
-                const secondHeadObjectRes = results.headObjectSecondVersion;
-                assert.strictEqual(secondHeadObjectRes.VersionId, secondVersionId);
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 2);
-                const [currentVersion, nonCurrentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, secondVersionId);
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                assert.strictEqual(nonCurrentVersion.VersionId, firstVersionId);
-                assert.strictEqual(nonCurrentVersion.IsLatest, false);
-
-                return done();
-            });
-        });
-
-        it('should replicate/put metadata to a destination that has a null version', done => {
-            let objMD;
-            let versionId;
-
-            async.series({
-                putObjectDestinationInitial: next => s3.putObject(
-                    { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next),
-                enableVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectSource: next => s3.putObject(
-                    { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => {
-                        if (err) {
-                            return next(err);
-                        }
-                        versionId = data.VersionId;
-                        return next();
-                    }),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObjectNullVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next),
-                listObjectVersions: next => s3.listObjectVersions(
-                    { Bucket: bucketDestination }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const headObjectRes = results.headObjectNullVersion;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 2);
-                const [currentVersion, nonCurrentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, versionId);
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                assert.strictEqual(nonCurrentVersion.VersionId, 'null');
-                assert.strictEqual(nonCurrentVersion.IsLatest, false);
-
-                return done();
-            });
-        });
-
-        it('should replicate/put metadata to a destination that has a suspended null version', done => {
-            let objMD;
-            let versionId;
-
-            async.series({
-                suspendVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, next),
-                putObjectDestinationInitial: next => s3.putObject(
-                    { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next),
-                enableVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectSource: next => s3.putObject(
-                    { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => {
-                        if (err) {
-                            return next(err);
-                        }
-                        versionId = data.VersionId;
-                        return next();
-                    }),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObjectNullVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucketDestination }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const headObjectRes = results.headObjectNullVersion;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 2);
-                const [currentVersion, nonCurrentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, versionId);
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                assert.strictEqual(nonCurrentVersion.VersionId, 'null');
-                assert.strictEqual(nonCurrentVersion.IsLatest, false);
-
-                return done();
-            });
-        });
-
-        it('should replicate/put metadata to a destination that has a previously updated null version', done => {
-            let objMD;
-            let objMDNull;
-            let versionId;
-
-            async.series({
-                putObjectDestinationInitial: next => s3.putObject(
-                    { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next),
-                enableVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                getMetadataNullVersion: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMDNull = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                updateMetadataNullVersion: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMDNull,
-                }, next),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectSource: next => s3.putObject(
-                    { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => {
-                        if (err) {
-                            return next(err);
-                        }
-                        versionId = data.VersionId;
-                        return next();
-                    }),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObjectNullVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucketDestination }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const headObjectRes = results.headObjectNullVersion;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 2);
-                const [currentVersion, nonCurrentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, versionId);
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                assert.strictEqual(nonCurrentVersion.VersionId, 'null');
-                assert.strictEqual(nonCurrentVersion.IsLatest, false);
-
-                return done();
-            });
-        });
-
-        it('should replicate/put metadata to a destination that has a suspended null version with internal version',
-        done => {
-            const tagSet = [
-                {
-                    Key: 'key1',
-                    Value: 'value1',
-                },
-            ];
-            let objMD;
-            let versionId;
-
-            async.series({
-                suspendVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Suspended' } }, next),
-                putObjectDestinationInitial: next => s3.putObject(
-                    { Bucket: bucketDestination, Key: keyName, Body: Buffer.from(testData) }, next),
-                putObjectTagging: next => s3.putObjectTagging(
-                    { Bucket: bucketDestination, Key: keyName, Tagging: { TagSet: tagSet } }, next),
-                enableVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                putObjectSource: next => s3.putObject(
-                    { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => {
-                        if (err) {
-                            return next(err);
-                        }
-                        versionId = data.VersionId;
-                        return next();
-                    }),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObjectNullVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next),
-                getObjectTaggingNullVersion: next => s3.getObjectTagging(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next),
-                listObjectVersions: next => s3.listObjectVersions({ Bucket: bucketDestination }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const headObjectRes = results.headObjectNullVersion;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-
-                const getObjectTaggingRes = results.getObjectTaggingNullVersion;
-                assert.deepStrictEqual(getObjectTaggingRes.TagSet, tagSet);
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 2);
-                const [currentVersion, nonCurrentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, versionId);
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                assert.strictEqual(nonCurrentVersion.VersionId, 'null');
-                assert.strictEqual(nonCurrentVersion.IsLatest, false);
-
-                return done();
-            });
-        });
-
-        it('should mimic null version replication by crrExistingObjects, then replicate version', done => {
-            let objMDNull;
-            let objMDNullReplicated;
-            let objMDVersion;
-            let versionId;
-
-            async.series({
-                createNullSoloMasterKey: next => s3.putObject(
-                    { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, next),
-                enableVersioningSource: next => s3.putBucketVersioning(
-                    { Bucket: bucketSource, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                simulateCrrExistingObjectsGetMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMDNull = JSON.parse(data.body).Body;
-                    assert.strictEqual(JSON.parse(objMDNull).versionId, undefined);
-                    return next();
-                }),
-                simulateCrrExistingObjectsPutMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMDNull,
-                }, next),
-                enableVersioningDestination: next => s3.putBucketVersioning(
-                    { Bucket: bucketDestination, VersioningConfiguration: { Status: 'Enabled' } }, next),
-                replicateNullVersion: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMDNullReplicated = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                putReplicatedNullVersion: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId: 'null',
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMDNullReplicated,
-                }, next),
-                putNewVersionSource: next => s3.putObject(
-                    { Bucket: bucketSource, Key: keyName, Body: Buffer.from(testData) }, (err, data) => {
-                        if (err) {
-                            return next(err);
-                        }
-                        versionId = data.VersionId;
-                        return next();
-                    }),
-                simulateMetadataReplicationVersion: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMDVersion = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                listObjectVersionsBeforeReplicate: next => s3.listObjectVersions({ Bucket: bucketDestination }, next),
-                putReplicatedVersion: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: {
-                        versionId,
-                    },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMDVersion,
-                }, next),
-                checkReplicatedNullVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: 'null' }, next),
-                checkReplicatedVersion: next => s3.headObject(
-                    { Bucket: bucketDestination, Key: keyName, VersionId: versionId }, next),
-                listObjectVersionsAfterReplicate: next => s3.listObjectVersions({ Bucket: bucketDestination }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const headObjectNullVersionRes = results.checkReplicatedNullVersion;
-                assert.strictEqual(headObjectNullVersionRes.VersionId, 'null');
-
-                const headObjectVersionRes = results.checkReplicatedVersion;
-                assert.strictEqual(headObjectVersionRes.VersionId, versionId);
-
-                const listObjectVersionsRes = results.listObjectVersionsAfterReplicate;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 2);
-
-                const [currentVersion, nonCurrentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, versionId);
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                assert.strictEqual(nonCurrentVersion.VersionId, 'null');
-                assert.strictEqual(nonCurrentVersion.IsLatest, false);
-
-                return done();
-            });
-        });
-
-        it('should replicate/put NULL metadata to a destination that has a version', done => {
-            let objMD;
-            let versionId;
-
-            async.series({
-                enableVersioningDestination: next => s3.putBucketVersioning({
-                    Bucket: bucketDestination,
-                    VersioningConfiguration: { Status: 'Enabled' },
-                }, next),
-                putObjectDestination: next => s3.putObject({
-                    Bucket: bucketDestination,
-                    Key: keyName,
-                    Body: Buffer.from(testData),
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    versionId = data.VersionId;
-                    return next();
-                }),
-                putObjectSource: next => s3.putObject({
-                    Bucket: bucketSource,
-                    Key: keyName,
-                    Body: Buffer.from(testData),
-                }, next),
-                enableVersioningSource: next => s3.putBucketVersioning({
-                    Bucket: bucketSource,
-                    VersioningConfiguration: { Status: 'Enabled' },
-                }, next),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: { versionId: 'null' },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: { versionId: 'null' },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObjectByVersionId: next => s3.headObject({
-                    Bucket: bucketDestination,
-                    Key: keyName,
-                    VersionId: versionId,
-                }, next),
-                headObjectByNullVersionId: next => s3.headObject({
-                    Bucket: bucketDestination,
-                    Key: keyName,
-                    VersionId: 'null',
-                }, next),
-                listObjectVersions: next => s3.listObjectVersions({
-                    Bucket: bucketDestination,
-                }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const firstHeadObjectRes = results.headObjectByVersionId;
-                assert.strictEqual(firstHeadObjectRes.VersionId, versionId);
-
-                const secondHeadObjectRes = results.headObjectByNullVersionId;
-                assert.strictEqual(secondHeadObjectRes.VersionId, 'null');
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 2);
-                const [currentVersion, nonCurrentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, 'null');
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                assert.strictEqual(nonCurrentVersion.VersionId, versionId);
-                assert.strictEqual(nonCurrentVersion.IsLatest, false);
-
-                return done();
-            });
-        });
-
-        it('should replicate/put NULL metadata to a destination that has a null version', done => {
-            let objMD;
-
-            async.series({
-                putObjectDestinationInitial: next => s3.putObject({
-                    Bucket: bucketDestination,
-                    Key: keyName,
-                    Body: Buffer.from(testData),
-                }, next),
-                enableVersioningDestination: next => s3.putBucketVersioning({
-                    Bucket: bucketDestination,
-                    VersioningConfiguration: { Status: 'Enabled' },
-                }, next),
-                putObjectSource: next => s3.putObject({
-                    Bucket: bucketSource,
-                    Key: keyName,
-                    Body: Buffer.from(testData),
-                }, next),
-                enableVersioningSource: next => s3.putBucketVersioning({
-                    Bucket: bucketSource,
-                    VersioningConfiguration: { Status: 'Enabled' },
-                }, next),
-                putObjectTaggingSource: next => s3.putObjectTagging({
-                    Bucket: bucketSource,
-                    Key: keyName,
-                    VersionId: 'null',
-                    Tagging: { TagSet: [{ Key: 'key1', Value: 'value1' }] },
-                }, next),
-                getMetadata: next => makeBackbeatRequest({
-                    method: 'GET',
-                    resourceType: 'metadata',
-                    bucket: bucketSource,
-                    objectKey: keyName,
-                    queryObj: { versionId: 'null' },
-                    authCredentials: backbeatAuthCredentials,
-                }, (err, data) => {
-                    if (err) {
-                        return next(err);
-                    }
-                    objMD = JSON.parse(data.body).Body;
-                    return next();
-                }),
-                replicateMetadata: next => makeBackbeatRequest({
-                    method: 'PUT',
-                    resourceType: 'metadata',
-                    bucket: bucketDestination,
-                    objectKey: keyName,
-                    queryObj: { versionId: 'null' },
-                    authCredentials: backbeatAuthCredentials,
-                    requestBody: objMD,
-                }, next),
-                headObjectNullVersion: next => s3.headObject({
-                    Bucket: bucketDestination,
-                    Key: keyName,
-                    VersionId: 'null',
-                }, next),
-                getObjectTaggingNullVersion: next => s3.getObjectTagging({
-                    Bucket: bucketDestination,
-                    Key: keyName,
-                    VersionId: 'null',
-                }, next),
-                listObjectVersions: next => s3.listObjectVersions({
-                    Bucket: bucketDestination,
-                }, next),
-            }, (err, results) => {
-                if (err) {
-                    return done(err);
-                }
-
-                const headObjectRes = results.headObjectNullVersion;
-                assert.strictEqual(headObjectRes.VersionId, 'null');
-
-                const getObjectTaggingRes = results.getObjectTaggingNullVersion;
-                assert.deepStrictEqual(getObjectTaggingRes.TagSet, [{ Key: 'key1', Value: 'value1' }]);
-
-                const listObjectVersionsRes = results.listObjectVersions;
-                const { Versions } = listObjectVersionsRes;
-
-                assert.strictEqual(Versions.length, 1);
-                const [currentVersion] = Versions;
-
-                assert.strictEqual(currentVersion.VersionId, 'null');
-                assert.strictEqual(currentVersion.IsLatest, true);
-
-                return done();
-            });
-        });
-
it('should replicate/put a lifecycled NULL metadata to a destination that has a version', done => {
|
|
||||||
let objMDUpdated;
|
|
||||||
let objMDReplicated;
|
|
||||||
let versionId;
|
|
||||||
|
|
||||||
async.series({
|
|
||||||
enableVersioningDestination: next => s3.putBucketVersioning({
|
|
||||||
Bucket: bucketDestination,
|
|
||||||
VersioningConfiguration: { Status: 'Enabled' },
|
|
||||||
}, next),
|
|
||||||
putObjectDestination: next => s3.putObject({
|
|
||||||
Bucket: bucketDestination,
|
|
||||||
Key: keyName,
|
|
||||||
Body: Buffer.from(testData),
|
|
||||||
}, (err, data) => {
|
|
||||||
if (err) {
|
|
||||||
return next(err);
|
|
||||||
}
|
|
||||||
versionId = data.VersionId;
|
|
||||||
return next();
|
|
||||||
}),
|
|
||||||
putObjectSource: next => s3.putObject({
|
|
||||||
Bucket: bucketSource,
|
|
||||||
Key: keyName,
|
|
||||||
Body: Buffer.from(testData),
|
|
||||||
}, next),
|
|
||||||
enableVersioningSource: next => s3.putBucketVersioning({
|
|
||||||
Bucket: bucketSource,
|
|
||||||
VersioningConfiguration: { Status: 'Enabled' },
|
|
||||||
}, next),
|
|
||||||
simulateLifecycleNullVersion: next => makeBackbeatRequest({
|
|
||||||
method: 'GET',
|
|
||||||
resourceType: 'metadata',
|
|
||||||
bucket: bucketSource,
|
|
||||||
objectKey: keyName,
|
|
||||||
queryObj: { versionId: 'null' },
|
|
||||||
authCredentials: backbeatAuthCredentials,
|
|
||||||
}, (err, data) => {
|
|
||||||
if (err) {
|
|
||||||
return next(err);
|
|
||||||
}
|
|
||||||
objMDUpdated = JSON.parse(data.body).Body;
|
|
||||||
return next();
|
|
||||||
}),
|
|
||||||
updateMetadataSource: next => makeBackbeatRequest({
|
|
||||||
method: 'PUT',
|
|
||||||
resourceType: 'metadata',
|
|
||||||
bucket: bucketSource,
|
|
||||||
objectKey: keyName,
|
|
||||||
queryObj: { versionId: 'null' },
|
|
||||||
authCredentials: backbeatAuthCredentials,
|
|
||||||
requestBody: objMDUpdated,
|
|
||||||
}, next),
|
|
||||||
getReplicatedNullVersion: next => makeBackbeatRequest({
|
|
||||||
method: 'GET',
|
|
||||||
resourceType: 'metadata',
|
|
||||||
bucket: bucketSource,
|
|
||||||
objectKey: keyName,
|
|
||||||
queryObj: { versionId: 'null' },
|
|
||||||
authCredentials: backbeatAuthCredentials,
|
|
||||||
}, (err, data) => {
|
|
||||||
if (err) {
|
|
||||||
return next(err);
|
|
||||||
}
|
|
||||||
objMDReplicated = JSON.parse(data.body).Body;
|
|
||||||
return next();
|
|
||||||
}),
|
|
||||||
putReplicatedNullVersion: next => makeBackbeatRequest({
|
|
||||||
method: 'PUT',
|
|
||||||
resourceType: 'metadata',
|
|
||||||
bucket: bucketDestination,
|
|
||||||
objectKey: keyName,
|
|
||||||
queryObj: { versionId: 'null' },
|
|
||||||
authCredentials: backbeatAuthCredentials,
|
|
||||||
requestBody: objMDReplicated,
|
|
||||||
}, next),
|
|
||||||
headObjectByVersionId: next => s3.headObject({
|
|
||||||
Bucket: bucketDestination,
|
|
||||||
Key: keyName,
|
|
||||||
VersionId: versionId,
|
|
||||||
}, next),
|
|
||||||
headObjectByNullVersion: next => s3.headObject({
|
|
||||||
Bucket: bucketDestination,
|
|
||||||
Key: keyName,
|
|
||||||
VersionId: 'null',
|
|
||||||
}, next),
|
|
||||||
listObjectVersionsDestination: next => s3.listObjectVersions({
|
|
||||||
Bucket: bucketDestination,
|
|
||||||
}, next),
|
|
||||||
}, (err, results) => {
|
|
||||||
if (err) {
|
|
||||||
return done(err);
|
|
||||||
}
|
|
||||||
|
|
||||||
const firstHeadObjectRes = results.headObjectByVersionId;
|
|
||||||
assert.strictEqual(firstHeadObjectRes.VersionId, versionId);
|
|
||||||
|
|
||||||
const secondHeadObjectRes = results.headObjectByNullVersion;
|
|
||||||
assert.strictEqual(secondHeadObjectRes.VersionId, 'null');
|
|
||||||
|
|
||||||
const listObjectVersionsRes = results.listObjectVersionsDestination;
|
|
||||||
const { Versions } = listObjectVersionsRes;
|
|
||||||
|
|
||||||
assert.strictEqual(Versions.length, 2);
|
|
||||||
const [currentVersion, nonCurrentVersion] = Versions;
|
|
||||||
|
|
||||||
assert.strictEqual(currentVersion.VersionId, 'null');
|
|
||||||
assert.strictEqual(currentVersion.IsLatest, true);
|
|
||||||
|
|
||||||
assert.strictEqual(nonCurrentVersion.VersionId, versionId);
|
|
||||||
assert.strictEqual(nonCurrentVersion.IsLatest, false);
|
|
||||||
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
});
|
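The three tests above all drive the same round-trip: read the null version's metadata from the source bucket over the Backbeat routes, then write that blob to the destination under `versionId=null`. A minimal sketch of that flow, using the same `makeBackbeatRequest` helper and `backbeatAuthCredentials` the tests use; the function name `replicateNullVersionMetadata` is hypothetical:

// Hypothetical helper composing the GET-then-PUT calls the tests exercise.
function replicateNullVersionMetadata(bucketSource, bucketDestination, objectKey, cb) {
    return async.waterfall([
        // 1. Read the null version's metadata from the source bucket.
        next => makeBackbeatRequest({
            method: 'GET',
            resourceType: 'metadata',
            bucket: bucketSource,
            objectKey,
            queryObj: { versionId: 'null' },
            authCredentials: backbeatAuthCredentials,
        }, (err, data) => {
            if (err) {
                return next(err);
            }
            // The raw metadata blob is nested under Body in the JSON response.
            return next(null, JSON.parse(data.body).Body);
        }),
        // 2. Write the same blob to the destination bucket's null version.
        (objMD, next) => makeBackbeatRequest({
            method: 'PUT',
            resourceType: 'metadata',
            bucket: bucketDestination,
            objectKey,
            queryObj: { versionId: 'null' },
            authCredentials: backbeatAuthCredentials,
            requestBody: objMD,
        }, next),
    ], cb);
}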
@ -1,785 +0,0 @@
const async = require('async');
const assert = require('assert');
const { S3 } = require('aws-sdk');
const getConfig = require('../functional/aws-node-sdk/test/support/config');
const { Scuba: MockScuba, inflightFlushFrequencyMS } = require('../utilities/mock/Scuba');
const sendRequest = require('../functional/aws-node-sdk/test/quota/tooling').sendRequest;
const memCredentials = require('../functional/aws-node-sdk/lib/json/mem_credentials.json');
const metadata = require('../../lib/metadata/wrapper');
const { fakeMetadataArchive } = require('../functional/aws-node-sdk/test/utils/init');
const { config: s3Config } = require('../../lib/Config');

let mockScuba = null;
let s3Client = null;
const quota = { quota: 1000 };

function wait(timeoutMs, cb) {
    if (s3Config.isQuotaInflightEnabled()) {
        return setTimeout(cb, timeoutMs);
    }
    return cb();
}

function createBucket(bucket, locked, cb) {
    const config = {
        Bucket: bucket,
    };
    if (locked) {
        config.ObjectLockEnabledForBucket = true;
    }
    return s3Client.createBucket(config, (err, data) => {
        assert.ifError(err);
        return cb(err, data);
    });
}

function configureBucketVersioning(bucket, cb) {
    return s3Client.putBucketVersioning({
        Bucket: bucket,
        VersioningConfiguration: {
            Status: 'Enabled',
        },
    }, (err, data) => {
        assert.ifError(err);
        return cb(err, data);
    });
}

function putObjectLockConfiguration(bucket, cb) {
    return s3Client.putObjectLockConfiguration({
        Bucket: bucket,
        ObjectLockConfiguration: {
            ObjectLockEnabled: 'Enabled',
            Rule: {
                DefaultRetention: {
                    Mode: 'GOVERNANCE',
                    Days: 1,
                },
            },
        },
    }, (err, data) => {
        assert.ifError(err);
        return cb(err, data);
    });
}

function deleteBucket(bucket, cb) {
    return s3Client.deleteBucket({
        Bucket: bucket,
    }, err => {
        assert.ifError(err);
        return cb(err);
    });
}

function putObject(bucket, key, size, cb) {
    return s3Client.putObject({
        Bucket: bucket,
        Key: key,
        Body: Buffer.alloc(size),
    }, (err, data) => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, size);
        }
        return cb(err, data);
    });
}

function putObjectWithCustomHeader(bucket, key, size, vID, cb) {
    const request = s3Client.putObject({
        Bucket: bucket,
        Key: key,
        Body: Buffer.alloc(size),
    });

    request.on('build', () => {
        request.httpRequest.headers['x-scal-s3-version-id'] = vID;
    });

    return request.send((err, data) => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, 0);
        }
        return cb(err, data);
    });
}

function copyObject(bucket, key, sourceSize, cb) {
    return s3Client.copyObject({
        Bucket: bucket,
        CopySource: `/${bucket}/${key}`,
        Key: `${key}-copy`,
    }, (err, data) => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, sourceSize);
        }
        return cb(err, data);
    });
}

function deleteObject(bucket, key, size, cb) {
    return s3Client.deleteObject({
        Bucket: bucket,
        Key: key,
    }, err => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, -size);
        }
        assert.ifError(err);
        return cb(err);
    });
}

function deleteVersionID(bucket, key, versionId, size, cb) {
    return s3Client.deleteObject({
        Bucket: bucket,
        Key: key,
        VersionId: versionId,
    }, (err, data) => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, -size);
        }
        return cb(err, data);
    });
}

function objectMPU(bucket, key, parts, partSize, callback) {
    let ETags = [];
    let uploadId = null;
    const partNumbers = Array.from(Array(parts).keys());
    const initiateMPUParams = {
        Bucket: bucket,
        Key: key,
    };
    return async.waterfall([
        next => s3Client.createMultipartUpload(initiateMPUParams,
            (err, data) => {
                if (err) {
                    return next(err);
                }
                uploadId = data.UploadId;
                return next();
            }),
        next =>
            async.mapLimit(partNumbers, 1, (partNumber, callback) => {
                const uploadPartParams = {
                    Bucket: bucket,
                    Key: key,
                    PartNumber: partNumber + 1,
                    UploadId: uploadId,
                    Body: Buffer.alloc(partSize),
                };

                return s3Client.uploadPart(uploadPartParams,
                    (err, data) => {
                        if (err) {
                            return callback(err);
                        }
                        return callback(null, data.ETag);
                    });
            }, (err, results) => {
                if (err) {
                    return next(err);
                }
                ETags = results;
                return next();
            }),
        next => {
            const params = {
                Bucket: bucket,
                Key: key,
                MultipartUpload: {
                    Parts: partNumbers.map(n => ({
                        ETag: ETags[n],
                        PartNumber: n + 1,
                    })),
                },
                UploadId: uploadId,
            };
            return s3Client.completeMultipartUpload(params, next);
        },
    ], err => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, parts * partSize);
        }
        return callback(err, uploadId);
    });
}
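For orientation, this is how the MPU helper above is meant to be driven; a minimal sketch using the same helpers from this file, with hypothetical bucket and key names. Note that the callback receives the uploadId even on failure, so a rejected upload can still be aborted, exactly as the tests below do.

// Hypothetical usage of objectMPU/abortMPU: upload 5 parts of 6 MiB each,
// then clean up if the completion was rejected by the quota check.
objectMPU('quota-test-bucket', 'quota-test-object', 5, 1024 * 1024 * 6,
    (err, uploadId) => {
        if (err && err.code === 'QuotaExceeded') {
            // The MPU failed its quota evaluation: abort to release the parts.
            return abortMPU('quota-test-bucket', 'quota-test-object',
                uploadId, 0, () => {});
        }
        return undefined;
    });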

function abortMPU(bucket, key, uploadId, size, callback) {
    return s3Client.abortMultipartUpload({
        Bucket: bucket,
        Key: key,
        UploadId: uploadId,
    }, (err, data) => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, -size);
        }
        return callback(err, data);
    });
}

function uploadPartCopy(bucket, key, partNumber, partSize, sleepDuration, keyToCopy, callback) {
    const ETags = [];
    let uploadId = null;
    const parts = 5;
    const partNumbers = Array.from(Array(parts).keys());
    const initiateMPUParams = {
        Bucket: bucket,
        Key: key,
    };
    if (!s3Config.isQuotaInflightEnabled()) {
        mockScuba.incrementBytesForBucket(bucket, parts * partSize);
    }
    return async.waterfall([
        next => s3Client.createMultipartUpload(initiateMPUParams,
            (err, data) => {
                if (err) {
                    return next(err);
                }
                uploadId = data.UploadId;
                return next();
            }),
        next => {
            const uploadPartParams = {
                Bucket: bucket,
                Key: key,
                PartNumber: partNumber + 1,
                UploadId: uploadId,
                Body: Buffer.alloc(partSize),
            };
            return s3Client.uploadPart(uploadPartParams, (err, data) => {
                if (err) {
                    return next(err);
                }
                ETags[partNumber] = data.ETag;
                return next();
            });
        },
        next => wait(sleepDuration, next),
        next => {
            const copyPartParams = {
                Bucket: bucket,
                CopySource: `/${bucket}/${keyToCopy}`,
                Key: `${key}-copy`,
                PartNumber: partNumber + 1,
                UploadId: uploadId,
            };
            return s3Client.uploadPartCopy(copyPartParams, (err, data) => {
                if (err) {
                    return next(err);
                }
                ETags[partNumber] = data.ETag;
                return next(null, data.ETag);
            });
        },
        next => {
            const params = {
                Bucket: bucket,
                Key: key,
                MultipartUpload: {
                    Parts: partNumbers.map(n => ({
                        ETag: ETags[n],
                        PartNumber: n + 1,
                    })),
                },
                UploadId: uploadId,
            };
            return s3Client.completeMultipartUpload(params, next);
        },
    ], err => {
        if (err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, -(parts * partSize));
        }
        return callback(err, uploadId);
    });
}

function restoreObject(bucket, key, size, callback) {
    return s3Client.restoreObject({
        Bucket: bucket,
        Key: key,
        RestoreRequest: {
            Days: 1,
        },
    }, (err, data) => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, size);
        }
        return callback(err, data);
    });
}

function multiObjectDelete(bucket, keys, size, callback) {
    if (!s3Config.isQuotaInflightEnabled()) {
        mockScuba.incrementBytesForBucket(bucket, -size);
    }
    return s3Client.deleteObjects({
        Bucket: bucket,
        Delete: {
            Objects: keys.map(key => ({ Key: key })),
        },
    }, (err, data) => {
        if (err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, size);
        }
        return callback(err, data);
    });
}

(process.env.S3METADATA === 'mongodb' ? describe : describe.skip)('quota evaluation with scuba metrics',
function t() {
    this.timeout(30000);
    const scuba = new MockScuba();
    const putQuotaVerb = 'PUT';
    const config = {
        accessKey: memCredentials.default.accessKey,
        secretKey: memCredentials.default.secretKey,
    };
    mockScuba = scuba;

    before(done => {
        const config = getConfig('default', { signatureVersion: 'v4', maxRetries: 0 });
        s3Client = new S3(config);
        scuba.start();
        return metadata.setup(err => wait(2000, () => done(err)));
    });

    afterEach(() => {
        scuba.reset();
    });

    after(() => {
        scuba.stop();
    });

    it('should return QuotaExceeded when trying to PutObject in a bucket with quota', done => {
        const bucket = 'quota-test-bucket1';
        const key = 'quota-test-object';
        const size = 1024;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should return QuotaExceeded when trying to copyObject in a versioned bucket with quota', done => {
        const bucket = 'quota-test-bucket12';
        const key = 'quota-test-object';
        const size = 900;
        let vID = null;
        return async.series([
            next => createBucket(bucket, false, next),
            next => configureBucketVersioning(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, (err, data) => {
                assert.ifError(err);
                vID = data.VersionId;
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => copyObject(bucket, key, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => deleteVersionID(bucket, key, vID, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should return QuotaExceeded when trying to CopyObject in a bucket with quota', done => {
        const bucket = 'quota-test-bucket2';
        const key = 'quota-test-object';
        const size = 900;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => copyObject(bucket, key, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => deleteObject(bucket, key, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should return QuotaExceeded when trying to complete MPU in a bucket with quota', done => {
        const bucket = 'quota-test-bucket3';
        const key = 'quota-test-object';
        const parts = 5;
        const partSize = 1024 * 1024 * 6;
        let uploadId = null;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => objectMPU(bucket, key, parts, partSize, (err, _uploadId) => {
                uploadId = _uploadId;
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => abortMPU(bucket, key, uploadId, 0, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
                return next();
            },
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not return QuotaExceeded if the quota is not exceeded', done => {
        const bucket = 'quota-test-bucket4';
        const key = 'quota-test-object';
        const size = 300;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => deleteObject(bucket, key, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not evaluate quotas if the backend is not available', done => {
        scuba.stop();
        const bucket = 'quota-test-bucket5';
        const key = 'quota-test-object';
        const size = 1024;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => deleteObject(bucket, key, size, next),
            next => deleteBucket(bucket, next),
        ], err => {
            assert.ifError(err);
            scuba.start();
            return wait(2000, done);
        });
    });

    it('should return QuotaExceeded when trying to copy a part in a bucket with quota', done => {
        const bucket = 'quota-test-bucket6';
        const key = 'quota-test-object-copy';
        const keyToCopy = 'quota-test-existing';
        const parts = 5;
        const partSize = 1024 * 1024 * 6;
        let uploadId = null;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify({ quota: Math.round(partSize * 2.5) }), config)
                .then(() => next()).catch(err => next(err)),
            next => putObject(bucket, keyToCopy, partSize, next),
            next => uploadPartCopy(bucket, key, parts, partSize, inflightFlushFrequencyMS * 2, keyToCopy,
                (err, _uploadId) => {
                    uploadId = _uploadId;
                    assert.strictEqual(err.code, 'QuotaExceeded');
                    return next();
                }),
            next => abortMPU(bucket, key, uploadId, parts * partSize, next),
            next => deleteObject(bucket, keyToCopy, partSize, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should return QuotaExceeded when trying to restore an object in a bucket with quota', done => {
        const bucket = 'quota-test-bucket7';
        const key = 'quota-test-object';
        const size = 900;
        let vID = null;
        return async.series([
            next => createBucket(bucket, false, next),
            next => configureBucketVersioning(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, (err, data) => {
                assert.ifError(err);
                vID = data.VersionId;
                return next();
            }),
            next => fakeMetadataArchive(bucket, key, vID, {
                archiveInfo: {},
            }, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => restoreObject(bucket, key, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => deleteVersionID(bucket, key, vID, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not update the inflights if the quota check is passing but the object is already restored', done => {
        const bucket = 'quota-test-bucket14';
        const key = 'quota-test-object';
        const size = 100;
        let vID = null;
        return async.series([
            next => createBucket(bucket, false, next),
            next => configureBucketVersioning(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, (err, data) => {
                assert.ifError(err);
                vID = data.VersionId;
                return next();
            }),
            next => fakeMetadataArchive(bucket, key, vID, {
                archiveInfo: {},
                restoreRequestedAt: new Date(0).toString(),
                restoreCompletedAt: new Date(0).toString() + 1,
                restoreRequestedDays: 5,
            }, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => restoreObject(bucket, key, 0, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => deleteVersionID(bucket, key, vID, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should allow writes after deleting data with quotas', done => {
        const bucket = 'quota-test-bucket8';
        const key = 'quota-test-object';
        const size = 400;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, `${key}1`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => putObject(bucket, `${key}2`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => putObject(bucket, `${key}3`, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size * 2);
                return next();
            },
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => deleteObject(bucket, `${key}2`, size, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => putObject(bucket, `${key}4`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => deleteObject(bucket, `${key}1`, size, next),
            next => deleteObject(bucket, `${key}3`, size, next),
            next => deleteObject(bucket, `${key}4`, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not increase the inflights when the object is being rewritten with a smaller object', done => {
        const bucket = 'quota-test-bucket9';
        const key = 'quota-test-object';
        const size = 400;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => putObject(bucket, key, size - 100, err => {
                assert.ifError(err);
                if (!s3Config.isQuotaInflightEnabled()) {
                    mockScuba.incrementBytesForBucket(bucket, -size);
                }
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size - 100);
                return next();
            },
            next => deleteObject(bucket, key, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should decrease the inflights when performing multi object delete', done => {
        const bucket = 'quota-test-bucket10';
        const key = 'quota-test-object';
        const size = 400;
        return async.series([
            next => createBucket(bucket, false, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, `${key}1`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => putObject(bucket, `${key}2`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => multiObjectDelete(bucket, [`${key}1`, `${key}2`], size * 2, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
                return next();
            },
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not update the inflights if the API errored after evaluating quotas (deletion)', done => {
        const bucket = 'quota-test-bucket11';
        const key = 'quota-test-object';
        const size = 100;
        let vID = null;
        return async.series([
            next => createBucket(bucket, true, next),
            next => putObjectLockConfiguration(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, (err, val) => {
                assert.ifError(err);
                vID = val.VersionId;
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => deleteVersionID(bucket, key, vID, size, err => {
                assert.strictEqual(err.code, 'AccessDenied');
                next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
        ], done);
    });

    it('should only evaluate quota and not update inflights for PutObject with the x-scal-s3-version-id header',
    done => {
        const bucket = 'quota-test-bucket13';
        const key = 'quota-test-object';
        const size = 100;
        let vID = null;
        return async.series([
            next => createBucket(bucket, true, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, (err, val) => {
                assert.ifError(err);
                vID = val.VersionId;
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => fakeMetadataArchive(bucket, key, vID, {
                archiveInfo: {},
                restoreRequestedAt: new Date(0).toISOString(),
                restoreRequestedDays: 7,
            }, next),
            // Simulate the real restore
            next => putObjectWithCustomHeader(bucket, key, size, vID, err => {
                assert.ifError(err);
                return next();
            }),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => deleteVersionID(bucket, key, vID, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should allow a restore if the quota is full but the object fits with its reserved storage space',
    done => {
        const bucket = 'quota-test-bucket15';
        const key = 'quota-test-object';
        const size = 1000;
        let vID = null;
        return async.series([
            next => createBucket(bucket, true, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, (err, val) => {
                assert.ifError(err);
                vID = val.VersionId;
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => fakeMetadataArchive(bucket, key, vID, {
                archiveInfo: {},
                restoreRequestedAt: new Date(0).toISOString(),
                restoreRequestedDays: 7,
            }, next),
            // Put an object, the quota should be exceeded
            next => putObject(bucket, `${key}-2`, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            // Simulate the real restore
            next => putObjectWithCustomHeader(bucket, key, size, vID, err => {
                assert.ifError(err);
                return next();
            }),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => deleteVersionID(bucket, key, vID, size, next),
            next => deleteBucket(bucket, next),
        ], done);
    });
});
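One convention runs through every helper in this deleted file: when CloudServer's own inflight tracking is disabled (`s3Config.isQuotaInflightEnabled()` returns false), the test mirrors each byte delta into the mock Scuba itself, so the quota evaluation still sees up-to-date usage. Successful writes add their size, deletes subtract it up front and roll back on error. A minimal sketch of that pattern, factored into a hypothetical wrapper:

// Hypothetical factoring of the pattern repeated in putObject, copyObject,
// deleteObject, etc. above: mirror a byte delta into the mock Scuba only
// when CloudServer is not tracking inflights itself.
function withMirroredInflights(bucket, delta, op, cb) {
    return op((err, data) => {
        if (!err && !s3Config.isQuotaInflightEnabled()) {
            mockScuba.incrementBytesForBucket(bucket, delta);
        }
        return cb(err, data);
    });
}

// Example: a put of `size` bytes.
// withMirroredInflights(bucket, size, done => s3Client.putObject(params, done), cb);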
@ -350,108 +350,6 @@ describe('Config', () => {
        });
    });

    describe('scuba option setup', () => {
        let oldConfig;

        before(() => {
            oldConfig = process.env.S3_CONFIG_FILE;
            process.env.S3_CONFIG_FILE =
                'tests/unit/testConfigs/allOptsConfig/config.json';
        });

        after(() => {
            process.env.S3_CONFIG_FILE = oldConfig;
        });

        it('should set up scuba', () => {
            const { ConfigObject } = require('../../lib/Config');
            const config = new ConfigObject();

            assert.deepStrictEqual(
                config.scuba,
                {
                    host: 'localhost',
                    port: 8100,
                },
            );
        });

        it('should use environment variables for scuba', () => {
            setEnv('SCUBA_HOST', 'scubahost');
            setEnv('SCUBA_PORT', 1234);

            const { ConfigObject } = require('../../lib/Config');
            const config = new ConfigObject();

            assert.deepStrictEqual(
                config.scuba,
                {
                    host: 'scubahost',
                    port: 1234,
                },
            );
        });
    });

    describe('quota option setup', () => {
        let oldConfig;

        before(() => {
            oldConfig = process.env.S3_CONFIG_FILE;
            process.env.S3_CONFIG_FILE =
                'tests/unit/testConfigs/allOptsConfig/config.json';
        });

        after(() => {
            process.env.S3_CONFIG_FILE = oldConfig;
        });

        it('should set up quota', () => {
            const { ConfigObject } = require('../../lib/Config');
            const config = new ConfigObject();

            assert.deepStrictEqual(
                config.quota,
                {
                    maxStaleness: 24 * 60 * 60 * 1000,
                    enableInflights: false,
                },
            );
        });

        it('should use environment variables for quota', () => {
            setEnv('QUOTA_MAX_STALENESS_MS', 1234);
            setEnv('QUOTA_ENABLE_INFLIGHTS', 'true');

            const { ConfigObject } = require('../../lib/Config');
            const config = new ConfigObject();

            assert.deepStrictEqual(
                config.quota,
                {
                    maxStaleness: 1234,
                    enableInflights: true,
                },
            );
        });

        it('should use the default if the maxStaleness is not a number', () => {
            setEnv('QUOTA_MAX_STALENESS_MS', 'notanumber');
            setEnv('QUOTA_ENABLE_INFLIGHTS', 'true');

            const { ConfigObject } = require('../../lib/Config');
            const config = new ConfigObject();

            assert.deepStrictEqual(
                config.quota,
                {
                    maxStaleness: 24 * 60 * 60 * 1000,
                    enableInflights: true,
                },
            );
        });
    });
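The removed tests above pin down the configuration behavior they were guarding: scuba defaults to localhost:8100, quota.maxStaleness defaults to 24 hours, both are overridable through SCUBA_HOST/SCUBA_PORT and QUOTA_MAX_STALENESS_MS/QUOTA_ENABLE_INFLIGHTS, and a non-numeric staleness value falls back to the default. A sketch of that parsing logic, reconstructed from the assertions rather than from lib/Config itself:

// Sketch of the env parsing the removed tests exercised; defaults and
// variable names are taken from the assertions above, not from lib/Config.
function parseScubaAndQuotaOptions(env) {
    const scuba = {
        host: env.SCUBA_HOST || 'localhost',
        port: Number(env.SCUBA_PORT) || 8100,
    };
    const maxStaleness = Number(env.QUOTA_MAX_STALENESS_MS);
    const quota = {
        // Fall back to 24h when QUOTA_MAX_STALENESS_MS is not a number.
        maxStaleness: Number.isNaN(maxStaleness) ? 24 * 60 * 60 * 1000 : maxStaleness,
        enableInflights: env.QUOTA_ENABLE_INFLIGHTS === 'true',
    };
    return { scuba, quota };
}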

    describe('utapi option setup', () => {
        let oldConfig;

@ -15,15 +15,13 @@ const sourceObject = 'objectsource';
const sourceVersionId = 'vid1';

describe('prepareRequestContexts', () => {
    it('should return s3:DeleteObject if multiObjectDelete method', () => {
    it('should return null if multiObjectDelete method', () => {
        const apiMethod = 'multiObjectDelete';
        const request = makeRequest();
        const results = prepareRequestContexts(apiMethod, request, sourceBucket,
            sourceObject, sourceVersionId);

        assert.strictEqual(results.length, 1);
        assert.strictEqual(results, null);
        const expectedAction = 's3:DeleteObject';
        assert.strictEqual(results[0].getAction(), expectedAction);
    });

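The updated test changes the contract of prepareRequestContexts for multiObjectDelete from a one-element s3:DeleteObject context array to null, which suggests that authorization for each key now happens later in the multi-object-delete API itself. A hypothetical sketch of what call sites must now tolerate:

// Hypothetical call site: prepareRequestContexts now yields null for
// multiObjectDelete instead of a one-element s3:DeleteObject array.
function getContextActions(apiMethod, request, bucket, object, versionId) {
    const contexts = prepareRequestContexts(apiMethod, request, bucket, object, versionId);
    // A null result means no bucket-level context is built here; per-key
    // authorization is presumably performed later by the API.
    return contexts === null ? [] : contexts.map(c => c.getAction());
}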
    it('should return s3:PutObjectVersion request context action for objectPut method with x-scal-s3-version-id' +
@ -8,8 +8,6 @@ const {
} = require('../../../../lib/api/apiUtils/object/coldStorage');
const { DummyRequestLogger } = require('../../helpers');
const { ObjectMD, ObjectMDArchive } = require('arsenal/build/lib/models');
const { config } = require('../../../../lib/Config');

const { scaledMsPerDay } = config.getTimeOptions();
const log = new DummyRequestLogger();
const oneDay = 24 * 60 * 60 * 1000;

@ -229,7 +227,8 @@ describe('cold storage', () => {

            assert.strictEqual(objectMd.archive.restoreCompletedAt, restoreCompletedAt);
            assert.strictEqual(objectMd.archive.restoreWillExpireAt.getTime(),
                objectMd.archive.restoreRequestedAt.getTime() + (5 * scaledMsPerDay));
                objectMd.archive.restoreRequestedAt.getTime() + 5 * oneDay
            );
            assert.deepEqual(objectMd['x-amz-restore'], {
                'ongoing-request': false,
                'expiry-date': objectMd.archive.restoreWillExpireAt,
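The assertion change above swaps the config-scaled day length (scaledMsPerDay, whose import is removed) for a fixed 24-hour day when computing the expected restore expiry. In other words, the expectation becomes restoreWillExpireAt = restoreRequestedAt + days * oneDay; a minimal sketch of that computation:

// Expiry computation the updated assertion encodes: a fixed 24h day
// rather than the config-scaled scaledMsPerDay.
const oneDay = 24 * 60 * 60 * 1000;
function computeRestoreExpiry(restoreRequestedAt, days) {
    return new Date(restoreRequestedAt.getTime() + days * oneDay);
}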
@ -1,623 +0,0 @@
const sinon = require('sinon');
const assert = require('assert');
const { config } = require('../../../../../lib/Config');
const {
    validateQuotas,
    processBytesToWrite,
    isMetricStale,
} = require('../../../../../lib/api/apiUtils/quotas/quotaUtils');
const QuotaService = require('../../../../../lib/quotas/quotas');

const mockLog = {
    warn: sinon.stub(),
    debug: sinon.stub(),
};

const mockBucket = {
    getQuota: () => 100,
    getName: () => 'bucketName',
    getCreationDate: () => '2022-01-01T00:00:00.000Z',
};

const mockBucketNoQuota = {
    getQuota: () => 0,
    getName: () => 'bucketName',
    getCreationDate: () => '2022-01-01T00:00:00.000Z',
};
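All calls in the describe blocks below use the same positional signature. Inferring the parameter roles from the tests themselves (the labels are mine, not from documented API docs), a typical invocation looks like this, assuming a request object like the one each describe block builds:

// Inferred roles: validateQuotas(request, bucket, accountQuotaInfo,
//     apiNames, apiMethod, inflightBytes, isStorageReserved, log, cb)
validateQuotas(request, mockBucket, { account: 'test_1', quota: 1000 },
    ['objectPut'], 'objectPut', 1, false, mockLog, err => {
        if (err && err.is.QuotaExceeded) {
            // The write would push bucket or account usage past its quota.
        }
    });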

describe('validateQuotas (buckets)', () => {
    const request = {
        getQuota: () => 100,
    };

    beforeEach(() => {
        config.scuba = {
            host: 'localhost',
            port: 8080,
        };
        config.quota = {
            maxStaleness: 24 * 60 * 60 * 1000,
            enableInflights: true,
        };
        config.isQuotaEnabled = sinon.stub().returns(true);
        QuotaService.enabled = true;
        QuotaService._getLatestMetricsCallback = sinon.stub().resolves({});
        request.finalizerHooks = [];
    });

    afterEach(() => {
        sinon.restore();
    });

    it('should return null if quota is <= 0', done => {
        validateQuotas(request, mockBucketNoQuota, {}, [], '', false, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
            done();
        });
    });

    it('should return null if scuba is disabled', done => {
        QuotaService.enabled = false;
        validateQuotas(request, mockBucket, {}, [], '', false, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
            done();
        });
    });

    it('should return null if metrics retrieval fails', done => {
        QuotaService.enabled = true;
        const error = new Error('Failed to get metrics');
        QuotaService._getLatestMetricsCallback.yields(error);

        validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should return errors.QuotaExceeded if quota is exceeded', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.yields(null, result2);

        validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
            assert.strictEqual(err.is.QuotaExceeded, true);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
            assert.strictEqual(request.finalizerHooks.length, 1);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectDelete',
                    inflight: -50,
                }
            ), true);
            done();
        });
    });

    it('should return null if quota is not exceeded', done => {
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
            true, false, mockLog, err => {
                assert.ifError(err);
                assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
                assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                    'bucket',
                    'bucketName_1640995200000',
                    null,
                    {
                        action: 'objectRestore',
                        inflight: true,
                    }
                ), true);
                done();
            });
    });

    it('should not include the inflights in the request if they are disabled', done => {
        config.quota.enableInflights = false;
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
            true, false, mockLog, err => {
                assert.ifError(err);
                assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
                assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                    'bucket',
                    'bucketName_1640995200000',
                    null,
                    {
                        action: 'objectRestore',
                        inflight: undefined,
                    }
                ), true);
                done();
            });
    });

    it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => {
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {}, ['objectPut'], 'objectPut',
            true, true, mockLog, err => {
                assert.ifError(err);
                assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
                assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                    'bucket',
                    'bucketName_1640995200000',
                    null,
                    {
                        action: 'objectPut',
                        inflight: 0,
                    }
                ), true);
                done();
            });
    });
});

describe('validateQuotas (with accounts)', () => {
    const request = {
        getQuota: () => 100,
    };

    beforeEach(() => {
        config.scuba = {
            host: 'localhost',
            port: 8080,
        };
        config.quota = {
            maxStaleness: 24 * 60 * 60 * 1000,
            enableInflights: true,
        };
        request.finalizerHooks = [];
        config.isQuotaEnabled = sinon.stub().returns(true);
        QuotaService.enabled = true;
        QuotaService._getLatestMetricsCallback = sinon.stub().resolves({});
    });

    afterEach(() => {
        sinon.restore();
    });

    it('should return null if quota is <= 0', done => {
        validateQuotas(request, mockBucketNoQuota, {
            account: 'test_1',
            quota: 0,
        }, [], '', false, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
            done();
        });
    });

    it('should not return null if bucket quota is <= 0 but account quota is > 0', done => {
        validateQuotas(request, mockBucketNoQuota, {
            account: 'test_1',
            quota: 1000,
        }, [], '', false, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
            done();
        });
    });

    it('should return null if scuba is disabled', done => {
        QuotaService.enabled = false;
        validateQuotas(request, mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, [], '', false, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
            done();
        });
    });

    it('should return null if metrics retrieval fails', done => {
        QuotaService.enabled = true;
        const error = new Error('Failed to get metrics');
        QuotaService._getLatestMetricsCallback.yields(error);

        validateQuotas(request, mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should return errors.QuotaExceeded if quota is exceeded', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucketNoQuota, {
            account: 'test_1',
            quota: 100,
        }, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
            assert.strictEqual(err.is.QuotaExceeded, true);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
            assert.strictEqual(request.finalizerHooks.length, 1);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'account',
                'test_1',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucketNoQuota, {
            account: 'test_1',
            quota: 1000,
        }, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'account',
                'test_1',
                null,
                {
                    action: 'objectDelete',
                    inflight: -50,
                }
            ), true);
            done();
        });
    });

    it('should return null if quota is not exceeded', done => {
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'account',
                'test_1',
                null,
                {
                    action: 'objectRestore',
                    inflight: true,
                }
            ), true);
            done();
        });
    });

    it('should return quota exceeded if account and bucket quotas are different', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
            assert.strictEqual(err.is.QuotaExceeded, true);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 2);
            assert.strictEqual(request.finalizerHooks.length, 1);
            done();
        });
    });

    it('should update the request with one function per action to clear quota updates', done => {
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'account',
                'test_1',
                null,
                {
                    action: 'objectRestore',
                    inflight: true,
                }
            ), true);
            done();
        });
    });

    it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => {
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        QuotaService._getLatestMetricsCallback.yields(null, result1);
        QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);

        validateQuotas(request, mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectPut'], 'objectPut', true, true, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
            assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
                'account',
                'test_1',
                null,
                {
                    action: 'objectPut',
                    inflight: 0,
                }
            ), true);
            done();
        });
    });
});

describe('processBytesToWrite', () => {
    let bucket;
    let versionId;
    let contentLength;
    let objMD;

    beforeEach(() => {
        bucket = {
            isVersioningEnabled: sinon.stub(),
        };
        versionId = '';
        contentLength = 0;
        objMD = null;
    });

    it('should return a negative number if the operation is a delete and bucket is not versioned', () => {
        bucket.isVersioningEnabled.returns(false);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, -100);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 0 if the operation is a delete and bucket is versioned', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 0);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return a negative number for a versioned bucket with a versionid deletion', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
versionId = 'versionId';
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, -100);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return 0 for a delete operation if the object metadata is missing', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = null;
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 0);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return the object metadata content length for a restore object operation', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
contentLength = 150;
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectRestore', bucket, versionId, contentLength, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 100);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return the difference of the content length if the object is being replaced', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(false);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
contentLength = 150;
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectPut', bucket, versionId, contentLength, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 50);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return content length if the object is being replaced and the bucket is versioned', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
contentLength = 150;
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectPut', bucket, versionId, contentLength, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, contentLength);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return content length of the object metadata if the action is a copy (getObject authz)', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectCopy', bucket, versionId, 0, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 100);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return content length of the object metadata if the action is a copy part (getObject authz)', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectPutCopyPart', bucket, versionId, 0, objMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 100);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should detect object replacement during copy object operation on a non versioned bucket', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(false);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
const destObjMD = { 'content-length': 20 };
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectCopy', bucket, versionId, 0, objMD, destObjMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 80);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not detect object replacement during copy object operation if the bucket is versioned', () => {
|
|
||||||
bucket.isVersioningEnabled.returns(true);
|
|
||||||
objMD = { 'content-length': 100 };
|
|
||||||
const destObjMD = { 'content-length': 20 };
|
|
||||||
|
|
||||||
const bytes = processBytesToWrite('objectCopy', bucket, versionId, 0, objMD, destObjMD);
|
|
||||||
|
|
||||||
assert.strictEqual(bytes, 100);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('isMetricStale', () => {
|
|
||||||
const metric = {
|
|
||||||
date: new Date().toISOString(),
|
|
||||||
};
|
|
||||||
const resourceType = 'bucket';
|
|
||||||
const resourceName = 'bucketName';
|
|
||||||
const action = 'objectPut';
|
|
||||||
const inflight = 1;
|
|
||||||
const log = {
|
|
||||||
warn: sinon.stub(),
|
|
||||||
};
|
|
||||||
|
|
||||||
it('should return false if the metric is not stale', () => {
|
|
||||||
const result = isMetricStale(metric, resourceType, resourceName, action, inflight, log);
|
|
||||||
assert.strictEqual(result, false);
|
|
||||||
assert.strictEqual(log.warn.called, false);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return true and log a warning if the metric is stale', () => {
|
|
||||||
const staleDate = new Date(Date.now() - 24 * 60 * 60 * 1000 - 1);
|
|
||||||
metric.date = staleDate.toISOString();
|
|
||||||
|
|
||||||
const result = isMetricStale(metric, resourceType, resourceName, action, inflight, log);
|
|
||||||
assert.strictEqual(result, true);
|
|
||||||
assert.strictEqual(log.warn.calledOnce, true);
|
|
||||||
});
|
|
||||||
});
|
|
|
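The processBytesToWrite cases above all exercise a single delta rule: a delete frees the object's size unless the bucket is versioned and no versionId is targeted (a delete marker frees nothing yet), a restore or copy source counts the source object's size, and a put only counts growth over a replaced object on non-versioned buckets. A minimal sketch of that rule, assuming objMD/destObjMD carry a 'content-length' field as in the tests (an illustration consistent with the cases, not the shipped helper):

    // Sketch of the byte-delta rule the tests above pin down.
    function bytesDelta(action, isVersioned, versionId, contentLength, objMD, destObjMD) {
        const objSize = objMD ? objMD['content-length'] : 0;
        if (action === 'objectDelete') {
            if (isVersioned && !versionId) {
                return 0; // delete marker: nothing is freed yet
            }
            return -objSize;
        }
        if (action === 'objectRestore' || action === 'objectPutCopyPart') {
            return objSize; // source object's size is what gets written
        }
        if (action === 'objectCopy') {
            // replacement only frees space on non-versioned buckets
            const replaced = (!isVersioned && destObjMD) ? destObjMD['content-length'] : 0;
            return objSize - replaced;
        }
        // objectPut: same replacement rule, against the put's own key
        const replaced = (!isVersioned && objMD) ? objSize : 0;
        return contentLength - replaced;
    }

Every assertion in the describe block above (−100, 0, 50, 80, 100, 150) falls out of this one function under the stated inputs.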
@@ -1,4 +1,3 @@
-const crypto = require('crypto');
 const assert = require('assert');
 const { errors, storage } = require('arsenal');
 
@@ -8,7 +7,6 @@ const multiObjectDelete = require('../../../lib/api/multiObjectDelete');
 const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers');
 const DummyRequest = require('../DummyRequest');
 const { bucketPut } = require('../../../lib/api/bucketPut');
-const metadataWrapper = require('../../../lib/metadata/wrapper');
 const objectPut = require('../../../lib/api/objectPut');
 const log = new DummyRequestLogger();
 
@@ -27,7 +25,6 @@ const objectKey1 = 'objectName1';
 const objectKey2 = 'objectName2';
 const metadataUtils = require('../../../lib/metadata/metadataUtils');
 const services = require('../../../lib/services');
-const { BucketInfo } = require('arsenal/build/lib/models');
 const testBucketPutRequest = new DummyRequest({
     bucketName,
     namespace,
@@ -42,7 +39,7 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => {
         headers: {},
         parsedContentLength: contentLength,
     }, postBody);
-    const bucket = { getVersioningConfiguration: () => null, getQuota: () => 0 };
+    const bucket = { getVersioningConfiguration: () => null };
 
     beforeEach(done => {
         cleanup();
@@ -360,43 +357,3 @@ describe('decodeObjectVersion function helper', () => {
         assert.deepStrictEqual(ret[1], undefined);
     });
 });
-
-describe('multiObjectDelete function', () => {
-    afterEach(() => {
-        sinon.restore();
-    });
-
-    it('should not authorize the bucket and initial IAM authorization results', done => {
-        const post = '<Delete><Object><Key>objectname</Key></Object></Delete>';
-        const request = new DummyRequest({
-            bucketName: 'bucketname',
-            objectKey: 'objectname',
-            parsedHost: 'localhost',
-            headers: {
-                'content-md5': crypto.createHash('md5').update(post, 'utf8').digest('base64')
-            },
-            post,
-            url: '/bucketname',
-        });
-        const authInfo = makeAuthInfo('123456');
-
-        sinon.stub(metadataWrapper, 'getBucket').callsFake((bucketName, log, cb) =>
-            cb(null, new BucketInfo(
-                'bucketname',
-                '123456',
-                'accountA',
-                new Date().toISOString(),
-                15,
-            )));
-
-        multiObjectDelete.multiObjectDelete(authInfo, request, log, (err, res) => {
-            // Expected result is an access denied on the object, and no error, as the API was authorized
-            assert.strictEqual(err, null);
-            assert.strictEqual(
-                res.includes('<Error><Key>objectname</Key><Code>AccessDenied</Code>'),
-                true
-            );
-            done();
-        });
-    });
-});
@@ -0,0 +1,82 @@
const assert = require('assert');

const {
    createWSAgent,
} = require('../../../lib/management/push');

const proxy = 'http://proxy:3128/';
const logger = { info: () => {} };

function testVariableSet(httpProxy, httpsProxy, allProxy, noProxy) {
    return () => {
        it(`should use ${httpProxy} environment variable`, () => {
            let agent = createWSAgent('https://pushserver', {
                [httpProxy]: 'http://proxy:3128',
            }, logger);
            assert.equal(agent, null);

            agent = createWSAgent('http://pushserver', {
                [httpProxy]: proxy,
            }, logger);
            assert.equal(agent.proxy.href, proxy);
        });

        it(`should use ${httpsProxy} environment variable`, () => {
            let agent = createWSAgent('http://pushserver', {
                [httpsProxy]: proxy,
            }, logger);
            assert.equal(agent, null);

            agent = createWSAgent('https://pushserver', {
                [httpsProxy]: proxy,
            }, logger);
            assert.equal(agent.proxy.href, proxy);
        });

        it(`should use ${allProxy} environment variable`, () => {
            let agent = createWSAgent('http://pushserver', {
                [allProxy]: proxy,
            }, logger);
            assert.equal(agent.proxy.href, proxy);

            agent = createWSAgent('https://pushserver', {
                [allProxy]: proxy,
            }, logger);
            assert.equal(agent.proxy.href, proxy);
        });

        it(`should use ${noProxy} environment variable`, () => {
            let agent = createWSAgent('http://pushserver', {
                [noProxy]: 'pushserver',
            }, logger);
            assert.equal(agent, null);

            agent = createWSAgent('http://pushserver', {
                [noProxy]: 'pushserver',
                [httpProxy]: proxy,
            }, logger);
            assert.equal(agent, null);

            agent = createWSAgent('http://pushserver', {
                [noProxy]: 'pushserver2',
                [httpProxy]: proxy,
            }, logger);
            assert.equal(agent.proxy.href, proxy);
        });
    };
}

describe('Websocket connection agent', () => {
    describe('with no proxy env', () => {
        it('should handle empty proxy environment', () => {
            const agent = createWSAgent('https://pushserver', {}, logger);
            assert.equal(agent, null);
        });
    });

    describe('with lowercase proxy env',
        testVariableSet('http_proxy', 'https_proxy', 'all_proxy', 'no_proxy'));

    describe('with uppercase proxy env',
        testVariableSet('HTTP_PROXY', 'HTTPS_PROXY', 'ALL_PROXY', 'NO_PROXY'));
});
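These tests pin down a precedence order for the proxy environment: scheme-specific variables apply only to their own scheme, ALL_PROXY covers both schemes, lowercase and uppercase forms are equivalent, and NO_PROXY suppresses the agent for listed hosts. A hedged sketch of just the selection step (the real createWSAgent additionally wraps the chosen URL in a WebSocket proxy agent; this only computes which proxy URL, if any, applies):

    const url = require('url');

    // Returns the proxy URL to use for `endpoint` given an env object,
    // or null when no proxy applies -- matching the cases above.
    function applicableProxy(endpoint, env) {
        const { protocol, hostname } = url.parse(endpoint);
        const noProxy = env.no_proxy || env.NO_PROXY;
        if (noProxy && noProxy.split(',').indexOf(hostname) !== -1) {
            return null; // endpoint explicitly excluded
        }
        if (protocol === 'https:') {
            return env.https_proxy || env.HTTPS_PROXY ||
                env.all_proxy || env.ALL_PROXY || null;
        }
        return env.http_proxy || env.HTTP_PROXY ||
            env.all_proxy || env.ALL_PROXY || null;
    }

Under this rule an http_proxy never applies to an https:// push server (first case), and a NO_PROXY match wins even when http_proxy is also set (last case).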
@@ -0,0 +1,239 @@
const assert = require('assert');
const crypto = require('crypto');

const { DummyRequestLogger } = require('../helpers');
const log = new DummyRequestLogger();

const metadata = require('../../../lib/metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';

const { privateKey, accessKey, decryptedSecretKey, secretKey, canonicalId,
    userName } = require('./resources.json');
const shortid = '123456789012';
const email = 'customaccount1@setbyenv.com';
const arn = 'arn:aws:iam::123456789012:root';
const { config } = require('../../../lib/Config');

const {
    remoteOverlayIsNewer,
    patchConfiguration,
} = require('../../../lib/management/configuration');

const {
    initManagementDatabase,
} = require('../../../lib/management/index');

function initManagementCredentialsMock(cb) {
    return metadata.putObjectMD(managementDatabaseName,
        tokenConfigurationKey, { privateKey }, {},
        log, error => cb(error));
}

function getConfig() {
    return config;
}

// Original Config
const overlayVersionOriginal = Object.assign({}, config.overlayVersion);
const authDataOriginal = Object.assign({}, config.authData);
const locationConstraintsOriginal = Object.assign({},
    config.locationConstraints);
const restEndpointsOriginal = Object.assign({}, config.restEndpoints);
const browserAccessEnabledOriginal = config.browserAccessEnabled;
const instanceId = '19683e55-56f7-4a4c-98a7-706c07e4ec30';
const publicInstanceId = crypto.createHash('sha256')
    .update(instanceId)
    .digest('hex');

function resetConfig() {
    config.overlayVersion = overlayVersionOriginal;
    config.authData = authDataOriginal;
    config.locationConstraints = locationConstraintsOriginal;
    config.restEndpoints = restEndpointsOriginal;
    config.browserAccessEnabled = browserAccessEnabledOriginal;
}

function assertConfig(actualConf, expectedConf) {
    Object.keys(expectedConf).forEach(key => {
        assert.deepStrictEqual(actualConf[key], expectedConf[key]);
    });
}

describe('patchConfiguration', () => {
    before(done => initManagementDatabase(log, err => {
        if (err) {
            return done(err);
        }
        return initManagementCredentialsMock(done);
    }));
    beforeEach(() => {
        resetConfig();
    });

    it('should modify config using the new config', done => {
        const newConf = {
            version: 1,
            instanceId,
            users: [
                {
                    secretKey,
                    accessKey,
                    canonicalId,
                    userName,
                },
            ],
            endpoints: [
                {
                    hostname: '1.1.1.1',
                    locationName: 'us-east-1',
                },
            ],
            locations: {
                'us-east-1': {
                    name: 'us-east-1',
                    objectId: 'us-east-1',
                    locationType: 'location-file-v1',
                    legacyAwsBehavior: true,
                    details: {},
                },
            },
            browserAccess: {
                enabled: true,
            },
        };
        return patchConfiguration(newConf, log, err => {
            assert.ifError(err);
            const actualConf = getConfig();
            const expectedConf = {
                overlayVersion: 1,
                publicInstanceId,
                browserAccessEnabled: true,
                authData: {
                    accounts: [{
                        name: userName,
                        email,
                        arn,
                        canonicalID: canonicalId,
                        shortid,
                        keys: [{
                            access: accessKey,
                            secret: decryptedSecretKey,
                        }],
                    }],
                },
                locationConstraints: {
                    'us-east-1': {
                        type: 'file',
                        objectId: 'us-east-1',
                        legacyAwsBehavior: true,
                        isTransient: false,
                        sizeLimitGB: null,
                        details: { supportsVersioning: true },
                        name: 'us-east-1',
                        locationType: 'location-file-v1',
                    },
                },
            };
            assertConfig(actualConf, expectedConf);
            assert.deepStrictEqual(actualConf.restEndpoints['1.1.1.1'],
                'us-east-1');
            return done();
        });
    });

    it('should apply second configuration if version (2) is greater than ' +
    'overlayVersion (1)', done => {
        const newConf1 = {
            version: 1,
            instanceId,
        };
        const newConf2 = {
            version: 2,
            instanceId,
            browserAccess: {
                enabled: true,
            },
        };
        patchConfiguration(newConf1, log, err => {
            assert.ifError(err);
            return patchConfiguration(newConf2, log, err => {
                assert.ifError(err);
                const actualConf = getConfig();
                const expectedConf = {
                    overlayVersion: 2,
                    browserAccessEnabled: true,
                };
                assertConfig(actualConf, expectedConf);
                return done();
            });
        });
    });

    it('should not apply the second configuration if version equals ' +
    'overlayVersion', done => {
        const newConf1 = {
            version: 1,
            instanceId,
        };
        const newConf2 = {
            version: 1,
            instanceId,
            browserAccess: {
                enabled: true,
            },
        };
        patchConfiguration(newConf1, log, err => {
            assert.ifError(err);
            return patchConfiguration(newConf2, log, err => {
                assert.ifError(err);
                const actualConf = getConfig();
                const expectedConf = {
                    overlayVersion: 1,
                    browserAccessEnabled: undefined,
                };
                assertConfig(actualConf, expectedConf);
                return done();
            });
        });
    });
});

describe('remoteOverlayIsNewer', () => {
    it('should return remoteOverlayIsNewer equals false if remote overlay ' +
    'is less than the cached', () => {
        const cachedOverlay = {
            version: 2,
        };
        const remoteOverlay = {
            version: 1,
        };
        const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
            remoteOverlay);
        assert.equal(isRemoteOverlayNewer, false);
    });
    it('should return remoteOverlayIsNewer equals false if remote overlay ' +
    'and the cached one are equal', () => {
        const cachedOverlay = {
            version: 1,
        };
        const remoteOverlay = {
            version: 1,
        };
        const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
            remoteOverlay);
        assert.equal(isRemoteOverlayNewer, false);
    });
    it('should return remoteOverlayIsNewer equals true if remote overlay ' +
    'version is greater than the cached one ', () => {
        const cachedOverlay = {
            version: 0,
        };
        const remoteOverlay = {
            version: 1,
        };
        const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
            remoteOverlay);
        assert.equal(isRemoteOverlayNewer, true);
    });
});
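The three remoteOverlayIsNewer cases reduce to a strict version comparison, which is what makes overlay application idempotent: an overlay with a version less than or equal to the cached one is ignored. A sketch consistent with all three cases (an assumption on the edge behavior: a missing or zero cached version counts as "older"):

    // Strict "remote is newer" check implied by the tests above.
    function remoteOverlayIsNewerSketch(cachedOverlay, remoteOverlay) {
        const cached = (cachedOverlay && cachedOverlay.version) || 0;
        const remote = (remoteOverlay && remoteOverlay.version) || 0;
        return remote > cached;
    }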
@@ -0,0 +1,9 @@
{
    "privateKey": "-----BEGIN RSA PRIVATE KEY-----\r\nMIIEowIBAAKCAQEAj13sSYE40lAX2qpBvfdGfcSVNtBf8i5FH+E8FAhORwwPu+2S\r\n3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12DtxqFRnMA08LfO4oO6oC4V8XfKeuHyJ\r\n1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD5p7D+G26Chbr/Oo0ZwHula9DxXy6\r\neH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2dbBIhovMgjjikf5p2oWqnRKXc+JK\r\nBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1t5V4wfRZea5vwl/HlyyKodvHdxng\r\nJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTDfwIDAQABAoIBAAuDYGlavkRteCzw\r\nRU1LIVcSRWVcgIgDXTu9K8T0Ec0008Kkxomyn6LmxmroJbZ1VwsDH8s4eRH73ckA\r\nxrZxt6Pr+0lplq6eBvKtl8MtGhq1VDe+kJczjHEF6SQHOFAu/TEaPZrn2XMcGvRX\r\nO1BnRL9tepFlxm3u/06VRFYNWqqchM+tFyzLu2AuiuKd5+slSX7KZvVgdkY1ErKH\r\ngB75lPyhPb77C/6ptqUisVMSO4JhLhsD0+ekDVY982Sb7KkI+szdWSbtMx9Ek2Wo\r\ntXwJz7I8T7IbODy9aW9G+ydyhMDFmaEYIaDVFKJj5+fluNza3oQ5PtFNVE50GQJA\r\nsisGqfECgYEAwpkwt0KpSamSEH6qknNYPOwxgEuXWoFVzibko7is2tFPvY+YJowb\r\n68MqHIYhf7gHLq2dc5Jg1TTbGqLECjVxp4xLU4c95KBy1J9CPAcuH4xQLDXmeLzP\r\nJ2YgznRocbzAMCDAwafCr3uY9FM7oGDHAi5bE5W11xWx+9MlFExL3JkCgYEAvJp5\r\nf+JGN1W037bQe2QLYUWGszewZsvplnNOeytGQa57w4YdF42lPhMz6Kc/zdzKZpN9\r\njrshiIDhAD5NCno6dwqafBAW9WZl0sn7EnlLhD4Lwm8E9bRHnC9H82yFuqmNrzww\r\nzxBCQogJISwHiVz4EkU48B283ecBn0wT/fAa19cCgYEApKWsnEHgrhy1IxOpCoRh\r\nUhqdv2k1xDPN/8DUjtnAFtwmVcLa/zJopU/Zn4y1ZzSzjwECSTi+iWZRQ/YXXHPf\r\nl92SFjhFW92Niuy8w8FnevXjF6T7PYiy1SkJ9OR1QlZrXc04iiGBDazLu115A7ce\r\nanACS03OLw+CKgl6Q/RR83ECgYBCUngDVoimkMcIHHt3yJiP3ikeAKlRnMdJlsa0\r\nXWVZV4hCG3lDfRXsnEgWuimftNKf+6GdfYSvQdLdiQsCcjT5A4uLsQTByv5nf4uA\r\n1ZKOsFrmRrARzxGXhLDikvj7yP//7USkq+0BBGFhfuAvl7fMhPceyPZPehqB7/jf\r\nxX1LBQKBgAn5GgSXzzS0e06ZlP/VrKxreOHa5Z8wOmqqYQ0QTeczAbNNmuITdwwB\r\nNkbRqpVXRIfuj0BQBegAiix8om1W4it0cwz54IXBwQULxJR1StWxj3jo4QtpMQ+z\r\npVPdB1Ilb9zPV1YvDwRfdS1xsobzznAx56ecsXduZjs9mF61db8Q\r\n-----END RSA PRIVATE KEY-----\r\n",
    "publicKey": "-----BEGIN PUBLIC KEY-----\r\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAj13sSYE40lAX2qpBvfdG\r\nfcSVNtBf8i5FH+E8FAhORwwPu+2S3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12Dtx\r\nqFRnMA08LfO4oO6oC4V8XfKeuHyJ1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD\r\n5p7D+G26Chbr/Oo0ZwHula9DxXy6eH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2\r\ndbBIhovMgjjikf5p2oWqnRKXc+JKBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1\r\nt5V4wfRZea5vwl/HlyyKodvHdxngJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTD\r\nfwIDAQAB\r\n-----END PUBLIC KEY-----\r\n",
    "accessKey": "QXP3VDG3SALNBX2QBJ1C",
    "secretKey": "K5FyqZo5uFKfw9QBtn95o6vuPuD0zH/1seIrqPKqGnz8AxALNSx6EeRq7G1I6JJpS1XN13EhnwGn2ipsml3Uf2fQ00YgEmImG8wzGVZm8fWotpVO4ilN4JGyQCah81rNX4wZ9xHqDD7qYR5MyIERxR/osoXfctOwY7GGUjRKJfLOguNUlpaovejg6mZfTvYAiDF+PTO1sKUYqHt1IfKQtsK3dov1EFMBB5pWM7sVfncq/CthKN5M+VHx9Y87qdoP3+7AW+RCBbSDOfQgxvqtS7PIAf10mDl8k2kEURLz+RqChu4O4S0UzbEmtja7wa7WYhYKv/tM/QeW7kyNJMmnPg==",
    "decryptedSecretKey": "n7PSZ3U6SgerF9PCNhXYsq3S3fRKVGdZTicGV8Ur",
    "canonicalId": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
    "userName": "orbituser"
}
@@ -0,0 +1,43 @@
const assert = require('assert');

const { getCapabilities } = require('../../../lib/utilities/reportHandler');

// Ensures that expected features are enabled even if they
// rely on optional dependencies (such as secureChannelOptimizedPath)
describe('report handler', () => {
    it('should report current capabilities', () => {
        const c = getCapabilities();
        assert.strictEqual(c.locationTypeDigitalOcean, true);
        assert.strictEqual(c.locationTypeS3Custom, true);
        assert.strictEqual(c.locationTypeSproxyd, true);
        assert.strictEqual(c.locationTypeHyperdriveV2, true);
        assert.strictEqual(c.locationTypeLocal, true);
        assert.strictEqual(c.preferredReadLocation, true);
        assert.strictEqual(c.managedLifecycle, true);
        assert.strictEqual(c.secureChannelOptimizedPath, true);
        assert.strictEqual(c.s3cIngestLocation, true);
    });

    [
        { value: 'true', result: true },
        { value: 'TRUE', result: true },
        { value: 'tRuE', result: true },
        { value: '1', result: true },
        { value: 'false', result: false },
        { value: 'FALSE', result: false },
        { value: 'FaLsE', result: false },
        { value: '0', result: false },
        { value: 'foo', result: false },
        { value: '', result: true },
        { value: undefined, result: true },
    ].forEach(param =>
        it(`should allow set local file system capability ${param.value}`, () => {
            const OLD_ENV = process.env;

            if (param.value !== undefined) process.env.LOCAL_VOLUME_CAPABILITY = param.value;
            assert.strictEqual(getCapabilities().locationTypeLocal, param.result);

            process.env = OLD_ENV;
        })
    );
});
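The parameterized cases above define a truth table for LOCAL_VOLUME_CAPABILITY: unset or empty leaves the capability on, and otherwise only 'true' or '1' (case-insensitive) enable it. One way to realize that table (an assumption for illustration; the real check lives in lib/utilities/reportHandler):

    // Env-flag parser matching the truth table exercised above.
    function envFlagEnabled(value) {
        if (value === undefined || value === '') {
            return true; // absent or empty: capability stays enabled
        }
        const v = String(value).toLowerCase();
        return v === 'true' || v === '1';
    }

Note the "default to enabled" choice: an operator has to set an explicit falsy value to turn the local file system capability off.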
@@ -0,0 +1,72 @@
const assert = require('assert');

const {
    ChannelMessageV0,
    MessageType,
    TargetType,
} = require('../../../lib/management/ChannelMessageV0');

const {
    CONFIG_OVERLAY_MESSAGE,
    METRICS_REQUEST_MESSAGE,
    METRICS_REPORT_MESSAGE,
    CHANNEL_CLOSE_MESSAGE,
    CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;

const { TARGET_ANY } = TargetType;

describe('ChannelMessageV0', () => {
    describe('codec', () => {
        it('should roundtrip metrics report', () => {
            const b = ChannelMessageV0.encodeMetricsReportMessage({ a: 1 });
            const m = new ChannelMessageV0(b);

            assert.strictEqual(METRICS_REPORT_MESSAGE, m.getType());
            assert.strictEqual(0, m.getChannelNumber());
            assert.strictEqual(m.getTarget(), TARGET_ANY);
            assert.strictEqual(m.getPayload().toString(), '{"a":1}');
        });

        it('should roundtrip channel data', () => {
            const data = new Buffer('dummydata');
            const b = ChannelMessageV0.encodeChannelDataMessage(50, data);
            const m = new ChannelMessageV0(b);

            assert.strictEqual(CHANNEL_PAYLOAD_MESSAGE, m.getType());
            assert.strictEqual(50, m.getChannelNumber());
            assert.strictEqual(m.getTarget(), TARGET_ANY);
            assert.strictEqual(m.getPayload().toString(), 'dummydata');
        });

        it('should roundtrip channel close', () => {
            const b = ChannelMessageV0.encodeChannelCloseMessage(3);
            const m = new ChannelMessageV0(b);

            assert.strictEqual(CHANNEL_CLOSE_MESSAGE, m.getType());
            assert.strictEqual(3, m.getChannelNumber());
            assert.strictEqual(m.getTarget(), TARGET_ANY);
        });
    });

    describe('decoder', () => {
        it('should parse metrics request', () => {
            const b = new Buffer([METRICS_REQUEST_MESSAGE, 0, 0]);
            const m = new ChannelMessageV0(b);

            assert.strictEqual(METRICS_REQUEST_MESSAGE, m.getType());
            assert.strictEqual(0, m.getChannelNumber());
            assert.strictEqual(m.getTarget(), TARGET_ANY);
        });

        it('should parse overlay push', () => {
            const b = new Buffer([CONFIG_OVERLAY_MESSAGE, 0, 0, 34, 65, 34]);
            const m = new ChannelMessageV0(b);

            assert.strictEqual(CONFIG_OVERLAY_MESSAGE, m.getType());
            assert.strictEqual(0, m.getChannelNumber());
            assert.strictEqual(m.getTarget(), TARGET_ANY);
            assert.strictEqual(m.getPayload().toString(), '"A"');
        });
    });
});
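The decoder fixtures make the wire layout legible: a three-byte header (message type, channel number, target) followed by the raw payload, with the overlay fixture's trailing bytes 34, 65, 34 decoding to the JSON string '"A"'. A hedged reading of that framing, inferred from the fixtures alone (it assumes TARGET_ANY encodes as 0, which is what the zero third byte suggests):

    // Decode a ChannelMessageV0 buffer per the layout the tests imply.
    function decodeChannelMessageV0(buf) {
        return {
            type: buf[0],    // MessageType value
            channel: buf[1], // channel number (0 for control traffic)
            target: buf[2],  // TargetType value
            payload: buf.slice(3),
        };
    }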
@@ -14,11 +14,7 @@ const bucket = new BucketInfo('niftyBucket', ownerCanonicalId,
     authInfo.getAccountDisplayName(), creationDate);
 const log = new DummyRequestLogger();
 
-const {
-    validateBucket,
-    metadataGetObjects,
-    metadataGetObject,
-} = require('../../../lib/metadata/metadataUtils');
+const { validateBucket, metadataGetObjects, metadataGetObject } = require('../../../lib/metadata/metadataUtils');
 const metadata = require('../../../lib/metadata/wrapper');
 
 describe('validateBucket', () => {
@@ -1,102 +0,0 @@
const assert = require('assert');
const sinon = require('sinon');
const { ScubaClientImpl } = require('../../../../lib/quotas/scuba/wrapper');

describe('ScubaClientImpl', () => {
    let client;
    let log;

    beforeEach(() => {
        client = new ScubaClientImpl({ scuba: true, quota: { maxStaleness: 24 * 60 * 60 * 1000 } });
        log = {
            info: sinon.spy(),
            warn: sinon.spy(),
        };
        client.setup(log);
    });

    afterEach(() => {
        sinon.restore();
    });

    describe('setup', () => {
        it('should enable Scuba and start periodic health check', () => {
            client.setup(log);

            assert.strictEqual(client.enabled, true);
        });

        it('should not enable Scuba if config.scuba is falsy', () => {
            client = new ScubaClientImpl({ scuba: false, quota: { maxStaleness: 24 * 60 * 60 * 1000 } });
            client.setup(log);

            assert.strictEqual(client.enabled, false);
        });
    });

    describe('_healthCheck', () => {
        it('should enable Scuba if health check passes', async () => {
            sinon.stub(client, 'healthCheck').resolves();

            await client._healthCheck();

            assert.strictEqual(client.enabled, true);
        });

        it('should disable Scuba if health check returns non-stale data', async () => {
            sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - (12 * 60 * 60 * 1000) });

            await client._healthCheck();

            assert.strictEqual(client.enabled, true);
        });

        it('should disable Scuba if health check returns stale data', async () => {
            sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - (48 * 60 * 60 * 1000) });

            await client._healthCheck();

            assert.strictEqual(client.enabled, false);
        });

        it('should disable Scuba if health check fails', async () => {
            const error = new Error('Health check failed');
            sinon.stub(client, 'healthCheck').rejects(error);

            await client._healthCheck();

            assert.strictEqual(client.enabled, false);
        });
    });

    describe('periodicHealthCheck', () => {
        let healthCheckStub;
        let setIntervalStub;
        let clearIntervalStub;

        beforeEach(() => {
            healthCheckStub = sinon.stub(client, '_healthCheck');
            setIntervalStub = sinon.stub(global, 'setInterval');
            clearIntervalStub = sinon.stub(global, 'clearInterval');
        });

        it('should call _healthCheck and start periodic health check', () => {
            client._healthCheckTimer = null;
            client.periodicHealthCheck();

            assert(healthCheckStub.calledOnce);
            assert(setIntervalStub.calledOnce);
            assert(clearIntervalStub.notCalled);
        });

        it('should clear previous health check timer before starting a new one', () => {
            client._healthCheckTimer = 123;

            client.periodicHealthCheck();

            assert(healthCheckStub.calledOnce);
            assert(setIntervalStub.calledOnce);
            assert(clearIntervalStub.calledOnceWith(123));
        });
    });
});
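The four _healthCheck cases in this removed file encode one staleness decision: the client stays enabled when the health check resolves with no date or a date within maxStaleness, and disables itself when the data is older than maxStaleness or the check rejects. A sketch of that decision under those assumptions (maxStaleness is quota.maxStaleness, 24h in these tests):

    // Staleness gate implied by the four _healthCheck cases above.
    async function healthCheckSketch(client, maxStaleness) {
        try {
            const data = await client.healthCheck();
            // a response without a date means healthy, nothing to compare
            const stale = !!(data && data.date &&
                Date.now() - data.date > maxStaleness);
            client.enabled = !stale;
        } catch (err) {
            client.enabled = false; // unreachable backend: fail closed
        }
    }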
@@ -100,13 +100,6 @@
         "name": "zenko",
         "sentinels": "localhost:6379"
     },
-    "scuba": {
-        "host": "localhost",
-        "port": 8100
-    },
-    "quota": {
-        "maxStalenessMS": 86400000
-    },
     "utapi": {
         "redis": {
             "host": "localhost",
@@ -1,254 +0,0 @@
const assert = require('assert');
const { parseRedisConfig } = require('../../../lib/Config');

describe('parseRedisConfig', () => {
    [
        {
            desc: 'with host and port',
            input: {
                host: 'localhost',
                port: 6479,
            },
        },
        {
            desc: 'with host, port and password',
            input: {
                host: 'localhost',
                port: 6479,
                password: 'mypass',
            },
        },
        {
            desc: 'with host, port and an empty password',
            input: {
                host: 'localhost',
                port: 6479,
                password: '',
            },
        },
        {
            desc: 'with host, port and an empty retry config',
            input: {
                host: 'localhost',
                port: 6479,
                retry: {
                },
            },
        },
        {
            desc: 'with host, port and a custom retry config',
            input: {
                host: 'localhost',
                port: 6479,
                retry: {
                    connectBackoff: {
                        min: 10,
                        max: 1000,
                        jitter: 0.1,
                        factor: 1.5,
                        deadline: 10000,
                    },
                },
            },
        },
        {
            desc: 'with a single sentinel and no sentinel password',
            input: {
                name: 'myname',
                sentinels: [
                    {
                        host: 'localhost',
                        port: 16479,
                    },
                ],
            },
        },
        {
            desc: 'with two sentinels and a sentinel password',
            input: {
                name: 'myname',
                sentinels: [
                    {
                        host: '10.20.30.40',
                        port: 16479,
                    },
                    {
                        host: '10.20.30.41',
                        port: 16479,
                    },
                ],
                sentinelPassword: 'mypass',
            },
        },
        {
            desc: 'with a sentinel and an empty sentinel password',
            input: {
                name: 'myname',
                sentinels: [
                    {
                        host: '10.20.30.40',
                        port: 16479,
                    },
                ],
                sentinelPassword: '',
            },
        },
        {
            desc: 'with a basic production-like config with sentinels',
            input: {
                name: 'scality-s3',
                password: '',
                sentinelPassword: '',
                sentinels: [
                    {
                        host: 'storage-1',
                        port: 16379,
                    },
                    {
                        host: 'storage-2',
                        port: 16379,
                    },
                    {
                        host: 'storage-3',
                        port: 16379,
                    },
                    {
                        host: 'storage-4',
                        port: 16379,
                    },
                    {
                        host: 'storage-5',
                        port: 16379,
                    },
                ],
            },
        },
        {
            desc: 'with a single sentinel passed as a string',
            input: {
                name: 'myname',
                sentinels: '10.20.30.40:16479',
            },
            output: {
                name: 'myname',
                sentinels: [
                    {
                        host: '10.20.30.40',
                        port: 16479,
                    },
                ],
            },
        },
        {
            desc: 'with a list of sentinels passed as a string',
            input: {
                name: 'myname',
                sentinels: '10.20.30.40:16479,another-host:16480,10.20.30.42:16481',
                sentinelPassword: 'mypass',
            },
            output: {
                name: 'myname',
                sentinels: [
                    {
                        host: '10.20.30.40',
                        port: 16479,
                    },
                    {
                        host: 'another-host',
                        port: 16480,
                    },
                    {
                        host: '10.20.30.42',
                        port: 16481,
                    },
                ],
                sentinelPassword: 'mypass',
            },
        },
    ].forEach(testCase => {
        it(`should parse a valid config ${testCase.desc}`, () => {
            const redisConfig = parseRedisConfig(testCase.input);
            assert.deepStrictEqual(redisConfig, testCase.output || testCase.input);
        });
    });

    [
        {
            desc: 'that is empty',
            input: {},
        },
        {
            desc: 'with only a host',
            input: {
                host: 'localhost',
            },
        },
        {
            desc: 'with only a port',
            input: {
                port: 6479,
            },
        },
        {
            desc: 'with a custom retry config with missing values',
            input: {
                host: 'localhost',
                port: 6479,
                retry: {
                    connectBackoff: {
                    },
                },
            },
        },
        {
            desc: 'with a sentinel but no name',
            input: {
                sentinels: [
                    {
                        host: 'localhost',
                        port: 16479,
                    },
                ],
            },
        },
        {
            desc: 'with a sentinel but an empty name',
            input: {
                name: '',
                sentinels: [
                    {
                        host: 'localhost',
                        port: 16479,
                    },
                ],
            },
        },
        {
            desc: 'with an empty list of sentinels',
            input: {
                name: 'myname',
                sentinels: [],
            },
        },
        {
            desc: 'with an empty list of sentinels passed as a string',
            input: {
                name: 'myname',
                sentinels: '',
            },
        },
        {
            desc: 'with an invalid list of sentinels passed as a string (missing port)',
            input: {
                name: 'myname',
                sentinels: '10.20.30.40:16479,10.20.30.50',
            },
        },
    ].forEach(testCase => {
        it(`should fail to parse an invalid config ${testCase.desc}`, () => {
            assert.throws(() => {
                parseRedisConfig(testCase.input);
            });
        });
    });
});
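The string-valued sentinel cases in this removed suite ('host:port,host:port' expanded to an array of { host, port } objects, with a missing port rejected) suggest a small parser along these lines (a sketch consistent with the valid and invalid cases above; the base-10 port parse is an assumption):

    // Expand a comma-separated sentinel string into { host, port } objects,
    // throwing on empty input or any entry without a numeric port.
    function parseSentinels(sentinels) {
        if (typeof sentinels !== 'string' || sentinels === '') {
            throw new Error('invalid sentinels');
        }
        return sentinels.split(',').map(item => {
            const [host, port] = item.split(':');
            if (!host || !port || Number.isNaN(Number.parseInt(port, 10))) {
                throw new Error(`invalid sentinel "${item}"`);
            }
            return { host, port: Number.parseInt(port, 10) };
        });
    }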
@@ -1,123 +0,0 @@
const { errors } = require('arsenal');
const express = require('express');
const { config } = require('../../../lib/Config');

const inflightFlushFrequencyMS = 200;

class Scuba {
    constructor() {
        this._server = null;
        this._port = 8100;
        this._data = {
            bucket: new Map(),
        };
        this._app = express();
        this.supportsInflight = config.isQuotaInflightEnabled();
    }

    _initiateRoutes() {
        this._app.use(express.json());

        this._app.get('/health/deep', (req, res) => {
            const headerValue = req.header('error');
            if (headerValue) {
                return res.status(500).send(errors.InternalError);
            }
            return res.status(204).end();
        });

        this._app.post('/metrics/bucket/:bucket/latest', (req, res) => {
            let bucketName = req.params.bucket;
            if (!this.supportsInflight) {
                bucketName = req.params.bucket?.split('_')[0];
                return res.status(200).json({
                    bytesTotal: this._data.bucket.get(bucketName)?.current || 0,
                });
            }
            const inflight = Number(req.body?.inflight) || 0;
            this._updateData({
                action: req.body?.action,
                bucket: bucketName,
                inflight,
            });
            const immediateInflights = req.body?.action === 'objectRestore' ? 0 : inflight;
            return res.json({
                bytesTotal: (this._data.bucket.get(bucketName)?.current || 0) +
                    (this._data.bucket.get(bucketName)?.nonCurrent || 0) +
                    (this._data.bucket.get(bucketName)?.inflight || 0) +
                    immediateInflights,
            });
        });
    }

    _updateData(event) {
        const { action, inflight, bucket } = event;
        let timeout = inflightFlushFrequencyMS;
        if (action === 'objectRestore') {
            timeout = 0;
        }
        if (!this._data.bucket.get(bucket)) {
            this._data.bucket.set(bucket, { current: 0, nonCurrent: 0, inflight: 0 });
        }
        if (timeout && this.supportsInflight) {
            setTimeout(() => {
                if (this._data.bucket.get(bucket)) {
                    this._data.bucket.set(bucket, {
                        current: this._data.bucket.get(bucket).current,
                        nonCurrent: this._data.bucket.get(bucket).nonCurrent,
                        inflight: this._data.bucket.get(bucket).inflight + inflight,
                    });
                }
            }, timeout);
        } else {
            if (this._data.bucket.get(bucket)) {
                this._data.bucket.set(bucket, {
                    current: this._data.bucket.get(bucket).current,
                    nonCurrent: this._data.bucket.get(bucket).nonCurrent,
                    inflight: this._data.bucket.get(bucket).inflight + inflight,
                });
            }
        }
    }

    start() {
        this._initiateRoutes();
        this._server = this._app.listen(this._port);
    }

    reset() {
        this._data = {
            bucket: new Map(),
        };
    }

    stop() {
        this._server.close();
    }

    getInflightsForBucket(bucketName) {
        let inflightCount = 0;
        this._data.bucket.forEach((value, key) => {
            if (!this.supportsInflight && key === bucketName) {
                inflightCount += (value.current + value.nonCurrent);
            } else if (this.supportsInflight && key.startsWith(`${bucketName}_`)) {
                inflightCount += value.inflight;
            }
        });
        return inflightCount;
    }

    incrementBytesForBucket(bucketName, bytes) {
        if (!this._data.bucket.has(bucketName)) {
            this._data.bucket.set(bucketName, { current: 0, nonCurrent: 0, inflight: 0 });
        }
        const bucket = this._data.bucket.get(bucketName);
        bucket.current += bytes;
        this._data.bucket.set(bucketName, bucket);
    }
}

module.exports = {
    Scuba,
    inflightFlushFrequencyMS,
};
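For context, a functional test would have driven this removed mock roughly as follows (hypothetical usage; the require path is illustrative, and the mock listens on its hard-coded port 8100). Inflight counts only settle after inflightFlushFrequencyMS, hence the deferred read:

    const { Scuba, inflightFlushFrequencyMS } = require('./Scuba');

    const scuba = new Scuba();
    scuba.start();
    scuba.incrementBytesForBucket('test-bucket', 1024);
    // ...issue quota-checked S3 requests against cloudserver here...
    setTimeout(() => {
        console.log(scuba.getInflightsForBucket('test-bucket'));
        scuba.reset();
        scuba.stop();
    }, inflightFlushFrequencyMS * 2);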
@@ -1,7 +1,6 @@
 /* eslint-disable global-require */
 const index = {
     Utapi: require('./Utapi'),
-    Scuba: require('./Scuba'),
 };
 
 module.exports = index;
@@ -1,48 +0,0 @@
module.exports = {
    entry: './index.js',
    target: 'node',
    devtool: 'source-map',
    output: {
        filename: 'zenko-vitastor.js'
    },
    module: {
        rules: [
            {
                test: /.jsx?$/,
                use: {
                    loader: 'babel-loader',
                    options: {
                        presets: [ [ "@babel/preset-env", { "targets": { "node": "12.0" }, "exclude": [ "transform-regenerator" ] } ] ],
                    }
                }
            },
            {
                test: /.json$/,
                type: 'json'
            }
        ]
    },
    externals: {
        'leveldown': 'commonjs leveldown',
        'bufferutil': 'commonjs bufferutil',
        'diskusage': 'commonjs diskusage',
        'utf-8-validate': 'commonjs utf-8-validate',
        'fcntl': 'commonjs fcntl',
        'ioctl': 'commonjs ioctl',
        'vitastor': 'commonjs vitastor',
        'vaultclient': 'commonjs vaultclient',
        'bucketclient': 'commonjs bucketclient',
        'scality-kms': 'commonjs scality-kms',
        'sproxydclient': 'commonjs sproxydclient',
        'hdclient': 'commonjs hdclient',
        'cdmiclient': 'commonjs cdmiclient',
        'kerberos': 'commonjs kerberos',
        '@mongodb-js/zstd': 'commonjs @mongodb-js/zstd',
        '@aws-sdk/credential-providers': 'commonjs @aws-sdk/credential-providers',
        'snappy': 'commonjs snappy',
        'mongodb-client-encryption': 'commonjs mongodb-client-encryption'
    },
    node: {
        __dirname: false
    }
};