Compare commits
59 Commits
developmen...hotfix/7.1
Author | SHA1
---|---
Alexander Chan | 66d5c2528c
Alexander Chan | 07fea61a3c
Alexander Chan | e8b7b74c45
Thomas Carmet | 039518a164
Thomas Carmet | 0ba7f1c125
Jonathan Gramain | 0e57ef9dc2
Taylor McKinnon | c54a5341ce
Jonathan Gramain | cc4a376c7a
Jonathan Gramain | 360a99fecb
Artem Bakalov | 287d67bee7
Artem Bakalov | b92b132c87
Jonathan Gramain | 6267aec8ac
Jonathan Gramain | 39bec21537
Jonathan Gramain | 3d9dc5bdf6
Nicolas Humbert | 3ae5bfea0a
Jonathan Gramain | 6866112eca
Jonathan Gramain | 0b72fd6530
Alexander Chan | 0c98d801cf
Taylor McKinnon | e33902cfe8
Taylor McKinnon | da0cc7b316
Taylor McKinnon | 43a1f615bf
Taylor McKinnon | 6c6aa13a98
Taylor McKinnon | 2bc60d4513
Alexander Chan | 30bc39a86f
Alexander Chan | c1a85e5f37
Alexander Chan | 32c57f5ab6
Alexander Chan | f8bb4f0490
Alexander Chan | f3323f5235
Alexander Chan | f1f9ee05f2
Alexander Chan | 5e202d7044
Alexander Chan | fa3dead6af
Alexander Chan | a6d44136e0
Alexander Chan | 58b2c112a0
Alexander Chan | b01f836b34
Alexander Chan | 217599968a
Alexander Chan | 4b9a9ca5a3
Alexander Chan | c863b44bc0
Alexander Chan | bd25deebb8
Alexander Chan | 66a65a1c73
Alexander Chan | 012a4f0dba
Alexander Chan | baf6521373
Taylor McKinnon | c7f45df7ed
Taylor McKinnon | 3bacfcf810
Taylor McKinnon | 33c32d9f44
Taylor McKinnon | f9dbebf9fe
Taylor McKinnon | e26abb504d
williamlardier | 609ae03f5a
williamlardier | 06d4741dab
Ronnie Smith | 0a432f2724
Ronnie | f9e695b2b3
Ronnie Smith | a172ec6b2e
Ronnie Smith | 3c4d7adeae
Ronnie | 36d0ffa2b3
Ronnie Smith | 619b947a0d
Ronnie | ea2d8d775a
Ronnie | 6861d7e94b
Ronnie Smith | 47648950e3
Alexander Chan | 0138b678ac
Alexander Chan | 9f09820c5f
@@ -0,0 +1,3 @@
.git
.github
node_modules
@@ -0,0 +1,32 @@
---
name: "Setup CI environment"
description: "Setup Cloudserver CI environment"

runs:
  using: composite
  steps:
    - name: Setup etc/hosts
      shell: bash
      run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
    - name: Setup Credentials
      shell: bash
      run: bash .github/scripts/credentials.bash
    - name: Setup job artifacts directory
      shell: bash
      run: |-
        set -exu;
        mkdir -p /tmp/artifacts/${{ github.job }}/;
    - uses: actions/setup-node@v2
      with:
        node-version: '16'
        cache: 'yarn'
    - name: install dependencies
      shell: bash
      run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
    - uses: actions/cache@v2
      with:
        path: ~/.cache/pip
        key: ${{ runner.os }}-pip
    - name: Install python deps
      shell: bash
      run: pip install docker-compose
@@ -0,0 +1,37 @@
azurebackend_AZURE_STORAGE_ACCESS_KEY
azurebackend_AZURE_STORAGE_ACCOUNT_NAME
azurebackend_AZURE_STORAGE_ENDPOINT
azurebackend2_AZURE_STORAGE_ACCESS_KEY
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
azurebackend2_AZURE_STORAGE_ENDPOINT
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
azurebackendmismatch_AZURE_STORAGE_ENDPOINT
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
azuretest_AZURE_BLOB_ENDPOINT
b2backend_B2_ACCOUNT_ID
b2backend_B2_STORAGE_ACCESS_KEY
GOOGLE_SERVICE_EMAIL
GOOGLE_SERVICE_KEY
AWS_S3_BACKEND_ACCESS_KEY
AWS_S3_BACKEND_SECRET_KEY
AWS_S3_BACKEND_ACCESS_KEY_2
AWS_S3_BACKEND_SECRET_KEY_2
AWS_GCP_BACKEND_ACCESS_KEY
AWS_GCP_BACKEND_SECRET_KEY
AWS_GCP_BACKEND_ACCESS_KEY_2
AWS_GCP_BACKEND_SECRET_KEY_2
b2backend_B2_STORAGE_ENDPOINT
gcpbackend2_GCP_SERVICE_EMAIL
gcpbackend2_GCP_SERVICE_KEY
gcpbackend2_GCP_SERVICE_KEYFILE
gcpbackend_GCP_SERVICE_EMAIL
gcpbackend_GCP_SERVICE_KEY
gcpbackendmismatch_GCP_SERVICE_EMAIL
gcpbackendmismatch_GCP_SERVICE_KEY
gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE
gcpbackendproxy_GCP_SERVICE_KEYFILE
@@ -0,0 +1,66 @@
services:
  cloudserver:
    image: ${CLOUDSERVER_IMAGE}
    command: sh -c "yarn start > /artifacts/s3.log"
    network_mode: "host"
    volumes:
      - /tmp/ssl:/ssl
      - /tmp/ssl-kmip:/ssl-kmip
      - ${HOME}/.aws/credentials:/root/.aws/credentials
      - /tmp/artifacts/${JOB_NAME}:/artifacts
    environment:
      - CI=true
      - ENABLE_LOCAL_CACHE=true
      - REDIS_HOST=0.0.0.0
      - REDIS_PORT=6379
      - REPORT_TOKEN=report-token-1
      - REMOTE_MANAGEMENT_DISABLE=1
      - HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
      - DATA_HOST=0.0.0.0
      - METADATA_HOST=0.0.0.0
      - S3BACKEND
      - S3DATA
      - MPU_TESTING
      - S3VAULT
      - S3_LOCATION_FILE
      - ENABLE_UTAPI_V2
      - BUCKET_DENY_FILTER
      - S3KMS
      - S3KMIP_PORT
      - S3KMIP_HOSTS
      - S3KMIP_COMPOUND_CREATE
      - S3KMIP_BUCKET_ATTRIBUTE_NAME
      - S3KMIP_PIPELINE_DEPTH
      - S3KMIP_KEY
      - S3KMIP_CERT
      - S3KMIP_CA
    env_file:
      - creds.env
    depends_on:
      - redis
    extra_hosts:
      - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
      - "pykmip.local:127.0.0.1"

  redis:
    image: redis:alpine
    network_mode: "host"

  squid:
    network_mode: "host"
    profiles: ['ci-proxy']
    image: scality/ci-squid
    command: >-
      sh -c 'mkdir -p /ssl &&
      openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
      -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
      -keyout /ssl/myca.pem -out /ssl/myca.pem &&
      cp /ssl/myca.pem /ssl/CA.pem &&
      squid -f /etc/squid/squid.conf -N -z &&
      squid -f /etc/squid/squid.conf -NYCd 1'
    volumes:
      - /tmp/ssl:/ssl

  pykmip:
    network_mode: "host"
    profiles: ['pykmip']
    image: registry.scality.com/cloudserver-dev/pykmip
    volumes:
      - /tmp/artifacts/${JOB_NAME}:/artifacts
@@ -2,9 +2,9 @@
set -x #echo on
set -e #exit at the first error

mkdir -p ~/.aws
mkdir -p $HOME/.aws

cat >>/root/.aws/credentials <<EOF
cat >>$HOME/.aws/credentials <<EOF
[default]
aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY
@@ -0,0 +1,310 @@
---
name: tests

on:
  push:
    branches-ignore:
      - 'development/**'
      - 'q/*/**'

env:
  # Secrets
  azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackend_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
  azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
  azurebackend2_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
  azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
  b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
  b2backend_B2_STORAGE_ACCESS_KEY: >-
    ${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
  GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
  AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
  AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
  AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
  AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
  AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
  AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
  b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
  gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
  gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
  gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
  gcpbackendmismatch_GCP_SERVICE_KEY: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
  gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  # Configs
  ENABLE_LOCAL_CACHE: "true"
  REPORT_TOKEN: "report-token-1"
  REMOTE_MANAGEMENT_DISABLE: "1"

jobs:
  linting-coverage:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: '16'
          cache: yarn
      - name: install dependencies
        run: yarn install --frozen-lockfile --network-concurrency 1
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip
      - name: Install python deps
        run: pip install flake8
      - name: Lint Javascript
        run: yarn run --silent lint -- --max-warnings 0
      - name: Lint Markdown
        run: yarn run --silent lint_md
      - name: Lint python
        run: flake8 $(git ls-files "*.py")
      - name: Lint Yaml
        run: yamllint -c yamllint.yml $(git ls-files "*.yml")
      - name: Unit Coverage
        run: |
          set -ex
          mkdir -p $CIRCLE_TEST_REPORTS/unit
          yarn test
          yarn run test_legacy_location
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
          CIRCLE_TEST_REPORTS: /tmp
          CIRCLE_ARTIFACTS: /tmp
          CI_REPORTS: /tmp
      - name: Unit Coverage logs
        run: find /tmp/unit -exec cat {} \;
      - name: preparing junit files for upload
        run: |
          mkdir -p artifacts/junit
          find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
        if: always()
      - name: Upload files to artifacts
        uses: scality/action-artifacts@v2
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: artifacts
        if: always()

  build:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1.6.0
      - name: Login to GitHub Registry
        uses: docker/login-action@v1.10.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          registry: registry.scality.com
          username: ${{ secrets.REGISTRY_LOGIN }}
          password: ${{ secrets.REGISTRY_PASSWORD }}
      - name: Build and push cloudserver image
        uses: docker/build-push-action@v2.7.0
        with:
          push: true
          context: .
          tags: |
            ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
            registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
          cache-from: type=gha,scope=cloudserver
          cache-to: type=gha,mode=max,scope=cloudserver

  multiple-backend:
    runs-on: ubuntu-latest
    needs: build
    env:
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      S3BACKEND: mem
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      S3DATA: multiple
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker-compose up -d
        working-directory: .github/docker
      - name: Run multiple backend test
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  file-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      MPU_TESTING: "yes"
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: |
            2.7
            3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup python2 test environment
        run: |
          sudo apt-get install -y libdigest-hmac-perl
          pip install virtualenv
          virtualenv -p $(which python2) ~/.virtualenv/py2
          source ~/.virtualenv/py2/bin/activate
          pip install 's3cmd==1.6.1'
      - name: Setup CI services
        run: docker-compose up -d
        working-directory: .github/docker
      - name: Run file ft tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          source ~/.virtualenv/py2/bin/activate
          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  utapi-v2-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      ENABLE_UTAPI_V2: t
      S3BACKEND: mem
      BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker-compose up -d
        working-directory: .github/docker
      - name: Run file utapi v2 tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  kmip-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      MPU_TESTING: true
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Copy KMIP certs
        run: cp -r ./certs /tmp/ssl-kmip
        working-directory: .github/pykmip
      - name: Setup CI services
        run: docker-compose --profile pykmip up -d
        working-directory: .github/docker
      - name: Run file KMIP tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          bash wait_for_local_port.bash 5696 40
          yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
@@ -174,6 +174,10 @@ const constants = {
        'bucket',
    ],
    allowedUtapiEventFilterStates: ['allow', 'deny'],
    // The AWS assumed Role resource type
    assumedRoleArnResourceType: 'assumed-role',
    // Session name of the backbeat lifecycle assumed role session.
    backbeatLifecycleSessionName: 'backbeat-lifecycle',
};

module.exports = constants;
@@ -1,13 +0,0 @@
#!/bin/bash

script_full_path=$(readlink -f "$0")
file_dir=$(dirname "$script_full_path")/..

PACKAGE_VERSION=$(cat $file_dir/package.json \
    | grep version \
    | head -1 \
    | awk -F: '{ print $2 }' \
    | sed 's/[",]//g' \
    | tr -d '[[:space:]]')

echo $PACKAGE_VERSION
eve/main.yml
@@ -1,371 +0,0 @@
---
version: 0.2

branches:
  feature/*, improvement/*, bugfix/*, w/*, q/*, hotfix/*, dependabot/*:
    stage: pre-merge
  development/*:
    stage: post-merge

models:
  - env: &global-env
      azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key)s
      azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name)s
      azurebackend_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint)s
      azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key_2)s
      azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name_2)s
      azurebackend2_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint_2)s
      azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key)s
      azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name)s
      azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint)s
      azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key)s
      azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name)s
      azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint)s
      azuretest_AZURE_BLOB_ENDPOINT: "%(secret:azure_storage_endpoint)s"
      b2backend_B2_ACCOUNT_ID: "%(secret:b2backend_b2_account_id)s"
      b2backend_B2_STORAGE_ACCESS_KEY: >-
        %(secret:b2backend_b2_storage_access_key)s
      GOOGLE_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
      GOOGLE_SERVICE_KEY: "%(secret:gcp_service_key)s"
      AWS_S3_BACKEND_ACCESS_KEY: "%(secret:aws_s3_backend_access_key)s"
      AWS_S3_BACKEND_SECRET_KEY: "%(secret:aws_s3_backend_secret_key)s"
      AWS_S3_BACKEND_ACCESS_KEY_2: "%(secret:aws_s3_backend_access_key_2)s"
      AWS_S3_BACKEND_SECRET_KEY_2: "%(secret:aws_s3_backend_secret_key_2)s"
      AWS_GCP_BACKEND_ACCESS_KEY: "%(secret:aws_gcp_backend_access_key)s"
      AWS_GCP_BACKEND_SECRET_KEY: "%(secret:aws_gcp_backend_secret_key)s"
      AWS_GCP_BACKEND_ACCESS_KEY_2: "%(secret:aws_gcp_backend_access_key_2)s"
      AWS_GCP_BACKEND_SECRET_KEY_2: "%(secret:aws_gcp_backend_secret_key_2)s"
      b2backend_B2_STORAGE_ENDPOINT: "%(secret:b2backend_b2_storage_endpoint)s"
      gcpbackend2_GCP_SERVICE_EMAIL: "%(secret:gcp2_service_email)s"
      gcpbackend2_GCP_SERVICE_KEY: "%(secret:gcp2_service_key)s"
      gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackend_GCP_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
      gcpbackend_GCP_SERVICE_KEY: "%(secret:gcp_service_key)s"
      gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
        %(secret:gcpbackendmismatch_gcp_service_email)s
      gcpbackendmismatch_GCP_SERVICE_KEY: >-
        %(secret:gcpbackendmismatch_gcp_service_key)s
      gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  - env: &multiple-backend-vars
      S3BACKEND: "mem"
      S3DATA: "multiple"
  - env: &file-mem-mpu
      S3BACKEND: "file"
      S3VAULT: "mem"
      MPU_TESTING: "yes"
  - Git: &clone
      name: Pull repo
      repourl: '%(prop:git_reference)s'
      shallow: true
      retryFetch: true
      haltOnFailure: true
  - ShellCommand: &credentials
      name: Setup Credentials
      command: bash eve/workers/build/credentials.bash
      haltOnFailure: true
      env: *global-env
  - ShellCommand: &node_version
      name: get node version
      command: node -v
  - ShellCommand: &yarn-install
      name: install modules
      command: yarn install --ignore-engines --frozen-lockfile --network-concurrency=1
      haltOnFailure: true
  - ShellCommand: &check-s3-action-logs
      name: Check s3 action logs
      command: |
        LOGS=`cat /artifacts/s3.log | grep 'No actionLog'`
        test `echo -n ${LOGS} | wc -l` -eq 0 || (echo $LOGS && false)
  - Upload: &upload-artifacts
      source: /artifacts
      urls:
        - "*"
  - ShellCommand: &follow-s3-log
      logfiles:
        s3:
          filename: /artifacts/s3.log
          follow: true
  - ShellCommand: &setup-junit-upload
      name: preparing junit files for upload
      command: |
        mkdir -p artifacts/junit
        find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
      alwaysRun: true
  - Upload: &upload-junits
      source: artifacts
      urls:
        - "*"
      alwaysRun: true
  - ShellCommand: &setup-github-ssh
      name: setup ssh with github
      command: |
        mkdir -p ~/.ssh
        ssh-keyscan -H github.com > ~/.ssh/ssh_known_hosts

stages:
  pre-merge:
    worker:
      type: local
    steps:
      - TriggerStages:
          name: Launch all workers
          stage_names:
            - linting-coverage
            - file-ft-tests
            - multiple-backend-test
            - kmip-ft-tests
            - utapi-v2-tests
          waitForFinish: true
          haltOnFailure: true

  linting-coverage:
    worker:
      type: docker
      path: eve/workers/build
      volumes: &default_volumes
        - '/home/eve/workspace'
    steps:
      - Git: *clone
      - ShellCommand: *setup-github-ssh
      - ShellCommand: *yarn-install
      - ShellCommand: *credentials
      - ShellCommand:
          name: Linting
          command: |
            set -ex
            yarn run --silent lint -- --max-warnings 0
            yarn run --silent lint_md
            flake8 $(git ls-files "*.py")
            yamllint -c yamllint.yml $(git ls-files "*.yml")
      - ShellCommand:
          name: Unit Coverage
          command: |
            set -ex
            mkdir -p $CIRCLE_TEST_REPORTS/unit
            yarn test
            yarn run test_versionid_base62
            yarn run test_legacy_location
          env: &shared-vars
            <<: *global-env
            S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
            CIRCLE_TEST_REPORTS: /tmp
            CIRCLE_ARTIFACTS: /tmp
            CI_REPORTS: /tmp
      - ShellCommand:
          name: Unit Coverage logs
          command: find /tmp/unit -exec cat {} \;
      - ShellCommand: *setup-junit-upload
      - Upload: *upload-junits

  multiple-backend-test:
    worker:
      type: kube_pod
      path: eve/workers/pod.yaml
      images:
        aggressor: eve/workers/build
        s3: "."
      vars:
        aggressorMem: "2560Mi"
        s3Mem: "2560Mi"
        env:
          <<: *multiple-backend-vars
          <<: *global-env
    steps:
      - Git: *clone
      - ShellCommand: *setup-github-ssh
      - ShellCommand: *credentials
      - ShellCommand: *yarn-install
      - ShellCommand:
          command: |
            bash -c "
            source /root/.aws/exports &> /dev/null
            set -ex
            bash wait_for_local_port.bash 8000 40
            yarn run multiple_backend_test"
          <<: *follow-s3-log
          env:
            <<: *multiple-backend-vars
            <<: *global-env
            S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - ShellCommand:
          command: mvn test
          workdir: build/tests/functional/jaws
          <<: *follow-s3-log
          env:
            <<: *multiple-backend-vars
      - ShellCommand:
          command: rspec tests.rb
          workdir: build/tests/functional/fog
          <<: *follow-s3-log
          env:
            <<: *multiple-backend-vars
      - ShellCommand: *check-s3-action-logs
      - ShellCommand: *setup-junit-upload
      - Upload: *upload-artifacts
      - Upload: *upload-junits

  file-ft-tests:
    worker:
      type: kube_pod
      path: eve/workers/pod.yaml
      images:
        aggressor: eve/workers/build
        s3: "."
      vars:
        aggressorMem: "2560Mi"
        s3Mem: "2Gi"
        redis: enabled
        env:
          <<: *file-mem-mpu
          <<: *global-env
    steps:
      - Git: *clone
      - ShellCommand: *setup-github-ssh
      - ShellCommand: *credentials
      - ShellCommand: *yarn-install
      - ShellCommand:
          command: |
            set -ex
            bash wait_for_local_port.bash 8000 40
            yarn run ft_test
          <<: *follow-s3-log
          env:
            <<: *file-mem-mpu
            <<: *global-env
      - ShellCommand: *check-s3-action-logs
      - ShellCommand: *setup-junit-upload
      - Upload: *upload-artifacts
      - Upload: *upload-junits

  kmip-ft-tests:
    worker:
      type: kube_pod
      path: eve/workers/pod.yaml
      images:
        aggressor: eve/workers/build
        s3: "."
        pykmip: eve/workers/pykmip
      vars:
        aggressorMem: "2560Mi"
        s3Mem: "1664Mi"
        redis: enabled
        pykmip: enabled
        env:
          <<: *file-mem-mpu
          <<: *global-env
    steps:
      - Git: *clone
      - ShellCommand: *credentials
      - ShellCommand: *yarn-install
      - ShellCommand:
          command: |
            set -ex
            bash wait_for_local_port.bash 8000 40
            bash wait_for_local_port.bash 5696 40
            yarn run ft_kmip
          logfiles:
            pykmip:
              filename: /artifacts/pykmip.log
              follow: true
            s3:
              filename: /artifacts/s3.log
              follow: true
          env:
            <<: *file-mem-mpu
            <<: *global-env
      - ShellCommand: *setup-junit-upload
      - Upload: *upload-artifacts
      - Upload: *upload-junits

  utapi-v2-tests:
    worker:
      type: kube_pod
      path: eve/workers/pod.yaml
      images:
        aggressor: eve/workers/build
        s3: "."
      vars:
        aggressorMem: "2560Mi"
        s3Mem: "2Gi"
        env:
          ENABLE_UTAPI_V2: t
          S3BACKEND: mem
          BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
    steps:
      - Git: *clone
      - ShellCommand: *setup-github-ssh
      - ShellCommand: *credentials
      - ShellCommand: *yarn-install
      - ShellCommand:
          command: |
            bash -c "
            source /root/.aws/exports &> /dev/null
            set -ex
            bash wait_for_local_port.bash 8000 40
            yarn run test_utapi_v2"
          <<: *follow-s3-log
          env:
            ENABLE_UTAPI_V2: t
            S3BACKEND: mem
      - ShellCommand: *check-s3-action-logs
      - ShellCommand: *setup-junit-upload
      - Upload: *upload-artifacts
      - Upload: *upload-junits

  post-merge:
    worker:
      type: local
    steps:
      - Git: *clone
      - ShellCommand: &docker_login
          name: Private Registry Login
          command: >
            docker login
            -u '%(secret:private_registry_username)s'
            -p '%(secret:private_registry_password)s'
            '%(secret:private_registry_url)s'
      - ShellCommand:
          name: Dockerhub Login
          command: >
            docker login
            -u '%(secret:dockerhub_ro_user)s'
            -p '%(secret:dockerhub_ro_password)s'
      - SetProperty: &docker_image_name
          name: Set docker image name property
          property: docker_image_name
          value:
            "%(secret:private_registry_url)s/zenko/cloudserver:\
            %(prop:commit_short_revision)s"
      - ShellCommand:
          name: Build docker image
          command: >-
            docker build
            --no-cache
            -t %(prop:docker_image_name)s
            .
      - ShellCommand:
          name: Tag images
          command: |
            docker tag %(prop:docker_image_name)s zenko/cloudserver:$TAG
          env:
            TAG: "latest-%(prop:product_version)s"
      - ShellCommand:
          name: Push image
          command: |
            docker push %(prop:docker_image_name)s
            docker push zenko/cloudserver:latest-%(prop:product_version)s
@@ -1,59 +0,0 @@
FROM buildpack-deps:bionic-curl

#
# Install packages needed by the buildchain
#
ENV LANG C.UTF-8
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
    && echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
    && apt-get update \
    && cat /tmp/*packages.list | xargs apt-get install -y \
    && update-ca-certificates \
    && git clone https://github.com/tj/n.git \
    && make -C ./n \
    && n 16.13.2 \
    && pip install pip==9.0.1 \
    && rm -rf ./n \
    && rm -rf /var/lib/apt/lists/* \
    && rm -f /tmp/packages.list

#
# Add user eve
#

RUN adduser -u 1042 --home /home/eve --disabled-password --gecos "" eve \
    && adduser eve sudo \
    && sed -ri 's/(%sudo.*)ALL$/\1NOPASSWD:ALL/' /etc/sudoers
#
# Install Dependencies
#

# Install RVM and gems
ENV RUBY_VERSION="2.4.1"
RUN gem update --system
COPY ./gems.list /tmp/
RUN cat /tmp/gems.list | xargs gem install
#RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 \
#    && curl -sSL https://get.rvm.io | bash -s stable --ruby=$RUBY_VERSION \
#    && usermod -a -G rvm eve
#RUN /bin/bash -l -c "\
#    source /usr/local/rvm/scripts/rvm \
#    && cat /tmp/gems.list | xargs gem install \
#    && rm /tmp/gems.list"

# Install Pip packages
COPY ./pip_packages.list /tmp/
RUN cat /tmp/pip_packages.list | xargs pip install \
    && rm -f /tmp/pip_packages.list \
    && mkdir /home/eve/.aws \
    && chown eve /home/eve/.aws

#
# Run buildbot-worker on startup
#

ARG BUILDBOT_VERSION
RUN pip install buildbot-worker==$BUILDBOT_VERSION

CMD ["/bin/bash", "-l", "-c", "buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS && buildbot-worker start --nodaemon"]
@@ -1,14 +0,0 @@
ca-certificates
git
git-lfs
gnupg
libffi-dev
libssl-dev
python-pip
python2.7
python2.7-dev
software-properties-common
sudo
tcl
wget
procps
@@ -1,4 +0,0 @@
fog-aws:1.3.0
json
mime-types:3.1
rspec:3.5
@@ -1,3 +0,0 @@
flake8
s3cmd==1.6.1
yamllint
@@ -1,14 +0,0 @@
build-essential
ca-certificates
curl
default-jdk
gnupg2
libdigest-hmac-perl
lsof
maven
netcat
redis-server
ruby-full
yarn
zlib1g-dev
openssl
@@ -1,196 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: "proxy-ci-test-pod"
spec:
  restartPolicy: Never
  terminationGracePeriodSeconds: 10
  hostAliases:
    - ip: "127.0.0.1"
      hostnames:
        - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
        - "pykmip.local"
  {% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
  initContainers:
    - name: kmip-certs-installer
      image: {{ images.pykmip }}
      command: [ 'sh', '-c', 'cp /ssl/* /ssl-kmip/']
      volumeMounts:
        - name: kmip-certs
          readOnly: false
          mountPath: /ssl-kmip
  {%- endif %}
  containers:
    - name: aggressor
      image: {{ images.aggressor }}
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: "1"
          memory: {{ vars.aggressorMem }}
        limits:
          cpu: "1"
          memory: {{ vars.aggressorMem }}
      volumeMounts:
        - name: creds
          readOnly: false
          mountPath: /root/.aws
        - name: artifacts
          readOnly: true
          mountPath: /artifacts
      command:
        - bash
        - -lc
        - |
          buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS
          buildbot-worker start --nodaemon
      env:
        - name: CI
          value: "true"
        - name: ENABLE_LOCAL_CACHE
          value: "true"
        - name: REPORT_TOKEN
          value: "report-token-1"
        - name: REMOTE_MANAGEMENT_DISABLE
          value: "1"
        {% for key, value in vars.env.items() %}
        - name: {{ key }}
          value: "{{ value }}"
        {% endfor %}
    - name: s3
      image: {{ images.s3 }}
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: "2"
          memory: {{ vars.s3Mem }}
        limits:
          cpu: "2"
          memory: {{ vars.s3Mem }}
      volumeMounts:
        - name: creds
          readOnly: false
          mountPath: /root/.aws
        - name: certs
          readOnly: true
          mountPath: /tmp
        - name: artifacts
          readOnly: false
          mountPath: /artifacts
        - name: kmip-certs
          readOnly: false
          mountPath: /ssl-kmip
      command:
        - bash
        - -ec
        - |
          sleep 10 # wait for
          /usr/src/app/docker-entrypoint.sh npm start | tee -a /artifacts/s3.log
      env:
        {% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" -%}
        - name: S3_LOCATION_FILE
          value: "/usr/src/app/tests/locationConfig/locationConfigTests.json"
        {%- endif %}
        {% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
        - name: S3KMS
          value: kmip
        - name: S3KMIP_PORT
          value: "5696"
        - name: S3KMIP_HOSTS
          value: "pykmip.local"
        - name: S3KMIP_COMPOUND_CREATE
          value: "false"
        - name: S3KMIP_BUCKET_ATTRIBUTE_NAME
          value: ''
        - name: S3KMIP_PIPELINE_DEPTH
          value: "8"
        - name: S3KMIP_KEY
          value: /ssl-kmip/kmip-client-key.pem
        - name: S3KMIP_CERT
          value: /ssl-kmip/kmip-client-cert.pem
        - name: S3KMIP_CA
          value: /ssl-kmip/kmip-ca.pem
        {%- endif %}
        - name: CI
          value: "true"
        - name: ENABLE_LOCAL_CACHE
          value: "true"
        - name: REDIS_HOST
          value: "localhost"
        - name: REDIS_PORT
          value: "6379"
        - name: REPORT_TOKEN
          value: "report-token-1"
        - name: REMOTE_MANAGEMENT_DISABLE
          value: "1"
        - name: HEALTHCHECKS_ALLOWFROM
          value: "0.0.0.0/0"
        {% for key, value in vars.env.items() %}
        - name: {{ key }}
          value: "{{ value }}"
        {% endfor %}
    {% if vars.redis is defined and vars.redis == "enabled" -%}
    - name: redis
      image: redis:alpine
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 200m
          memory: 128Mi
        limits:
          cpu: 200m
          memory: 128Mi
    {%- endif %}
    {% if vars.env.CI_PROXY is defined and vars.env.CI_PROXY == "true" -%}
    - name: squid
      image: scality/ci-squid
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 250m
          memory: 128Mi
        limits:
          cpu: 250m
          memory: 128Mi
      volumeMounts:
        - name: certs
          readOnly: false
          mountPath: /ssl
      command:
        - sh
        - -exc
        - |
          mkdir -p /ssl
          openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
          -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
          -keyout /ssl/myca.pem -out /ssl/myca.pem
          cp /ssl/myca.pem /ssl/CA.pem
          squid -f /etc/squid/squid.conf -N -z
          squid -f /etc/squid/squid.conf -NYCd 1
    {%- endif %}
    {% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
    - name: pykmip
      image: {{ images.pykmip }}
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - name: artifacts
          readOnly: false
          mountPath: /artifacts
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits:
          cpu: 100m
          memory: 128Mi
    {%- endif %}
  volumes:
    - name: creds
      emptyDir: {}
    - name: certs
      emptyDir: {}
    - name: artifacts
      emptyDir: {}
    - name: kmip-certs
      emptyDir: {}
@@ -1297,6 +1297,8 @@ class Config extends EventEmitter {
        if (config.bucketNotificationDestinations) {
            this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations);
        }

        this.lifecycleRoleName = config.lifecycleRoleName || null;
    }

    _configureBackends() {
lib/api/api.js
@@ -156,32 +156,77 @@ const api = {
}

return async.waterfall([
next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params) =>
next(err, userInfo, authorizationResults, streamingV4Params), 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, next) => {
if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) {
return next(checkedResults);
next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) {
log.trace('authentication error', { error: err });
return next(err);
}
returnTagCount = checkedResults;
return next(null, userInfo, authorizationResults, streamingV4Params);
}, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
}
return tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log,
(err, tagAuthResults, updatedContexts) =>
next(err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts));
log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params);
}
// issue 100 Continue to the client
writeContinue(request, response);
const MAX_POST_LENGTH = request.method === 'POST' ?
1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
const post = [];
let postLength = 0;
request.on('data', chunk => {
postLength += chunk.length;
// Sanity check on post length
if (postLength <= MAX_POST_LENGTH) {
post.push(chunk);
}
});

request.on('error', err => {
log.trace('error receiving request', {
error: err,
});
return next(errors.InternalError);
});

request.on('end', () => {
if (postLength > MAX_POST_LENGTH) {
log.error('body length is too long for request type',
{ postLength });
return next(errors.InvalidRequest);
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params);
});
return undefined;
},
], (err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts) => {
// Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
authorizationResults,
request,
requestContexts,
apiMethod,
log,
(err, authResultsWithTags) => {
if (err) {
log.trace('tag authentication error', { error: err });
return next(err);
}
return next(null, userInfo, authResultsWithTags, streamingV4Params);
},
),
], (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) {
log.trace('authentication error', { error: err });
return callback(err);
}
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
}
log.addDefaultFields(authNames);
if (tagAuthResults) {
const checkedResults = checkAuthResults(tagAuthResults);
if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) {
return callback(checkedResults);
}
@@ -192,64 +237,14 @@ const api = {
return this[apiMethod](userInfo, request, streamingV4Params,
log, callback, authorizationResults);
}
// issue 100 Continue to the client
writeContinue(request, response);
const MAX_POST_LENGTH = request.method.toUpperCase() === 'POST' ?
1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
const post = [];
let postLength = 0;
request.on('data', chunk => {
postLength += chunk.length;
// Sanity check on post length
if (postLength <= MAX_POST_LENGTH) {
post.push(chunk);
}
return undefined;
});

request.on('error', err => {
log.trace('error receiving request', {
error: err,
});
return callback(errors.InternalError);
});

request.on('end', () => {
if (postLength > MAX_POST_LENGTH) {
log.error('body length is too long for request type',
{ postLength });
return callback(errors.InvalidRequest);
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();

// IAM policy -Tag condition keys require information from CloudServer for evaluation
return tagConditionKeyAuth(authorizationResults, request, (updatedContexts || requestContexts),
apiMethod, log, (err, tagAuthResults) => {
if (err) {
log.trace('tag authentication error', { error: err });
return callback(err);
}
if (tagAuthResults) {
const checkedResults = checkAuthResults(tagAuthResults);
if (checkedResults instanceof Error) {
return callback(checkedResults);
}
returnTagCount = checkedResults;
}
if (apiMethod === 'objectCopy' ||
apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request,
returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
});
});
return undefined;
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
});
},
bucketDelete,
@@ -1,7 +1,8 @@
const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
const constants = require('../../../../constants');

const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
const { allAuthedUsersId, bucketOwnerActions, logId, publicId,
assumedRoleArnResourceType, backbeatLifecycleSessionName } = constants;

// whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
@@ -364,10 +365,34 @@ function validatePolicyResource(bucketName, policy) {
});
}

/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
    if (!arn) {
        return false;
    }

    const arnSplits = arn.split(':');
    const service = arnSplits[2];

    const resourceNames = arnSplits[arnSplits.length - 1].split('/');

    const resourceType = resourceNames[0];
    const sessionName = resourceNames[resourceNames.length - 1];

    return (service === 'sts' &&
        resourceType === assumedRoleArnResourceType &&
        sessionName === backbeatLifecycleSessionName);
}

module.exports = {
    isBucketAuthorized,
    isObjAuthorized,
    checkBucketAcls,
    checkObjectAcls,
    validatePolicyResource,
    isLifecycleSession,
};
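For context, here is a minimal usage sketch of the `isLifecycleSession` helper added above. It is not part of the diff; the require path and the sample IAM ARN are assumptions, while the STS ARN comes from the function's own docstring.

```js
// Hypothetical usage sketch (not in the diff). Module path is assumed.
const { isLifecycleSession } =
    require('./lib/api/apiUtils/authorization/permissionChecks');

// STS assumed-role ARN whose session name is 'backbeat-lifecycle':
// service === 'sts', resource type === 'assumed-role', session name matches,
// so this returns true (example ARN taken from the docstring above).
isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle'); // true

// A non-STS ARN or a missing ARN returns false.
isLifecycleSession('arn:aws:iam::257038443293:user/some-user'); // false (assumed sample ARN)
isLifecycleSession(undefined); // false
```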
@@ -97,11 +97,55 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const objectGetTaggingAction = (request.query &&
request.query.versionId) ? 'objectGetTaggingVersion' :
'objectGetTagging';
if (request.headers['x-amz-version-id']) {
const objectGetVersionAction = 'objectGetVersion';
const getVersionResourceVersion =
generateRequestContext(objectGetVersionAction);
requestContexts.push(getVersionResourceVersion);
}
const getRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
const getTaggingRequestContext =
generateRequestContext(objectGetTaggingAction);
requestContexts.push(getRequestContext, getTaggingRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectGetTagging') {
const objectGetTaggingAction = 'objectGetTagging';
const getTaggingResourceVersion =
generateRequestContext(objectGetTaggingAction);
requestContexts.push(getTaggingResourceVersion);
if (request.headers['x-amz-version-id']) {
const objectGetTaggingVersionAction = 'objectGetTaggingVersion';
const getTaggingVersionResourceVersion =
generateRequestContext(objectGetTaggingVersionAction);
requestContexts.push(getTaggingVersionResourceVersion);
}
} else if (apiMethodAfterVersionCheck === 'objectHead') {
const objectHeadAction = 'objectHead';
const headObjectAction =
generateRequestContext(objectHeadAction);
requestContexts.push(headObjectAction);
if (request.headers['x-amz-version-id']) {
const objectHeadVersionAction = 'objectGetVersion';
const headObjectVersion =
generateRequestContext(objectHeadVersionAction);
requestContexts.push(headObjectVersion);
}
} else if (apiMethodAfterVersionCheck === 'objectPutTagging') {
const putObjectTaggingRequestContext =
generateRequestContext('objectPutTagging');
requestContexts.push(putObjectTaggingRequestContext);
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
} else if (apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const putObjectRequestContext =
generateRequestContext('objectPut');
requestContexts.push(putObjectRequestContext);
const getObjectRequestContext =
generateRequestContext('objectGet');
requestContexts.push(getObjectRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectPut') {
const putRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
@@ -112,12 +156,28 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
generateRequestContext('objectPutTagging');
requestContexts.push(putTaggingRequestContext);
}
if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) {
const putLegalHoldStatusAction =
generateRequestContext('objectPutLegalHold');
requestContexts.push(putLegalHoldStatusAction);
}
// if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =
generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext);
}

if (request.headers['x-amz-object-lock-mode']) {
const putObjectLockRequestContext =
generateRequestContext('objectPutRetention');
requestContexts.push(putObjectLockRequestContext);
}
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
} else {
const requestContext =
generateRequestContext(apiMethodAfterVersionCheck);
@ -1,5 +1,5 @@
|
|||
const assert = require('assert');
|
||||
const async = require('async');
|
||||
|
||||
const { auth, s3middleware } = require('arsenal');
|
||||
const metadata = require('../../../metadata/wrapper');
|
||||
const { decodeVersionId } = require('../object/versioning');
|
||||
|
@ -12,64 +12,64 @@ function makeTagQuery(tags) {
|
|||
.join('&');
|
||||
}
|
||||
|
||||
function updateRequestContexts(request, requestContexts, apiMethod, log, cb) {
|
||||
requestContexts.forEach(rc => {
|
||||
rc.setNeedTagEval(true);
|
||||
|
||||
async.series([
|
||||
next => {
|
||||
if (request.headers['x-amz-tagging']) {
|
||||
rc.setRequestObjTags(request.headers['x-amz-tagging']);
|
||||
process.nextTick(() => next());
|
||||
} else if (request.post && apiMethod === 'objectPutTagging') {
|
||||
parseTagXml(request.post, log, (err, tags) => {
|
||||
if (err) {
|
||||
log.trace('error parsing request tags');
|
||||
return next(err);
|
||||
}
|
||||
rc.setRequestObjTags(makeTagQuery(tags));
|
||||
return next();
|
||||
});
|
||||
} else {
|
||||
process.nextTick(() => next());
|
||||
}
|
||||
},
|
||||
next => {
|
||||
const objectKey = request.objectKey;
|
||||
const bucketName = request.bucketName;
|
||||
const decodedVidResult = decodeVersionId(request.query);
|
||||
if (decodedVidResult instanceof Error) {
|
||||
log.trace('invalid versionId query', {
|
||||
versionId: request.query.versionId,
|
||||
error: decodedVidResult,
|
||||
});
|
||||
return process.nextTick(() => next(decodedVidResult));
|
||||
}
|
||||
const reqVersionId = decodedVidResult;
|
||||
return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log,
|
||||
(err, objMD) => {
|
||||
function updateRequestContextsWithTags(request, requestContexts, apiMethod, log, cb) {
|
||||
async.waterfall([
|
||||
next => {
|
||||
if (request.headers['x-amz-tagging']) {
|
||||
return next(null, request.headers['x-amz-tagging']);
|
||||
}
|
||||
if (request.post && apiMethod === 'objectPutTagging') {
|
||||
return parseTagXml(request.post, log, (err, tags) => {
|
||||
if (err) {
|
||||
log.trace('error parsing request tags');
|
||||
return next(err);
|
||||
}
|
||||
return next(null, makeTagQuery(tags));
|
||||
});
|
||||
}
|
||||
return next(null, null);
|
||||
},
|
||||
(requestTagsQuery, next) => {
|
||||
const objectKey = request.objectKey;
|
||||
const bucketName = request.bucketName;
|
||||
const decodedVidResult = decodeVersionId(request.query);
|
||||
if (decodedVidResult instanceof Error) {
|
||||
log.trace('invalid versionId query', {
|
||||
versionId: request.query.versionId,
|
||||
error: decodedVidResult,
|
||||
});
|
||||
return next(decodedVidResult);
|
||||
}
|
||||
const reqVersionId = decodedVidResult;
|
||||
return metadata.getObjectMD(
|
||||
bucketName, objectKey, { versionId: reqVersionId }, log, (err, objMD) => {
|
||||
if (err) {
|
||||
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
|
||||
if (err.NoSuchKey) {
|
||||
return next();
|
||||
return next(null, requestTagsQuery, null);
|
||||
}
|
||||
log.trace('error getting request object tags');
|
||||
return next(err);
|
||||
}
|
||||
const existingTags = objMD.tags;
|
||||
if (existingTags) {
|
||||
rc.setExistingObjTag(makeTagQuery(existingTags));
|
||||
}
|
||||
return next();
|
||||
const existingTagsQuery = objMD.tags && makeTagQuery(objMD.tags);
|
||||
return next(null, requestTagsQuery, existingTagsQuery);
|
||||
});
|
||||
},
|
||||
], err => {
|
||||
if (err) {
|
||||
log.trace('error processing tag condition key evaluation');
|
||||
return cb(err);
|
||||
},
|
||||
], (err, requestTagsQuery, existingTagsQuery) => {
|
||||
if (err) {
|
||||
log.trace('error processing tag condition key evaluation');
|
||||
return cb(err);
|
||||
}
|
||||
for (const rc of requestContexts) {
|
||||
rc.setNeedTagEval(true);
|
||||
if (requestTagsQuery) {
|
||||
rc.setRequestObjTags(requestTagsQuery);
|
||||
}
|
||||
return cb(null, requestContexts);
|
||||
});
|
||||
if (existingTagsQuery) {
|
||||
rc.setExistingObjTag(existingTagsQuery);
|
||||
}
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -78,23 +78,20 @@ function tagConditionKeyAuth(authorizationResults, request, requestContexts, api
|
|||
return cb();
|
||||
}
|
||||
if (!authorizationResults.some(authRes => authRes.checkTagConditions)) {
|
||||
return cb();
|
||||
return cb(null, authorizationResults);
|
||||
}
|
||||
|
||||
return updateRequestContexts(request, requestContexts, apiMethod, log, (err, updatedContexts) => {
|
||||
return updateRequestContextsWithTags(request, requestContexts, apiMethod, log, err => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
if (assert.deepStrictEqual(requestContexts, updatedContexts)) {
|
||||
return cb();
|
||||
}
|
||||
return auth.server.doAuth(request, log,
|
||||
(err, userInfo, tagAuthResults) => cb(err, tagAuthResults), 's3', updatedContexts);
|
||||
(err, userInfo, authResults) => cb(err, authResults), 's3', requestContexts);
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
tagConditionKeyAuth,
|
||||
updateRequestContexts,
|
||||
updateRequestContextsWithTags,
|
||||
makeTagQuery,
|
||||
};
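A minimal usage sketch of the helpers exported above, assuming they are in scope (the module is lib/api/apiUtils/authorization/tagConditionKeys.js per the functional test further down); the callback contract mirrors the updated tagConditionKeyAuth, which passes the (possibly re-evaluated) authorization results back to its caller. The wrapper name is hypothetical.

// Illustrative sketch only -- not part of the change. Assumes the exports
// above (tagConditionKeyAuth, makeTagQuery) are in scope.
function evaluateTagConditions(authorizationResults, request, requestContexts, apiMethod, log, done) {
    // tagConditionKeyAuth re-runs auth.server.doAuth only when at least one
    // authorization result asked for tag conditions; otherwise it calls back
    // with the original results (or nothing if there was nothing to check).
    tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log,
        (err, tagAuthResults) => {
            if (err) {
                return done(err);
            }
            return done(null, tagAuthResults || authorizationResults);
        });
}
// makeTagQuery is assumed to serialize an object's tags into the
// 'oneKey=oneValue&twoKey=twoValue' form asserted in the functional test
// further down in this diff.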
|
||||
|
|
|
@ -136,9 +136,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
|||
size,
|
||||
headers,
|
||||
isDeleteMarker,
|
||||
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size),
|
||||
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size, null, null, authInfo, isDeleteMarker),
|
||||
log,
|
||||
};
|
||||
|
||||
if (!isDeleteMarker) {
|
||||
metadataStoreParams.contentType = request.headers['content-type'];
|
||||
metadataStoreParams.cacheControl = request.headers['cache-control'];
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
const s3config = require('../../../Config').config;
|
||||
const { isLifecycleSession } = require('../authorization/permissionChecks.js');
|
||||
|
||||
function _getBackend(objectMD, site) {
|
||||
const backends = objectMD ? objectMD.replicationInfo.backends : [];
|
||||
|
@ -63,14 +64,22 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
|
|||
* @param {number} objSize - The size, in bytes, of the object being PUT
|
||||
* @param {string} operationType - The type of operation to replicate
|
||||
* @param {object} objectMD - The object metadata
|
||||
* @param {AuthInfo} [authInfo] - authentication info of object owner
|
||||
* @param {boolean} [isDeleteMarker] - whether creating a delete marker
|
||||
* @return {undefined}
|
||||
*/
|
||||
function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType,
|
||||
objectMD) {
|
||||
objectMD, authInfo, isDeleteMarker) {
|
||||
const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA'];
|
||||
const config = bucketMD.getReplicationConfiguration();
|
||||
// If bucket does not have a replication configuration, do not replicate.
|
||||
if (config) {
|
||||
// If deleting an object due to a lifecycle action,
// the delete marker is not replicated to the destination buckets.
|
||||
if (isDeleteMarker && authInfo && isLifecycleSession(authInfo.getArn())) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const rule = config.rules.find(rule =>
|
||||
(objKey.startsWith(rule.prefix) && rule.enabled));
|
||||
if (rule) {
|
||||
|
|
|
@ -8,12 +8,13 @@
|
|||
*
|
||||
* @param {array|string|null} prev - list of keys from the object being
|
||||
* overwritten
|
||||
* @param {array} curr - list of keys to be used in composing current object
|
||||
* @param {array|null} curr - list of keys to be used in composing
|
||||
* current object
|
||||
* @returns {boolean} true if no key in `curr` is present in `prev`,
|
||||
* false otherwise
|
||||
*/
|
||||
function locationKeysHaveChanged(prev, curr) {
|
||||
if (!prev || prev.length === 0) {
|
||||
if (!prev || prev.length === 0 || !curr) {
|
||||
return true;
|
||||
}
|
||||
// backwards compatibility check if object is of model version 2
|
||||
|
|
|
@ -1,5 +1,9 @@
|
|||
const { errors } = require('arsenal');
|
||||
const { errors, auth, policies } = require('arsenal');
|
||||
const moment = require('moment');
|
||||
|
||||
const { config } = require('../../../Config');
|
||||
const vault = require('../../../auth/vault');
|
||||
|
||||
/**
|
||||
* Calculates retain until date for the locked object version
|
||||
* @param {object} retention - includes days or years retention period
|
||||
|
@ -43,7 +47,7 @@ function validateHeaders(bucket, headers, log) {
|
|||
!(objectLockMode && objectLockDate)) {
|
||||
return errors.InvalidArgument.customizeDescription(
|
||||
'x-amz-object-lock-retain-until-date and ' +
|
||||
'x-amz-object-lock-mode must both be supplied'
|
||||
'x-amz-object-lock-mode must both be supplied',
|
||||
);
|
||||
}
|
||||
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
|
||||
|
@ -126,69 +130,190 @@ function setObjectLockInformation(headers, md, defaultRetention) {
|
|||
}
|
||||
|
||||
/**
|
||||
* isObjectLocked - checks whether object is locked or not
|
||||
* @param {object} bucket - bucket metadata
|
||||
* @param {object} objectMD - object metadata
|
||||
* @param {array} headers - request headers
|
||||
* @return {boolean} - indicates whether object is locked or not
|
||||
* Helper class for object lock state checks
|
||||
*/
|
||||
function isObjectLocked(bucket, objectMD, headers) {
|
||||
if (bucket.isObjectLockEnabled()) {
|
||||
const objectLegalHold = objectMD.legalHold;
|
||||
if (objectLegalHold) {
|
||||
class ObjectLockInfo {
|
||||
/**
|
||||
*
|
||||
* @param {object} retentionInfo - The object lock retention policy
|
||||
* @param {"GOVERNANCE" | "COMPLIANCE" | null} retentionInfo.mode - Retention policy mode.
|
||||
* @param {string} retentionInfo.date - Expiration date of retention policy. A string in ISO-8601 format
|
||||
* @param {bool} retentionInfo.legalHold - Whether a legal hold is enable for the object
|
||||
*/
|
||||
constructor(retentionInfo) {
|
||||
this.mode = retentionInfo.mode || null;
|
||||
this.date = retentionInfo.date || null;
|
||||
this.legalHold = retentionInfo.legalHold || false;
|
||||
}
|
||||
|
||||
/**
|
||||
* ObjectLockInfo.isLocked
|
||||
* @returns {bool} - Whether the retention policy is active and protecting the object
|
||||
*/
|
||||
isLocked() {
|
||||
if (this.legalHold) {
|
||||
return true;
|
||||
}
|
||||
const retentionMode = objectMD.retentionMode;
|
||||
const retentionDate = objectMD.retentionDate;
|
||||
if (!retentionMode || !retentionDate) {
|
||||
|
||||
if (!this.mode || !this.date) {
|
||||
return false;
|
||||
}
|
||||
if (retentionMode === 'GOVERNANCE' &&
|
||||
headers['x-amz-bypass-governance-retention']) {
|
||||
return false;
|
||||
}
|
||||
const objectDate = moment(retentionDate);
|
||||
const now = moment();
|
||||
// indicates retain until date has expired
|
||||
if (now.isSameOrAfter(objectDate)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
||||
return !this.isExpired();
|
||||
}
|
||||
|
||||
/**
|
||||
* ObjectLockInfo.isGovernanceMode
|
||||
* @returns {bool} - true if retention mode is GOVERNANCE
|
||||
*/
|
||||
isGovernanceMode() {
|
||||
return this.mode === 'GOVERNANCE';
|
||||
}
|
||||
|
||||
/**
|
||||
* ObjectLockInfo.isComplianceMode
|
||||
* @returns {bool} - True if retention mode is COMPLIANCE
|
||||
*/
|
||||
isComplianceMode() {
|
||||
return this.mode === 'COMPLIANCE';
|
||||
}
|
||||
|
||||
/**
|
||||
* ObjectLockInfo.isExpired
|
||||
* @returns {bool} - True if the retention policy has expired
|
||||
*/
|
||||
isExpired() {
|
||||
const now = moment();
|
||||
return this.date === null || now.isSameOrAfter(this.date);
|
||||
}
|
||||
|
||||
/**
|
||||
* ObjectLockInfo.isExtended
|
||||
* @param {string} timestamp - Timestamp in ISO-8601 format
|
||||
* @returns {bool} - True if the given timestamp is after the policy expiration date or if no expiration date is set
|
||||
*/
|
||||
isExtended(timestamp) {
|
||||
return timestamp !== undefined && (this.date === null || moment(timestamp).isSameOrAfter(this.date));
|
||||
}
|
||||
|
||||
/**
|
||||
* ObjectLockInfo.canModifyObject
|
||||
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
|
||||
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
|
||||
*/
|
||||
canModifyObject(hasGovernanceBypass) {
|
||||
return !this.isLocked() || (this.isGovernanceMode() && !!hasGovernanceBypass);
|
||||
}
|
||||
|
||||
/**
|
||||
* ObjectLockInfo.canModifyPolicy
|
||||
* @param {object} policyChanges - Proposed changes to the retention policy
|
||||
* @param {"GOVERNANCE" | "COMPLIANCE" | undefined} policyChanges.mode - Retention policy mode.
|
||||
* @param {string} policyChanges.date - Expiration date of retention policy. A string in ISO-8601 format
|
||||
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
|
||||
* @returns {bool} - True if the changes are allowed to be applied to the retention policy
|
||||
*/
|
||||
canModifyPolicy(policyChanges, hasGovernanceBypass) {
|
||||
// If an object does not have a retention policy or it is expired then all changes are allowed
|
||||
if (!this.isLocked()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// The only allowed change in compliance mode is extending the retention period
|
||||
if (this.isComplianceMode()) {
|
||||
if (policyChanges.mode === 'COMPLIANCE' && this.isExtended(policyChanges.date)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (this.isGovernanceMode()) {
|
||||
// Extensions are always allowed in governance mode
|
||||
if (policyChanges.mode === 'GOVERNANCE' && this.isExtended(policyChanges.date)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// All other changes in governance mode require a bypass
|
||||
if (hasGovernanceBypass) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
function validateObjectLockUpdate(objectMD, retentionInfo, bypassGovernance) {
|
||||
const { retentionMode: existingMode, retentionDate: existingDateISO } = objectMD;
|
||||
if (!existingMode) {
|
||||
return null;
|
||||
}
|
||||
/**
|
||||
*
|
||||
* @param {object} headers - s3 request headers
|
||||
* @returns {bool} - True if the header is present and equals "true"
|
||||
*/
|
||||
function hasGovernanceBypassHeader(headers) {
|
||||
const bypassHeader = headers['x-amz-bypass-governance-retention'] || '';
|
||||
return bypassHeader.toLowerCase() === 'true';
|
||||
}
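A short, self-contained sketch (not part of the change) of how ObjectLockInfo and hasGovernanceBypassHeader are meant to be combined; the retention values and headers below are made up for illustration.

// Illustrative values only.
const request = { headers: { 'x-amz-bypass-governance-retention': 'true' } };
const objLockInfo = new ObjectLockInfo({
    mode: 'GOVERNANCE',                // from objMD.retentionMode
    date: '2030-01-01T00:00:00.000Z',  // from objMD.retentionDate
    legalHold: false,                  // from objMD.legalHold
});
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
// Overwrite/delete of the locked version: allowed when the object is not
// locked, or locked in GOVERNANCE mode with a bypass (the IAM permission for
// the bypass is checked separately via checkUserGovernanceBypass).
const canDelete = objLockInfo.canModifyObject(hasGovernanceBypass);
// Retention policy update: COMPLIANCE only allows extending the date;
// GOVERNANCE allows extensions, or any change when a bypass is present.
const canRetain = objLockInfo.canModifyPolicy(
    { mode: 'GOVERNANCE', date: '2031-01-01T00:00:00.000Z' },
    hasGovernanceBypass);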
|
||||
|
||||
const existingDate = new Date(existingDateISO);
|
||||
const isExpired = existingDate < Date.now();
|
||||
|
||||
if (existingMode === 'GOVERNANCE' && !isExpired && !bypassGovernance) {
|
||||
return errors.AccessDenied;
|
||||
}
|
||||
/**
|
||||
* checkUserGovernanceBypass
|
||||
*
|
||||
* Checks for the presence of the s3:BypassGovernanceRetention permission for a given user
|
||||
*
|
||||
* @param {object} request - Incoming s3 request
|
||||
* @param {object} authInfo - s3 authentication info
|
||||
* @param {object} bucketMD - bucket metadata
|
||||
* @param {string} objectKey - object key
|
||||
* @param {object} log - Werelogs logger
|
||||
* @param {function} cb - callback returns errors.AccessDenied if the authorization fails
|
||||
* @returns {undefined} -
|
||||
*/
|
||||
function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) {
|
||||
log.trace(
|
||||
'object in GOVERNANCE mode and is user, checking for attached policies',
|
||||
{ method: 'checkUserPolicyGovernanceBypass' },
|
||||
);
|
||||
|
||||
if (existingMode === 'COMPLIANCE') {
|
||||
if (retentionInfo.mode === 'GOVERNANCE' && !isExpired) {
|
||||
return errors.AccessDenied;
|
||||
}
|
||||
|
||||
if (new Date(retentionInfo.date) < existingDate) {
|
||||
return errors.AccessDenied;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
const authParams = auth.server.extractParams(request, log, 's3', request.query);
|
||||
const ip = policies.requestUtils.getClientIp(request, config);
|
||||
const requestContextParams = {
|
||||
constantParams: {
|
||||
headers: request.headers,
|
||||
query: request.query,
|
||||
generalResource: bucketMD.getName(),
|
||||
specificResource: { key: objectKey },
|
||||
requesterIp: ip,
|
||||
sslEnabled: request.connection.encrypted,
|
||||
apiMethod: 'bypassGovernanceRetention',
|
||||
awsService: 's3',
|
||||
locationConstraint: bucketMD.getLocationConstraint(),
|
||||
requesterInfo: authInfo,
|
||||
signatureVersion: authParams.params.data.signatureVersion,
|
||||
authType: authParams.params.data.authType,
|
||||
signatureAge: authParams.params.data.signatureAge,
|
||||
},
|
||||
};
|
||||
return vault.checkPolicies(requestContextParams,
|
||||
authInfo.getArn(), log, (err, authorizationResults) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
if (authorizationResults[0].isAllowed !== true) {
|
||||
log.trace('authorization check failed for user',
|
||||
{
|
||||
'method': 'checkUserPolicyGovernanceBypass',
|
||||
's3:BypassGovernanceRetention': false,
|
||||
});
|
||||
return cb(errors.AccessDenied);
|
||||
}
|
||||
return cb(null);
|
||||
});
|
||||
}
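A sketch of the calling pattern the API handlers below use around checkUserGovernanceBypass (not part of the change; the wrapper name is hypothetical).

// Hypothetical wrapper illustrating the intended call order: only consult
// Vault when the bypass header is set and the requester is an IAM user.
function resolveGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) {
    const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
    if (!hasGovernanceBypass || !authInfo.isRequesterAnIAMUser()) {
        // Account credentials: the header alone decides the bypass.
        return cb(null, hasGovernanceBypass);
    }
    return checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, err => {
        if (err) {
            // errors.AccessDenied here means the user lacks the
            // s3:BypassGovernanceRetention permission.
            return cb(err);
        }
        return cb(null, true);
    });
}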
|
||||
|
||||
module.exports = {
|
||||
calculateRetainUntilDate,
|
||||
compareObjectLockInformation,
|
||||
setObjectLockInformation,
|
||||
isObjectLocked,
|
||||
validateHeaders,
|
||||
validateObjectLockUpdate,
|
||||
hasGovernanceBypassHeader,
|
||||
checkUserGovernanceBypass,
|
||||
ObjectLockInfo,
|
||||
};
|
||||
|
|
|
@ -186,6 +186,7 @@ function processMasterVersions(bucketName, listParams, list) {
|
|||
{ tag: 'EncodingType', value: listParams.encoding },
|
||||
{ tag: 'IsTruncated', value: isTruncated },
|
||||
];
|
||||
|
||||
if (listParams.v2) {
|
||||
xmlParams.push(
|
||||
{ tag: 'StartAfter', value: listParams.startAfter || '' });
|
||||
|
@ -327,9 +328,13 @@ function bucketGet(authInfo, request, log, callback) {
|
|||
const listParams = {
|
||||
listingType: 'DelimiterMaster',
|
||||
maxKeys: actualMaxKeys,
|
||||
delimiter: params.delimiter,
|
||||
prefix: params.prefix,
|
||||
};
|
||||
|
||||
if (params.delimiter) {
|
||||
listParams.delimiter = params.delimiter;
|
||||
}
|
||||
|
||||
if (v2) {
|
||||
listParams.v2 = true;
|
||||
listParams.startAfter = params['start-after'];
|
||||
|
|
|
@ -97,6 +97,97 @@ function _parseXML(request, log, cb) {
|
|||
});
|
||||
}
|
||||
|
||||
function _buildConstantParams({ request, bucketName, authInfo, authParams, ip, locationConstraint, apiMethod }) {
|
||||
return {
|
||||
constantParams: {
|
||||
headers: request.headers,
|
||||
query: request.query,
|
||||
generalResource: bucketName,
|
||||
specificResource: {
|
||||
key: '',
|
||||
},
|
||||
requesterIp: ip,
|
||||
sslEnabled: request.connection.encrypted,
|
||||
awsService: 's3',
|
||||
requesterInfo: authInfo,
|
||||
signatureVersion: authParams.params.data.authType,
|
||||
authType: authParams.params.data.signatureVersion,
|
||||
signatureAge: authParams.params.data.signatureAge,
|
||||
apiMethod,
|
||||
locationConstraint,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function _handleAuthResults(locationConstraint, log, cb) {
|
||||
return (err, authorizationResults) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
if (!authorizationResults.every(res => {
|
||||
if (Array.isArray(res)) {
|
||||
return res.every(subRes => subRes.isAllowed);
|
||||
}
|
||||
return res.isAllowed;
|
||||
})) {
|
||||
log.trace(
|
||||
'authorization check failed for user',
|
||||
{ locationConstraint },
|
||||
);
|
||||
return cb(errors.AccessDenied);
|
||||
}
|
||||
return cb(null, locationConstraint);
|
||||
};
|
||||
}
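For reference, Vault may return either flat results or one sub-array of results per request context; a tiny self-contained illustration of the check above, with made-up values:

// Made-up results shaped like what vault.checkPolicies can return.
const sampleResults = [
    { isAllowed: true },
    [{ isAllowed: true }, { isAllowed: false }], // one denied sub-result
];
const allAllowed = sampleResults.every(res =>
    (Array.isArray(res) ? res.every(subRes => subRes.isAllowed) : res.isAllowed));
// allAllowed === false, so _handleAuthResults would call back with AccessDenied.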
|
||||
|
||||
function _isObjectLockEnabled(headers) {
|
||||
const header = headers['x-amz-bucket-object-lock-enabled'];
|
||||
return header !== undefined && header.toLowerCase() === 'true';
|
||||
}
|
||||
|
||||
function _isAclProvided(headers) {
|
||||
return aclUtils.getHeaders().some(h => Object.keys(headers).includes(h));
|
||||
}
|
||||
|
||||
function authBucketPut(authParams, bucketName, locationConstraint, request, authInfo) {
|
||||
const ip = requestUtils.getClientIp(request, config);
|
||||
const baseParams = {
|
||||
authParams,
|
||||
ip,
|
||||
bucketName,
|
||||
request,
|
||||
authInfo,
|
||||
locationConstraint,
|
||||
};
|
||||
const requestConstantParams = [Object.assign(
|
||||
baseParams,
|
||||
{ apiMethod: 'bucketPut' },
|
||||
)];
|
||||
|
||||
if (_isObjectLockEnabled(request.headers)) {
|
||||
requestConstantParams.push(Object.assign(
|
||||
{},
|
||||
baseParams,
|
||||
{ apiMethod: 'bucketPutObjectLock' },
|
||||
));
|
||||
requestConstantParams.push(Object.assign(
|
||||
{},
|
||||
baseParams,
|
||||
{ apiMethod: 'bucketPutVersioning' },
|
||||
));
|
||||
}
|
||||
|
||||
if (_isAclProvided(request.headers)) {
|
||||
requestConstantParams.push(Object.assign(
|
||||
{},
|
||||
baseParams,
|
||||
{ apiMethod: 'bucketPutACL' },
|
||||
));
|
||||
}
|
||||
|
||||
return requestConstantParams;
|
||||
}
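Taken together, the helpers above are wired as in the sketch below; it mirrors the bucketPut waterfall step further down in this file, and the wrapper name is hypothetical.

// Hypothetical wrapper mirroring the bucketPut waterfall step below: one
// request-context description per implied API call, expanded with
// _buildConstantParams and evaluated by Vault in a single checkPolicies call.
function checkBucketPutPolicies(authInfo, bucketName, locationConstraint, request, log, next) {
    const authParams = auth.server.extractParams(request, log, 's3', request.query);
    const requestConstantParams = authBucketPut(
        authParams, bucketName, locationConstraint, request, authInfo);
    return vault.checkPolicies(
        requestConstantParams.map(_buildConstantParams),
        authInfo.getArn(),
        log,
        _handleAuthResults(locationConstraint, log, next));
}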
|
||||
|
||||
/**
|
||||
* PUT Service - Create bucket for the user
|
||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
||||
|
@ -126,45 +217,23 @@ function bucketPut(authInfo, request, log, callback) {
|
|||
|
||||
return waterfall([
|
||||
next => _parseXML(request, log, next),
|
||||
// Check policies in Vault for a user.
|
||||
(locationConstraint, next) => {
|
||||
if (authInfo.isRequesterAnIAMUser()) {
|
||||
const authParams = auth.server.extractParams(request, log, 's3',
|
||||
request.query);
|
||||
const ip = requestUtils.getClientIp(request, config);
|
||||
const requestContextParams = {
|
||||
constantParams: {
|
||||
headers: request.headers,
|
||||
query: request.query,
|
||||
generalResource: bucketName,
|
||||
specificResource: {
|
||||
key: '',
|
||||
},
|
||||
requesterIp: ip,
|
||||
sslEnabled: request.connection.encrypted,
|
||||
apiMethod: 'bucketPut',
|
||||
awsService: 's3',
|
||||
locationConstraint,
|
||||
requesterInfo: authInfo,
|
||||
signatureVersion: authParams.params.data.authType,
|
||||
authType: authParams.params.data.signatureVersion,
|
||||
signatureAge: authParams.params.data.signatureAge,
|
||||
},
|
||||
};
|
||||
return vault.checkPolicies(requestContextParams,
|
||||
authInfo.getArn(), log, (err, authorizationResults) => {
|
||||
if (err) {
|
||||
return next(err);
|
||||
}
|
||||
if (authorizationResults[0].isAllowed !== true) {
|
||||
log.trace('authorization check failed for user',
|
||||
{ locationConstraint });
|
||||
return next(errors.AccessDenied);
|
||||
}
|
||||
return next(null, locationConstraint);
|
||||
});
|
||||
// Check policies in Vault for a user.
|
||||
if (!authInfo.isRequesterAnIAMUser()) {
|
||||
return next(null, locationConstraint);
|
||||
}
|
||||
return next(null, locationConstraint);
|
||||
|
||||
const authParams = auth.server.extractParams(request, log, 's3', request.query);
|
||||
const requestConstantParams = authBucketPut(
|
||||
authParams, bucketName, locationConstraint, request, authInfo
|
||||
);
|
||||
|
||||
return vault.checkPolicies(
|
||||
requestConstantParams.map(_buildConstantParams),
|
||||
authInfo.getArn(),
|
||||
log,
|
||||
_handleAuthResults(locationConstraint, log, next),
|
||||
);
|
||||
},
|
||||
(locationConstraint, next) => createBucket(authInfo, bucketName,
|
||||
request.headers, locationConstraint, log, (err, previousBucket) => {
|
||||
|
@ -187,4 +256,6 @@ function bucketPut(authInfo, request, log, callback) {
|
|||
module.exports = {
|
||||
checkLocationConstraint,
|
||||
bucketPut,
|
||||
_handleAuthResults,
|
||||
authBucketPut,
|
||||
};
|
||||
|
|
|
@ -157,6 +157,22 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
|||
}
|
||||
return next(errors.MalformedXML, destBucket);
|
||||
},
|
||||
function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList,
|
||||
storedMetadata, location, mpuOverviewKey, next) {
|
||||
return services.metadataMarkMPObjectForCompletion({
|
||||
bucketName: mpuBucket.getName(),
|
||||
objectKey,
|
||||
uploadId,
|
||||
splitter,
|
||||
storedMetadata,
|
||||
}, log, err => {
|
||||
if (err) {
|
||||
return next(err);
|
||||
}
|
||||
return next(null, destBucket, objMD, mpuBucket,
|
||||
jsonList, storedMetadata, location, mpuOverviewKey);
|
||||
});
|
||||
},
|
||||
function retrieveParts(destBucket, objMD, mpuBucket, jsonList,
|
||||
storedMetadata, location, mpuOverviewKey, next) {
|
||||
return services.getMPUparts(mpuBucket.getName(), uploadId, log,
|
||||
|
|
|
@ -18,7 +18,8 @@ const { preprocessingVersioningDelete }
|
|||
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
|
||||
const { metadataGetObject } = require('../metadata/metadataUtils');
|
||||
const { config } = require('../Config');
|
||||
const { isObjectLocked } = require('./apiUtils/object/objectLockHelpers');
|
||||
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
||||
= require('./apiUtils/object/objectLockHelpers');
|
||||
const requestUtils = policies.requestUtils;
|
||||
|
||||
const versionIdUtils = versioning.VersionID;
|
||||
|
@ -229,10 +230,6 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
|||
successfullyDeleted.push({ entry });
|
||||
return callback(skipError);
|
||||
}
|
||||
if (versionId && isObjectLocked(bucket, objMD, request.headers)) {
|
||||
log.debug('trying to delete locked object');
|
||||
return callback(objectLockedError);
|
||||
}
|
||||
if (versionId && objMD.location &&
|
||||
Array.isArray(objMD.location) && objMD.location[0]) {
|
||||
// we need this information for data deletes to AWS
|
||||
|
@ -241,6 +238,47 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
|||
}
|
||||
return callback(null, objMD, versionId);
|
||||
}),
|
||||
(objMD, versionId, callback) => {
|
||||
// AWS only returns an object lock error if a version id
|
||||
// is specified, else continue to create a delete marker
|
||||
if (!versionId || !bucket.isObjectLockEnabled()) {
|
||||
return callback(null, null, objMD, versionId);
|
||||
}
|
||||
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
|
||||
if (hasGovernanceBypass && authInfo.isRequesterAnIAMUser()) {
|
||||
return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
|
||||
if (error && error.is.AccessDenied) {
|
||||
log.debug('user does not have BypassGovernanceRetention and object is locked', { error });
|
||||
return callback(objectLockedError);
|
||||
}
|
||||
if (error) {
|
||||
return callback(error);
|
||||
}
|
||||
return callback(null, hasGovernanceBypass, objMD, versionId);
|
||||
});
|
||||
}
|
||||
return callback(null, hasGovernanceBypass, objMD, versionId);
|
||||
},
|
||||
(hasGovernanceBypass, objMD, versionId, callback) => {
|
||||
// AWS only returns an object lock error if a version id
|
||||
// is specified, else continue to create a delete marker
|
||||
if (!versionId || !bucket.isObjectLockEnabled()) {
|
||||
return callback(null, objMD, versionId);
|
||||
}
|
||||
const objLockInfo = new ObjectLockInfo({
|
||||
mode: objMD.retentionMode,
|
||||
date: objMD.retentionDate,
|
||||
legalHold: objMD.legalHold || false,
|
||||
});
|
||||
|
||||
// If the object cannot be deleted, raise an error
|
||||
if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
|
||||
log.debug('trying to delete locked object');
|
||||
return callback(objectLockedError);
|
||||
}
|
||||
|
||||
return callback(null, objMD, versionId);
|
||||
},
|
||||
(objMD, versionId, callback) =>
|
||||
preprocessingVersioningDelete(bucketName, bucket, objMD,
|
||||
versionId, log, (err, options) => callback(err, options,
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
/* eslint-disable indent */
|
||||
const async = require('async');
|
||||
const { errors, versioning } = require('arsenal');
|
||||
|
||||
|
@ -8,7 +9,8 @@ const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
|
|||
const { decodeVersionId, preprocessingVersioningDelete }
|
||||
= require('./apiUtils/object/versioning');
|
||||
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||
const { isObjectLocked } = require('./apiUtils/object/objectLockHelpers');
|
||||
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
||||
= require('./apiUtils/object/objectLockHelpers');
|
||||
const { config } = require('../Config');
|
||||
|
||||
const versionIdUtils = versioning.VersionID;
|
||||
|
@ -78,13 +80,6 @@ function objectDelete(authInfo, request, log, cb) {
|
|||
// versioning has been configured
|
||||
return next(null, bucketMD, objMD);
|
||||
}
|
||||
// AWS only returns an object lock error if a version id
|
||||
// is specified, else continue to create a delete marker
|
||||
if (reqVersionId &&
|
||||
isObjectLocked(bucketMD, objMD, request.headers)) {
|
||||
log.debug('trying to delete locked object');
|
||||
return next(objectLockedError, bucketMD);
|
||||
}
|
||||
if (reqVersionId && objMD.location &&
|
||||
Array.isArray(objMD.location) && objMD.location[0]) {
|
||||
// we need this information for data deletes to AWS
|
||||
|
@ -99,6 +94,45 @@ function objectDelete(authInfo, request, log, cb) {
|
|||
return next(null, bucketMD, objMD);
|
||||
});
|
||||
},
|
||||
function checkGovernanceBypassHeader(bucketMD, objectMD, next) {
|
||||
// AWS only returns an object lock error if a version id
|
||||
// is specified, else continue to create a delete marker
|
||||
if (!reqVersionId) {
|
||||
return next(null, null, bucketMD, objectMD);
|
||||
}
|
||||
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
|
||||
if (hasGovernanceBypass && authInfo.isRequesterAnIAMUser()) {
|
||||
return checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, err => {
|
||||
if (err) {
|
||||
log.debug('user does not have BypassGovernanceRetention and object is locked');
|
||||
return next(err, bucketMD);
|
||||
}
|
||||
return next(null, hasGovernanceBypass, bucketMD, objectMD);
|
||||
});
|
||||
}
|
||||
return next(null, hasGovernanceBypass, bucketMD, objectMD);
|
||||
},
|
||||
function evaluateObjectLockPolicy(hasGovernanceBypass, bucketMD, objectMD, next) {
|
||||
// AWS only returns an object lock error if a version id
|
||||
// is specified, else continue to create a delete marker
|
||||
if (!reqVersionId) {
|
||||
return next(null, bucketMD, objectMD);
|
||||
}
|
||||
|
||||
const objLockInfo = new ObjectLockInfo({
|
||||
mode: objectMD.retentionMode,
|
||||
date: objectMD.retentionDate,
|
||||
legalHold: objectMD.legalHold || false,
|
||||
});
|
||||
|
||||
// If the object cannot be deleted, raise an error
|
||||
if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
|
||||
log.debug('trying to delete locked object');
|
||||
return next(objectLockedError, bucketMD);
|
||||
}
|
||||
|
||||
return next(null, bucketMD, objectMD);
|
||||
},
|
||||
function getVersioningInfo(bucketMD, objectMD, next) {
|
||||
return preprocessingVersioningDelete(bucketName,
|
||||
bucketMD, objectMD, reqVersionId, log,
|
||||
|
|
|
@ -130,10 +130,12 @@ function objectHead(authInfo, request, log, callback) {
|
|||
return callback(errors.BadRequest, corsHeaders);
|
||||
}
|
||||
const partSize = getPartSize(objMD, partNumber);
|
||||
if (!partSize) {
|
||||
const isEmptyObject = objLength === 0;
|
||||
if (!partSize && !isEmptyObject) {
|
||||
return callback(errors.InvalidRange, corsHeaders);
|
||||
}
|
||||
responseHeaders['content-length'] = partSize;
|
||||
|
||||
responseHeaders['content-length'] = isEmptyObject ? 0 : partSize;
|
||||
const partsCount = getPartCountFromMd5(objMD);
|
||||
if (partsCount) {
|
||||
responseHeaders['x-amz-mp-parts-count'] = partsCount;
|
||||
|
|
|
@ -236,7 +236,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
|||
res.controllingLocationConstraint;
|
||||
return next(null, dataLocator, destBucketMD,
|
||||
destObjLocationConstraint, copyObjectSize,
|
||||
sourceVerId, sourceLocationConstraintName);
|
||||
sourceVerId, sourceLocationConstraintName, splitter);
|
||||
});
|
||||
},
|
||||
function goGetData(
|
||||
|
@ -246,6 +246,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
|||
copyObjectSize,
|
||||
sourceVerId,
|
||||
sourceLocationConstraintName,
|
||||
splitter,
|
||||
next,
|
||||
) {
|
||||
data.uploadPartCopy(
|
||||
|
@ -268,12 +269,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
|||
}
|
||||
return next(null, destBucketMD, locations, eTag,
|
||||
copyObjectSize, sourceVerId, serverSideEncryption,
|
||||
lastModified);
|
||||
lastModified, splitter);
|
||||
});
|
||||
},
|
||||
function getExistingPartInfo(destBucketMD, locations, totalHash,
|
||||
copyObjectSize, sourceVerId, serverSideEncryption, lastModified,
|
||||
next) {
|
||||
splitter, next) {
|
||||
const partKey =
|
||||
`${uploadId}${constants.splitter}${paddedPartNumber}`;
|
||||
metadata.getObjectMD(mpuBucketName, partKey, {}, log,
|
||||
|
@ -298,12 +299,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
|||
}
|
||||
return next(null, destBucketMD, locations, totalHash,
|
||||
prevObjectSize, copyObjectSize, sourceVerId,
|
||||
serverSideEncryption, lastModified, oldLocations);
|
||||
serverSideEncryption, lastModified, oldLocations, splitter);
|
||||
});
|
||||
},
|
||||
function storeNewPartMetadata(destBucketMD, locations, totalHash,
|
||||
prevObjectSize, copyObjectSize, sourceVerId, serverSideEncryption,
|
||||
lastModified, oldLocations, next) {
|
||||
lastModified, oldLocations, splitter, next) {
|
||||
const metaStoreParams = {
|
||||
partNumber: paddedPartNumber,
|
||||
contentMD5: totalHash,
|
||||
|
@ -319,20 +320,58 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
|||
{ error: err, method: 'storeNewPartMetadata' });
|
||||
return next(err);
|
||||
}
|
||||
return next(null, oldLocations, destBucketMD, totalHash,
|
||||
return next(null, locations, oldLocations, destBucketMD, totalHash,
|
||||
lastModified, sourceVerId, serverSideEncryption,
|
||||
prevObjectSize, copyObjectSize);
|
||||
prevObjectSize, copyObjectSize, splitter);
|
||||
});
|
||||
},
|
||||
function cleanupExistingData(oldLocations, destBucketMD, totalHash,
|
||||
function checkCanDeleteOldLocations(partLocations, oldLocations, destBucketMD,
|
||||
totalHash, lastModified, sourceVerId, serverSideEncryption,
|
||||
prevObjectSize, copyObjectSize, splitter, next) {
|
||||
if (!oldLocations) {
|
||||
return next(null, oldLocations, destBucketMD, totalHash,
|
||||
lastModified, sourceVerId, serverSideEncryption,
|
||||
prevObjectSize, copyObjectSize);
|
||||
}
|
||||
return services.isCompleteMPUInProgress({
|
||||
bucketName: destBucketName,
|
||||
objectKey: destObjectKey,
|
||||
uploadId,
|
||||
splitter,
|
||||
}, log, (err, completeInProgress) => {
|
||||
if (err) {
|
||||
return next(err, destBucketMD);
|
||||
}
|
||||
let oldLocationsToDelete = oldLocations;
|
||||
// Prevent deletion of old data if a completeMPU
|
||||
// is already in progress because then there is no
|
||||
// guarantee that the old location will not be the
|
||||
// committed one.
|
||||
if (completeInProgress) {
|
||||
log.warn('not deleting old locations because CompleteMPU is in progress', {
|
||||
method: 'objectPutCopyPart::checkCanDeleteOldLocations',
|
||||
bucketName: destBucketName,
|
||||
objectKey: destObjectKey,
|
||||
uploadId,
|
||||
partLocations,
|
||||
oldLocations,
|
||||
});
|
||||
oldLocationsToDelete = null;
|
||||
}
|
||||
return next(null, oldLocationsToDelete, destBucketMD, totalHash,
|
||||
lastModified, sourceVerId, serverSideEncryption,
|
||||
prevObjectSize, copyObjectSize);
|
||||
});
|
||||
},
|
||||
function cleanupExistingData(oldLocationsToDelete, destBucketMD, totalHash,
|
||||
lastModified, sourceVerId, serverSideEncryption,
|
||||
prevObjectSize, copyObjectSize, next) {
|
||||
// Clean up the old data now that new metadata (with new
|
||||
// data locations) has been stored
|
||||
if (oldLocations) {
|
||||
if (oldLocationsToDelete) {
|
||||
const delLog = logger.newRequestLoggerFromSerializedUids(
|
||||
log.getSerializedUids());
|
||||
return data.batchDelete(oldLocations, request.method, null,
|
||||
return data.batchDelete(oldLocationsToDelete, request.method, null,
|
||||
delLog, err => {
|
||||
if (err) {
|
||||
// if error, log the error and move on as it is not
|
||||
|
|
|
@ -13,6 +13,7 @@ const kms = require('../kms/wrapper');
|
|||
const metadata = require('../metadata/wrapper');
|
||||
const { pushMetric } = require('../utapi/utilities');
|
||||
const logger = require('../utilities/logger');
|
||||
const services = require('../services');
|
||||
const locationConstraintCheck
|
||||
= require('./apiUtils/object/locationConstraintCheck');
|
||||
const writeContinue = require('../utilities/writeContinue');
|
||||
|
@ -243,19 +244,19 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
|||
}
|
||||
return next(null, destinationBucket,
|
||||
objectLocationConstraint, cipherBundle,
|
||||
partKey, prevObjectSize, oldLocations, partInfo);
|
||||
partKey, prevObjectSize, oldLocations, partInfo, splitter);
|
||||
});
|
||||
},
|
||||
// Store in data backend.
|
||||
(destinationBucket, objectLocationConstraint, cipherBundle,
|
||||
partKey, prevObjectSize, oldLocations, partInfo, next) => {
|
||||
partKey, prevObjectSize, oldLocations, partInfo, splitter, next) => {
|
||||
// NOTE: set oldLocations to null so we do not batchDelete for now
|
||||
if (partInfo && partInfo.dataStoreType === 'azure') {
|
||||
// skip to storing metadata
|
||||
return next(null, destinationBucket, partInfo,
|
||||
partInfo.dataStoreETag,
|
||||
cipherBundle, partKey, prevObjectSize, null,
|
||||
objectLocationConstraint);
|
||||
objectLocationConstraint, splitter);
|
||||
}
|
||||
const objectContext = {
|
||||
bucketName,
|
||||
|
@ -275,12 +276,13 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
|||
}
|
||||
return next(null, destinationBucket, dataGetInfo, hexDigest,
|
||||
cipherBundle, partKey, prevObjectSize, oldLocations,
|
||||
objectLocationConstraint);
|
||||
objectLocationConstraint, splitter);
|
||||
});
|
||||
},
|
||||
// Store data locations in metadata and delete any overwritten data.
|
||||
// Store data locations in metadata and delete any overwritten
|
||||
// data if completeMPU hasn't been initiated yet.
|
||||
(destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
|
||||
prevObjectSize, oldLocations, objectLocationConstraint, next) => {
|
||||
prevObjectSize, oldLocations, objectLocationConstraint, splitter, next) => {
|
||||
// Use an array to be consistent with objectPutCopyPart where there
|
||||
// could be multiple locations.
|
||||
const partLocations = [dataGetInfo];
|
||||
|
@ -310,19 +312,54 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
|||
});
|
||||
return next(err, destinationBucket);
|
||||
}
|
||||
return next(null, oldLocations, objectLocationConstraint,
|
||||
destinationBucket, hexDigest, prevObjectSize);
|
||||
return next(null, partLocations, oldLocations, objectLocationConstraint,
|
||||
destinationBucket, hexDigest, prevObjectSize, splitter);
|
||||
});
|
||||
},
|
||||
(partLocations, oldLocations, objectLocationConstraint, destinationBucket,
|
||||
hexDigest, prevObjectSize, splitter, next) => {
|
||||
if (!oldLocations) {
|
||||
return next(null, oldLocations, objectLocationConstraint,
|
||||
destinationBucket, hexDigest, prevObjectSize);
|
||||
}
|
||||
return services.isCompleteMPUInProgress({
|
||||
bucketName,
|
||||
objectKey,
|
||||
uploadId,
|
||||
splitter,
|
||||
}, log, (err, completeInProgress) => {
|
||||
if (err) {
|
||||
return next(err, destinationBucket);
|
||||
}
|
||||
let oldLocationsToDelete = oldLocations;
|
||||
// Prevent deletion of old data if a completeMPU
|
||||
// is already in progress because then there is no
|
||||
// guarantee that the old location will not be the
|
||||
// committed one.
|
||||
if (completeInProgress) {
|
||||
log.warn('not deleting old locations because CompleteMPU is in progress', {
|
||||
method: 'objectPutPart::metadata.getObjectMD',
|
||||
bucketName,
|
||||
objectKey,
|
||||
uploadId,
|
||||
partLocations,
|
||||
oldLocations,
|
||||
});
|
||||
oldLocationsToDelete = null;
|
||||
}
|
||||
return next(null, oldLocationsToDelete, objectLocationConstraint,
|
||||
destinationBucket, hexDigest, prevObjectSize);
|
||||
});
|
||||
},
|
||||
// Clean up any old data now that new metadata (with new
|
||||
// data locations) has been stored.
|
||||
(oldLocations, objectLocationConstraint, destinationBucket, hexDigest,
|
||||
(oldLocationsToDelete, objectLocationConstraint, destinationBucket, hexDigest,
|
||||
prevObjectSize, next) => {
|
||||
if (oldLocations) {
|
||||
if (oldLocationsToDelete) {
|
||||
log.trace('overwriting mpu part, deleting data');
|
||||
const delLog = logger.newRequestLoggerFromSerializedUids(
|
||||
log.getSerializedUids());
|
||||
return data.batchDelete(oldLocations, request.method,
|
||||
return data.batchDelete(oldLocationsToDelete, request.method,
|
||||
objectLocationConstraint, delLog, err => {
|
||||
if (err) {
|
||||
// if error, log the error and move on as it is not
|
||||
|
|
|
@ -1,17 +1,15 @@
|
|||
const async = require('async');
|
||||
const { errors, s3middleware, auth, policies } = require('arsenal');
|
||||
const { errors, s3middleware } = require('arsenal');
|
||||
|
||||
const vault = require('../auth/vault');
|
||||
const { decodeVersionId, getVersionIdResHeader } =
|
||||
require('./apiUtils/object/versioning');
|
||||
const { validateObjectLockUpdate } =
|
||||
const { ObjectLockInfo, checkUserGovernanceBypass, hasGovernanceBypassHeader } =
|
||||
require('./apiUtils/object/objectLockHelpers');
|
||||
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||
const { pushMetric } = require('../utapi/utilities');
|
||||
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
|
||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||
const metadata = require('../metadata/wrapper');
|
||||
const { config } = require('../Config');
|
||||
|
||||
const { parseRetentionXml } = s3middleware.retention;
|
||||
const REPLICATION_ACTION = 'PUT_RETENTION';
|
||||
|
@ -83,54 +81,31 @@ function objectPutRetention(authInfo, request, log, callback) {
|
|||
(err, retentionInfo) => next(err, bucket, retentionInfo, objectMD));
|
||||
},
|
||||
(bucket, retentionInfo, objectMD, next) => {
|
||||
if (objectMD.retentionMode === 'GOVERNANCE' && authInfo.isRequesterAnIAMUser()) {
|
||||
log.trace('object in GOVERNANCE mode and is user, checking for attached policies',
|
||||
{ method: 'objectPutRetention' });
|
||||
const authParams = auth.server.extractParams(request, log, 's3',
|
||||
request.query);
|
||||
const ip = policies.requestUtils.getClientIp(request, config);
|
||||
const requestContextParams = {
|
||||
constantParams: {
|
||||
headers: request.headers,
|
||||
query: request.query,
|
||||
generalResource: bucketName,
|
||||
specificResource: { key: objectKey },
|
||||
requesterIp: ip,
|
||||
sslEnabled: request.connection.encrypted,
|
||||
apiMethod: 'bypassGovernanceRetention',
|
||||
awsService: 's3',
|
||||
locationConstraint: bucket.getLocationConstraint(),
|
||||
requesterInfo: authInfo,
|
||||
signatureVersion: authParams.params.data.signatureVersion,
|
||||
authType: authParams.params.data.authType,
|
||||
signatureAge: authParams.params.data.signatureAge,
|
||||
},
|
||||
};
|
||||
return vault.checkPolicies(requestContextParams,
|
||||
authInfo.getArn(), log, (err, authorizationResults) => {
|
||||
if (err) {
|
||||
return next(err);
|
||||
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
|
||||
if (hasGovernanceBypass && authInfo.isRequesterAnIAMUser()) {
|
||||
return checkUserGovernanceBypass(request, authInfo, bucket, objectKey, log, err => {
|
||||
if (err) {
|
||||
if (err.is.AccessDenied) {
|
||||
log.debug('user does not have BypassGovernanceRetention and object is locked');
|
||||
}
|
||||
if (authorizationResults[0].isAllowed !== true) {
|
||||
log.trace('authorization check failed for user',
|
||||
{
|
||||
'method': 'objectPutRetention',
|
||||
's3:BypassGovernanceRetention': false,
|
||||
});
|
||||
return next(errors.AccessDenied);
|
||||
}
|
||||
return next(null, bucket, retentionInfo, objectMD);
|
||||
});
|
||||
return next(err, bucket);
|
||||
}
|
||||
return next(null, bucket, retentionInfo, hasGovernanceBypass, objectMD);
|
||||
});
|
||||
}
|
||||
return next(null, bucket, retentionInfo, objectMD);
|
||||
return next(null, bucket, retentionInfo, hasGovernanceBypass, objectMD);
|
||||
},
|
||||
(bucket, retentionInfo, objectMD, next) => {
|
||||
const bypassHeader = request.headers['x-amz-bypass-governance-retention'] || '';
|
||||
const bypassGovernance = bypassHeader.toLowerCase() === 'true';
|
||||
const validationError = validateObjectLockUpdate(objectMD, retentionInfo, bypassGovernance);
|
||||
if (validationError) {
|
||||
return next(validationError, bucket, objectMD);
|
||||
(bucket, retentionInfo, hasGovernanceBypass, objectMD, next) => {
|
||||
const objLockInfo = new ObjectLockInfo({
|
||||
mode: objectMD.retentionMode,
|
||||
date: objectMD.retentionDate,
|
||||
legalHold: objectMD.legalHold,
|
||||
});
|
||||
|
||||
if (!objLockInfo.canModifyPolicy(retentionInfo, hasGovernanceBypass)) {
|
||||
return next(errors.AccessDenied, bucket);
|
||||
}
|
||||
|
||||
return next(null, bucket, retentionInfo, objectMD);
|
||||
},
|
||||
(bucket, retentionInfo, objectMD, next) => {
|
||||
|
|
|
@ -451,6 +451,80 @@ const services = {
|
|||
});
|
||||
},
|
||||
|
||||
/**
|
||||
* Mark the MPU overview key with a flag when starting the
|
||||
* CompleteMPU operation, to be checked by "put part" operations
|
||||
*
|
||||
* @param {object} params - params object
|
||||
* @param {string} params.bucketName - name of MPU bucket
|
||||
* @param {string} params.objectKey - object key
|
||||
* @param {string} params.uploadId - upload ID
|
||||
* @param {string} params.splitter - splitter for this overview key
|
||||
* @param {object} params.storedMetadata - original metadata of the overview key
|
||||
* @param {Logger} log - Logger object
|
||||
* @param {function} cb - callback(err)
|
||||
* @return {undefined}
|
||||
*/
|
||||
metadataMarkMPObjectForCompletion(params, log, cb) {
|
||||
assert.strictEqual(typeof params, 'object');
|
||||
assert.strictEqual(typeof params.bucketName, 'string');
|
||||
assert.strictEqual(typeof params.objectKey, 'string');
|
||||
assert.strictEqual(typeof params.uploadId, 'string');
|
||||
assert.strictEqual(typeof params.splitter, 'string');
|
||||
assert.strictEqual(typeof params.storedMetadata, 'object');
|
||||
const splitter = params.splitter;
|
||||
const longMPUIdentifier =
|
||||
`overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
|
||||
const multipartObjectMD = Object.assign({}, params.storedMetadata);
|
||||
multipartObjectMD.completeInProgress = true;
|
||||
metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD,
|
||||
{}, log, err => {
|
||||
if (err) {
|
||||
log.error('error from metadata', { error: err });
|
||||
return cb(err);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
},
|
||||
|
||||
/**
|
||||
* Returns whether a CompleteMPU operation is in progress for this
|
||||
* object, by looking at the `completeInProgress` flag stored in
|
||||
* the overview key
|
||||
*
|
||||
* @param {object} params - params object
|
||||
* @param {string} params.bucketName - bucket name where object should be stored
|
||||
* @param {string} params.objectKey - object key
|
||||
* @param {string} params.uploadId - upload ID
|
||||
* @param {string} params.splitter - splitter for this overview key
|
||||
* @param {object} log - request logger instance
|
||||
* @param {function} cb - callback(err, {bool} completeInProgress)
|
||||
* @return {undefined}
|
||||
*/
|
||||
isCompleteMPUInProgress(params, log, cb) {
|
||||
assert.strictEqual(typeof params, 'object');
|
||||
assert.strictEqual(typeof params.bucketName, 'string');
|
||||
assert.strictEqual(typeof params.objectKey, 'string');
|
||||
assert.strictEqual(typeof params.uploadId, 'string');
|
||||
assert.strictEqual(typeof params.splitter, 'string');
|
||||
|
||||
const mpuBucketName = `${constants.mpuBucketPrefix}${params.bucketName}`;
|
||||
const splitter = params.splitter;
|
||||
const mpuOverviewKey =
|
||||
`overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
|
||||
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log,
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
log.error('error getting the overview object from mpu bucket', {
|
||||
error: err,
|
||||
method: 'services.isCompleteMPUInProgress',
|
||||
params,
|
||||
});
|
||||
return cb(err);
|
||||
}
|
||||
return cb(null, Boolean(res.completeInProgress));
|
||||
});
|
||||
},
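An illustrative use of the two helpers above, following the pattern added to objectPutPart and objectPutCopyPart in this changeset; bucket, key and the surrounding callbacks are placeholders.

// Illustrative only: before deleting data locations overwritten by a part
// re-upload, check whether CompleteMPU has already flagged the overview key,
// and skip the batchDelete when it has (placeholders for bucket/key/cb).
services.isCompleteMPUInProgress({
    bucketName: 'example-bucket',
    objectKey: 'example-key',
    uploadId,
    splitter,
}, log, (err, completeInProgress) => {
    if (err) {
        return cb(err);
    }
    // Keep the old locations only when it is still safe to delete them.
    const oldLocationsToDelete = completeInProgress ? null : oldLocations;
    return cb(null, oldLocationsToDelete);
});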
|
||||
|
||||
/**
|
||||
* Checks whether bucket exists, multipart upload
|
||||
|
|
|
@ -249,7 +249,8 @@ function listMetrics(metricType) {
|
|||
* added/deleted
|
||||
* @param {boolean} [metricObject.isDelete] - (optional) Indicates whether this
|
||||
* is a delete operation
|
||||
* @return {function} - `utapi.pushMetric`
|
||||
* @return {function | undefined} - `utapi.pushMetric` or undefined if the action is
|
||||
* filtered out and not pushed to utapi.
|
||||
*/
|
||||
function pushMetric(action, log, metricObj) {
|
||||
const {
|
||||
|
@ -300,12 +301,22 @@ function pushMetric(action, log, metricObj) {
|
|||
incomingBytes,
|
||||
outgoingBytes: action === 'getObject' ? newByteLength : 0,
|
||||
};
|
||||
|
||||
// Any operation from lifecycle that does not change object count or size is dropped
|
||||
const isLifecycle = _config.lifecycleRoleName
|
||||
&& authInfo && authInfo.arn.endsWith(`:assumed-role/${_config.lifecycleRoleName}/backbeat-lifecycle`);
|
||||
if (isLifecycle && !objectDelta && !sizeDelta) {
|
||||
log.trace('ignoring pushMetric from lifecycle service user', { action, bucket, keys });
|
||||
return undefined;
|
||||
}
|
||||
|
||||
if (keys && keys.length === 1) {
|
||||
[utapiObj.object] = keys;
|
||||
if (versionId) {
|
||||
utapiObj.versionId = versionId;
|
||||
}
|
||||
}
|
||||
|
||||
utapiObj.account = authInfo ? evalAuthInfo(authInfo, canonicalID, action).accountId : canonicalID;
|
||||
utapiObj.user = authInfo ? evalAuthInfo(authInfo, canonicalID, action).userId : undefined;
|
||||
return utapi.pushMetric(utapiObj);
|
||||
|
|
|
@ -412,4 +412,8 @@ aclUtils.getIndividualGrants = function getIndividualGrants(acl, canonicalIDs,
|
|||
return individualGrantInfo;
|
||||
};
|
||||
|
||||
aclUtils.getHeaders = function getHeaders() {
|
||||
return possibleGrantHeaders;
|
||||
};
|
||||
|
||||
module.exports = aclUtils;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "s3",
|
||||
"version": "7.10.8",
|
||||
"version": "7.10.8-6",
|
||||
"description": "S3 connector",
|
||||
"main": "index.js",
|
||||
"engines": {
|
||||
|
@ -20,7 +20,7 @@
|
|||
"homepage": "https://github.com/scality/S3#readme",
|
||||
"dependencies": {
|
||||
"@hapi/joi": "^17.1.0",
|
||||
"arsenal": "git+https://github.com/scality/Arsenal#7.10.31",
|
||||
"arsenal": "git+https://github.com/scality/Arsenal#7.10.31-1",
|
||||
"async": "~2.5.0",
|
||||
"aws-sdk": "2.905.0",
|
||||
"azure-storage": "^2.1.0",
|
||||
|
@ -37,7 +37,7 @@
|
|||
"utapi": "git+https://github.com/scality/utapi#7.10.7",
|
||||
"utf8": "~2.1.1",
|
||||
"uuid": "^3.0.1",
|
||||
"vaultclient": "scality/vaultclient#7.10.8",
|
||||
"vaultclient": "git+https://github.com/scality/vaultclient#7.10.8-1",
|
||||
"werelogs": "scality/werelogs#8.1.0",
|
||||
"xml2js": "~0.4.16"
|
||||
},
|
||||
|
|
|
@ -1,55 +0,0 @@
|
|||
const assert = require('assert');
|
||||
|
||||
const withV4 = require('../../support/withV4');
|
||||
const BucketUtility = require('../../../lib/utility/bucket-util');
|
||||
const { makeTagQuery, updateRequestContexts } =
|
||||
require('../../../../../../lib/api/apiUtils/authorization/tagConditionKeys');
|
||||
const { DummyRequestLogger, TaggingConfigTester, createRequestContext } = require('../../../../../unit/helpers');
|
||||
|
||||
const taggingUtil = new TaggingConfigTester();
|
||||
const log = new DummyRequestLogger();
|
||||
const bucket = 'bucket2testconditionkeys';
|
||||
const object = 'object2testconditionkeys';
|
||||
const objPutTaggingReq = taggingUtil
|
||||
.createObjectTaggingRequest('PUT', bucket, object);
|
||||
const requestContexts = [createRequestContext('objectPutTagging', objPutTaggingReq)];
|
||||
|
||||
describe('Tag condition keys updateRequestContext', () => {
|
||||
withV4(sigCfg => {
|
||||
let bucketUtil;
|
||||
let s3;
|
||||
|
||||
beforeEach(() => {
|
||||
bucketUtil = new BucketUtility('default', sigCfg);
|
||||
s3 = bucketUtil.s3;
|
||||
return s3.createBucket({ Bucket: bucket }).promise()
|
||||
.catch(err => {
|
||||
process.stdout.write(`Error creating bucket: ${err}\n`);
|
||||
throw err;
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(() => bucketUtil.empty(bucket)
|
||||
.then(() => bucketUtil.deleteOne(bucket))
|
||||
.catch(err => {
|
||||
process.stdout.write('Error in afterEach');
|
||||
throw err;
|
||||
}));
|
||||
|
||||
it('should update request contexts with request tags and existing object tags', done => {
|
||||
const tagsToExist = 'oneKey=oneValue&twoKey=twoValue';
|
||||
const params = { Bucket: bucket, Key: object, Tagging: tagsToExist };
|
||||
s3.putObject(params, err => {
|
||||
assert.ifError(err);
|
||||
updateRequestContexts(objPutTaggingReq, requestContexts, 'objectPutTagging', log,
|
||||
(err, newRequestContexts) => {
|
||||
assert.ifError(err);
|
||||
assert(newRequestContexts[0].getNeedTagEval());
|
||||
assert.strictEqual(newRequestContexts[0].getExistingObjTag(), tagsToExist);
|
||||
assert.strictEqual(newRequestContexts[0].getRequestObjTags(), makeTagQuery(taggingUtil.getTags()));
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
|
@ -210,5 +210,39 @@ describe('Complete MPU', () => {
|
|||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('with re-upload of part during CompleteMPU execution', () => {
|
||||
let uploadId;
|
||||
let eTag;
|
||||
|
||||
beforeEach(() => _initiateMpuAndPutOnePart()
|
||||
.then(result => {
|
||||
uploadId = result.uploadId;
|
||||
eTag = result.eTag;
|
||||
})
|
||||
);
|
||||
|
||||
it('should complete the MPU successfully and leave a readable object', done => {
|
||||
async.parallel([
|
||||
doneReUpload => s3.uploadPart({
|
||||
Bucket: bucket,
|
||||
Key: key,
|
||||
PartNumber: 1,
|
||||
UploadId: uploadId,
|
||||
Body: 'foo',
|
||||
}, err => {
|
||||
// in case the CompleteMPU finished earlier,
|
||||
// we may get a NoSuchKey error, so just
|
||||
// ignore it
|
||||
if (err && err.code === 'NoSuchKey') {
|
||||
return doneReUpload();
|
||||
}
|
||||
return doneReUpload(err);
|
||||
}),
|
||||
doneComplete => _completeMpuAndCheckVid(
|
||||
uploadId, eTag, undefined, doneComplete),
|
||||
], done);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@ -577,6 +577,72 @@ describe('Object Part Copy', () => {
|
|||
checkNoError(err);
|
||||
});
|
||||
});
|
||||
|
||||
it('should not corrupt object if overwriting an existing part by copying a part ' +
|
||||
'while the MPU is being completed', () => {
|
||||
// AWS response etag for this completed MPU
|
||||
const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"';
|
||||
process.stdout.write('Putting first part in MPU test');
|
||||
return s3.uploadPartCopy({ Bucket: destBucketName,
|
||||
Key: destObjName,
|
||||
CopySource: `${sourceBucketName}/${sourceObjName}`,
|
||||
PartNumber: 1,
|
||||
UploadId: uploadId,
|
||||
}).promise().then(res => {
|
||||
assert.strictEqual(res.ETag, etag);
|
||||
assert(res.LastModified);
|
||||
}).then(() => {
|
||||
process.stdout.write('Overwriting first part in MPU test and completing MPU ' +
|
||||
'at the same time');
|
||||
return Promise.all([
|
||||
s3.uploadPartCopy({
|
||||
Bucket: destBucketName,
|
||||
Key: destObjName,
|
||||
CopySource: `${sourceBucketName}/${sourceObjName}`,
|
||||
PartNumber: 1,
|
||||
UploadId: uploadId,
|
||||
}).promise().catch(err => {
|
||||
// in case the CompleteMPU finished
|
||||
// earlier, we may get a NoSuchKey error,
|
||||
// so just ignore it and resolve with a
|
||||
// special value, otherwise re-throw the
|
||||
// error
|
||||
if (err && err.code === 'NoSuchKey') {
|
||||
return Promise.resolve(null);
|
||||
}
|
||||
throw err;
|
||||
}),
|
||||
s3.completeMultipartUpload({
|
||||
Bucket: destBucketName,
|
||||
Key: destObjName,
|
||||
UploadId: uploadId,
|
||||
MultipartUpload: {
|
||||
Parts: [
|
||||
{ ETag: etag, PartNumber: 1 },
|
||||
],
|
||||
},
|
||||
}).promise(),
|
||||
]);
|
||||
}).then(([uploadRes, completeRes]) => {
|
||||
// if upload succeeded before CompleteMPU finished
|
||||
if (uploadRes !== null) {
|
||||
assert.strictEqual(uploadRes.ETag, etag);
|
||||
assert(uploadRes.LastModified);
|
||||
}
|
||||
assert.strictEqual(completeRes.Bucket, destBucketName);
|
||||
assert.strictEqual(completeRes.Key, destObjName);
|
||||
assert.strictEqual(completeRes.ETag, finalObjETag);
|
||||
}).then(() => {
|
||||
process.stdout.write('Getting object put by MPU with ' +
|
||||
'overwrite part');
|
||||
return s3.getObject({
|
||||
Bucket: destBucketName,
|
||||
Key: destObjName,
|
||||
}).promise();
|
||||
}).then(res => {
|
||||
assert.strictEqual(res.ETag, finalObjETag);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('should return an error if no such upload initiated',
|
||||
|
|
|
@ -3,18 +3,7 @@ const async = require('async');
|
|||
|
||||
const withV4 = require('../support/withV4');
|
||||
const BucketUtility = require('../../lib/utility/bucket-util');
|
||||
const { maximumAllowedPartCount } = require('../../../../../constants');
|
||||
|
||||
const bucket = 'mpu-test-bucket';
|
||||
const object = 'mpu-test-object';
|
||||
|
||||
const bodySize = 1024 * 1024 * 5;
|
||||
const bodyContent = 'a';
|
||||
const howManyParts = 3;
|
||||
const partNumbers = Array.from(Array(howManyParts).keys());
|
||||
const invalidPartNumbers = [-1, 0, maximumAllowedPartCount + 1];
|
||||
|
||||
let ETags = [];
|
||||
const objectConfigs = require('../support/objectConfigs');
|
||||
|
||||
function checkError(err, statusCode, code) {
|
||||
assert.strictEqual(err.statusCode, statusCode);
|
||||
|
@ -26,128 +15,154 @@ function checkNoError(err) {
|
|||
`Expected success, got error ${JSON.stringify(err)}`);
|
||||
}
|
||||
|
||||
function generateContent(partNumber) {
|
||||
return Buffer.alloc(bodySize + partNumber, bodyContent);
|
||||
function generateContent(size, bodyContent) {
|
||||
return Buffer.alloc(size, bodyContent);
|
||||
}
|
||||
|
||||
describe('Part size tests with object head', () => {
|
||||
withV4(sigCfg => {
|
||||
let bucketUtil;
|
||||
let s3;
|
||||
objectConfigs.forEach(config => {
|
||||
describe(config.signature, () => {
|
||||
let ETags = [];
|
||||
|
||||
function headObject(fields, cb) {
|
||||
s3.headObject(Object.assign({
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
}, fields), cb);
|
||||
}
|
||||
const {
|
||||
bucket,
|
||||
object,
|
||||
bodySize,
|
||||
bodyContent,
|
||||
partNumbers,
|
||||
invalidPartNumbers,
|
||||
} = config;
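The objectConfigs entries consumed here come from ../support/objectConfigs, which is not part of this diff; below is a hypothetical entry with the fields this test destructures and the meta.computeTotalSize helper it calls (illustration only, not the actual support file).

// Hypothetical shape of one ../support/objectConfigs entry -- inferred from
// the destructuring above and from config.meta.computeTotalSize used below.
const exampleConfig = {
    signature: 'for a regular object',
    bucket: 'mpu-test-bucket',
    object: 'mpu-test-object',
    bodySize: 1024 * 1024 * 5,
    bodyContent: 'a',
    partNumbers: Array.from(Array(3).keys()),
    invalidPartNumbers: [-1, 0, 10001], // 10001 standing in for maximumAllowedPartCount + 1
    meta: {
        computeTotalSize: (partNumbers, bodySize) => partNumbers.reduce(
            (total, current) => total + bodySize + current + 1, 0),
    },
};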
|
||||
|
||||
beforeEach(function beforeF(done) {
|
||||
bucketUtil = new BucketUtility('default', sigCfg);
|
||||
s3 = bucketUtil.s3;
|
||||
withV4(sigCfg => { //eslint-disable-line
|
||||
let bucketUtil;
|
||||
let s3;
|
||||
|
||||
async.waterfall([
|
||||
next => s3.createBucket({ Bucket: bucket }, err => next(err)),
|
||||
next => s3.createMultipartUpload({ Bucket: bucket,
|
||||
Key: object }, (err, data) => {
|
||||
checkNoError(err);
|
||||
this.currentTest.UploadId = data.UploadId;
|
||||
return next();
|
||||
}),
|
||||
next => async.mapSeries(partNumbers, (partNumber, callback) => {
|
||||
const uploadPartParams = {
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
PartNumber: partNumber + 1,
|
||||
UploadId: this.currentTest.UploadId,
|
||||
Body: generateContent(partNumber + 1),
|
||||
};
|
||||
beforeEach(function beforeF(done) {
|
||||
bucketUtil = new BucketUtility('default', sigCfg);
|
||||
s3 = bucketUtil.s3;
|
||||
|
||||
return s3.uploadPart(uploadPartParams,
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
async.waterfall([
|
||||
next => s3.createBucket({ Bucket: bucket }, err => next(err)),
|
||||
next => s3.createMultipartUpload({ Bucket: bucket,
|
||||
Key: object }, (err, data) => {
|
||||
checkNoError(err);
|
||||
this.currentTest.UploadId = data.UploadId;
|
||||
return next();
|
||||
}),
|
||||
next => async.mapSeries(partNumbers, (partNumber, callback) => {
|
||||
let allocAmount = bodySize + partNumber + 1;
|
||||
if (config.signature === 'for empty object') {
|
||||
allocAmount = 0;
|
||||
}
|
||||
return callback(null, data.ETag);
|
||||
});
|
||||
}, (err, results) => {
|
||||
checkNoError(err);
|
||||
ETags = results;
|
||||
return next();
|
||||
}),
|
||||
next => {
|
||||
const params = {
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
MultipartUpload: {
|
||||
Parts: partNumbers.map(partNumber => ({
|
||||
ETag: ETags[partNumber],
|
||||
const uploadPartParams = {
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
PartNumber: partNumber + 1,
|
||||
})),
|
||||
UploadId: this.currentTest.UploadId,
|
||||
Body: generateContent(allocAmount, bodyContent),
|
||||
};
|
||||
|
||||
return s3.uploadPart(uploadPartParams,
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
return callback(null, data.ETag);
|
||||
});
|
||||
}, (err, results) => {
|
||||
checkNoError(err);
|
||||
ETags = results;
|
||||
return next();
|
||||
}),
|
||||
next => {
|
||||
const params = {
|
||||
Bucket: bucket,
|
||||
Key: object,
|
||||
MultipartUpload: {
|
||||
Parts: partNumbers.map(partNumber => ({
|
||||
ETag: ETags[partNumber],
|
||||
PartNumber: partNumber + 1,
|
||||
})),
|
||||
},
|
||||
UploadId: this.currentTest.UploadId,
|
||||
};
|
||||
return s3.completeMultipartUpload(params, next);
|
||||
},
|
||||
UploadId: this.currentTest.UploadId,
|
||||
};
|
||||
return s3.completeMultipartUpload(params, next);
|
||||
},
|
||||
], err => {
|
||||
checkNoError(err);
|
||||
done();
|
||||
});
|
||||
});
|
||||
], err => {
|
||||
checkNoError(err);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(done => {
|
||||
async.waterfall([
|
||||
next => s3.deleteObject({ Bucket: bucket, Key: object },
|
||||
err => next(err)),
|
||||
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
|
||||
], done);
|
||||
});
|
||||
afterEach(done => {
|
||||
async.waterfall([
|
||||
next => s3.deleteObject({ Bucket: bucket, Key: object },
|
||||
err => next(err)),
|
||||
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
|
||||
], done);
|
||||
});
|
||||
|
||||
it('should return the total size of the object ' +
|
||||
'when --part-number is not used', done => {
|
||||
const totalSize = partNumbers.reduce((total, current) =>
|
||||
total + (bodySize + current + 1), 0);
|
||||
headObject({}, (err, data) => {
|
||||
checkNoError(err);
|
||||
assert.equal(totalSize, data.ContentLength);
|
||||
done();
|
||||
});
|
||||
});
|
||||
it('should return the total size of the object ' +
|
||||
'when --part-number is not used', done => {
|
||||
const totalSize = config.meta.computeTotalSize(partNumbers, bodySize);
|
||||
|
||||
partNumbers.forEach(part => {
|
||||
it(`should return the size of part ${part + 1} ` +
|
||||
`when --part-number is set to ${part + 1}`, done => {
|
||||
const partNumber = Number.parseInt(part, 0) + 1;
|
||||
const partSize = bodySize + partNumber;
|
||||
headObject({ PartNumber: partNumber }, (err, data) => {
|
||||
checkNoError(err);
|
||||
assert.equal(partSize, data.ContentLength);
|
||||
done();
|
||||
s3.headObject({ Bucket: bucket, Key: object }, (err, data) => {
|
||||
checkNoError(err);
|
||||
|
||||
assert.equal(totalSize, data.ContentLength);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
partNumbers.forEach(part => {
|
||||
it(`should return the size of part ${part + 1} ` +
|
||||
`when --part-number is set to ${part + 1}`, done => {
|
||||
const partNumber = Number.parseInt(part, 0) + 1;
|
||||
const partSize = bodySize + partNumber;
|
||||
|
||||
s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumber }, (err, data) => {
|
||||
checkNoError(err);
|
||||
if (data.ContentLength === 0) {
    return done();
}
|
||||
assert.equal(partSize, data.ContentLength);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
invalidPartNumbers.forEach(part => {
|
||||
it(`should return an error when --part-number is set to ${part}`,
|
||||
done => {
|
||||
s3.headObject({ Bucket: bucket, Key: object, PartNumber: part }, (err, data) => {
|
||||
checkError(err, 400, 'BadRequest');
|
||||
assert.strictEqual(data, null);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('when incorrect --part-number is used', done => {
|
||||
bucketUtil = new BucketUtility('default', sigCfg);
|
||||
s3 = bucketUtil.s3;
|
||||
s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumbers.length + 1 },
|
||||
(err, data) => {
|
||||
if (config.meta.objectIsEmpty) {
|
||||
// returns metadata for the only empty part
|
||||
checkNoError(err);
|
||||
assert.strictEqual(data.ContentLength, 0);
|
||||
done();
|
||||
} else {
|
||||
// returns a 416 error
|
||||
// the error response does not contain the actual
// statusCode; instead it has '416'
|
||||
checkError(err, 416, 416);
|
||||
assert.strictEqual(data, null);
|
||||
done();
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
invalidPartNumbers.forEach(part => {
|
||||
it(`should return an error when --part-number is set to ${part}`,
|
||||
done => {
|
||||
headObject({ PartNumber: part }, (err, data) => {
|
||||
checkError(err, 400, 'BadRequest');
|
||||
assert.strictEqual(data, null);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('should return an error when incorrect --part-number is used',
|
||||
done => {
|
||||
headObject({ PartNumber: partNumbers.length + 1 },
|
||||
(err, data) => {
|
||||
// the error response does not contain the actual
|
||||
// statusCode instead it has '416'
|
||||
checkError(err, 416, 416);
|
||||
assert.strictEqual(data, null);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
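For context (editor's sketch, not part of this diff): headObject with a PartNumber parameter reports only that part's size in ContentLength, which is what the assertions above rely on; the bucket and key names below are placeholders.

const AWS = require('aws-sdk');

const s3 = new AWS.S3();
s3.headObject({
    Bucket: 'mpu-test-bucket',
    Key: 'mpu-test-object',
    PartNumber: 2, // ask about part 2 only
}, (err, data) => {
    if (err) {
        throw err;
    }
    // ContentLength is the size of part 2, not of the assembled object
    console.log(data.ContentLength);
});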
|
||||
|
|
|
@ -0,0 +1,40 @@
const { maximumAllowedPartCount } = require('../../../../../constants');

const canonicalObjectConfig = {
    bucket: 'mpu-test-bucket-canonical-object',
    object: 'mpu-test-object-canonical',
    bodySize: 1024 * 1024 * 5,
    bodyContent: 'a',
    howManyParts: 3,
    partNumbers: Array.from(Array(3).keys()), // 3 corresponds to howManyParts
    invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
    signature: 'for canonical object',
    meta: {
        computeTotalSize: (partNumbers, bodySize) => partNumbers.reduce((total, current) =>
            total + bodySize + current + 1
        , 0),
        objectIsEmpty: false,
    },
};

const emptyObjectConfig = {
    bucket: 'mpu-test-bucket-empty-object',
    object: 'mpu-test-object-empty',
    bodySize: 0,
    bodyContent: null,
    howManyParts: 1,
    partNumbers: Array.from(Array(1).keys()), // 1 corresponds to howManyParts
    invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
    signature: 'for empty object',
    meta: {
        computeTotalSize: () => 0,
        objectIsEmpty: true,
    },
};

const objectConfigs = [
    canonicalObjectConfig,
    emptyObjectConfig,
];

module.exports = objectConfigs;
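A quick sanity check of computeTotalSize for the canonical config (5 MiB body, 3 parts), kept here as an editorial illustration:

// parts are bodySize + 1, bodySize + 2 and bodySize + 3 bytes long,
// so the assembled object is 3 * bodySize + 6 bytes
const bodySize = 1024 * 1024 * 5;
const partNumbers = [0, 1, 2];
const total = partNumbers.reduce((acc, current) => acc + bodySize + current + 1, 0);
// total === 3 * bodySize + 6 === 15728646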
@ -27,6 +27,7 @@ const testData = 'testkey data';
|
|||
const testDataMd5 = crypto.createHash('md5')
|
||||
.update(testData, 'utf-8')
|
||||
.digest('hex');
|
||||
const emptyContentsMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
|
||||
const testMd = {
|
||||
'md-model-version': 2,
|
||||
'owner-display-name': 'Bart',
|
||||
|
@ -60,6 +61,17 @@ const testMd = {
|
|||
},
|
||||
};
|
||||
|
||||
function checkObjectData(s3, objectKey, dataValue, done) {
|
||||
s3.getObject({
|
||||
Bucket: TEST_BUCKET,
|
||||
Key: objectKey,
|
||||
}, (err, data) => {
|
||||
assert.ifError(err);
|
||||
assert.strictEqual(data.Body.toString(), dataValue);
|
||||
done();
|
||||
});
|
||||
}
|
||||
|
||||
/** makeBackbeatRequest - utility function to generate a request going
|
||||
* through backbeat route
|
||||
* @param {object} params - params for making request
|
||||
|
@ -416,8 +428,8 @@ describeSkipIfAWS('backbeat routes', () => {
|
|||
});
|
||||
});
|
||||
|
||||
it('should remove old object data locations if version is overwritten',
|
||||
done => {
|
||||
it('should remove old object data locations if version is overwritten ' +
|
||||
'with same contents', done => {
|
||||
let oldLocation;
|
||||
const testKeyOldData = `${testKey}-old-data`;
|
||||
async.waterfall([next => {
|
||||
|
@ -491,14 +503,8 @@ describeSkipIfAWS('backbeat routes', () => {
|
|||
}, (response, next) => {
|
||||
assert.strictEqual(response.statusCode, 200);
|
||||
// give some time for the async deletes to complete
|
||||
setTimeout(() => s3.getObject({
|
||||
Bucket: TEST_BUCKET,
|
||||
Key: testKey,
|
||||
}, (err, data) => {
|
||||
assert.ifError(err);
|
||||
assert.strictEqual(data.Body.toString(), testData);
|
||||
next();
|
||||
}), 1000);
|
||||
setTimeout(() => checkObjectData(s3, testKey, testData, next),
|
||||
1000);
|
||||
}, next => {
|
||||
// check that the object copy referencing the old data
|
||||
// locations is unreadable, confirming that the old
|
||||
|
@ -516,6 +522,89 @@ describeSkipIfAWS('backbeat routes', () => {
|
|||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('should remove old object data locations if version is overwritten ' +
|
||||
'with empty contents', done => {
|
||||
let oldLocation;
|
||||
const testKeyOldData = `${testKey}-old-data`;
|
||||
async.waterfall([next => {
|
||||
// put object's data locations
|
||||
makeBackbeatRequest({
|
||||
method: 'PUT', bucket: TEST_BUCKET,
|
||||
objectKey: testKey,
|
||||
resourceType: 'data',
|
||||
headers: {
|
||||
'content-length': testData.length,
|
||||
'content-md5': testDataMd5,
|
||||
'x-scal-canonical-id': testArn,
|
||||
},
|
||||
authCredentials: backbeatAuthCredentials,
|
||||
requestBody: testData }, next);
|
||||
}, (response, next) => {
|
||||
assert.strictEqual(response.statusCode, 200);
|
||||
// put object metadata
|
||||
const newMd = Object.assign({}, testMd);
|
||||
newMd.location = JSON.parse(response.body);
|
||||
oldLocation = newMd.location;
|
||||
makeBackbeatRequest({
|
||||
method: 'PUT', bucket: TEST_BUCKET,
|
||||
objectKey: testKey,
|
||||
resourceType: 'metadata',
|
||||
authCredentials: backbeatAuthCredentials,
|
||||
requestBody: JSON.stringify(newMd),
|
||||
}, next);
|
||||
}, (response, next) => {
|
||||
assert.strictEqual(response.statusCode, 200);
|
||||
// put another object whose metadata references the
// same data locations; we will attempt to retrieve
// this object at the end of the test to confirm that
// its locations have been deleted
|
||||
const oldDataMd = Object.assign({}, testMd);
|
||||
oldDataMd.location = oldLocation;
|
||||
makeBackbeatRequest({
|
||||
method: 'PUT', bucket: TEST_BUCKET,
|
||||
objectKey: testKeyOldData,
|
||||
resourceType: 'metadata',
|
||||
authCredentials: backbeatAuthCredentials,
|
||||
requestBody: JSON.stringify(oldDataMd),
|
||||
}, next);
|
||||
}, (response, next) => {
|
||||
assert.strictEqual(response.statusCode, 200);
|
||||
// overwrite the original object version with an empty location
|
||||
const newMd = Object.assign({}, testMd);
|
||||
newMd['content-length'] = 0;
|
||||
newMd['content-md5'] = emptyContentsMd5;
|
||||
newMd.location = null;
|
||||
makeBackbeatRequest({
|
||||
method: 'PUT', bucket: TEST_BUCKET,
|
||||
objectKey: testKey,
|
||||
resourceType: 'metadata',
|
||||
authCredentials: backbeatAuthCredentials,
|
||||
requestBody: JSON.stringify(newMd),
|
||||
}, next);
|
||||
}, (response, next) => {
|
||||
assert.strictEqual(response.statusCode, 200);
|
||||
// give some time for the async deletes to complete
|
||||
setTimeout(() => checkObjectData(s3, testKey, '', next),
|
||||
1000);
|
||||
}, next => {
|
||||
// check that the object copy referencing the old data
|
||||
// locations is unreadable, confirming that the old
|
||||
// data locations have been deleted
|
||||
s3.getObject({
|
||||
Bucket: TEST_BUCKET,
|
||||
Key: testKeyOldData,
|
||||
}, err => {
|
||||
assert(err, 'expected error to get object with old data ' +
|
||||
'locations, got success');
|
||||
next();
|
||||
});
|
||||
}], err => {
|
||||
assert.ifError(err);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('should not remove data locations on replayed metadata PUT',
|
||||
done => {
|
||||
let serializedNewMd;
|
||||
|
|
|
@ -3,14 +3,15 @@ const assert = require('assert');
|
|||
const BucketInfo = require('arsenal').models.BucketInfo;
|
||||
const getReplicationInfo =
|
||||
require('../../../../lib/api/apiUtils/object/getReplicationInfo');
|
||||
const { makeAuthInfo } = require('../../helpers');
|
||||
|
||||
function _getObjectReplicationInfo(replicationConfig) {
|
||||
function _getObjectReplicationInfo(replicationConfig, authInfo, isDeleteMarker) {
|
||||
const bucketInfo = new BucketInfo(
|
||||
'testbucket', 'someCanonicalId', 'accountDisplayName',
|
||||
new Date().toJSON(),
|
||||
null, null, null, null, null, null, null, null, null,
|
||||
replicationConfig);
|
||||
return getReplicationInfo('fookey', bucketInfo, true, 123, null, null);
|
||||
return getReplicationInfo('fookey', bucketInfo, true, 123, null, null, authInfo, isDeleteMarker);
|
||||
}
|
||||
|
||||
describe('getReplicationInfo helper', () => {
|
||||
|
@ -40,6 +41,65 @@ describe('getReplicationInfo helper', () => {
|
|||
});
|
||||
});
|
||||
|
||||
it('should get replication info when action coming from a non-lifecycle session', () => {
|
||||
const replicationConfig = {
|
||||
role: 'arn:aws:iam::root:role/s3-replication-role',
|
||||
rules: [{
|
||||
prefix: '',
|
||||
enabled: true,
|
||||
storageClass: 'awsbackend',
|
||||
}],
|
||||
destination: 'tosomewhere',
|
||||
};
|
||||
|
||||
const authInfo = makeAuthInfo('accessKey1', null, 'another-session');
|
||||
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
|
||||
|
||||
assert.deepStrictEqual(replicationInfo, {
|
||||
status: 'PENDING',
|
||||
backends: [{
|
||||
site: 'awsbackend',
|
||||
status: 'PENDING',
|
||||
dataStoreVersionId: '',
|
||||
}],
|
||||
content: ['METADATA'],
|
||||
destination: 'tosomewhere',
|
||||
storageClass: 'awsbackend',
|
||||
role: 'arn:aws:iam::root:role/s3-replication-role',
|
||||
storageType: 'aws_s3',
|
||||
});
|
||||
});
|
||||
|
||||
it('should get replication info when action coming from a lifecycle session ' +
|
||||
'but action is not delete marker', () => {
|
||||
const replicationConfig = {
|
||||
role: 'arn:aws:iam::root:role/s3-replication-role',
|
||||
rules: [{
|
||||
prefix: '',
|
||||
enabled: true,
|
||||
storageClass: 'awsbackend',
|
||||
}],
|
||||
destination: 'tosomewhere',
|
||||
};
|
||||
|
||||
const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
|
||||
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, false);
|
||||
|
||||
assert.deepStrictEqual(replicationInfo, {
|
||||
status: 'PENDING',
|
||||
backends: [{
|
||||
site: 'awsbackend',
|
||||
status: 'PENDING',
|
||||
dataStoreVersionId: '',
|
||||
}],
|
||||
content: ['METADATA'],
|
||||
destination: 'tosomewhere',
|
||||
storageClass: 'awsbackend',
|
||||
role: 'arn:aws:iam::root:role/s3-replication-role',
|
||||
storageType: 'aws_s3',
|
||||
});
|
||||
});
|
||||
|
||||
it('should not get replication info when rules are disabled', () => {
|
||||
const replicationConfig = {
|
||||
role: 'arn:aws:iam::root:role/s3-replication-role',
|
||||
|
@ -53,4 +113,21 @@ describe('getReplicationInfo helper', () => {
|
|||
const replicationInfo = _getObjectReplicationInfo(replicationConfig);
|
||||
assert.deepStrictEqual(replicationInfo, undefined);
|
||||
});
|
||||
|
||||
it('should not get replication info when action coming from a lifecycle session', () => {
|
||||
const replicationConfig = {
|
||||
role: 'arn:aws:iam::root:role/s3-replication-role',
|
||||
rules: [{
|
||||
prefix: '',
|
||||
enabled: true,
|
||||
storageClass: 'awsbackend',
|
||||
}],
|
||||
destination: 'tosomewhere',
|
||||
};
|
||||
|
||||
const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
|
||||
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
|
||||
|
||||
assert.deepStrictEqual(replicationInfo, undefined);
|
||||
});
|
||||
});
|
||||
|
|
|
@ -38,4 +38,16 @@ describe('Check if location keys have changed between object locations', () => {
|
|||
const curr = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
|
||||
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
|
||||
});
|
||||
|
||||
it('should return true if curr location is null', () => {
|
||||
const prev = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
|
||||
const curr = null;
|
||||
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
|
||||
});
|
||||
|
||||
it('should return true if both prev and curr locations are null', () => {
|
||||
const prev = null;
|
||||
const curr = null;
|
||||
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
|
||||
});
|
||||
});
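A rough sketch of the contract the two new cases pin down (inferred from the tests, not the actual helper in the repository): a missing current location always counts as changed.

function locationKeysHaveChangedSketch(prev, curr) {
    // no current location (e.g. the version was overwritten with empty
    // contents) is treated as a change, whatever prev was
    if (!prev || !curr) {
        return true;
    }
    const prevKeys = prev.map(loc => loc.key);
    const currKeys = curr.map(loc => loc.key);
    return prevKeys.length !== currKeys.length
        || currKeys.some((key, i) => key !== prevKeys[i]);
}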
|
||||
|
|
|
@ -6,8 +6,8 @@ const { DummyRequestLogger } = require('../../helpers');
|
|||
const {
|
||||
calculateRetainUntilDate,
|
||||
validateHeaders,
|
||||
validateObjectLockUpdate,
|
||||
compareObjectLockInformation,
|
||||
ObjectLockInfo,
|
||||
} = require('../../../../lib/api/apiUtils/object/objectLockHelpers');
|
||||
|
||||
const mockName = 'testbucket';
|
||||
|
@ -179,113 +179,6 @@ describe('objectLockHelpers: calculateRetainUntilDate', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('objectLockHelpers: validateObjectLockUpdate', () => {
|
||||
it('should allow GOVERNANCE => COMPLIANCE if bypassGovernanceRetention is true', () => {
|
||||
const objMD = {
|
||||
retentionMode: 'GOVERNANCE',
|
||||
retentionDate: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const retentionInfo = {
|
||||
mode: 'COMPLIANCE',
|
||||
date: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const error = validateObjectLockUpdate(objMD, retentionInfo, true);
|
||||
assert.strictEqual(error, null);
|
||||
});
|
||||
|
||||
it('should disallow GOVERNANCE => COMPLIANCE if bypassGovernanceRetention is false', () => {
|
||||
const objMD = {
|
||||
retentionMode: 'GOVERNANCE',
|
||||
retentionDate: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const retentionInfo = {
|
||||
mode: 'COMPLIANCE',
|
||||
date: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const error = validateObjectLockUpdate(objMD, retentionInfo, false);
|
||||
assert.deepStrictEqual(error, errors.AccessDenied);
|
||||
});
|
||||
|
||||
it('should disallow COMPLIANCE => GOVERNANCE if retention is not expired', () => {
|
||||
const objMD = {
|
||||
retentionMode: 'COMPLIANCE',
|
||||
retentionDate: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const retentionInfo = {
|
||||
mode: 'GOVERNANCE',
|
||||
date: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const error = validateObjectLockUpdate(objMD, retentionInfo);
|
||||
assert.deepStrictEqual(error, errors.AccessDenied);
|
||||
});
|
||||
|
||||
it('should allow COMPLIANCE => GOVERNANCE if retention is expired', () => {
|
||||
const objMD = {
|
||||
retentionMode: 'COMPLIANCE',
|
||||
retentionDate: moment().subtract(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const retentionInfo = {
|
||||
mode: 'GOVERNANCE',
|
||||
date: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const error = validateObjectLockUpdate(objMD, retentionInfo);
|
||||
assert.strictEqual(error, null);
|
||||
});
|
||||
|
||||
it('should allow extending retention period if in COMPLIANCE', () => {
|
||||
const objMD = {
|
||||
retentionMode: 'COMPLIANCE',
|
||||
retentionDate: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const retentionInfo = {
|
||||
mode: 'COMPLIANCE',
|
||||
date: moment().add(2, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const error = validateObjectLockUpdate(objMD, retentionInfo);
|
||||
assert.strictEqual(error, null);
|
||||
});
|
||||
|
||||
it('should disallow shortening retention period if in COMPLIANCE', () => {
|
||||
const objMD = {
|
||||
retentionMode: 'COMPLIANCE',
|
||||
retentionDate: moment().add(2, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const retentionInfo = {
|
||||
mode: 'COMPLIANCE',
|
||||
date: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const error = validateObjectLockUpdate(objMD, retentionInfo);
|
||||
assert.deepStrictEqual(error, errors.AccessDenied);
|
||||
});
|
||||
|
||||
it('should allow shortening retention period if in GOVERNANCE', () => {
|
||||
const objMD = {
|
||||
retentionMode: 'GOVERNANCE',
|
||||
retentionDate: moment().add(2, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const retentionInfo = {
|
||||
mode: 'GOVERNANCE',
|
||||
date: moment().add(1, 'days').toISOString(),
|
||||
};
|
||||
|
||||
const error = validateObjectLockUpdate(objMD, retentionInfo, true);
|
||||
assert.strictEqual(error, null);
|
||||
});
|
||||
});
|
||||
|
||||
describe('objectLockHelpers: compareObjectLockInformation', () => {
|
||||
const mockDate = new Date();
|
||||
let origNow = null;
|
||||
|
@ -375,3 +268,368 @@ describe('objectLockHelpers: compareObjectLockInformation', () => {
|
|||
assert.deepStrictEqual(res, { legalHold: true });
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
const pastDate = moment().subtract(1, 'days');
|
||||
const futureDate = moment().add(100, 'days');
|
||||
|
||||
const isLockedTestCases = [
|
||||
{
|
||||
desc: 'no mode and no date',
|
||||
policy: {},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: 'mode and no date',
|
||||
policy: {
|
||||
mode: 'GOVERNANCE',
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: 'mode and past date',
|
||||
policy: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: pastDate.toISOString(),
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
desc: 'mode and future date',
|
||||
policy: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
];
|
||||
|
||||
const isExpiredTestCases = [
|
||||
{
|
||||
desc: 'should return true, no date is the same as expired',
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: 'should return true, past date.',
|
||||
date: pastDate.toISOString(),
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
desc: 'should return false, future date.',
|
||||
date: futureDate.toISOString(),
|
||||
expected: false,
|
||||
},
|
||||
];
|
||||
|
||||
const policyChangeTestCases = [
|
||||
{
|
||||
desc: 'enable governance policy',
|
||||
from: {},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'modifying expired governance policy',
|
||||
from: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: pastDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'extending governance policy',
|
||||
from: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.add(1, 'days').toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'shortening governance policy',
|
||||
from: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.subtract(1, 'days').toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'extending governance policy using same date',
|
||||
from: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'removing governance policy',
|
||||
from: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {},
|
||||
allowed: false,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'changing governance policy to compliance',
|
||||
from: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'enable compliance policy',
|
||||
from: {},
|
||||
to: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'modifying expired compliance policy',
|
||||
from: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: pastDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'extending compliance policy',
|
||||
from: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.add(1, 'days').toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'shortening compliance policy',
|
||||
from: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.subtract(1, 'days').toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: false,
|
||||
},
|
||||
{
|
||||
desc: 'extending compliance policy with the same date',
|
||||
from: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'removing compliance policy',
|
||||
from: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {},
|
||||
allowed: false,
|
||||
allowedWithBypass: false,
|
||||
},
|
||||
{
|
||||
desc: 'changing compliance to governance policy',
|
||||
from: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: false,
|
||||
},
|
||||
{
|
||||
desc: 'invalid starting mode',
|
||||
from: {
|
||||
mode: 'IM_AN_INVALID_MODE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: false,
|
||||
},
|
||||
{
|
||||
desc: 'date with no mode',
|
||||
from: {
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
to: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
];
|
||||
|
||||
const canModifyObjectTestCases = [
|
||||
{
|
||||
desc: 'No object lock config',
|
||||
policy: {},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'active governance mode',
|
||||
policy: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'expired governance mode',
|
||||
policy: {
|
||||
mode: 'GOVERNANCE',
|
||||
date: pastDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'active compliance mode',
|
||||
policy: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: false,
|
||||
},
|
||||
{
|
||||
desc: 'expired compliance mode',
|
||||
policy: {
|
||||
mode: 'COMPLIANCE',
|
||||
date: pastDate.toISOString(),
|
||||
},
|
||||
allowed: true,
|
||||
allowedWithBypass: true,
|
||||
},
|
||||
{
|
||||
desc: 'invalid mode',
|
||||
policy: {
|
||||
mode: 'IM_AN_INVALID_MODE',
|
||||
date: futureDate.toISOString(),
|
||||
},
|
||||
allowed: false,
|
||||
allowedWithBypass: false,
|
||||
},
|
||||
];
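The test-case tables above are the full specification of the intended behaviour. As a sketch inferred from the policy-change cases (not the ObjectLockInfo implementation itself): an absent or expired lock can always be changed, a live GOVERNANCE lock only allows extensions unless bypass is set, and a live COMPLIANCE lock only allows extensions regardless of bypass.

// Sketch only: assumes ISO date strings as in the cases above.
const isExpiredSketch = date => !date || new Date(date) <= new Date();

function canModifyPolicySketch(current, proposed, bypass = false) {
    if (!current.mode || isExpiredSketch(current.date)) {
        return true; // nothing is locked, any change goes through
    }
    const isExtension = proposed.mode === current.mode
        && !!proposed.date
        && new Date(proposed.date) >= new Date(current.date);
    if (current.mode === 'GOVERNANCE') {
        return isExtension || bypass;
    }
    // COMPLIANCE (or any other locked mode): only an extension is allowed
    return isExtension;
}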
|
||||
|
||||
describe('objectLockHelpers: ObjectLockInfo', () => {
|
||||
['GOVERNANCE', 'COMPLIANCE'].forEach(mode => {
|
||||
it(`should return ${mode === 'GOVERNANCE'} for isGovernance`, () => {
|
||||
const info = new ObjectLockInfo({
|
||||
mode,
|
||||
});
|
||||
assert.strictEqual(info.isGovernanceMode(), mode === 'GOVERNANCE');
|
||||
});
|
||||
|
||||
it(`should return ${mode === 'COMPLIANCE'} for isCompliance`, () => {
|
||||
const info = new ObjectLockInfo({
|
||||
mode,
|
||||
});
|
||||
assert.strictEqual(info.isComplianceMode(), mode === 'COMPLIANCE');
|
||||
});
|
||||
});
|
||||
|
||||
describe('isExpired: ', () => isExpiredTestCases.forEach(testCase => {
|
||||
const objLockInfo = new ObjectLockInfo({ date: testCase.date });
|
||||
it(testCase.desc, () => assert.strictEqual(objLockInfo.isExpired(), testCase.expected));
|
||||
}));
|
||||
|
||||
describe('isLocked: ', () => isLockedTestCases.forEach(testCase => {
|
||||
describe(`${testCase.desc}`, () => {
|
||||
it(`should show policy as ${testCase.expected ? '' : 'not'} locked without legal hold`, () => {
|
||||
const objLockInfo = new ObjectLockInfo(testCase.policy);
|
||||
assert.strictEqual(objLockInfo.isLocked(), testCase.expected);
|
||||
});
|
||||
|
||||
// legal hold should show as locked regardless of policy
|
||||
it('should show policy as locked with legal hold', () => {
|
||||
const policy = Object.assign({}, testCase.policy, { legalHold: true });
|
||||
const objLockInfo = new ObjectLockInfo(policy);
|
||||
assert.strictEqual(objLockInfo.isLocked(), true);
|
||||
});
|
||||
});
|
||||
}));
|
||||
|
||||
describe('canModifyPolicy: ', () => policyChangeTestCases.forEach(testCase => {
|
||||
describe(testCase.desc, () => {
|
||||
const objLockInfo = new ObjectLockInfo(testCase.from);
|
||||
it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying the policy without bypass`,
|
||||
() => assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to), testCase.allowed));
|
||||
|
||||
it(`should ${testCase.allowedWithBypass ? 'allow' : 'deny'} modifying the policy with bypass`,
|
||||
() => assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to, true), testCase.allowedWithBypass));
|
||||
});
|
||||
}));
|
||||
|
||||
describe('canModifyObject: ', () => canModifyObjectTestCases.forEach(testCase => {
|
||||
describe(testCase.desc, () => {
|
||||
const objLockInfo = new ObjectLockInfo(testCase.policy);
|
||||
it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying object without bypass`,
|
||||
() => assert.strictEqual(objLockInfo.canModifyObject(), testCase.allowed));
|
||||
|
||||
it(`should ${testCase.allowedWithBypass ? 'allow' : 'deny'} modifying object with bypass`,
|
||||
() => assert.strictEqual(objLockInfo.canModifyObject(true), testCase.allowedWithBypass));
|
||||
});
|
||||
}));
|
||||
});
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
const assert = require('assert');

const { isLifecycleSession } =
    require('../../../../lib/api/apiUtils/authorization/permissionChecks.js');

const tests = [
    {
        arn: 'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle',
        description: 'a role assumed by lifecycle service',
        expectedResult: true,
    },
    {
        arn: undefined,
        description: 'undefined',
        expectedResult: false,
    },
    {
        arn: '',
        description: 'empty',
        expectedResult: false,
    },
    {
        arn: 'arn:aws:iam::257038443293:user/bart',
        description: 'a user',
        expectedResult: false,
    },
    {
        arn: 'arn:aws:sts::257038443293:assumed-role/rolename/other-service',
        description: 'a role assumed by another service',
        expectedResult: false,
    },
];

describe('authInfoHelper', () => {
    tests.forEach(t => {
        it(`should return ${t.expectedResult} if arn is ${t.description}`, () => {
            const result = isLifecycleSession(t.arn);
            assert.equal(result, t.expectedResult);
        });
    });
});
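For reference, a sketch of the check these cases describe (an assumed shape, not the code in permissionChecks.js): only an STS assumed-role ARN whose session name is 'backbeat-lifecycle' qualifies.

function isLifecycleSessionSketch(arn) {
    if (!arn) {
        return false;
    }
    const [, , service, , , resource] = arn.split(':');
    if (service !== 'sts' || !resource) {
        return false;
    }
    const [resourceType, , sessionName] = resource.split('/');
    return resourceType === 'assumed-role' && sessionName === 'backbeat-lifecycle';
}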
|
|
@ -8,9 +8,10 @@ const {
|
|||
TaggingConfigTester,
|
||||
createRequestContext,
|
||||
} = require('../../helpers');
|
||||
const { tagConditionKeyAuth, updateRequestContexts, makeTagQuery } =
|
||||
const { tagConditionKeyAuth, updateRequestContextsWithTags, makeTagQuery } =
|
||||
require('../../../../lib/api/apiUtils/authorization/tagConditionKeys');
|
||||
const { bucketPut } = require('../../../../lib/api/bucketPut');
|
||||
const objectPut = require('../../../../lib/api/objectPut');
|
||||
|
||||
const log = new DummyRequestLogger();
|
||||
const bucketName = 'tagconditionkeybuckettester';
|
||||
|
@ -39,27 +40,41 @@ const objectPutReq = new DummyRequest({
|
|||
calculatedHash: 'vnR+tLdVF79rPPfF+7YvOg==',
|
||||
}, postBody);
|
||||
|
||||
const requestContexts = [
|
||||
const objectPutRequestContexts = [
|
||||
createRequestContext('objectPut', objectPutReq),
|
||||
];
|
||||
|
||||
const objectGetReq = {
|
||||
bucketName,
|
||||
headers: {
|
||||
host: `${bucketName}.s3.amazonaws.com`,
|
||||
},
|
||||
objectKey,
|
||||
url: `/${bucketName}/${objectKey}`,
|
||||
query: {},
|
||||
};
|
||||
const objectGetRequestContexts = [
|
||||
createRequestContext('objectGet', objectGetReq),
|
||||
createRequestContext('objectGetTagging', objectGetReq),
|
||||
];
|
||||
|
||||
describe('tagConditionKeyAuth', () => {
|
||||
it('should return empty if no previous auth results', done => {
|
||||
tagConditionKeyAuth([], objectPutReq, requestContexts, 'bucketPut', log, err => {
|
||||
tagConditionKeyAuth([], objectPutReq, objectPutRequestContexts, 'bucketPut', log, err => {
|
||||
assert.ifError(err);
|
||||
done();
|
||||
});
|
||||
});
|
||||
it('should return empty if auth results do not contain checkTagConditions key', done => {
|
||||
const authResults = [{ isAllowed: true }, { isAllowed: true }];
|
||||
tagConditionKeyAuth(authResults, objectPutReq, requestContexts, 'bucketPut', log, err => {
|
||||
tagConditionKeyAuth(authResults, objectPutReq, objectPutRequestContexts, 'bucketPut', log, err => {
|
||||
assert.ifError(err);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('updateRequestContexts', () => {
|
||||
describe('updateRequestContextsWithTags', () => {
|
||||
before(done => {
|
||||
cleanup();
|
||||
bucketPut(authInfo, bucketPutReq, log, done);
|
||||
|
@ -68,11 +83,30 @@ describe('updateRequestContexts', () => {
|
|||
after(cleanup);
|
||||
|
||||
it('should update request context with request object tags', done => {
|
||||
updateRequestContexts(objectPutReq, requestContexts, 'objectPut', log, (err, newRequestContexts) => {
|
||||
updateRequestContextsWithTags(objectPutReq, objectPutRequestContexts, 'objectPut', log, err => {
|
||||
assert.ifError(err);
|
||||
assert(newRequestContexts[0].getNeedTagEval());
|
||||
assert.strictEqual(newRequestContexts[0].getRequestObjTags(), makeTagQuery(taggingUtil.getTags()));
|
||||
assert(objectPutRequestContexts[0].getNeedTagEval());
|
||||
assert.strictEqual(objectPutRequestContexts[0].getRequestObjTags(),
|
||||
makeTagQuery(taggingUtil.getTags()));
|
||||
assert.strictEqual(objectPutRequestContexts[0].getExistingObjTag(), null);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('should update multiple request contexts with existing object tags', done => {
|
||||
objectPut(authInfo, objectPutReq, 'foobar', log, err => {
|
||||
assert.ifError(err);
|
||||
updateRequestContextsWithTags(objectGetReq, objectGetRequestContexts, 'objectGet', log,
|
||||
err => {
|
||||
assert.ifError(err);
|
||||
for (const requestContext of objectGetRequestContexts) {
|
||||
assert(requestContext.getNeedTagEval());
|
||||
assert.strictEqual(requestContext.getExistingObjTag(),
|
||||
makeTagQuery(taggingUtil.getTags()));
|
||||
assert.strictEqual(requestContext.getRequestObjTags(), null);
|
||||
}
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
const assert = require('assert');
|
||||
const { errors } = require('arsenal');
|
||||
const sinon = require('sinon');
|
||||
|
||||
const { checkLocationConstraint } = require('../../../lib/api/bucketPut');
|
||||
const { checkLocationConstraint, _handleAuthResults } = require('../../../lib/api/bucketPut');
|
||||
const { bucketPut } = require('../../../lib/api/bucketPut');
|
||||
const { config } = require('../../../lib/Config');
|
||||
const constants = require('../../../constants');
|
||||
|
@ -382,4 +383,77 @@ describe('bucketPut API', () => {
|
|||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('_handleAuthResults handles', () => {
|
||||
const constraint = 'location-constraint';
|
||||
[
|
||||
{
|
||||
description: 'errors',
|
||||
error: 'our error',
|
||||
results: undefined,
|
||||
calledWith: ['our error'],
|
||||
},
|
||||
{
|
||||
description: 'single allowed auth',
|
||||
error: undefined,
|
||||
results: [{ isAllowed: true }],
|
||||
calledWith: [null, constraint],
|
||||
},
|
||||
{
|
||||
description: 'many allowed auth',
|
||||
error: undefined,
|
||||
results: [
|
||||
{ isAllowed: true },
|
||||
{ isAllowed: true },
|
||||
{ isAllowed: true },
|
||||
{ isAllowed: true },
|
||||
],
|
||||
calledWith: [null, constraint],
|
||||
},
|
||||
{
|
||||
description: 'array of arrays allowed auth',
|
||||
error: undefined,
|
||||
results: [
|
||||
{ isAllowed: true },
|
||||
{ isAllowed: true },
|
||||
[{ isAllowed: true }, { isAllowed: true }],
|
||||
{ isAllowed: true },
|
||||
],
|
||||
calledWith: [null, constraint],
|
||||
},
|
||||
{
|
||||
description: 'array of arrays not allowed auth',
|
||||
error: undefined,
|
||||
results: [
|
||||
{ isAllowed: true },
|
||||
{ isAllowed: true },
|
||||
[{ isAllowed: true }, { isAllowed: false }],
|
||||
{ isAllowed: true },
|
||||
],
|
||||
calledWith: [errors.AccessDenied],
|
||||
},
|
||||
{
|
||||
description: 'single not allowed auth',
|
||||
error: undefined,
|
||||
results: [{ isAllowed: false }],
|
||||
calledWith: [errors.AccessDenied],
|
||||
},
|
||||
{
|
||||
description: 'one not allowed auth of many',
|
||||
error: undefined,
|
||||
results: [
|
||||
{ isAllowed: true },
|
||||
{ isAllowed: true },
|
||||
{ isAllowed: false },
|
||||
{ isAllowed: true },
|
||||
],
|
||||
calledWith: [errors.AccessDenied],
|
||||
},
|
||||
].forEach(tc => it(tc.description, () => {
|
||||
const cb = sinon.fake();
|
||||
const handler = _handleAuthResults(constraint, log, cb);
|
||||
handler(tc.error, tc.results);
|
||||
assert.deepStrictEqual(cb.getCalls()[0].args, tc.calledWith);
|
||||
}));
|
||||
});
|
||||
});
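Because the table drives a sinon fake, the expected callback arguments fully describe the behaviour. A sketch of that behaviour (an assumption for illustration; the real helper is the one exported from bucketPut.js):

const { errors } = require('arsenal');

const handleAuthResultsSketch = (locationConstraint, log, cb) => (err, authResults) => {
    if (err) {
        return cb(err);
    }
    // a nested array represents several sub-checks for one request context;
    // every one of them has to be allowed
    const allowed = authResults.every(result => (Array.isArray(result)
        ? result.every(subResult => subResult.isAllowed)
        : result.isAllowed));
    if (!allowed) {
        return cb(errors.AccessDenied);
    }
    return cb(null, locationConstraint);
};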
|
||||
|
|
|
@ -1641,6 +1641,78 @@ describe('Multipart Upload API', () => {
|
|||
});
|
||||
});
|
||||
|
||||
it('should leave orphaned data when overwriting an object part during completeMPU',
|
||||
done => {
|
||||
const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024);
|
||||
const overWritePart = Buffer.from('Overwrite content', 'utf8');
|
||||
let uploadId;
|
||||
|
||||
async.waterfall([
|
||||
next => bucketPut(authInfo, bucketPutRequest, log, next),
|
||||
(corsHeaders, next) => initiateMultipartUpload(authInfo,
|
||||
initiateRequest, log, next),
|
||||
(result, corsHeaders, next) => parseString(result, next),
|
||||
(json, next) => {
|
||||
uploadId = json.InitiateMultipartUploadResult.UploadId[0];
|
||||
const requestObj = {
|
||||
bucketName,
|
||||
namespace,
|
||||
objectKey,
|
||||
headers: { host: `${bucketName}.s3.amazonaws.com` },
|
||||
url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
|
||||
query: {
|
||||
partNumber: '1',
|
||||
uploadId,
|
||||
},
|
||||
};
|
||||
const partRequest = new DummyRequest(requestObj, fullSizedPart);
|
||||
objectPutPart(authInfo, partRequest, undefined, log, (err, partCalculatedHash) => {
|
||||
assert.deepStrictEqual(err, null);
|
||||
next(null, requestObj, partCalculatedHash);
|
||||
});
|
||||
},
|
||||
(requestObj, partCalculatedHash, next) => {
|
||||
assert.deepStrictEqual(ds[1].value, fullSizedPart);
|
||||
async.parallel([
|
||||
done => {
|
||||
const partRequest = new DummyRequest(requestObj, overWritePart);
|
||||
objectPutPart(authInfo, partRequest, undefined, log, err => {
|
||||
assert.deepStrictEqual(err, null);
|
||||
done();
|
||||
});
|
||||
},
|
||||
done => {
|
||||
const completeBody = '<CompleteMultipartUpload>' +
|
||||
'<Part>' +
|
||||
'<PartNumber>1</PartNumber>' +
|
||||
`<ETag>"${partCalculatedHash}"</ETag>` +
|
||||
'</Part>' +
|
||||
'</CompleteMultipartUpload>';
|
||||
|
||||
const completeRequest = {
|
||||
bucketName,
|
||||
namespace,
|
||||
objectKey,
|
||||
parsedHost: 's3.amazonaws.com',
|
||||
url: `/${objectKey}?uploadId=${uploadId}`,
|
||||
headers: { host: `${bucketName}.s3.amazonaws.com` },
|
||||
query: { uploadId },
|
||||
post: completeBody,
|
||||
};
|
||||
completeMultipartUpload(authInfo, completeRequest, log, done);
|
||||
},
|
||||
], err => next(err));
|
||||
},
|
||||
],
|
||||
err => {
|
||||
assert.deepStrictEqual(err, null);
|
||||
assert.strictEqual(ds[0], undefined);
|
||||
assert.deepStrictEqual(ds[1].value, fullSizedPart);
|
||||
assert.deepStrictEqual(ds[2].value, overWritePart);
|
||||
done();
|
||||
});
|
||||
});
|
||||
|
||||
it('should throw an error on put of an object part with an invalid ' +
|
||||
'uploadId', done => {
|
||||
const testUploadId = 'invalidUploadID';
|
||||
|
@ -1841,12 +1913,22 @@ describe('complete mpu with versioning', () => {
|
|||
},
|
||||
(eTag, testUploadId, next) => {
|
||||
const origPutObject = metadataBackend.putObject;
|
||||
let callCount = 0;
|
||||
metadataBackend.putObject =
|
||||
(bucketName, objName, objVal, params, log, cb) => {
|
||||
assert.strictEqual(params.replayId, testUploadId);
|
||||
metadataBackend.putObject = origPutObject;
|
||||
metadataBackend.putObject(
|
||||
bucketName, objName, objVal, params, log, cb);
|
||||
(putBucketName, objName, objVal, params, log, cb) => {
|
||||
if (callCount === 0) {
|
||||
// first putObject sets the completeInProgress flag in the overview key
|
||||
assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
|
||||
assert.strictEqual(
|
||||
objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
|
||||
assert.strictEqual(objVal.completeInProgress, true);
|
||||
} else {
|
||||
assert.strictEqual(params.replayId, testUploadId);
|
||||
metadataBackend.putObject = origPutObject;
|
||||
}
|
||||
origPutObject(
|
||||
putBucketName, objName, objVal, params, log, cb);
|
||||
callCount += 1;
|
||||
};
|
||||
const parts = [{ partNumber: 1, eTag }];
|
||||
const completeRequest = _createCompleteMpuRequest(testUploadId,
|
||||
|
@ -1903,12 +1985,22 @@ describe('complete mpu with versioning', () => {
|
|||
},
|
||||
(eTag, testUploadId, next) => {
|
||||
const origPutObject = metadataBackend.putObject;
|
||||
let callCount = 0;
|
||||
metadataBackend.putObject =
|
||||
(bucketName, objName, objVal, params, log, cb) => {
|
||||
assert.strictEqual(params.replayId, testUploadId);
|
||||
metadataBackend.putObject = origPutObject;
|
||||
metadataBackend.putObject(
|
||||
bucketName, objName, objVal, params, log, cb);
|
||||
(putBucketName, objName, objVal, params, log, cb) => {
|
||||
if (callCount === 0) {
|
||||
// first putObject sets the completeInProgress flag in the overview key
|
||||
assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
|
||||
assert.strictEqual(
|
||||
objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
|
||||
assert.strictEqual(objVal.completeInProgress, true);
|
||||
} else {
|
||||
assert.strictEqual(params.replayId, testUploadId);
|
||||
metadataBackend.putObject = origPutObject;
|
||||
}
|
||||
origPutObject(
|
||||
putBucketName, objName, objVal, params, log, cb);
|
||||
callCount += 1;
|
||||
};
|
||||
const parts = [{ partNumber: 1, eTag }];
|
||||
const completeRequest = _createCompleteMpuRequest(testUploadId,
|
||||
|
|
|
@ -16,8 +16,8 @@ const bucketName = 'bucketname';
|
|||
const objectName = 'objectName';
|
||||
const postBody = Buffer.from('I am a body', 'utf8');
|
||||
|
||||
const date = new Date();
|
||||
date.setDate(date.getDate() + 1);
|
||||
const expectedMode = 'GOVERNANCE';
|
||||
const expectedDate = moment().add(2, 'days').toISOString();
|
||||
|
||||
const bucketPutRequest = {
|
||||
bucketName,
|
||||
|
@ -36,13 +36,25 @@ const putObjectRequest = new DummyRequest({
|
|||
const objectRetentionXmlGovernance = '<Retention ' +
|
||||
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
|
||||
'<Mode>GOVERNANCE</Mode>' +
|
||||
`<RetainUntilDate>${date.toISOString()}</RetainUntilDate>` +
|
||||
`<RetainUntilDate>${expectedDate}</RetainUntilDate>` +
|
||||
'</Retention>';
|
||||
|
||||
const objectRetentionXmlCompliance = '<Retention ' +
|
||||
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
|
||||
'<Mode>COMPLIANCE</Mode>' +
|
||||
`<RetainUntilDate>${moment().add(2, 'days').toISOString()}</RetainUntilDate>` +
|
||||
`<RetainUntilDate>${expectedDate}</RetainUntilDate>` +
|
||||
'</Retention>';
|
||||
|
||||
const objectRetentionXmlGovernanceLonger = '<Retention ' +
|
||||
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
|
||||
'<Mode>GOVERNANCE</Mode>' +
|
||||
`<RetainUntilDate>${moment().add(5, 'days').toISOString()}</RetainUntilDate>` +
|
||||
'</Retention>';
|
||||
|
||||
const objectRetentionXmlGovernanceShorter = '<Retention ' +
|
||||
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
|
||||
'<Mode>GOVERNANCE</Mode>' +
|
||||
`<RetainUntilDate>${moment().add(1, 'days').toISOString()}</RetainUntilDate>` +
|
||||
'</Retention>';
|
||||
|
||||
const objectRetentionXmlComplianceShorter = '<Retention ' +
|
||||
|
@ -82,8 +94,19 @@ const putObjRetRequestComplianceShorter = {
|
|||
post: objectRetentionXmlComplianceShorter,
|
||||
};
|
||||
|
||||
const expectedMode = 'GOVERNANCE';
|
||||
const expectedDate = date.toISOString();
|
||||
const putObjRetRequestGovernanceLonger = {
|
||||
bucketName,
|
||||
objectKey: objectName,
|
||||
headers: { host: `${bucketName}.s3.amazonaws.com` },
|
||||
post: objectRetentionXmlGovernanceLonger,
|
||||
};
|
||||
|
||||
const putObjRetRequestGovernanceShorter = {
|
||||
bucketName,
|
||||
objectKey: objectName,
|
||||
headers: { host: `${bucketName}.s3.amazonaws.com` },
|
||||
post: objectRetentionXmlGovernanceShorter,
|
||||
};
|
||||
|
||||
describe('putObjectRetention API', () => {
|
||||
before(() => cleanup());
|
||||
|
@ -150,19 +173,40 @@ describe('putObjectRetention API', () => {
|
|||
});
|
||||
});
|
||||
|
||||
it('should disallow update if the x-amz-bypass-governance-retention header is missing and'
|
||||
it('should allow update if the x-amz-bypass-governance-retention header is missing and '
|
||||
+ 'GOVERNANCE mode is enabled if time is being extended', done => {
|
||||
objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
|
||||
assert.ifError(err);
|
||||
return objectPutRetention(authInfo, putObjRetRequestGovernanceLonger, log, err => {
|
||||
assert.ifError(err);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('should disallow update if the x-amz-bypass-governance-retention header is missing and '
|
||||
+ 'GOVERNANCE mode is enabled', done => {
|
||||
objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
|
||||
assert.ifError(err);
|
||||
return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
|
||||
return objectPutRetention(authInfo, putObjRetRequestGovernanceShorter, log, err => {
|
||||
assert.deepStrictEqual(err, errors.AccessDenied);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('should allow update if the x-amz-bypass-governance-retention header is missing and '
|
||||
+ 'GOVERNANCE mode is enabled and the same date is used', done => {
|
||||
objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
|
||||
assert.ifError(err);
|
||||
return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
|
||||
assert.ifError(err);
|
||||
done();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
it('should allow update if the x-amz-bypass-governance-retention header is present and'
|
||||
it('should allow update if the x-amz-bypass-governance-retention header is present and '
|
||||
+ 'GOVERNANCE mode is enabled', done => {
|
||||
objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
|
||||
assert.ifError(err);
|
||||
|
|
|
@ -66,7 +66,7 @@ function timeDiff(startTime) {
|
|||
return milliseconds;
|
||||
}
|
||||
|
||||
function makeAuthInfo(accessKey, userName) {
|
||||
function makeAuthInfo(accessKey, userName, sessionName) {
|
||||
const canIdMap = {
|
||||
accessKey1: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7'
|
||||
+ 'cd47ef2be',
|
||||
|
@ -94,6 +94,11 @@ function makeAuthInfo(accessKey, userName) {
|
|||
params.arn = `arn:aws:iam::${shortid}:user/${userName}`;
|
||||
}
|
||||
|
||||
if (sessionName) {
|
||||
params.IAMdisplayName = `[assumedRole] rolename:${sessionName}`;
|
||||
params.arn = `arn:aws:sts::${shortid}:assumed-role/rolename/${sessionName}`;
|
||||
}
|
||||
|
||||
return new AuthInfo(params);
|
||||
}
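A short usage note for the new sessionName argument (the account id in the ARN varies; shown only to illustrate the shape):

// e.g. in the getReplicationInfo tests above:
const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
// params.arn is now of the form
//   arn:aws:sts::<shortid>:assumed-role/rolename/backbeat-lifecycle
// so the request is treated as coming from the lifecycle service.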
|
||||
|
||||
|
|
|
@ -0,0 +1,343 @@
|
|||
const assert = require('assert');
|
||||
const DummyRequest = require('./DummyRequest');
|
||||
const { authBucketPut } = require('../../lib/api/bucketPut');
|
||||
const prepareRequestContexts = require('../../lib/api/apiUtils/authorization/prepareRequestContexts.js');
|
||||
|
||||
const sourceBucket = 'bucket';
|
||||
const sourceObject = 'object';
|
||||
const apiMatrix = [
|
||||
{
|
||||
name: 'multipartDelete',
|
||||
expectedPermissions: ['s3:AbortMultipartUpload'],
|
||||
},
|
||||
{
|
||||
name: 'objectCopy',
|
||||
headers: {
|
||||
'x-amz-tagging': true,
|
||||
'x-amz-tagging-directive': 'REPLACE',
|
||||
},
|
||||
expectedPermissions: ['s3:GetObject', 's3:PutObject', 's3:PutObjectTagging'],
|
||||
},
|
||||
{
|
||||
name: 'objectCopy',
|
||||
headers: {
|
||||
'x-amz-tagging': true,
|
||||
'x-amz-tagging-directive': 'COPY',
|
||||
},
|
||||
expectedPermissions: ['s3:GetObject', 's3:PutObject'],
|
||||
},
|
||||
{
|
||||
name: 'initiateMultipartUpload',
|
||||
expectedPermissions: ['s3:PutObject'],
|
||||
},
|
||||
{
|
||||
name: 'bucketDelete',
|
||||
expectedPermissions: ['s3:DeleteBucket'],
|
||||
},
|
||||
{
|
||||
name: 'bucketDeleteCors',
|
||||
expectedPermissions: ['s3:PutBucketCORS'],
|
||||
},
|
||||
{
|
||||
name: 'bucketDeleteEncryption',
|
||||
expectedPermissions: ['s3:PutEncryptionConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketDeleteLifecycle',
|
||||
expectedPermissions: ['s3:PutLifecycleConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketDeletePolicy',
|
||||
expectedPermissions: ['s3:DeleteBucketPolicy'],
|
||||
},
|
||||
{
|
||||
name: 'bucketDeleteTagging',
|
||||
expectedPermissions: ['s3:PutBucketTagging'],
|
||||
},
|
||||
{
|
||||
name: 'bucketDeleteWebsite',
|
||||
expectedPermissions: ['s3:DeleteBucketWebsite'],
|
||||
},
|
||||
{
|
||||
name: 'objectDelete',
|
||||
expectedPermissions: ['s3:DeleteObject'],
|
||||
},
|
||||
{
|
||||
name: 'objectDeleteTagging',
|
||||
expectedPermissions: ['s3:DeleteObjectTagging'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetACL',
|
||||
expectedPermissions: ['s3:GetBucketAcl'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetCors',
|
||||
expectedPermissions: ['s3:GetBucketCORS'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetEncryption',
|
||||
expectedPermissions: ['s3:GetEncryptionConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetLifecycle',
|
||||
expectedPermissions: ['s3:GetLifecycleConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetLocation',
|
||||
expectedPermissions: ['s3:GetBucketLocation'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetNotification',
|
||||
expectedPermissions: ['s3:GetBucketNotification'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetPolicy',
|
||||
expectedPermissions: ['s3:GetBucketPolicy'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetReplication',
|
||||
expectedPermissions: ['s3:GetReplicationConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetTagging',
|
||||
expectedPermissions: ['s3:GetBucketTagging'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetVersioning',
|
||||
expectedPermissions: ['s3:GetBucketVersioning'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetWebsite',
|
||||
expectedPermissions: ['s3:GetBucketWebsite'],
|
||||
},
|
||||
{
|
||||
name: 'objectGet',
|
||||
expectedPermissions: ['s3:GetObject', 's3:GetObjectTagging'],
|
||||
},
|
||||
{
|
||||
name: 'objectGet',
|
||||
headers: {
|
||||
'x-amz-version-id': '1',
|
||||
},
|
||||
expectedPermissions: ['s3:GetObjectVersion', 's3:GetObject', 's3:GetObjectTagging'],
|
||||
},
|
||||
{
|
||||
name: 'objectGetACL',
|
||||
expectedPermissions: ['s3:GetObjectAcl'],
|
||||
},
|
||||
{
|
||||
name: 'objectGetLegalHold',
|
||||
expectedPermissions: ['s3:GetObjectLegalHold'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGetObjectLock',
|
||||
expectedPermissions: ['s3:GetBucketObjectLockConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'objectGetRetention',
|
||||
expectedPermissions: ['s3:GetObjectRetention'],
|
||||
},
|
||||
{
|
||||
name: 'objectGetTagging',
|
||||
expectedPermissions: ['s3:GetObjectTagging'],
|
||||
},
|
||||
{
|
||||
name: 'objectGetTagging',
|
||||
headers: {
|
||||
'x-amz-version-id': '1',
|
||||
},
|
||||
expectedPermissions: ['s3:GetObjectTagging', 's3:GetObjectVersionTagging'],
|
||||
},
|
||||
{
|
||||
name: 'bucketGet',
|
||||
expectedPermissions: ['s3:ListBucket'],
|
||||
},
|
||||
{
|
||||
name: 'objectHead',
|
||||
expectedPermissions: ['s3:GetObject'],
|
||||
},
|
||||
{
|
||||
name: 'objectHead',
|
||||
headers: {
|
||||
'x-amz-version-id': '1',
|
||||
},
|
||||
expectedPermissions: ['s3:GetObject', 's3:GetObjectVersion'],
|
||||
},
|
||||
{
|
||||
name: 'listParts',
|
||||
expectedPermissions: ['s3:ListMultipartUploadParts'],
|
||||
},
|
||||
{
|
||||
name: 'listObjectVersions',
|
||||
expectedPermissions: ['s3:ListBucketVersions'],
|
||||
},
|
||||
{
|
||||
name: 'listParts',
|
||||
expectedPermissions: ['s3:ListMultipartUploadParts'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutACL',
|
||||
expectedPermissions: ['s3:PutBucketAcl'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutCors',
|
||||
expectedPermissions: ['s3:PutBucketCORS'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutEncryption',
|
||||
expectedPermissions: ['s3:PutEncryptionConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutLifecycle',
|
||||
expectedPermissions: ['s3:PutLifecycleConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutNotification',
|
||||
expectedPermissions: ['s3:PutBucketNotification'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutPolicy',
|
||||
expectedPermissions: ['s3:PutBucketPolicy'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutReplication',
|
||||
expectedPermissions: ['s3:PutReplicationConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutTagging',
|
||||
expectedPermissions: ['s3:PutBucketTagging'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutVersioning',
|
||||
expectedPermissions: ['s3:PutBucketVersioning'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutWebsite',
|
||||
expectedPermissions: ['s3:PutBucketWebsite'],
|
||||
},
|
||||
{
|
||||
name: 'objectPut',
|
||||
expectedPermissions: ['s3:PutObject'],
|
||||
},
|
||||
{
|
||||
name: 'objectPut',
|
||||
headers: {
|
||||
'x-amz-object-lock-legal-hold-status': 'ON',
|
||||
'x-amz-object-lock-mode': 'GOVERNANCE',
|
||||
'x-amz-tagging': 'Key1=Value1',
|
||||
'x-amz-acl': 'private',
|
||||
},
|
||||
expectedPermissions: [
|
||||
's3:PutObject',
|
||||
's3:PutObjectTagging',
|
||||
's3:PutObjectLegalHold',
|
||||
's3:PutObjectAcl',
|
||||
's3:PutObjectRetention',
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'objectPut',
|
||||
headers: {
|
||||
'x-amz-version-id': '1',
|
||||
},
|
||||
expectedPermissions: [
|
||||
's3:PutObject',
|
||||
's3:PutObjectVersionTagging',
|
||||
],
|
||||
},
|
||||
{
|
||||
name: 'objectPutACL',
|
||||
expectedPermissions: ['s3:PutObjectAcl'],
|
||||
},
|
||||
{
|
||||
name: 'objectPutLegalHold',
|
||||
expectedPermissions: ['s3:PutObjectLegalHold'],
|
||||
},
|
||||
{
|
||||
name: 'bucketPutObjectLock',
|
||||
expectedPermissions: ['s3:PutBucketObjectLockConfiguration'],
|
||||
},
|
||||
{
|
||||
name: 'objectPutRetention',
|
||||
expectedPermissions: ['s3:PutObjectRetention'],
|
||||
},
|
||||
{
|
||||
name: 'objectPutTagging',
|
||||
expectedPermissions: ['s3:PutObjectTagging'],
|
||||
},
|
||||
{
|
||||
name: 'objectPutTagging',
|
||||
headers: {
|
||||
'x-amz-version-id': '1',
|
||||
},
|
||||
expectedPermissions: ['s3:PutObjectTagging', 's3:PutObjectVersionTagging'],
|
||||
},
|
||||
{
|
||||
name: 'objectPutPart',
|
||||
expectedPermissions: ['s3:PutObject'],
|
||||
},
|
||||
{
|
||||
name: 'objectPutCopyPart',
|
||||
expectedPermissions: ['s3:GetObject', 's3:PutObject'],
|
||||
},
|
||||
];

function prepareDummyRequest(headers = {}) {
    const request = new DummyRequest({
        hostname: 'localhost',
        port: 80,
        headers,
        socket: {
            remoteAddress: '0.0.0.0',
        },
    });
    return request;
}

describe('Policies: permission checks for S3 APIs', () => {
    apiMatrix.forEach(api => {
        if (api.name.length === 0) return;
        // `headers` is a plain object, so derive the title suffix from its keys.
        const message = `should return ${api.expectedPermissions.join(', ')} in requestContextParams for ${api.name}` +
            `${api.headers && Object.keys(api.headers).length > 0 ?
                ` with headers ${Object.keys(api.headers).join(', ')}` : ''}`;
        it(message, () => {
            const request = prepareDummyRequest(api.headers);
            const requestContexts = prepareRequestContexts(api.name, request,
                sourceBucket, sourceObject);
            const requestedActions = requestContexts.map(rq => rq.getAction());
            assert.deepStrictEqual(requestedActions, api.expectedPermissions);
        });
    });

    describe('CreateBucket', () => {
        function putBucketApiMethods(headers) {
            const request = prepareDummyRequest(headers);
            const result = authBucketPut(null, 'name', null, request, null);
            return result.map(req => req.apiMethod);
        }

        it('should return s3:PutBucket without any provided header', () => {
            assert.deepStrictEqual(
                putBucketApiMethods(),
                ['bucketPut'],
            );
        });

        it('should return s3:CreateBucket, s3:PutBucketVersioning and s3:PutBucketObjectLockConfiguration' +
            ' with object-lock headers', () => {
            assert.deepStrictEqual(
                putBucketApiMethods({ 'x-amz-bucket-object-lock-enabled': 'true' }),
                ['bucketPut', 'bucketPutObjectLock', 'bucketPutVersioning'],
            );
        });

        it('should return s3:CreateBucket and s3:PutBucketAcl' +
            ' with ACL headers', () => {
            assert.deepStrictEqual(
                putBucketApiMethods({ 'x-amz-grant-read': 'private' }),
                ['bucketPut', 'bucketPutACL'],
            );
        });
    });
});
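For illustration, here is a minimal standalone sketch of what one matrix entry exercises. The helpers prepareRequestContexts, prepareDummyRequest, sourceBucket and sourceObject are assumed to be the ones set up earlier in this test file; this is a hypothetical usage, not part of the change itself.

// Hypothetical usage sketch: a versioned object-tagging request is expected to
// map to both the base action and the version-specific action.
const request = prepareDummyRequest({ 'x-amz-version-id': '1' });
const requestContexts = prepareRequestContexts('objectPutTagging', request,
    sourceBucket, sourceObject);
// Expected: ['s3:PutObjectTagging', 's3:PutObjectVersionTagging']
console.log(requestContexts.map(rc => rc.getAction()));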
@ -1,6 +1,8 @@
const assert = require('assert');
const sinon = require('sinon');
const werelogs = require('werelogs');
const _config = require('../../lib/Config').config;
const { makeAuthInfo } = require('../unit/helpers');

const testEvents = [{
    action: 'getObject',

@ -494,6 +496,10 @@ describe('utapi v2 pushmetrics utility', () => {
        pushMetric = require('../../lib/utapi/utilities').pushMetric;
    });

    beforeEach(() => {
        _config.lifecycleRoleName = null;
    });

    after(() => {
        sinon.restore();
    });

@ -507,4 +513,51 @@ describe('utapi v2 pushmetrics utility', () => {
            });
        });
    });

    describe('with lifecycle enabled', () => {
        _config.lifecycleRoleName = 'lifecycleTestRoleName';
        const eventFilterList = new Set([
            'getBucketAcl',
            'getBucketCors',
            'getBucketLocation',
            'getBucketNotification',
            'getBucketObjectLock',
            'getBucketReplication',
            'getBucketVersioning',
            'getBucketWebsite',
            'getObjectTagging',
            'headObject',
        ]);

        testEvents
            .map(event => {
                const modifiedEvent = event;
                const authInfo = makeAuthInfo('accesskey1', 'Bart');
                authInfo.arn = `foo:assumed-role/${_config.lifecycleRoleName}/backbeat-lifecycle`;
                modifiedEvent.metrics.authInfo = authInfo;
                modifiedEvent.metrics.canonicalID = 'accesskey1';
                return modifiedEvent;
            })
            .map(event => {
                if (eventFilterList.has(event.action)) {
                    it(`should skip action ${event.action}`, () => {
                        _config.lifecycleRoleName = 'lifecycleTestRoleName';
                        const eventPushed = pushMetric(event.action, log, event.metrics);
                        assert.strictEqual(eventPushed, undefined);
                    });
                }
                return event;
            })
            .forEach(event => {
                if (!eventFilterList.has(event.action)) {
                    it(`should compute and push metrics for ${event.action}`, () => {
                        const eventPushed = pushMetric(event.action, log, event.metrics);
                        assert(eventPushed);
                        Object.keys(event.expected).forEach(key => {
                            assert.strictEqual(eventPushed[key], event.expected[key]);
                        });
                    });
                }
            });
    });
});
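These tests suggest the intended behavior: when the requester is the assumed lifecycle service role, pushMetric skips the read-only actions in eventFilterList and pushes metrics for everything else. A minimal sketch of such a check follows; it is an assumption for illustration only, and the real logic lives in lib/utapi/utilities and may differ.

// Hedged sketch: isLifecycleSession is a hypothetical helper, inferred from the
// ARN built in the tests ('foo:assumed-role/<roleName>/backbeat-lifecycle').
function isLifecycleSession(arn, lifecycleRoleName) {
    if (!arn || !lifecycleRoleName) {
        return false;
    }
    const parts = arn.split('/');
    return parts.length === 3
        && parts[0].endsWith(':assumed-role')
        && parts[1] === lifecycleRoleName
        && parts[2] === 'backbeat-lifecycle';
}

// Example: returns true for the ARN used in the tests above.
// isLifecycleSession('foo:assumed-role/lifecycleTestRoleName/backbeat-lifecycle',
//     'lifecycleTestRoleName');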

16 yarn.lock
@ -466,9 +466,9 @@ arraybuffer.slice@~0.0.7:
  optionalDependencies:
    ioctl "^2.0.2"

"arsenal@git+https://github.com/scality/Arsenal#7.10.31":
  version "7.10.31"
  resolved "git+https://github.com/scality/Arsenal#10402ae78d48eda51d6a1c8f86d7e8443d32c748"
"arsenal@git+https://github.com/scality/Arsenal#7.10.31-1":
  version "7.10.31-1"
  resolved "git+https://github.com/scality/Arsenal#914bc7559de65fd99e61c0afb0a5fb51193d3c37"
  dependencies:
    "@types/async" "^3.2.12"
    "@types/utf8" "^3.0.1"

@ -5283,6 +5283,16 @@ vary@~1.1.2:
  resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc"
  integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=

"vaultclient@git+https://github.com/scality/vaultclient#7.10.8-1":
  version "7.10.8-1"
  resolved "git+https://github.com/scality/vaultclient#dcbaade9b7ddd266b0ea4631d45d59b23a6de7e1"
  dependencies:
    agentkeepalive "^4.1.3"
    arsenal "git+https://github.com/scality/Arsenal#7.10.28"
    commander "2.20.0"
    werelogs "git+https://github.com/scality/werelogs#8.1.0"
    xml2js "0.4.19"

vaultclient@scality/vaultclient#7.10.8:
  version "7.10.8"
  resolved "https://codeload.github.com/scality/vaultclient/tar.gz/aab460b592ca3ad8422bfa8f46b2a27cb6ca2b75"