Compare commits

...

27 Commits

Author SHA1 Message Date
Alexander Chan b93f62036f remove duplications
(cherry picked from commit 2772976e86)
2023-01-13 15:57:53 -08:00
Alexander Chan 54ab2fc899 remove eve CI files
(cherry picked from commit 51905f82ba)
2023-01-13 15:57:53 -08:00
Alexander Chan b5beef3387 CLDSRV-244: migrate eve CI to GitHub Actions
- multibackend functional tests
- file functional tests
- utapi v2 functional tests
- kmip functional tests

(cherry picked from commit b72adc50a7)
2023-01-13 15:57:53 -08:00
Thomas Carmet 28ede3a096 CLDSRV-115 migration to gh actions
(cherry picked from commit b6def80347)
2023-01-13 15:57:53 -08:00
Thomas Carmet 79a622fc55 CLDSRV-115 migration github actions
(cherry picked from commit b3f7a22a07)
2023-01-13 15:57:53 -08:00
Jonathan Gramain ce656a804c CLDSRV-321 bump hotfix version to 7.10.15 2022-12-21 22:01:06 -08:00
Taylor McKinnon e617025904 bf(CLDSRV-321): Fix retention extension check to consider same date as extended
(cherry picked from commit 9f5ae852bf)
2022-12-21 22:00:39 -08:00
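
For reference, the corrected comparison treats a retain-until date equal to the current one as a valid extension. A minimal sketch of the check this commit relies on (it mirrors ObjectLockInfo.isExtended from the objectLockHelpers.js diff further down; moment is the date library the repo already uses):

const moment = require('moment');

// Mirrors ObjectLockInfo.isExtended from the diff below: a timestamp equal
// to the current expiration date now counts as an extension.
function isExtended(currentDate, timestamp) {
    return timestamp !== undefined &&
        (currentDate === null || moment(timestamp).isSameOrAfter(currentDate));
}

console.log(isExtended('2030-01-01T00:00:00Z', '2030-01-01T00:00:00Z')); // true: same date
console.log(isExtended('2030-01-01T00:00:00Z', '2029-12-31T00:00:00Z')); // false: shortens
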
Jonathan Gramain c8f691db22 CLDSRV-173 CLDSRV-170 CLDSRV-177 S3C-5390 hotfix version bump 2022-12-14 14:56:09 -08:00
Artem Bakalov adfe9640e7 remove .only
(cherry picked from commit b4725aa032)
(cherry picked from commit af95fea311)
2022-12-14 14:50:48 -08:00
Artem Bakalov 457f7ffe42 S3C-5390 s3api head-object with part-number 1 on empty file fails: httpCode 416
(cherry picked from commit 4f3195a6ca)
(cherry picked from commit 8f3e737664)
2022-12-14 14:50:39 -08:00
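
An illustrative repro sketch (the endpoint and credentials are assumptions for a locally running Cloudserver, not part of the change; PartNumber is a standard parameter of the aws-sdk headObject call): before this fix, a HEAD with part-number 1 on a zero-byte object failed with 416; with the objectHead.js change shown further down it returns 200 with content-length 0.

const AWS = require('aws-sdk');

// Hypothetical local setup; endpoint and credentials are placeholders.
const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

async function repro() {
    await s3.createBucket({ Bucket: 'repro-bucket' }).promise();
    await s3.putObject({ Bucket: 'repro-bucket', Key: 'empty', Body: '' }).promise();
    // Previously rejected with InvalidRange (416); now succeeds with length 0.
    const res = await s3.headObject({
        Bucket: 'repro-bucket', Key: 'empty', PartNumber: 1,
    }).promise();
    console.log(res.ContentLength); // 0
}

repro().catch(console.error);
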
Jonathan Gramain c15eac4b0c [7.10] CLDSRV-177 add missing test helper checkObjectData
(cherry picked from commit 413ebe743c)
(cherry picked from commit a1c4420eab)
2022-12-14 14:50:21 -08:00
Jonathan Gramain 02b1879438 bugfix: CLDSRV-177 fix crash with empty object replication
Fix a crash occurring when an empty object is replicated over a
non-empty object.

It is not clear how this happens in practice, but corner cases
involving a race between object replication and versioning being
suspended on the target bucket at the same time could lead to this
situation, since the check between the replication configuration and
the actual replication is not atomic.

(cherry picked from commit a4e8cbebe6)
(cherry picked from commit 108d1c920f)
2022-12-14 14:50:16 -08:00
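
The guard that fixes the crash is visible in the locationKeysHaveChanged diff further down; a self-contained sketch of its effect, simplified here to plain arrays of location keys (the real helper operates on location metadata objects):

// Sketch of locationKeysHaveChanged after the fix: `curr` is null when the
// incoming replicated object is empty and carries no data locations;
// without the `!curr` guard the comparison below would throw.
function locationKeysHaveChanged(prev, curr) {
    if (!prev || prev.length === 0 || !curr) {
        return true;
    }
    return curr.every(key => !prev.includes(key));
}

console.log(locationKeysHaveChanged(['old-loc'], null)); // true: safe path, no crash
console.log(locationKeysHaveChanged(['k1'], ['k1']));    // false: key is reused
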
Jonathan Gramain 628223dde8 bugfix: CLDSRV-170 skip orphan cleanup in UploadPart[Copy]
Do not delete orphan data in UploadPart/UploadPartCopy on overwrite
if a CompleteMPU of the target MPU is already in progress.

This guards against a race condition where a CompleteMPU is running
while UploadPart is uploading a part for the same MPU.

It leaves an orphan in storage, since only one of the uploaded copies
of the part data will be present in the finished MPU, but the window
is limited to the CompleteMPU execution and should only occur when
UploadPart is retried after prior stuck requests, or with broken
clients misusing the MPU API, so it should be acceptable.

Implementation details:

- set a flag in the MPU overview key when starting the CompleteMPU
  process, before listing the parts from metadata to construct the
  manifest

- in UploadPart/UploadPartCopy, after the part metadata is written and
  if the same part already existed, re-fetch the MPU overview key to
  check the flag: if set, skip the deletion of the old data of this
  part, since the CompleteMPU process in progress may choose either
  part data depending on the exact timing of the listing vs. the
  part overwrite.

(cherry picked from commit 8496111518)
(cherry picked from commit c2dbbfa008)
2022-12-14 14:49:38 -08:00
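
A condensed sketch of the ordering described above; metadataMarkMPObjectForCompletion and isCompleteMPUInProgress are the service helpers appearing in the diffs further down, everything else is an illustrative in-memory stand-in:

// In-memory stand-in for the MPU overview key in metadata.
const mpuOverview = { completeInProgress: false };

// CompleteMPU sets the flag *before* listing parts to build the manifest.
function metadataMarkMPObjectForCompletion() {
    mpuOverview.completeInProgress = true;
}

function isCompleteMPUInProgress() {
    return mpuOverview.completeInProgress;
}

// UploadPart[Copy], after overwriting a part's metadata, re-reads the
// overview key and skips cleanup if a CompleteMPU may be listing parts.
function onPartOverwrite(oldLocations) {
    if (oldLocations && isCompleteMPUInProgress()) {
        return 'keep old data (orphan, bounded window)';
    }
    return 'delete old data';
}

console.log(onPartOverwrite(['loc-1'])); // 'delete old data'
metadataMarkMPObjectForCompletion();
console.log(onPartOverwrite(['loc-1'])); // 'keep old data (orphan, bounded window)'
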
Nicolas Humbert 0d562e98fb CLDSRV-173 DeleteMarkers created by Lifecycle should not be replicated
(cherry picked from commit d30430a81c)
2022-12-14 14:41:56 -08:00
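
The detection logic is shown in full in the permissionChecks.js diff further down; a self-contained copy for reference:

// From the permissionChecks.js diff below: lifecycle-initiated deletes come
// in under an STS assumed-role session named backbeat-lifecycle, and their
// delete markers are excluded from replication.
function isLifecycleSession(arn) {
    if (!arn) {
        return false;
    }
    const arnSplits = arn.split(':');
    const service = arnSplits[2];
    const resourceNames = arnSplits[arnSplits.length - 1].split('/');
    return (service === 'sts' &&
        resourceNames[0] === 'assumed-role' &&
        resourceNames[resourceNames.length - 1] === 'backbeat-lifecycle');
}

console.log(isLifecycleSession(
    'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle')); // true
console.log(isLifecycleSession(
    'arn:aws:iam::257038443293:user/alice')); // false
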
Jonathan Gramain 74161d4670 [hotfix] CLDSRV-275 CLDSRV-289 CLDSRV-293 bump version to 7.10.15-2 2022-11-11 13:53:59 -08:00
Taylor McKinnon af706de35a bf(CLDSRV-293): convert multiObjectDelete to use ObjectLockInfo helper
(cherry picked from commit 3d6b7354a5)
2022-11-11 13:53:48 -08:00
Taylor McKinnon 2111f42d7c bf(CLDSRV-293): convert objectDelete to use ObjectLockInfo helper
(cherry picked from commit a5d694a92c)
2022-11-11 13:53:48 -08:00
Taylor McKinnon 3c4cc9015d bf(CLDSRV-293): convert objectPutRetention to use ObjectLockInfo helper
(cherry picked from commit 990e821ac8)
2022-11-11 13:53:48 -08:00
Taylor McKinnon c9b581ba72 bf(CLDSRV-293): Add tests for object lock refactor
(cherry picked from commit 8170bb9965)
2022-11-11 13:53:48 -08:00
Taylor McKinnon 9aede4faa3 bf(CLDSRV-293): Refactor object lock helpers to centralize logic
(cherry picked from commit 7e559d08c9)
2022-11-11 13:53:48 -08:00
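
The centralized logic is the ObjectLockInfo class in the objectLockHelpers.js diff further down. A trimmed usage sketch of the API it exposes (only the methods needed for this example are reproduced):

const moment = require('moment');

// Trimmed from the objectLockHelpers.js diff below.
class ObjectLockInfo {
    constructor({ mode, date, legalHold }) {
        this.mode = mode || null;
        this.date = date || null;
        this.legalHold = legalHold || false;
    }
    isExpired() {
        return this.date === null || moment().isSameOrAfter(this.date);
    }
    isLocked() {
        if (this.legalHold) {
            return true;
        }
        if (!this.mode || !this.date) {
            return false;
        }
        return !this.isExpired();
    }
    isGovernanceMode() {
        return this.mode === 'GOVERNANCE';
    }
    canModifyObject(hasGovernanceBypass) {
        return !this.isLocked() ||
            (this.isGovernanceMode() && !!hasGovernanceBypass);
    }
}

// Callers build it straight from object metadata, as objectDelete does below.
const lock = new ObjectLockInfo({
    mode: 'GOVERNANCE', date: '2099-01-01T00:00:00Z', legalHold: false });
console.log(lock.canModifyObject(false)); // false: still locked
console.log(lock.canModifyObject(true));  // true: governance bypass allowed
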
Jonathan Gramain 40695809e3 [hotfix] CLDSRV-275 CLDSRV-289 bump utapi dep to 7.10.7-1 2022-11-11 11:54:25 -08:00
Taylor McKinnon 39fa2596cd bf(CLDSRV-289): Fix putDeleteMarkerObject metric for version suspended buckets
(cherry picked from commit 4174106c2d)
2022-11-11 11:43:48 -08:00
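
The fix is visible in the objectDelete.js diff further down: a byteLength is reported to utapi only when a version-suspended bucket's null master version is actually removed by the new delete marker. A condensed sketch of that decision:

// Condensed from the objectDelete.js diff below.
function deleteMarkerByteLength(versioningStatus, objectMD, deleteInfo) {
    const versioningSuspended = versioningStatus === 'Suspended';
    // Only when the suspended bucket's null master version is replaced does
    // the delete marker actually remove an object that utapi counted.
    const deletedSuspendedMaster =
        versioningSuspended && !!objectMD && deleteInfo.isNull;
    const objectByteLength = (objectMD && objectMD['content-length']) || 0;
    return deletedSuspendedMaster ? Number.parseInt(objectByteLength, 10) : null;
}

console.log(deleteMarkerByteLength('Suspended',
    { 'content-length': 1024 }, { isNull: true }));  // 1024: decrement reported
console.log(deleteMarkerByteLength('Enabled',
    { 'content-length': 1024 }, { isNull: false })); // null: plain delete marker
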
Artem Bakalov 87fc116f71 CLDSRV-275 - utapi-v1 delete inconsistency with versioning suspended
(cherry picked from commit 49fded7d5f)
2022-11-11 11:40:56 -08:00
Jonathan Gramain b0ca9acd7e chore: CLDSRV-296 remove get_product_version.sh
No longer useful, and it does not support hotfix versions x.y.z-p

(cherry picked from commit 29f87c7f2f)
2022-11-04 13:19:13 -07:00
Jonathan Gramain d4a4e25b71 CLDSRV-294 bump hotfix version 7.10.15-1 2022-11-04 11:57:58 -07:00
Jonathan Gramain 4078fdb1fa CLDSRV-291 update arsenal dep
(cherry picked from commit 950542237f)
2022-11-04 11:56:50 -07:00
Jonathan Gramain 29058836d0 bugfix: CLDSRV-291 test for HEAD object with bucket policy
Add unit tests showing that HEAD object requests are allowed when a
bucket policy grants the `s3:GetObject` permission to another account
or user.

(cherry picked from commit a3c3511ff9)
2022-11-04 11:56:50 -07:00
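
For context: HEAD Object is authorized against the s3:GetObject action in S3's permission model, which is what the added tests verify. An illustrative bucket policy (the account ID and bucket name are placeholders):

// Illustrative only: granting s3:GetObject to another account also allows
// that account's HEAD object requests, since HEAD maps to the same action.
const bucketPolicy = {
    Version: '2012-10-17',
    Statement: [{
        Effect: 'Allow',
        Principal: { AWS: 'arn:aws:iam::123456789012:root' },
        Action: 's3:GetObject',
        Resource: 'arn:aws:s3:::policy-test-bucket/*',
    }],
};
console.log(JSON.stringify(bucketPolicy, null, 2));
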
57 changed files with 2032 additions and 1199 deletions

.dockerignore (new file, +3)

@@ -0,0 +1,3 @@
.git
.github
node_modules

.github/actions/setup-ci/action.yaml (new file, +32)

@@ -0,0 +1,32 @@
---
name: "Setup CI environment"
description: "Setup Cloudserver CI environment"
runs:
  using: composite
  steps:
    - name: Setup etc/hosts
      shell: bash
      run: echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
    - name: Setup Credentials
      shell: bash
      run: bash .github/scripts/credentials.bash
    - name: Setup job artifacts directory
      shell: bash
      run: |-
        set -exu;
        mkdir -p /tmp/artifacts/${{ github.job }}/;
    - uses: actions/setup-node@v2
      with:
        node-version: '16'
        cache: 'yarn'
    - name: install dependencies
      shell: bash
      run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
    - uses: actions/cache@v2
      with:
        path: ~/.cache/pip
        key: ${{ runner.os }}-pip
    - name: Install python deps
      shell: bash
      run: pip install docker-compose

.github/docker/creds.env (new file, +37)

@@ -0,0 +1,37 @@
azurebackend_AZURE_STORAGE_ACCESS_KEY
azurebackend_AZURE_STORAGE_ACCOUNT_NAME
azurebackend_AZURE_STORAGE_ENDPOINT
azurebackend2_AZURE_STORAGE_ACCESS_KEY
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
azurebackend2_AZURE_STORAGE_ENDPOINT
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
azurebackendmismatch_AZURE_STORAGE_ENDPOINT
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
azuretest_AZURE_BLOB_ENDPOINT
b2backend_B2_ACCOUNT_ID
b2backend_B2_STORAGE_ACCESS_KEY
GOOGLE_SERVICE_EMAIL
GOOGLE_SERVICE_KEY
AWS_S3_BACKEND_ACCESS_KEY
AWS_S3_BACKEND_SECRET_KEY
AWS_S3_BACKEND_ACCESS_KEY_2
AWS_S3_BACKEND_SECRET_KEY_2
AWS_GCP_BACKEND_ACCESS_KEY
AWS_GCP_BACKEND_SECRET_KEY
AWS_GCP_BACKEND_ACCESS_KEY_2
AWS_GCP_BACKEND_SECRET_KEY_2
b2backend_B2_STORAGE_ENDPOINT
gcpbackend2_GCP_SERVICE_EMAIL
gcpbackend2_GCP_SERVICE_KEY
gcpbackend2_GCP_SERVICE_KEYFILE
gcpbackend_GCP_SERVICE_EMAIL
gcpbackend_GCP_SERVICE_KEY
gcpbackendmismatch_GCP_SERVICE_EMAIL
gcpbackendmismatch_GCP_SERVICE_KEY
gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE
gcpbackendproxy_GCP_SERVICE_KEYFILE

.github/docker/docker-compose.yaml (new file, +66)

@@ -0,0 +1,66 @@
services:
  cloudserver:
    image: ${CLOUDSERVER_IMAGE}
    command: sh -c "yarn start > /artifacts/s3.log"
    network_mode: "host"
    volumes:
      - /tmp/ssl:/ssl
      - /tmp/ssl-kmip:/ssl-kmip
      - ${HOME}/.aws/credentials:/root/.aws/credentials
      - /tmp/artifacts/${JOB_NAME}:/artifacts
    environment:
      - CI=true
      - ENABLE_LOCAL_CACHE=true
      - REDIS_HOST=0.0.0.0
      - REDIS_PORT=6379
      - REPORT_TOKEN=report-token-1
      - REMOTE_MANAGEMENT_DISABLE=1
      - HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
      - DATA_HOST=0.0.0.0
      - METADATA_HOST=0.0.0.0
      - S3BACKEND
      - S3DATA
      - MPU_TESTING
      - S3VAULT
      - S3_LOCATION_FILE
      - ENABLE_UTAPI_V2
      - BUCKET_DENY_FILTER
      - S3KMS
      - S3KMIP_PORT
      - S3KMIP_HOSTS
      - S3KMIP_COMPOUND_CREATE
      - S3KMIP_BUCKET_ATTRIBUTE_NAME
      - S3KMIP_PIPELINE_DEPTH
      - S3KMIP_KEY
      - S3KMIP_CERT
      - S3KMIP_CA
    env_file:
      - creds.env
    depends_on:
      - redis
    extra_hosts:
      - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
      - "pykmip.local:127.0.0.1"
  redis:
    image: redis:alpine
    network_mode: "host"
  squid:
    network_mode: "host"
    profiles: ['ci-proxy']
    image: scality/ci-squid
    command: >-
      sh -c 'mkdir -p /ssl &&
      openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
      -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
      -keyout /ssl/myca.pem -out /ssl/myca.pem &&
      cp /ssl/myca.pem /ssl/CA.pem &&
      squid -f /etc/squid/squid.conf -N -z &&
      squid -f /etc/squid/squid.conf -NYCd 1'
    volumes:
      - /tmp/ssl:/ssl
  pykmip:
    network_mode: "host"
    profiles: ['pykmip']
    image: registry.scality.com/cloudserver-dev/pykmip
    volumes:
      - /tmp/artifacts/${JOB_NAME}:/artifacts


@@ -2,9 +2,9 @@
set -x #echo on
set -e #exit at the first error
mkdir -p ~/.aws
mkdir -p $HOME/.aws
cat >>/root/.aws/credentials <<EOF
cat >>$HOME/.aws/credentials <<EOF
[default]
aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY

.github/workflows/tests.yaml (new file, +310)

@@ -0,0 +1,310 @@
---
name: tests
on:
  push:
    branches-ignore:
      - 'development/**'
      - 'q/*/**'
env:
  # Secrets
  azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackend_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
  azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
  azurebackend2_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
  azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
  b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
  b2backend_B2_STORAGE_ACCESS_KEY: >-
    ${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
  GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
  AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
  AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
  AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
  AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
  AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
  AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
  b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
  gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
  gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
  gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
  gcpbackendmismatch_GCP_SERVICE_KEY: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
  gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  # Configs
  ENABLE_LOCAL_CACHE: "true"
  REPORT_TOKEN: "report-token-1"
  REMOTE_MANAGEMENT_DISABLE: "1"
jobs:
  linting-coverage:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - uses: actions/setup-node@v2
        with:
          node-version: '16'
          cache: yarn
      - name: install dependencies
        run: yarn install --frozen-lockfile --network-concurrency 1
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - uses: actions/cache@v2
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip
      - name: Install python deps
        run: pip install flake8
      - name: Lint Javascript
        run: yarn run --silent lint -- --max-warnings 0
      - name: Lint Markdown
        run: yarn run --silent lint_md
      - name: Lint python
        run: flake8 $(git ls-files "*.py")
      - name: Lint Yaml
        run: yamllint -c yamllint.yml $(git ls-files "*.yml")
      - name: Unit Coverage
        run: |
          set -ex
          mkdir -p $CIRCLE_TEST_REPORTS/unit
          yarn test
          yarn run test_legacy_location
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
          CIRCLE_TEST_REPORTS: /tmp
          CIRCLE_ARTIFACTS: /tmp
          CI_REPORTS: /tmp
      - name: Unit Coverage logs
        run: find /tmp/unit -exec cat {} \;
      - name: preparing junit files for upload
        run: |
          mkdir -p artifacts/junit
          find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
        if: always()
      - name: Upload files to artifacts
        uses: scality/action-artifacts@v2
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: artifacts
        if: always()
  build:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1.6.0
      - name: Login to GitHub Registry
        uses: docker/login-action@v1.10.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Login to Registry
        uses: docker/login-action@v1
        with:
          registry: registry.scality.com
          username: ${{ secrets.REGISTRY_LOGIN }}
          password: ${{ secrets.REGISTRY_PASSWORD }}
      - name: Build and push cloudserver image
        uses: docker/build-push-action@v2.7.0
        with:
          push: true
          context: .
          tags: |
            ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
            registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
          cache-from: type=gha,scope=cloudserver
          cache-to: type=gha,mode=max,scope=cloudserver
  multiple-backend:
    runs-on: ubuntu-latest
    needs: build
    env:
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      S3BACKEND: mem
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      S3DATA: multiple
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker-compose up -d
        working-directory: .github/docker
      - name: Run multiple backend test
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  file-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      MPU_TESTING: "yes"
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: |
            2.7
            3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup python2 test environment
        run: |
          sudo apt-get install -y libdigest-hmac-perl
          pip install virtualenv
          virtualenv -p $(which python2) ~/.virtualenv/py2
          source ~/.virtualenv/py2/bin/activate
          pip install 's3cmd==1.6.1'
      - name: Setup CI services
        run: docker-compose up -d
        working-directory: .github/docker
      - name: Run file ft tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          source ~/.virtualenv/py2/bin/activate
          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  utapi-v2-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      ENABLE_UTAPI_V2: t
      S3BACKEND: mem
      BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker-compose up -d
        working-directory: .github/docker
      - name: Run file utapi v2 tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  kmip-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      MPU_TESTING: true
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Copy KMIP certs
        run: cp -r ./certs /tmp/ssl-kmip
        working-directory: .github/pykmip
      - name: Setup CI services
        run: docker-compose --profile pykmip up -d
        working-directory: .github/docker
      - name: Run file KMIP tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          bash wait_for_local_port.bash 5696 40
          yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v3
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()


@@ -174,6 +174,10 @@ const constants = {
'bucket',
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
};
module.exports = constants;


@@ -1,13 +0,0 @@
#!/bin/bash
script_full_path=$(readlink -f "$0")
file_dir=$(dirname "$script_full_path")/..
PACKAGE_VERSION=$(cat $file_dir/package.json \
| grep version \
| head -1 \
| awk -F: '{ print $2 }' \
| sed 's/[",]//g' \
| tr -d '[[:space:]]')
echo $PACKAGE_VERSION


@@ -1,371 +0,0 @@
---
version: 0.2
branches:
feature/*, improvement/*, bugfix/*, w/*, q/*, hotfix/*, dependabot/*:
stage: pre-merge
development/*:
stage: post-merge
models:
- env: &global-env
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackend_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key_2)s
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name_2)s
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint_2)s
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azuretest_AZURE_BLOB_ENDPOINT: "%(secret:azure_storage_endpoint)s"
b2backend_B2_ACCOUNT_ID: "%(secret:b2backend_b2_account_id)s"
b2backend_B2_STORAGE_ACCESS_KEY: >-
%(secret:b2backend_b2_storage_access_key)s
GOOGLE_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
GOOGLE_SERVICE_KEY: "%(secret:gcp_service_key)s"
AWS_S3_BACKEND_ACCESS_KEY: "%(secret:aws_s3_backend_access_key)s"
AWS_S3_BACKEND_SECRET_KEY: "%(secret:aws_s3_backend_secret_key)s"
AWS_S3_BACKEND_ACCESS_KEY_2: "%(secret:aws_s3_backend_access_key_2)s"
AWS_S3_BACKEND_SECRET_KEY_2: "%(secret:aws_s3_backend_secret_key_2)s"
AWS_GCP_BACKEND_ACCESS_KEY: "%(secret:aws_gcp_backend_access_key)s"
AWS_GCP_BACKEND_SECRET_KEY: "%(secret:aws_gcp_backend_secret_key)s"
AWS_GCP_BACKEND_ACCESS_KEY_2: "%(secret:aws_gcp_backend_access_key_2)s"
AWS_GCP_BACKEND_SECRET_KEY_2: "%(secret:aws_gcp_backend_secret_key_2)s"
b2backend_B2_STORAGE_ENDPOINT: "%(secret:b2backend_b2_storage_endpoint)s"
gcpbackend2_GCP_SERVICE_EMAIL: "%(secret:gcp2_service_email)s"
gcpbackend2_GCP_SERVICE_KEY: "%(secret:gcp2_service_key)s"
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackend_GCP_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
gcpbackend_GCP_SERVICE_KEY: "%(secret:gcp_service_key)s"
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
%(secret:gcpbackendmismatch_gcp_service_email)s
gcpbackendmismatch_GCP_SERVICE_KEY: >-
%(secret:gcpbackendmismatch_gcp_service_key)s
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
- env: &multiple-backend-vars
S3BACKEND: "mem"
S3DATA: "multiple"
- env: &file-mem-mpu
S3BACKEND: "file"
S3VAULT: "mem"
MPU_TESTING: "yes"
- Git: &clone
name: Pull repo
repourl: '%(prop:git_reference)s'
shallow: true
retryFetch: true
haltOnFailure: true
- ShellCommand: &credentials
name: Setup Credentials
command: bash eve/workers/build/credentials.bash
haltOnFailure: true
env: *global-env
- ShellCommand: &node_version
name: get node version
command: node -v
- ShellCommand: &yarn-install
name: install modules
command: yarn install --ignore-engines --frozen-lockfile --network-concurrency=1
haltOnFailure: true
- ShellCommand: &check-s3-action-logs
name: Check s3 action logs
command: |
LOGS=`cat /artifacts/s3.log | grep 'No actionLog'`
test `echo -n ${LOGS} | wc -l` -eq 0 || (echo $LOGS && false)
- Upload: &upload-artifacts
source: /artifacts
urls:
- "*"
- ShellCommand: &follow-s3-log
logfiles:
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &setup-junit-upload
name: preparing junit files for upload
command: |
mkdir -p artifacts/junit
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
alwaysRun: true
- Upload: &upload-junits
source: artifacts
urls:
- "*"
alwaysRun: true
- ShellCommand: &setup-github-ssh
name: setup ssh with github
command: |
mkdir -p ~/.ssh
ssh-keyscan -H github.com > ~/.ssh/ssh_known_hosts
stages:
pre-merge:
worker:
type: local
steps:
- TriggerStages:
name: Launch all workers
stage_names:
- linting-coverage
- file-ft-tests
- multiple-backend-test
- kmip-ft-tests
- utapi-v2-tests
waitForFinish: true
haltOnFailure: true
linting-coverage:
worker:
type: docker
path: eve/workers/build
volumes: &default_volumes
- '/home/eve/workspace'
steps:
- Git: *clone
- ShellCommand: *setup-github-ssh
- ShellCommand: *yarn-install
- ShellCommand: *credentials
- ShellCommand:
name: Linting
command: |
set -ex
yarn run --silent lint -- --max-warnings 0
yarn run --silent lint_md
flake8 $(git ls-files "*.py")
yamllint -c yamllint.yml $(git ls-files "*.yml")
- ShellCommand:
name: Unit Coverage
command: |
set -ex
mkdir -p $CIRCLE_TEST_REPORTS/unit
yarn test
yarn run test_versionid_base62
yarn run test_legacy_location
env: &shared-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
CIRCLE_TEST_REPORTS: /tmp
CIRCLE_ARTIFACTS: /tmp
CI_REPORTS: /tmp
- ShellCommand:
name: Unit Coverage logs
command: find /tmp/unit -exec cat {} \;
- ShellCommand: *setup-junit-upload
- Upload: *upload-junits
multiple-backend-test:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2560Mi"
s3Mem: "2560Mi"
env:
<<: *multiple-backend-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *setup-github-ssh
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test"
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- ShellCommand:
command: mvn test
workdir: build/tests/functional/jaws
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand:
command: rspec tests.rb
workdir: build/tests/functional/fog
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand: *check-s3-action-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
file-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2560Mi"
s3Mem: "2Gi"
redis: enabled
env:
<<: *file-mem-mpu
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *setup-github-ssh
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
yarn run ft_test
<<: *follow-s3-log
env:
<<: *file-mem-mpu
<<: *global-env
- ShellCommand: *check-s3-action-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
kmip-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
pykmip: eve/workers/pykmip
vars:
aggressorMem: "2560Mi"
s3Mem: "1664Mi"
redis: enabled
pykmip: enabled
env:
<<: *file-mem-mpu
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip
logfiles:
pykmip:
filename: /artifacts/pykmip.log
follow: true
s3:
filename: /artifacts/s3.log
follow: true
env:
<<: *file-mem-mpu
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
utapi-v2-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2560Mi"
s3Mem: "2Gi"
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
steps:
- Git: *clone
- ShellCommand: *setup-github-ssh
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2"
<<: *follow-s3-log
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
- ShellCommand: *check-s3-action-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
post-merge:
worker:
type: local
steps:
- Git: *clone
- ShellCommand: &docker_login
name: Private Registry Login
command: >
docker login
-u '%(secret:private_registry_username)s'
-p '%(secret:private_registry_password)s'
'%(secret:private_registry_url)s'
- ShellCommand:
name: Dockerhub Login
command: >
docker login
-u '%(secret:dockerhub_ro_user)s'
-p '%(secret:dockerhub_ro_password)s'
- SetProperty: &docker_image_name
name: Set docker image name property
property: docker_image_name
value:
"%(secret:private_registry_url)s/zenko/cloudserver:\
%(prop:commit_short_revision)s"
- ShellCommand:
name: Build docker image
command: >-
docker build
--no-cache
-t %(prop:docker_image_name)s
.
- ShellCommand:
name: Tag images
command: |
docker tag %(prop:docker_image_name)s zenko/cloudserver:$TAG
env:
TAG: "latest-%(prop:product_version)s"
- ShellCommand:
name: Push image
command: |
docker push %(prop:docker_image_name)s
docker push zenko/cloudserver:latest-%(prop:product_version)s


@@ -1,59 +0,0 @@
FROM buildpack-deps:bionic-curl
#
# Install packages needed by the buildchain
#
ENV LANG C.UTF-8
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
&& apt-get update \
&& cat /tmp/*packages.list | xargs apt-get install -y \
&& update-ca-certificates \
&& git clone https://github.com/tj/n.git \
&& make -C ./n \
&& n 16.13.2 \
&& pip install pip==9.0.1 \
&& rm -rf ./n \
&& rm -rf /var/lib/apt/lists/* \
&& rm -f /tmp/packages.list
#
# Add user eve
#
RUN adduser -u 1042 --home /home/eve --disabled-password --gecos "" eve \
&& adduser eve sudo \
&& sed -ri 's/(%sudo.*)ALL$/\1NOPASSWD:ALL/' /etc/sudoers
#
# Install Dependencies
#
# Install RVM and gems
ENV RUBY_VERSION="2.4.1"
RUN gem update --system
COPY ./gems.list /tmp/
RUN cat /tmp/gems.list | xargs gem install
#RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 \
# && curl -sSL https://get.rvm.io | bash -s stable --ruby=$RUBY_VERSION \
# && usermod -a -G rvm eve
#RUN /bin/bash -l -c "\
# source /usr/local/rvm/scripts/rvm \
# && cat /tmp/gems.list | xargs gem install \
# && rm /tmp/gems.list"
# Install Pip packages
COPY ./pip_packages.list /tmp/
RUN cat /tmp/pip_packages.list | xargs pip install \
&& rm -f /tmp/pip_packages.list \
&& mkdir /home/eve/.aws \
&& chown eve /home/eve/.aws
#
# Run buildbot-worker on startup
#
ARG BUILDBOT_VERSION
RUN pip install buildbot-worker==$BUILDBOT_VERSION
CMD ["/bin/bash", "-l", "-c", "buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS && buildbot-worker start --nodaemon"]


@@ -1,14 +0,0 @@
ca-certificates
git
git-lfs
gnupg
libffi-dev
libssl-dev
python-pip
python2.7
python2.7-dev
software-properties-common
sudo
tcl
wget
procps


@@ -1,4 +0,0 @@
fog-aws:1.3.0
json
mime-types:3.1
rspec:3.5


@@ -1,3 +0,0 @@
flake8
s3cmd==1.6.1
yamllint


@@ -1,14 +0,0 @@
build-essential
ca-certificates
curl
default-jdk
gnupg2
libdigest-hmac-perl
lsof
maven
netcat
redis-server
ruby-full
yarn
zlib1g-dev
openssl


@@ -1,196 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
name: "proxy-ci-test-pod"
spec:
restartPolicy: Never
terminationGracePeriodSeconds: 10
hostAliases:
- ip: "127.0.0.1"
hostnames:
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
- "pykmip.local"
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
initContainers:
- name: kmip-certs-installer
image: {{ images.pykmip }}
command: [ 'sh', '-c', 'cp /ssl/* /ssl-kmip/']
volumeMounts:
- name: kmip-certs
readOnly: false
mountPath: /ssl-kmip
{%- endif %}
containers:
- name: aggressor
image: {{ images.aggressor }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: "1"
memory: {{ vars.aggressorMem }}
limits:
cpu: "1"
memory: {{ vars.aggressorMem }}
volumeMounts:
- name: creds
readOnly: false
mountPath: /root/.aws
- name: artifacts
readOnly: true
mountPath: /artifacts
command:
- bash
- -lc
- |
buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS
buildbot-worker start --nodaemon
env:
- name: CI
value: "true"
- name: ENABLE_LOCAL_CACHE
value: "true"
- name: REPORT_TOKEN
value: "report-token-1"
- name: REMOTE_MANAGEMENT_DISABLE
value: "1"
{% for key, value in vars.env.items() %}
- name: {{ key }}
value: "{{ value }}"
{% endfor %}
- name: s3
image: {{ images.s3 }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: "2"
memory: {{ vars.s3Mem }}
limits:
cpu: "2"
memory: {{ vars.s3Mem }}
volumeMounts:
- name: creds
readOnly: false
mountPath: /root/.aws
- name: certs
readOnly: true
mountPath: /tmp
- name: artifacts
readOnly: false
mountPath: /artifacts
- name: kmip-certs
readOnly: false
mountPath: /ssl-kmip
command:
- bash
- -ec
- |
sleep 10 # wait for
/usr/src/app/docker-entrypoint.sh npm start | tee -a /artifacts/s3.log
env:
{% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" -%}
- name: S3_LOCATION_FILE
value: "/usr/src/app/tests/locationConfig/locationConfigTests.json"
{%- endif %}
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
- name: S3KMS
value: kmip
- name: S3KMIP_PORT
value: "5696"
- name: S3KMIP_HOSTS
value: "pykmip.local"
- name: S3KMIP_COMPOUND_CREATE
value: "false"
- name: S3KMIP_BUCKET_ATTRIBUTE_NAME
value: ''
- name: S3KMIP_PIPELINE_DEPTH
value: "8"
- name: S3KMIP_KEY
value: /ssl-kmip/kmip-client-key.pem
- name: S3KMIP_CERT
value: /ssl-kmip/kmip-client-cert.pem
- name: S3KMIP_CA
value: /ssl-kmip/kmip-ca.pem
{%- endif %}
- name: CI
value: "true"
- name: ENABLE_LOCAL_CACHE
value: "true"
- name: REDIS_HOST
value: "localhost"
- name: REDIS_PORT
value: "6379"
- name: REPORT_TOKEN
value: "report-token-1"
- name: REMOTE_MANAGEMENT_DISABLE
value: "1"
- name: HEALTHCHECKS_ALLOWFROM
value: "0.0.0.0/0"
{% for key, value in vars.env.items() %}
- name: {{ key }}
value: "{{ value }}"
{% endfor %}
{% if vars.redis is defined and vars.redis == "enabled" -%}
- name: redis
image: redis:alpine
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 200m
memory: 128Mi
limits:
cpu: 200m
memory: 128Mi
{%- endif %}
{% if vars.env.CI_PROXY is defined and vars.env.CI_PROXY == "true" -%}
- name: squid
image: scality/ci-squid
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 250m
memory: 128Mi
volumeMounts:
- name: certs
readOnly: false
mountPath: /ssl
command:
- sh
- -exc
- |
mkdir -p /ssl
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
-subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
-keyout /ssl/myca.pem -out /ssl/myca.pem
cp /ssl/myca.pem /ssl/CA.pem
squid -f /etc/squid/squid.conf -N -z
squid -f /etc/squid/squid.conf -NYCd 1
{%- endif %}
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
- name: pykmip
image: {{ images.pykmip }}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: artifacts
readOnly: false
mountPath: /artifacts
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 100m
memory: 128Mi
{%- endif %}
volumes:
- name: creds
emptyDir: {}
- name: certs
emptyDir: {}
- name: artifacts
emptyDir: {}
- name: kmip-certs
emptyDir: {}


@@ -1,7 +1,8 @@
const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
const constants = require('../../../../constants');
const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
const { allAuthedUsersId, bucketOwnerActions, logId, publicId,
assumedRoleArnResourceType, backbeatLifecycleSessionName } = constants;
// whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
@@ -364,10 +365,34 @@ function validatePolicyResource(bucketName, policy) {
});
}
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts' &&
resourceType === assumedRoleArnResourceType &&
sessionName === backbeatLifecycleSessionName);
}
module.exports = {
isBucketAuthorized,
isObjAuthorized,
checkBucketAcls,
checkObjectAcls,
validatePolicyResource,
isLifecycleSession,
};


@@ -136,9 +136,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
size,
headers,
isDeleteMarker,
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size),
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size, null, null, authInfo, isDeleteMarker),
log,
};
if (!isDeleteMarker) {
metadataStoreParams.contentType = request.headers['content-type'];
metadataStoreParams.cacheControl = request.headers['cache-control'];


@@ -1,4 +1,5 @@
const s3config = require('../../../Config').config;
const { isLifecycleSession } = require('../authorization/permissionChecks.js');
function _getBackend(objectMD, site) {
const backends = objectMD ? objectMD.replicationInfo.backends : [];
@@ -63,14 +64,22 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
* @param {boolean} objSize - The size, in bytes, of the object being PUT
* @param {string} operationType - The type of operation to replicate
* @param {object} objectMD - The object metadata
* @param {AuthInfo} [authInfo] - authentication info of object owner
* @param {boolean} [isDeleteMarker] - whether creating a delete marker
* @return {undefined}
*/
function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType,
objectMD) {
objectMD, authInfo, isDeleteMarker) {
const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA'];
const config = bucketMD.getReplicationConfiguration();
// If bucket does not have a replication configuration, do not replicate.
if (config) {
// If an object is deleted as part of a lifecycle action,
// the delete marker is not replicated to the destination buckets.
if (isDeleteMarker && authInfo && isLifecycleSession(authInfo.getArn())) {
return undefined;
}
const rule = config.rules.find(rule =>
(objKey.startsWith(rule.prefix) && rule.enabled));
if (rule) {


@@ -8,12 +8,13 @@
*
* @param {array|string|null} prev - list of keys from the object being
* overwritten
* @param {array} curr - list of keys to be used in composing current object
* @param {array|null} curr - list of keys to be used in composing
* current object
* @returns {boolean} true if no key in `curr` is present in `prev`,
* false otherwise
*/
function locationKeysHaveChanged(prev, curr) {
if (!prev || prev.length === 0) {
if (!prev || prev.length === 0 || !curr) {
return true;
}
// backwards compatibility check if object is of model version 2


@@ -1,5 +1,9 @@
const { errors } = require('arsenal');
const { errors, auth, policies } = require('arsenal');
const moment = require('moment');
const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
/**
* Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period
@@ -43,7 +47,7 @@ function validateHeaders(bucket, headers, log) {
!(objectLockMode && objectLockDate)) {
return errors.InvalidArgument.customizeDescription(
'x-amz-object-lock-retain-until-date and ' +
'x-amz-object-lock-mode must both be supplied'
'x-amz-object-lock-mode must both be supplied',
);
}
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
@@ -126,101 +130,190 @@ function setObjectLockInformation(headers, md, defaultRetention) {
}
/**
* isObjectLocked - checks whether object is locked or not
* @param {object} bucket - bucket metadata
* @param {object} objectMD - object metadata
* @param {array} headers - request headers
* @return {boolean} - indicates whether object is locked or not
* Helper class for object lock state checks
*/
function isObjectLocked(bucket, objectMD, headers) {
if (bucket.isObjectLockEnabled()) {
const objectLegalHold = objectMD.legalHold;
if (objectLegalHold) {
class ObjectLockInfo {
/**
*
* @param {object} retentionInfo - The object lock retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | null} retentionInfo.mode - Retention policy mode.
* @param {string} retentionInfo.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} retentionInfo.legalHold - Whether a legal hold is enabled for the object
*/
constructor(retentionInfo) {
this.mode = retentionInfo.mode || null;
this.date = retentionInfo.date || null;
this.legalHold = retentionInfo.legalHold || false;
}
/**
* ObjectLockInfo.isLocked
* @returns {bool} - Whether the retention policy is active and protecting the object
*/
isLocked() {
if (this.legalHold) {
return true;
}
const retentionMode = objectMD.retentionMode;
const retentionDate = objectMD.retentionDate;
if (!retentionMode || !retentionDate) {
if (!this.mode || !this.date) {
return false;
}
if (retentionMode === 'GOVERNANCE' &&
headers['x-amz-bypass-governance-retention']) {
return false;
}
const objectDate = moment(retentionDate);
const now = moment();
// indicates retain until date has expired
if (now.isSameOrAfter(objectDate)) {
return false;
}
return true;
return !this.isExpired();
}
return false;
}
/**
* ObjectLockInfo.isGovernanceMode
* @returns {bool} - true if retention mode is GOVERNANCE
*/
isGovernanceMode() {
return this.mode === 'GOVERNANCE';
}
/**
* ObjectLockInfo.isComplianceMode
* @returns {bool} - True if retention mode is COMPLIANCE
*/
isComplianceMode() {
return this.mode === 'COMPLIANCE';
}
/**
* ObjectLockInfo.isExpired
* @returns {bool} - True if the retention policy has expired
*/
isExpired() {
const now = moment();
return this.date === null || now.isSameOrAfter(this.date);
}
/**
* ObjectLockInfo.isExtended
* @param {string} timestamp - Timestamp in ISO-8601 format
* @returns {bool} - True if the given timestamp is on or after the policy expiration date, or if no expiration date is set
*/
isExtended(timestamp) {
return timestamp !== undefined && (this.date === null || moment(timestamp).isSameOrAfter(this.date));
}
/**
* ObjectLockInfo.canModifyObject
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
*/
canModifyObject(hasGovernanceBypass) {
return !this.isLocked() || (this.isGovernanceMode() && !!hasGovernanceBypass);
}
/**
* ObjectLockInfo.canModifyPolicy
* @param {object} policyChanges - Proposed changes to the retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | undefined} policyChanges.mode - Retention policy mode.
* @param {string} policyChanges.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
* @returns {bool} - True if the changes are allowed to be applied to the retention policy
*/
canModifyPolicy(policyChanges, hasGovernanceBypass) {
// If an object does not have a retention policy or it is expired then all changes are allowed
if (!this.isLocked()) {
return true;
}
// The only allowed change in compliance mode is extending the retention period
if (this.isComplianceMode()) {
if (policyChanges.mode === 'COMPLIANCE' && this.isExtended(policyChanges.date)) {
return true;
}
}
if (this.isGovernanceMode()) {
// Extensions are always allowed in governance mode
if (policyChanges.mode === 'GOVERNANCE' && this.isExtended(policyChanges.date)) {
return true;
}
// All other changes in governance mode require a bypass
if (hasGovernanceBypass) {
return true;
}
}
/* objectLockRequiresBypass will return true if the retention info change
* would require a bypass governance flag to be true.
* In order for this to be true the action must be valid as well, so going from
* COMPLIANCE to GOVERNANCE would return false unless it expired.
*/
function objectLockRequiresBypass(objectMD, retentionInfo) {
const { retentionMode: existingMode, retentionDate: existingDateISO } = objectMD;
if (!existingMode) {
return false;
}
const existingDate = new Date(existingDateISO);
const isExpired = existingDate < Date.now();
const isExtended = new Date(retentionInfo.date) > existingDate;
if (existingMode === 'GOVERNANCE' && !isExpired) {
if (retentionInfo.mode === 'GOVERNANCE' && isExtended) {
return false;
}
return true;
}
// an invalid retention change or unrelated to bypass
return false;
}
function validateObjectLockUpdate(objectMD, retentionInfo, bypassGovernance) {
const { retentionMode: existingMode, retentionDate: existingDateISO } = objectMD;
if (!existingMode) {
return null;
}
/**
*
* @param {object} headers - s3 request headers
* @returns {bool} - True if the header is present and === "true"
*/
function hasGovernanceBypassHeader(headers) {
const bypassHeader = headers['x-amz-bypass-governance-retention'] || '';
return bypassHeader.toLowerCase() === 'true';
}
const existingDate = new Date(existingDateISO);
const isExpired = existingDate < Date.now();
const isExtended = new Date(retentionInfo.date) > existingDate;
if (existingMode === 'GOVERNANCE' && !isExpired && !bypassGovernance) {
if (retentionInfo.mode === 'GOVERNANCE' && isExtended) {
return null;
}
return errors.AccessDenied;
}
/**
* checkUserGovernanceBypass
*
* Checks for the presence of the s3:BypassGovernanceRetention permission for a given user
*
* @param {object} request - Incoming s3 request
* @param {object} authInfo - s3 authentication info
* @param {object} bucketMD - bucket metadata
* @param {string} objectKey - object key
* @param {object} log - Werelogs logger
* @param {function} cb - callback returns errors.AccessDenied if the authorization fails
* @returns {undefined} -
*/
function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) {
log.trace(
'object in GOVERNANCE mode and is user, checking for attached policies',
{ method: 'checkUserPolicyGovernanceBypass' },
);
if (existingMode === 'COMPLIANCE') {
if (retentionInfo.mode === 'GOVERNANCE' && !isExpired) {
return errors.AccessDenied;
}
if (!isExtended) {
return errors.AccessDenied;
}
}
return null;
const authParams = auth.server.extractParams(request, log, 's3', request.query);
const ip = policies.requestUtils.getClientIp(request, config);
const requestContextParams = {
constantParams: {
headers: request.headers,
query: request.query,
generalResource: bucketMD.getName(),
specificResource: { key: objectKey },
requesterIp: ip,
sslEnabled: request.connection.encrypted,
apiMethod: 'bypassGovernanceRetention',
awsService: 's3',
locationConstraint: bucketMD.getLocationConstraint(),
requesterInfo: authInfo,
signatureVersion: authParams.params.data.signatureVersion,
authType: authParams.params.data.authType,
signatureAge: authParams.params.data.signatureAge,
},
};
return vault.checkPolicies(requestContextParams,
authInfo.getArn(), log, (err, authorizationResults) => {
if (err) {
return cb(err);
}
if (authorizationResults[0].isAllowed !== true) {
log.trace('authorization check failed for user',
{
'method': 'checkUserPolicyGovernanceBypass',
's3:BypassGovernanceRetention': false,
});
return cb(errors.AccessDenied);
}
return cb(null);
});
}
module.exports = {
calculateRetainUntilDate,
compareObjectLockInformation,
setObjectLockInformation,
isObjectLocked,
validateHeaders,
validateObjectLockUpdate,
objectLockRequiresBypass,
hasGovernanceBypassHeader,
checkUserGovernanceBypass,
ObjectLockInfo,
};


@@ -316,6 +316,7 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
* options.deleteData - (true/undefined) whether to delete data (if undefined
* means creating a delete marker instead)
* options.versionId - specific versionId to delete
* options.isNull - (true/undefined) whether version to be deleted/marked is null or not
*/
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
reqVersionId, log, callback) {
@@ -367,6 +368,7 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
return callback(errors.NoSuchKey);
}
// not deleting any specific version, making a delete marker instead
options.isNull = true;
return callback(null, options);
}


@@ -157,6 +157,22 @@ function completeMultipartUpload(authInfo, request, log, callback) {
}
return next(errors.MalformedXML, destBucket);
},
function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList,
storedMetadata, location, mpuOverviewKey, next) {
return services.metadataMarkMPObjectForCompletion({
bucketName: mpuBucket.getName(),
objectKey,
uploadId,
splitter,
storedMetadata,
}, log, err => {
if (err) {
return next(err);
}
return next(null, destBucket, objMD, mpuBucket,
jsonList, storedMetadata, location, mpuOverviewKey);
});
},
function retrieveParts(destBucket, objMD, mpuBucket, jsonList,
storedMetadata, location, mpuOverviewKey, next) {
return services.getMPUparts(mpuBucket.getName(), uploadId, log,


@@ -18,7 +18,8 @@ const { preprocessingVersioningDelete }
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { metadataGetObject } = require('../metadata/metadataUtils');
const { config } = require('../Config');
const { isObjectLocked } = require('./apiUtils/object/objectLockHelpers');
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
= require('./apiUtils/object/objectLockHelpers');
const requestUtils = policies.requestUtils;
const versionIdUtils = versioning.VersionID;
@@ -229,10 +230,6 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
successfullyDeleted.push({ entry });
return callback(skipError);
}
if (versionId && isObjectLocked(bucket, objMD, request.headers)) {
log.debug('trying to delete locked object');
return callback(objectLockedError);
}
if (versionId && objMD.location &&
Array.isArray(objMD.location) && objMD.location[0]) {
// we need this information for data deletes to AWS
@@ -241,6 +238,47 @@
}
return callback(null, objMD, versionId);
}),
(objMD, versionId, callback) => {
// AWS only returns an object lock error if a version id
// is specified, else continue to create a delete marker
if (!versionId || !bucket.isObjectLockEnabled()) {
return callback(null, null, objMD, versionId);
}
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
if (hasGovernanceBypass && authInfo.isRequesterAnIAMUser()) {
return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
if (error && error.is.AccessDenied) {
log.debug('user does not have BypassGovernanceRetention and object is locked', { error });
return callback(objectLockedError);
}
if (error) {
return callback(error);
}
return callback(null, hasGovernanceBypass, objMD, versionId);
});
}
return callback(null, hasGovernanceBypass, objMD, versionId);
},
(hasGovernanceBypass, objMD, versionId, callback) => {
// AWS only returns an object lock error if a version id
// is specified, else continue to create a delete marker
if (!versionId || !bucket.isObjectLockEnabled()) {
return callback(null, objMD, versionId);
}
const objLockInfo = new ObjectLockInfo({
mode: objMD.retentionMode,
date: objMD.retentionDate,
legalHold: objMD.legalHold || false,
});
// If the object can not be deleted raise an error
if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
log.debug('trying to delete locked object');
return callback(objectLockedError);
}
return callback(null, objMD, versionId);
},
(objMD, versionId, callback) =>
preprocessingVersioningDelete(bucketName, bucket, objMD,
versionId, log, (err, options) => callback(err, options,


@@ -1,3 +1,4 @@
/* eslint-disable indent */
const async = require('async');
const { errors, versioning } = require('arsenal');
@@ -8,7 +9,8 @@ const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { decodeVersionId, preprocessingVersioningDelete }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { isObjectLocked } = require('./apiUtils/object/objectLockHelpers');
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
= require('./apiUtils/object/objectLockHelpers');
const { config } = require('../Config');
const versionIdUtils = versioning.VersionID;
@@ -78,13 +80,6 @@ function objectDelete(authInfo, request, log, cb) {
// versioning has been configured
return next(null, bucketMD, objMD);
}
// AWS only returns an object lock error if a version id
// is specified, else continue to create a delete marker
if (reqVersionId &&
isObjectLocked(bucketMD, objMD, request.headers)) {
log.debug('trying to delete locked object');
return next(objectLockedError, bucketMD);
}
if (reqVersionId && objMD.location &&
Array.isArray(objMD.location) && objMD.location[0]) {
// we need this information for data deletes to AWS
@@ -99,6 +94,45 @@ function objectDelete(authInfo, request, log, cb) {
return next(null, bucketMD, objMD);
});
},
function checkGovernanceBypassHeader(bucketMD, objectMD, next) {
// AWS only returns an object lock error if a version id
// is specified, else continue to create a delete marker
if (!reqVersionId) {
return next(null, null, bucketMD, objectMD);
}
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
if (hasGovernanceBypass && authInfo.isRequesterAnIAMUser()) {
return checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, err => {
if (err) {
log.debug('user does not have BypassGovernanceRetention and object is locked');
return next(err, bucketMD);
}
return next(null, hasGovernanceBypass, bucketMD, objectMD);
});
}
return next(null, hasGovernanceBypass, bucketMD, objectMD);
},
function evaluateObjectLockPolicy(hasGovernanceBypass, bucketMD, objectMD, next) {
// AWS only returns an object lock error if a version id
// is specified, else continue to create a delete marker
if (!reqVersionId) {
return next(null, bucketMD, objectMD);
}
const objLockInfo = new ObjectLockInfo({
mode: objectMD.retentionMode,
date: objectMD.retentionDate,
legalHold: objectMD.legalHold || false,
});
// If the object can not be deleted raise an error
if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
log.debug('trying to delete locked object');
return next(objectLockedError, bucketMD);
}
return next(null, bucketMD, objectMD);
},
function getVersioningInfo(bucketMD, objectMD, next) {
return preprocessingVersioningDelete(bucketName,
bucketMD, objectMD, reqVersionId, log,
@@ -115,6 +149,7 @@ function objectDelete(authInfo, request, log, cb) {
const deleteInfo = {
removeDeleteMarker: false,
newDeleteMarker: false,
isNull: delOptions.isNull,
};
if (delOptions && delOptions.deleteData) {
if (objectMD.isDeleteMarker) {
@@ -130,8 +165,9 @@
deleteInfo.newDeleteMarker = true;
return createAndStoreObject(bucketName, bucketMD,
objectKey, objectMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) =>
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo));
deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) => {
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
});
},
], (err, bucketMD, objectMD, result, deleteInfo) => {
const resHeaders = collectCorsHeaders(request.headers.origin,
@@ -169,8 +205,28 @@ function objectDelete(authInfo, request, log, cb) {
result.versionId : versionIdUtils.encode(
result.versionId, config.versionIdEncodingType);
}
/* byteLength is passed under the following conditions:
* - bucket versioning is suspended
* - object version id is null
* and one of:
* - the content length of the object exists
* - or -
* - it is a delete marker
* In this case, the master key is deleted and replaced with a delete marker.
* The decrement accounts for the deletion of the master key when utapi reports
* on the number of objects.
*/
const versioningSuspended = bucketMD.getVersioningConfiguration()
&& bucketMD.getVersioningConfiguration().Status === 'Suspended';
const deletedSuspendedMasterVersion = versioningSuspended && !!objectMD && deleteInfo.isNull;
// Default to 0 content-length to cover deleting a DeleteMarker
const objectByteLength = (objectMD && objectMD['content-length']) || 0;
const byteLength = deletedSuspendedMasterVersion ? Number.parseInt(objectByteLength, 10) : null;
pushMetric('putDeleteMarkerObject', log, {
authInfo,
byteLength,
bucket: bucketName,
keys: [objectKey],
versionId: result.versionId,

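Concretely, under the conditions in the comment above (a sketch with illustrative values; `deleteInfo.isNull` is set by preprocessingVersioningDelete, as the versioning-helpers test further down shows):

// Bucket versioning Suspended; the master version is a 1024-byte null
// version; DELETE arrives without a versionId.
//   versioningSuspended  === true
//   deleteInfo.isNull    === true
//   objectByteLength     === 1024
//   byteLength           === 1024  -> utapi decrements the bytes and the
//                                     object count of the replaced master key
// With versioning Enabled instead, deleteInfo.isNull is falsy, so
// byteLength === null and utapi only counts the new delete marker.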
View File

@@ -130,10 +130,12 @@ function objectHead(authInfo, request, log, callback) {
return callback(errors.BadRequest, corsHeaders);
}
const partSize = getPartSize(objMD, partNumber);
if (!partSize) {
const isEmptyObject = objLength === 0;
if (!partSize && !isEmptyObject) {
return callback(errors.InvalidRange, corsHeaders);
}
responseHeaders['content-length'] = partSize;
responseHeaders['content-length'] = isEmptyObject ? 0 : partSize;
const partsCount = getPartCountFromMd5(objMD);
if (partsCount) {
responseHeaders['x-amz-mp-parts-count'] = partsCount;

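The two-line change above makes a part-number HEAD succeed on a zero-byte object instead of failing with 416. A minimal aws-sdk (v2) sketch of the now-passing call, assuming a local cloudserver endpoint and placeholder bucket/key names:

const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000', // placeholder endpoint
    s3ForcePathStyle: true,
});

// Before this fix: 416 InvalidRange. After: 200 with ContentLength 0.
s3.headObject({
    Bucket: 'my-bucket',       // placeholder
    Key: 'empty-mpu-object',   // placeholder
    PartNumber: 1,
}, (err, data) => {
    if (err) {
        throw err;
    }
    console.log(data.ContentLength); // 0
});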
View File

@@ -236,7 +236,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
res.controllingLocationConstraint;
return next(null, dataLocator, destBucketMD,
destObjLocationConstraint, copyObjectSize,
sourceVerId, sourceLocationConstraintName);
sourceVerId, sourceLocationConstraintName, splitter);
});
},
function goGetData(
@@ -246,6 +246,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
copyObjectSize,
sourceVerId,
sourceLocationConstraintName,
splitter,
next,
) {
data.uploadPartCopy(
@@ -268,12 +269,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
return next(null, destBucketMD, locations, eTag,
copyObjectSize, sourceVerId, serverSideEncryption,
lastModified);
lastModified, splitter);
});
},
function getExistingPartInfo(destBucketMD, locations, totalHash,
copyObjectSize, sourceVerId, serverSideEncryption, lastModified,
next) {
splitter, next) {
const partKey =
`${uploadId}${constants.splitter}${paddedPartNumber}`;
metadata.getObjectMD(mpuBucketName, partKey, {}, log,
@@ -298,12 +299,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
return next(null, destBucketMD, locations, totalHash,
prevObjectSize, copyObjectSize, sourceVerId,
serverSideEncryption, lastModified, oldLocations);
serverSideEncryption, lastModified, oldLocations, splitter);
});
},
function storeNewPartMetadata(destBucketMD, locations, totalHash,
prevObjectSize, copyObjectSize, sourceVerId, serverSideEncryption,
lastModified, oldLocations, next) {
lastModified, oldLocations, splitter, next) {
const metaStoreParams = {
partNumber: paddedPartNumber,
contentMD5: totalHash,
@@ -319,20 +320,58 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
{ error: err, method: 'storeNewPartMetadata' });
return next(err);
}
return next(null, oldLocations, destBucketMD, totalHash,
return next(null, locations, oldLocations, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize);
prevObjectSize, copyObjectSize, splitter);
});
},
function cleanupExistingData(oldLocations, destBucketMD, totalHash,
function checkCanDeleteOldLocations(partLocations, oldLocations, destBucketMD,
totalHash, lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize, splitter, next) {
if (!oldLocations) {
return next(null, oldLocations, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize);
}
return services.isCompleteMPUInProgress({
bucketName: destBucketName,
objectKey: destObjectKey,
uploadId,
splitter,
}, log, (err, completeInProgress) => {
if (err) {
return next(err, destBucketMD);
}
let oldLocationsToDelete = oldLocations;
// Prevent deletion of old data if a completeMPU
// is already in progress because then there is no
// guarantee that the old location will not be the
// committed one.
if (completeInProgress) {
log.warn('not deleting old locations because CompleteMPU is in progress', {
method: 'objectPutCopyPart::checkCanDeleteOldLocations',
bucketName: destBucketName,
objectKey: destObjectKey,
uploadId,
partLocations,
oldLocations,
});
oldLocationsToDelete = null;
}
return next(null, oldLocationsToDelete, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize);
});
},
function cleanupExistingData(oldLocationsToDelete, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize, next) {
// Clean up the old data now that new metadata (with new
// data locations) has been stored
if (oldLocations) {
if (oldLocationsToDelete) {
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(oldLocations, request.method, null,
return data.batchDelete(oldLocationsToDelete, request.method, null,
delLog, err => {
if (err) {
// if error, log the error and move on as it is not

View File

@@ -13,6 +13,7 @@ const kms = require('../kms/wrapper');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const logger = require('../utilities/logger');
const services = require('../services');
const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
const writeContinue = require('../utilities/writeContinue');
@@ -243,19 +244,19 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}
return next(null, destinationBucket,
objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations, partInfo);
partKey, prevObjectSize, oldLocations, partInfo, splitter);
});
},
// Store in data backend.
(destinationBucket, objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations, partInfo, next) => {
partKey, prevObjectSize, oldLocations, partInfo, splitter, next) => {
// NOTE: set oldLocations to null so we do not batchDelete for now
if (partInfo && partInfo.dataStoreType === 'azure') {
// skip to storing metadata
return next(null, destinationBucket, partInfo,
partInfo.dataStoreETag,
cipherBundle, partKey, prevObjectSize, null,
objectLocationConstraint);
objectLocationConstraint, splitter);
}
const objectContext = {
bucketName,
@@ -275,12 +276,13 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}
return next(null, destinationBucket, dataGetInfo, hexDigest,
cipherBundle, partKey, prevObjectSize, oldLocations,
objectLocationConstraint);
objectLocationConstraint, splitter);
});
},
// Store data locations in metadata and delete any overwritten data.
// Store data locations in metadata and delete any overwritten
// data if completeMPU hasn't been initiated yet.
(destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
prevObjectSize, oldLocations, objectLocationConstraint, next) => {
prevObjectSize, oldLocations, objectLocationConstraint, splitter, next) => {
// Use an array to be consistent with objectPutCopyPart where there
// could be multiple locations.
const partLocations = [dataGetInfo];
@@ -310,19 +312,54 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
});
return next(err, destinationBucket);
}
return next(null, oldLocations, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize);
return next(null, partLocations, oldLocations, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize, splitter);
});
},
(partLocations, oldLocations, objectLocationConstraint, destinationBucket,
hexDigest, prevObjectSize, splitter, next) => {
if (!oldLocations) {
return next(null, oldLocations, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize);
}
return services.isCompleteMPUInProgress({
bucketName,
objectKey,
uploadId,
splitter,
}, log, (err, completeInProgress) => {
if (err) {
return next(err, destinationBucket);
}
let oldLocationsToDelete = oldLocations;
// Prevent deletion of old data if a completeMPU
// is already in progress because then there is no
// guarantee that the old location will not be the
// committed one.
if (completeInProgress) {
log.warn('not deleting old locations because CompleteMPU is in progress', {
method: 'objectPutPart::metadata.getObjectMD',
bucketName,
objectKey,
uploadId,
partLocations,
oldLocations,
});
oldLocationsToDelete = null;
}
return next(null, oldLocationsToDelete, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize);
});
},
// Clean up any old data now that new metadata (with new
// data locations) has been stored.
(oldLocations, objectLocationConstraint, destinationBucket, hexDigest,
(oldLocationsToDelete, objectLocationConstraint, destinationBucket, hexDigest,
prevObjectSize, next) => {
if (oldLocations) {
if (oldLocationsToDelete) {
log.trace('overwriting mpu part, deleting data');
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(oldLocations, request.method,
return data.batchDelete(oldLocationsToDelete, request.method,
objectLocationConstraint, delLog, err => {
if (err) {
// if error, log the error and move on as it is not

View File

@@ -1,17 +1,15 @@
const async = require('async');
const { errors, s3middleware, auth, policies } = require('arsenal');
const { errors, s3middleware } = require('arsenal');
const vault = require('../auth/vault');
const { decodeVersionId, getVersionIdResHeader } =
require('./apiUtils/object/versioning');
const { validateObjectLockUpdate, objectLockRequiresBypass } =
const { ObjectLockInfo, checkUserGovernanceBypass, hasGovernanceBypassHeader } =
require('./apiUtils/object/objectLockHelpers');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const { config } = require('../Config');
const { parseRetentionXml } = s3middleware.retention;
const REPLICATION_ACTION = 'PUT_RETENTION';
@@ -83,54 +81,31 @@ function objectPutRetention(authInfo, request, log, callback) {
(err, retentionInfo) => next(err, bucket, retentionInfo, objectMD));
},
(bucket, retentionInfo, objectMD, next) => {
if (objectLockRequiresBypass(objectMD, retentionInfo) && authInfo.isRequesterAnIAMUser()) {
log.trace('object in GOVERNANCE mode and is user, checking for attached policies',
{ method: 'objectPutRetention' });
const authParams = auth.server.extractParams(request, log, 's3',
request.query);
const ip = policies.requestUtils.getClientIp(request, config);
const requestContextParams = {
constantParams: {
headers: request.headers,
query: request.query,
generalResource: bucketName,
specificResource: { key: objectKey },
requesterIp: ip,
sslEnabled: request.connection.encrypted,
apiMethod: 'bypassGovernanceRetention',
awsService: 's3',
locationConstraint: bucket.getLocationConstraint(),
requesterInfo: authInfo,
signatureVersion: authParams.params.data.signatureVersion,
authType: authParams.params.data.authType,
signatureAge: authParams.params.data.signatureAge,
},
};
return vault.checkPolicies(requestContextParams,
authInfo.getArn(), log, (err, authorizationResults) => {
if (err) {
return next(err);
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
if (hasGovernanceBypass && authInfo.isRequesterAnIAMUser()) {
return checkUserGovernanceBypass(request, authInfo, bucket, objectKey, log, err => {
if (err) {
if (err.is.AccessDenied) {
log.debug('user does not have BypassGovernanceRetention and object is locked');
}
if (authorizationResults[0].isAllowed !== true) {
log.trace('authorization check failed for user',
{
'method': 'objectPutRetention',
's3:BypassGovernanceRetention': false,
});
return next(errors.AccessDenied);
}
return next(null, bucket, retentionInfo, objectMD);
});
return next(err, bucket);
}
return next(null, bucket, retentionInfo, hasGovernanceBypass, objectMD);
});
}
return next(null, bucket, retentionInfo, objectMD);
return next(null, bucket, retentionInfo, hasGovernanceBypass, objectMD);
},
(bucket, retentionInfo, objectMD, next) => {
const bypassHeader = request.headers['x-amz-bypass-governance-retention'] || '';
const bypassGovernance = bypassHeader.toLowerCase() === 'true';
const validationError = validateObjectLockUpdate(objectMD, retentionInfo, bypassGovernance);
if (validationError) {
return next(validationError, bucket, objectMD);
(bucket, retentionInfo, hasGovernanceBypass, objectMD, next) => {
const objLockInfo = new ObjectLockInfo({
mode: objectMD.retentionMode,
date: objectMD.retentionDate,
legalHold: objectMD.legalHold,
});
if (!objLockInfo.canModifyPolicy(retentionInfo, hasGovernanceBypass)) {
return next(errors.AccessDenied, bucket);
}
return next(null, bucket, retentionInfo, objectMD);
},
(bucket, retentionInfo, objectMD, next) => {

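For context, this is the path a client exercises when weakening a GOVERNANCE retention; a hedged aws-sdk (v2) sketch, reusing an `s3` client configured as in the earlier HEAD sketch (bucket and key are placeholders):

// Without BypassGovernanceRetention, shortening the date is denied by
// ObjectLockInfo.canModifyPolicy; with it, the requester must also pass
// the s3:BypassGovernanceRetention check done by checkUserGovernanceBypass.
s3.putObjectRetention({
    Bucket: 'locked-bucket',          // placeholder
    Key: 'locked-key',                // placeholder
    BypassGovernanceRetention: true,  // sends x-amz-bypass-governance-retention
    Retention: {
        Mode: 'GOVERNANCE',
        RetainUntilDate: new Date(Date.now() + 24 * 60 * 60 * 1000),
    },
}, err => {
    if (err) {
        console.log(err.code); // 'AccessDenied' without the bypass right
    }
});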
View File

@@ -451,6 +451,80 @@ const services = {
});
},
/**
* Mark the MPU overview key with a flag when starting the
* CompleteMPU operation, to be checked by "put part" operations
*
* @param {object} params - params object
* @param {string} params.bucketName - name of MPU bucket
* @param {string} params.objectKey - object key
* @param {string} params.uploadId - upload ID
* @param {string} params.splitter - splitter for this overview key
* @param {object} params.storedMetadata - original metadata of the overview key
* @param {Logger} log - Logger object
* @param {function} cb - callback(err)
* @return {undefined}
*/
metadataMarkMPObjectForCompletion(params, log, cb) {
assert.strictEqual(typeof params, 'object');
assert.strictEqual(typeof params.bucketName, 'string');
assert.strictEqual(typeof params.objectKey, 'string');
assert.strictEqual(typeof params.uploadId, 'string');
assert.strictEqual(typeof params.splitter, 'string');
assert.strictEqual(typeof params.storedMetadata, 'object');
const splitter = params.splitter;
const longMPUIdentifier =
`overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
const multipartObjectMD = Object.assign({}, params.storedMetadata);
multipartObjectMD.completeInProgress = true;
metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD,
{}, log, err => {
if (err) {
log.error('error from metadata', { error: err });
return cb(err);
}
return cb();
});
},
/**
* Returns whether a CompleteMPU operation is in progress for this
* object, by checking the `completeInProgress` flag stored in
* the overview key
*
* @param {object} params - params object
* @param {string} params.bucketName - bucket name where object should be stored
* @param {string} params.objectKey - object key
* @param {string} params.uploadId - upload ID
* @param {string} params.splitter - splitter for this overview key
* @param {object} log - request logger instance
* @param {function} cb - callback(err, {bool} completeInProgress)
* @return {undefined}
*/
isCompleteMPUInProgress(params, log, cb) {
assert.strictEqual(typeof params, 'object');
assert.strictEqual(typeof params.bucketName, 'string');
assert.strictEqual(typeof params.objectKey, 'string');
assert.strictEqual(typeof params.uploadId, 'string');
assert.strictEqual(typeof params.splitter, 'string');
const mpuBucketName = `${constants.mpuBucketPrefix}${params.bucketName}`;
const splitter = params.splitter;
const mpuOverviewKey =
`overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log,
(err, res) => {
if (err) {
log.error('error getting the overview object from mpu bucket', {
error: err,
method: 'services.isCompleteMPUInProgress',
params,
});
return cb(err);
}
return cb(null, Boolean(res.completeInProgress));
});
},
/**
* Checks whether bucket exists, multipart upload

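Together, these two helpers implement the flag handshake described earlier in this changeset; a condensed sketch of the intended call order (names are taken from the hunks above, the wiring is illustrative):

// 1. CompleteMPU marks the overview key before listing parts:
services.metadataMarkMPObjectForCompletion({
    bucketName, objectKey, uploadId, splitter,
    storedMetadata, // overview-key metadata fetched earlier in the flow
}, log, err => {
    // ...then list the parts and build the completion manifest
});

// 2. UploadPart/UploadPartCopy, after overwriting a part's metadata,
// re-checks the flag and skips orphan cleanup when it is set:
services.isCompleteMPUInProgress({ bucketName, objectKey, uploadId, splitter },
    log, (err, completeInProgress) => {
        const oldLocationsToDelete = completeInProgress ? null : oldLocations;
        // data.batchDelete(oldLocationsToDelete, ...) only when non-null
    });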
View File

@@ -280,11 +280,13 @@ function pushMetric(action, log, metricObj) {
sizeDelta = -oldByteLength;
} else if (action === 'abortMultipartUpload' && byteLength) {
sizeDelta = -byteLength;
} else if (action === 'putDeleteMarkerObject' && byteLength) {
sizeDelta = -byteLength;
}
let objectDelta = isDelete ? -numberOfObjects : numberOfObjects;
// putDeleteMarkerObject does not pass numberOfObjects
if (action === 'putDeleteMarkerObject'
if ((action === 'putDeleteMarkerObject' && byteLength === null)
|| action === 'replicateDelete'
|| action === 'replicateObject') {
objectDelta = 1;

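In numbers, assuming the branches above:

// putDeleteMarkerObject replacing a suspended-versioning 1024-byte master:
//   byteLength === 1024 -> sizeDelta = -1024, objectDelta is not forced to 1
//   (the created delete marker offsets the deleted master key)
// ordinary delete-marker creation (versioning enabled):
//   byteLength === null -> no size change, objectDelta = 1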
View File

@@ -1,6 +1,6 @@
{
"name": "s3",
"version": "7.10.15",
"version": "7.10.15-4",
"description": "S3 connector",
"main": "index.js",
"engines": {
@@ -20,7 +20,7 @@
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"@hapi/joi": "^17.1.0",
"arsenal": "git+https://github.com/scality/Arsenal#7.10.37",
"arsenal": "git+https://github.com/scality/Arsenal#7.10.38",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"azure-storage": "^2.1.0",
@@ -34,7 +34,7 @@
"level-mem": "^5.0.1",
"moment": "^2.26.0",
"npm-run-all": "~4.1.5",
"utapi": "git+https://github.com/scality/utapi#7.10.7",
"utapi": "git+https://github.com/scality/utapi#7.10.7-1",
"utf8": "~2.1.1",
"uuid": "^3.0.1",
"vaultclient": "scality/vaultclient#7.10.10",

View File

@@ -210,5 +210,39 @@ describe('Complete MPU', () => {
});
});
});
describe('with re-upload of part during CompleteMPU execution', () => {
let uploadId;
let eTag;
beforeEach(() => _initiateMpuAndPutOnePart()
.then(result => {
uploadId = result.uploadId;
eTag = result.eTag;
})
);
it('should complete the MPU successfully and leave a readable object', done => {
async.parallel([
doneReUpload => s3.uploadPart({
Bucket: bucket,
Key: key,
PartNumber: 1,
UploadId: uploadId,
Body: 'foo',
}, err => {
// in case the CompleteMPU finished earlier,
// we may get a NoSuchKey error, so just
// ignore it
if (err && err.code === 'NoSuchKey') {
return doneReUpload();
}
return doneReUpload(err);
}),
doneComplete => _completeMpuAndCheckVid(
uploadId, eTag, undefined, doneComplete),
], done);
});
});
});
});

View File

@@ -577,6 +577,72 @@ describe('Object Part Copy', () => {
checkNoError(err);
});
});
it('should not corrupt object if overwriting an existing part by copying a part ' +
'while the MPU is being completed', () => {
// AWS response etag for this completed MPU
const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"';
process.stdout.write('Putting first part in MPU test');
return s3.uploadPartCopy({ Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}).promise().then(res => {
assert.strictEqual(res.ETag, etag);
assert(res.LastModified);
}).then(() => {
process.stdout.write('Overwriting first part in MPU test and completing MPU ' +
'at the same time');
return Promise.all([
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}).promise().catch(err => {
// in case the CompleteMPU finished
// earlier, we may get a NoSuchKey error,
// so just ignore it and resolve with a
// special value, otherwise re-throw the
// error
if (err && err.code === 'NoSuchKey') {
return Promise.resolve(null);
}
throw err;
}),
s3.completeMultipartUpload({
Bucket: destBucketName,
Key: destObjName,
UploadId: uploadId,
MultipartUpload: {
Parts: [
{ ETag: etag, PartNumber: 1 },
],
},
}).promise(),
]);
}).then(([uploadRes, completeRes]) => {
// if upload succeeded before CompleteMPU finished
if (uploadRes !== null) {
assert.strictEqual(uploadRes.ETag, etag);
assert(uploadRes.LastModified);
}
assert.strictEqual(completeRes.Bucket, destBucketName);
assert.strictEqual(completeRes.Key, destObjName);
assert.strictEqual(completeRes.ETag, finalObjETag);
}).then(() => {
process.stdout.write('Getting object put by MPU with ' +
'overwrite part');
return s3.getObject({
Bucket: destBucketName,
Key: destObjName,
}).promise();
}).then(res => {
assert.strictEqual(res.ETag, finalObjETag);
});
});
});
it('should return an error if no such upload initiated',

View File

@@ -3,18 +3,7 @@ const async = require('async');
const withV4 = require('../support/withV4');
const BucketUtility = require('../../lib/utility/bucket-util');
const { maximumAllowedPartCount } = require('../../../../../constants');
const bucket = 'mpu-test-bucket';
const object = 'mpu-test-object';
const bodySize = 1024 * 1024 * 5;
const bodyContent = 'a';
const howManyParts = 3;
const partNumbers = Array.from(Array(howManyParts).keys());
const invalidPartNumbers = [-1, 0, maximumAllowedPartCount + 1];
let ETags = [];
const objectConfigs = require('../support/objectConfigs');
function checkError(err, statusCode, code) {
assert.strictEqual(err.statusCode, statusCode);
@@ -26,128 +15,154 @@ function checkNoError(err) {
`Expected success, got error ${JSON.stringify(err)}`);
}
function generateContent(partNumber) {
return Buffer.alloc(bodySize + partNumber, bodyContent);
function generateContent(size, bodyContent) {
return Buffer.alloc(size, bodyContent);
}
describe('Part size tests with object head', () => {
withV4(sigCfg => {
let bucketUtil;
let s3;
objectConfigs.forEach(config => {
describe(config.signature, () => {
let ETags = [];
function headObject(fields, cb) {
s3.headObject(Object.assign({
Bucket: bucket,
Key: object,
}, fields), cb);
}
const {
bucket,
object,
bodySize,
bodyContent,
partNumbers,
invalidPartNumbers,
} = config;
beforeEach(function beforeF(done) {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
withV4(sigCfg => { //eslint-disable-line
let bucketUtil;
let s3;
async.waterfall([
next => s3.createBucket({ Bucket: bucket }, err => next(err)),
next => s3.createMultipartUpload({ Bucket: bucket,
Key: object }, (err, data) => {
checkNoError(err);
this.currentTest.UploadId = data.UploadId;
return next();
}),
next => async.mapSeries(partNumbers, (partNumber, callback) => {
const uploadPartParams = {
Bucket: bucket,
Key: object,
PartNumber: partNumber + 1,
UploadId: this.currentTest.UploadId,
Body: generateContent(partNumber + 1),
};
beforeEach(function beforeF(done) {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
return s3.uploadPart(uploadPartParams,
(err, data) => {
if (err) {
return callback(err);
async.waterfall([
next => s3.createBucket({ Bucket: bucket }, err => next(err)),
next => s3.createMultipartUpload({ Bucket: bucket,
Key: object }, (err, data) => {
checkNoError(err);
this.currentTest.UploadId = data.UploadId;
return next();
}),
next => async.mapSeries(partNumbers, (partNumber, callback) => {
let allocAmount = bodySize + partNumber + 1;
if (config.signature === 'for empty object') {
allocAmount = 0;
}
return callback(null, data.ETag);
});
}, (err, results) => {
checkNoError(err);
ETags = results;
return next();
}),
next => {
const params = {
Bucket: bucket,
Key: object,
MultipartUpload: {
Parts: partNumbers.map(partNumber => ({
ETag: ETags[partNumber],
const uploadPartParams = {
Bucket: bucket,
Key: object,
PartNumber: partNumber + 1,
})),
UploadId: this.currentTest.UploadId,
Body: generateContent(allocAmount, bodyContent),
};
return s3.uploadPart(uploadPartParams,
(err, data) => {
if (err) {
return callback(err);
}
return callback(null, data.ETag);
});
}, (err, results) => {
checkNoError(err);
ETags = results;
return next();
}),
next => {
const params = {
Bucket: bucket,
Key: object,
MultipartUpload: {
Parts: partNumbers.map(partNumber => ({
ETag: ETags[partNumber],
PartNumber: partNumber + 1,
})),
},
UploadId: this.currentTest.UploadId,
};
return s3.completeMultipartUpload(params, next);
},
UploadId: this.currentTest.UploadId,
};
return s3.completeMultipartUpload(params, next);
},
], err => {
checkNoError(err);
done();
});
});
], err => {
checkNoError(err);
done();
});
});
afterEach(done => {
async.waterfall([
next => s3.deleteObject({ Bucket: bucket, Key: object },
err => next(err)),
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
], done);
});
afterEach(done => {
async.waterfall([
next => s3.deleteObject({ Bucket: bucket, Key: object },
err => next(err)),
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
], done);
});
it('should return the total size of the object ' +
'when --part-number is not used', done => {
const totalSize = partNumbers.reduce((total, current) =>
total + (bodySize + current + 1), 0);
headObject({}, (err, data) => {
checkNoError(err);
assert.equal(totalSize, data.ContentLength);
done();
});
});
it('should return the total size of the object ' +
'when --part-number is not used', done => {
const totalSize = config.meta.computeTotalSize(partNumbers, bodySize);
partNumbers.forEach(part => {
it(`should return the size of part ${part + 1} ` +
`when --part-number is set to ${part + 1}`, done => {
const partNumber = Number.parseInt(part, 0) + 1;
const partSize = bodySize + partNumber;
headObject({ PartNumber: partNumber }, (err, data) => {
checkNoError(err);
assert.equal(partSize, data.ContentLength);
done();
s3.headObject({ Bucket: bucket, Key: object }, (err, data) => {
checkNoError(err);
assert.equal(totalSize, data.ContentLength);
done();
});
});
partNumbers.forEach(part => {
it(`should return the size of part ${part + 1} ` +
`when --part-number is set to ${part + 1}`, done => {
const partNumber = Number.parseInt(part, 0) + 1;
const partSize = bodySize + partNumber;
s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumber }, (err, data) => {
checkNoError(err);
if (data.ContentLength === 0) {
// empty-object config: the only part is zero-length
return done();
}
assert.equal(partSize, data.ContentLength);
done();
});
});
});
invalidPartNumbers.forEach(part => {
it(`should return an error when --part-number is set to ${part}`,
done => {
s3.headObject({ Bucket: bucket, Key: object, PartNumber: part }, (err, data) => {
checkError(err, 400, 'BadRequest');
assert.strictEqual(data, null);
done();
});
});
});
it('when incorrect --part-number is used', done => {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumbers.length + 1 },
(err, data) => {
if (config.meta.objectIsEmpty) {
// returns metadata for the only empty part
checkNoError(err);
assert.strictEqual(data.ContentLength, 0);
done();
} else {
// returns a 416 error
// the error response does not contain the actual
// statusCode; instead it has '416'
checkError(err, 416, 416);
assert.strictEqual(data, null);
done();
}
});
});
});
});
invalidPartNumbers.forEach(part => {
it(`should return an error when --part-number is set to ${part}`,
done => {
headObject({ PartNumber: part }, (err, data) => {
checkError(err, 400, 'BadRequest');
assert.strictEqual(data, null);
done();
});
});
});
it('should return an error when incorrect --part-number is used',
done => {
headObject({ PartNumber: partNumbers.length + 1 },
(err, data) => {
// the error response does not contain the actual
// statusCode; instead it has '416'
checkError(err, 416, 416);
assert.strictEqual(data, null);
done();
});
});
});
});

View File

@@ -0,0 +1,40 @@
const { maximumAllowedPartCount } = require('../../../../../constants');
const canonicalObjectConfig = {
bucket: 'mpu-test-bucket-canonical-object',
object: 'mpu-test-object-canonical',
bodySize: 1024 * 1024 * 5,
bodyContent: 'a',
howManyParts: 3,
partNumbers: Array.from(Array(3).keys()), // 3 corresponds to howManyParts
invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
signature: 'for canonical object',
meta: {
computeTotalSize: (partNumbers, bodySize) => partNumbers.reduce((total, current) =>
total + bodySize + current + 1
, 0),
objectIsEmpty: false,
},
};
const emptyObjectConfig = {
bucket: 'mpu-test-bucket-empty-object',
object: 'mpu-test-object-empty',
bodySize: 0,
bodyContent: null,
howManyParts: 1,
partNumbers: Array.from(Array(1).keys()), // 1 corresponds to howManyParts
invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
signature: 'for empty object',
meta: {
computeTotalSize: () => 0,
objectIsEmpty: true,
},
};
const objectConfigs = [
canonicalObjectConfig,
emptyObjectConfig,
];
module.exports = objectConfigs;

View File

@@ -27,6 +27,7 @@ const testData = 'testkey data';
const testDataMd5 = crypto.createHash('md5')
.update(testData, 'utf-8')
.digest('hex');
const emptyContentsMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
const testMd = {
'md-model-version': 2,
'owner-display-name': 'Bart',
@@ -60,6 +61,17 @@ const testMd = {
},
};
function checkObjectData(s3, objectKey, dataValue, done) {
s3.getObject({
Bucket: TEST_BUCKET,
Key: objectKey,
}, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Body.toString(), dataValue);
done();
});
}
/** makeBackbeatRequest - utility function to generate a request going
* through backbeat route
* @param {object} params - params for making request
@@ -416,8 +428,8 @@ describeSkipIfAWS('backbeat routes', () => {
});
});
it('should remove old object data locations if version is overwritten',
done => {
it('should remove old object data locations if version is overwritten ' +
'with same contents', done => {
let oldLocation;
const testKeyOldData = `${testKey}-old-data`;
async.waterfall([next => {
@@ -491,14 +503,8 @@ describeSkipIfAWS('backbeat routes', () => {
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// give some time for the async deletes to complete
setTimeout(() => s3.getObject({
Bucket: TEST_BUCKET,
Key: testKey,
}, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Body.toString(), testData);
next();
}), 1000);
setTimeout(() => checkObjectData(s3, testKey, testData, next),
1000);
}, next => {
// check that the object copy referencing the old data
// locations is unreadable, confirming that the old
@@ -516,6 +522,89 @@ describeSkipIfAWS('backbeat routes', () => {
done();
});
});
it('should remove old object data locations if version is overwritten ' +
'with empty contents', done => {
let oldLocation;
const testKeyOldData = `${testKey}-old-data`;
async.waterfall([next => {
// put object's data locations
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKey,
resourceType: 'data',
headers: {
'content-length': testData.length,
'content-md5': testDataMd5,
'x-scal-canonical-id': testArn,
},
authCredentials: backbeatAuthCredentials,
requestBody: testData }, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// put object metadata
const newMd = Object.assign({}, testMd);
newMd.location = JSON.parse(response.body);
oldLocation = newMd.location;
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKey,
resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(newMd),
}, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// put another object whose metadata references the
// same data locations; we will attempt to retrieve
// this object at the end of the test to confirm that
// its locations have been deleted
const oldDataMd = Object.assign({}, testMd);
oldDataMd.location = oldLocation;
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKeyOldData,
resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(oldDataMd),
}, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// overwrite the original object version with an empty location
const newMd = Object.assign({}, testMd);
newMd['content-length'] = 0;
newMd['content-md5'] = emptyContentsMd5;
newMd.location = null;
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKey,
resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(newMd),
}, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// give some time for the async deletes to complete
setTimeout(() => checkObjectData(s3, testKey, '', next),
1000);
}, next => {
// check that the object copy referencing the old data
// locations is unreadable, confirming that the old
// data locations have been deleted
s3.getObject({
Bucket: TEST_BUCKET,
Key: testKeyOldData,
}, err => {
assert(err, 'expected error to get object with old data ' +
'locations, got success');
next();
});
}], err => {
assert.ifError(err);
done();
});
});
it('should not remove data locations on replayed metadata PUT',
done => {
let serializedNewMd;

View File

@@ -3,14 +3,15 @@ const assert = require('assert');
const BucketInfo = require('arsenal').models.BucketInfo;
const getReplicationInfo =
require('../../../../lib/api/apiUtils/object/getReplicationInfo');
const { makeAuthInfo } = require('../../helpers');
function _getObjectReplicationInfo(replicationConfig) {
function _getObjectReplicationInfo(replicationConfig, authInfo, isDeleteMarker) {
const bucketInfo = new BucketInfo(
'testbucket', 'someCanonicalId', 'accountDisplayName',
new Date().toJSON(),
null, null, null, null, null, null, null, null, null,
replicationConfig);
return getReplicationInfo('fookey', bucketInfo, true, 123, null, null);
return getReplicationInfo('fookey', bucketInfo, true, 123, null, null, authInfo, isDeleteMarker);
}
describe('getReplicationInfo helper', () => {
@@ -40,6 +41,65 @@ describe('getReplicationInfo helper', () => {
});
});
it('should get replication info when the action comes from a non-lifecycle session', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
rules: [{
prefix: '',
enabled: true,
storageClass: 'awsbackend',
}],
destination: 'tosomewhere',
};
const authInfo = makeAuthInfo('accessKey1', null, 'another-session');
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
assert.deepStrictEqual(replicationInfo, {
status: 'PENDING',
backends: [{
site: 'awsbackend',
status: 'PENDING',
dataStoreVersionId: '',
}],
content: ['METADATA'],
destination: 'tosomewhere',
storageClass: 'awsbackend',
role: 'arn:aws:iam::root:role/s3-replication-role',
storageType: 'aws_s3',
});
});
it('should get replication info when the action comes from a lifecycle session ' +
'but the action is not a delete marker', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
rules: [{
prefix: '',
enabled: true,
storageClass: 'awsbackend',
}],
destination: 'tosomewhere',
};
const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, false);
assert.deepStrictEqual(replicationInfo, {
status: 'PENDING',
backends: [{
site: 'awsbackend',
status: 'PENDING',
dataStoreVersionId: '',
}],
content: ['METADATA'],
destination: 'tosomewhere',
storageClass: 'awsbackend',
role: 'arn:aws:iam::root:role/s3-replication-role',
storageType: 'aws_s3',
});
});
it('should not get replication info when rules are disabled', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
@@ -53,4 +113,21 @@
const replicationInfo = _getObjectReplicationInfo(replicationConfig);
assert.deepStrictEqual(replicationInfo, undefined);
});
it('should not get replication info when the action comes from a lifecycle session', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
rules: [{
prefix: '',
enabled: true,
storageClass: 'awsbackend',
}],
destination: 'tosomewhere',
};
const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
assert.deepStrictEqual(replicationInfo, undefined);
});
});
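These cases pin the behavior down: replication info is produced unless a delete marker is being created by a lifecycle session. A hedged reconstruction of the gate they exercise (parameter order as in _getObjectReplicationInfo above; the body is an assumption, not the verbatim source):

// inside getReplicationInfo(objKey, bucketMD, isMD, objSize,
//         operationType, objectMD, authInfo, isDeleteMarker):
if (isDeleteMarker && authInfo && isLifecycleSession(authInfo.getArn())) {
    // lifecycle-created delete markers are not replicated
    return undefined;
}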

View File

@@ -38,4 +38,16 @@ describe('Check if location keys have changed between object locations', () => {
const curr = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
});
it('should return true if curr location is null', () => {
const prev = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
const curr = null;
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
});
it('should return true if both prev and curr locations are null', () => {
const prev = null;
const curr = null;
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
});
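A plausible reconstruction of the helper under test, consistent with the null-handling cases added here (the shipped implementation may compare key lists differently):

function locationKeysHaveChanged(prev, curr) {
    // a missing location list on either side counts as a change, so the
    // caller still cleans up when an object is overwritten with empty contents
    if (!prev || !curr) {
        return true;
    }
    const prevKeys = new Set(prev.map(loc => loc.key));
    // changed when the new version references a key the old one did not
    return curr.some(loc => !prevKeys.has(loc.key));
}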
});

View File

@@ -6,9 +6,8 @@ const { DummyRequestLogger } = require('../../helpers');
const {
calculateRetainUntilDate,
validateHeaders,
validateObjectLockUpdate,
compareObjectLockInformation,
objectLockRequiresBypass,
ObjectLockInfo,
} = require('../../../../lib/api/apiUtils/object/objectLockHelpers');
const mockName = 'testbucket';
@@ -180,199 +179,6 @@ describe('objectLockHelpers: calculateRetainUntilDate', () => {
});
});
describe('objectLockHelpers: objectLockRequiresBypass', () => {
it('should not require bypass if extending non-expired GOVERNANCE', () => {
const objMD = {
retentionMode: 'GOVERNANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().add(2, 'days').toISOString(),
};
assert.strictEqual(objectLockRequiresBypass(objMD, retentionInfo), false);
});
it('should not require bypass if previous mode is undefined', () => {
const objMD = {
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().add(2, 'days').toISOString(),
};
assert.strictEqual(objectLockRequiresBypass(objMD, retentionInfo), false);
});
it('should not require bypass if previous mode was COMPLIANCE', () => {
const objMD = {
retentionMode: 'COMPLIANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().add(2, 'days').toISOString(),
};
assert.strictEqual(objectLockRequiresBypass(objMD, retentionInfo), false);
});
it('should require bypass if new mode is COMPLIANCE', () => {
const objMD = {
retentionMode: 'GOVERNANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'COMPLIANCE',
date: moment().add(2, 'days').toISOString(),
};
assert.strictEqual(objectLockRequiresBypass(objMD, retentionInfo), true);
});
it('should require bypass if we are shortening retention', () => {
const objMD = {
retentionMode: 'GOVERNANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().toISOString(),
};
assert.strictEqual(objectLockRequiresBypass(objMD, retentionInfo), true);
});
});
describe('objectLockHelpers: validateObjectLockUpdate', () => {
it('should allow GOVERNANCE => COMPLIANCE if bypassGovernanceRetention is true', () => {
const objMD = {
retentionMode: 'GOVERNANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'COMPLIANCE',
date: moment().add(1, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo, true);
assert.strictEqual(error, null);
});
it('should disallow GOVERNANCE => COMPLIANCE if bypassGovernanceRetention is false', () => {
const objMD = {
retentionMode: 'GOVERNANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'COMPLIANCE',
date: moment().add(1, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo, false);
assert.deepStrictEqual(error, errors.AccessDenied);
});
it('should disallow COMPLIANCE => GOVERNANCE if retention is not expired', () => {
const objMD = {
retentionMode: 'COMPLIANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().add(1, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo);
assert.deepStrictEqual(error, errors.AccessDenied);
});
it('should allow COMPLIANCE => GOVERNANCE if retention is expired', () => {
const objMD = {
retentionMode: 'COMPLIANCE',
retentionDate: moment().subtract(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().add(1, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo);
assert.strictEqual(error, null);
});
it('should allow extending retention period if in COMPLIANCE', () => {
const objMD = {
retentionMode: 'COMPLIANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'COMPLIANCE',
date: moment().add(2, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo);
assert.strictEqual(error, null);
});
it('should allow extending retention period if in GOVERNANCE if bypassGovernanceRetention is false', () => {
const objMD = {
retentionMode: 'GOVERNANCE',
retentionDate: moment().add(1, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().add(2, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo, false);
assert.strictEqual(error, null);
});
it('should disallow shortening retention period if in COMPLIANCE', () => {
const objMD = {
retentionMode: 'COMPLIANCE',
retentionDate: moment().add(2, 'days').toISOString(),
};
const retentionInfo = {
mode: 'COMPLIANCE',
date: moment().add(1, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo);
assert.deepStrictEqual(error, errors.AccessDenied);
});
it('should allow shortening retention period if in GOVERNANCE', () => {
const objMD = {
retentionMode: 'GOVERNANCE',
retentionDate: moment().add(2, 'days').toISOString(),
};
const retentionInfo = {
mode: 'GOVERNANCE',
date: moment().add(1, 'days').toISOString(),
};
const error = validateObjectLockUpdate(objMD, retentionInfo, true);
assert.strictEqual(error, null);
});
});
describe('objectLockHelpers: compareObjectLockInformation', () => {
const mockDate = new Date();
let origNow = null;
@@ -462,3 +268,368 @@ describe('objectLockHelpers: compareObjectLockInformation', () => {
assert.deepStrictEqual(res, { legalHold: true });
});
});
const pastDate = moment().subtract(1, 'days');
const futureDate = moment().add(100, 'days');
const isLockedTestCases = [
{
desc: 'no mode and no date',
policy: {},
expected: false,
},
{
desc: 'mode and no date',
policy: {
mode: 'GOVERNANCE',
},
expected: false,
},
{
desc: 'mode and past date',
policy: {
mode: 'GOVERNANCE',
date: pastDate.toISOString(),
},
expected: false,
},
{
desc: 'mode and future date',
policy: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
expected: true,
},
];
const isExpiredTestCases = [
{
desc: 'should return true, no date is the same as expired',
expected: true,
},
{
desc: 'should return true, past date.',
date: pastDate.toISOString(),
expected: true,
},
{
desc: 'should return false, future date.',
date: futureDate.toISOString(),
expected: false,
},
];
const policyChangeTestCases = [
{
desc: 'enable governance policy',
from: {},
to: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'modifying expired governance policy',
from: {
mode: 'GOVERNANCE',
date: pastDate.toISOString(),
},
to: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'extending governance policy',
from: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'GOVERNANCE',
date: futureDate.add(1, 'days').toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'shortening governance policy',
from: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'GOVERNANCE',
date: futureDate.subtract(1, 'days').toISOString(),
},
allowed: false,
allowedWithBypass: true,
},
{
desc: 'extending governance policy using same date',
from: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'removing governance policy',
from: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
to: {},
allowed: false,
allowedWithBypass: true,
},
{
desc: 'changing governance policy to compliance',
from: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
allowed: false,
allowedWithBypass: true,
},
{
desc: 'enable compliance policy',
from: {},
to: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'modifying expired compliance policy',
from: {
mode: 'COMPLIANCE',
date: pastDate.toISOString(),
},
to: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'extending compliance policy',
from: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'COMPLIANCE',
date: futureDate.add(1, 'days').toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'shortening compliance policy',
from: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'COMPLIANCE',
date: futureDate.subtract(1, 'days').toISOString(),
},
allowed: false,
allowedWithBypass: false,
},
{
desc: 'extending compliance policy with the same date',
from: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'removing compliance policy',
from: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
to: {},
allowed: false,
allowedWithBypass: false,
},
{
desc: 'changing compliance to governance policy',
from: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
to: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
allowed: false,
allowedWithBypass: false,
},
{
desc: 'invalid starting mode',
from: {
mode: 'IM_AN_INVALID_MODE',
date: futureDate.toISOString(),
},
to: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
allowed: false,
allowedWithBypass: false,
},
{
desc: 'date with no mode',
from: {
date: futureDate.toISOString(),
},
to: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
];
const canModifyObjectTestCases = [
{
desc: 'No object lock config',
policy: {},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'active governance mode',
policy: {
mode: 'GOVERNANCE',
date: futureDate.toISOString(),
},
allowed: false,
allowedWithBypass: true,
},
{
desc: 'expired governance mode',
policy: {
mode: 'GOVERNANCE',
date: pastDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'active compliance mode',
policy: {
mode: 'COMPLIANCE',
date: futureDate.toISOString(),
},
allowed: false,
allowedWithBypass: false,
},
{
desc: 'expired compliance mode',
policy: {
mode: 'COMPLIANCE',
date: pastDate.toISOString(),
},
allowed: true,
allowedWithBypass: true,
},
{
desc: 'invalid mode',
policy: {
mode: 'IM_AN_INVALID_MODE',
date: futureDate.toISOString(),
},
allowed: false,
allowedWithBypass: false,
},
];
describe('objectLockHelpers: ObjectLockInfo', () => {
['GOVERNANCE', 'COMPLIANCE'].forEach(mode => {
it(`should return ${mode === 'GOVERNANCE'} for isGovernance`, () => {
const info = new ObjectLockInfo({
mode,
});
assert.strictEqual(info.isGovernanceMode(), mode === 'GOVERNANCE');
});
it(`should return ${mode === 'COMPLIANCE'} for isCompliance`, () => {
const info = new ObjectLockInfo({
mode,
});
assert.strictEqual(info.isComplianceMode(), mode === 'COMPLIANCE');
});
});
describe('isExpired: ', () => isExpiredTestCases.forEach(testCase => {
const objLockInfo = new ObjectLockInfo({ date: testCase.date });
it(testCase.desc, () => assert.strictEqual(objLockInfo.isExpired(), testCase.expected));
}));
describe('isLocked: ', () => isLockedTestCases.forEach(testCase => {
describe(`${testCase.desc}`, () => {
it(`should show policy as ${testCase.expected ? '' : 'not'} locked without legal hold`, () => {
const objLockInfo = new ObjectLockInfo(testCase.policy);
assert.strictEqual(objLockInfo.isLocked(), testCase.expected);
});
// legal hold should show as locked regardless of policy
it('should show policy as locked with legal hold', () => {
const policy = Object.assign({}, testCase.policy, { legalHold: true });
const objLockInfo = new ObjectLockInfo(policy);
assert.strictEqual(objLockInfo.isLocked(), true);
});
});
}));
describe('canModifyPolicy: ', () => policyChangeTestCases.forEach(testCase => {
describe(testCase.desc, () => {
const objLockInfo = new ObjectLockInfo(testCase.from);
it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying the policy without bypass`,
() => assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to), testCase.allowed));
it(`should ${testCase.allowedWithBypass ? 'allow' : 'deny'} modifying the policy with bypass`,
() => assert.strictEqual(objLockInfo.canModifyPolicy(testCase.to, true), testCase.allowedWithBypass));
});
}));
describe('canModifyObject: ', () => canModifyObjectTestCases.forEach(testCase => {
describe(testCase.desc, () => {
const objLockInfo = new ObjectLockInfo(testCase.policy);
it(`should ${testCase.allowed ? 'allow' : 'deny'} modifying object without bypass`,
() => assert.strictEqual(objLockInfo.canModifyObject(), testCase.allowed));
it(`should ${testCase.allowedWithBypass ? 'allow' : 'deny'} modifying object with bypass`,
() => assert.strictEqual(objLockInfo.canModifyObject(true), testCase.allowedWithBypass));
});
}));
});
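The table-driven cases above fully constrain the new helper; a condensed ObjectLockInfo sketch that satisfies them (the shipped class in objectLockHelpers.js may differ in structure):

class ObjectLockInfo {
    constructor({ mode, date, legalHold } = {}) {
        this.mode = mode || null;
        this.date = date || null;
        this.legalHold = legalHold || false;
    }
    isGovernanceMode() { return this.mode === 'GOVERNANCE'; }
    isComplianceMode() { return this.mode === 'COMPLIANCE'; }
    // a policy without a date is treated as expired
    isExpired() { return this.date === null || Date.now() >= new Date(this.date); }
    isRetained() { return !!this.mode && !this.isExpired(); }
    isLocked() { return this.legalHold || this.isRetained(); }
    // only an active GOVERNANCE policy can be overridden, and only with bypass
    canModifyObject(hasGovernanceBypass) {
        return !this.isLocked()
            || (this.isGovernanceMode() && !!hasGovernanceBypass);
    }
    canModifyPolicy(to, hasGovernanceBypass) {
        if (!this.isRetained()) {
            return true; // no active policy: any change allowed
        }
        if (this.isComplianceMode()) {
            // COMPLIANCE can only be kept or extended, never weakened
            return to.mode === 'COMPLIANCE'
                && !!to.date && new Date(to.date) >= new Date(this.date);
        }
        if (this.isGovernanceMode()) {
            // same-mode extension (or the same date) is always allowed
            if (to.mode === 'GOVERNANCE'
                && !!to.date && new Date(to.date) >= new Date(this.date)) {
                return true;
            }
            return !!hasGovernanceBypass; // anything else needs bypass
        }
        return false; // unrecognized active mode: refuse any change
    }
}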

View File

@@ -0,0 +1,41 @@
const assert = require('assert');
const { isLifecycleSession } =
require('../../../../lib/api/apiUtils/authorization/permissionChecks.js');
const tests = [
{
arn: 'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle',
description: 'a role assumed by lifecycle service',
expectedResult: true,
},
{
arn: undefined,
description: 'undefined',
expectedResult: false,
},
{
arn: '',
description: 'empty',
expectedResult: false,
},
{
arn: 'arn:aws:iam::257038443293:user/bart',
description: 'a user',
expectedResult: false,
},
{
arn: 'arn:aws:sts::257038443293:assumed-role/rolename/other-service',
description: 'a role assumed by another service',
expectedResult: false,
},
];
describe('authInfoHelper', () => {
tests.forEach(t => {
it(`should return ${t.expectedResult} if arn is ${t.description}`, () => {
const result = isLifecycleSession(t.arn);
assert.equal(result, t.expectedResult);
});
});
});
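A reconstruction of isLifecycleSession that matches these expectations (hedged; the shipped helper may parse the ARN differently):

// a lifecycle-service session ARN looks like:
//   arn:aws:sts::<account>:assumed-role/<role-name>/backbeat-lifecycle
function isLifecycleSession(arn) {
    if (!arn) {
        return false;
    }
    const [, , service, , , resource] = arn.split(':');
    if (service !== 'sts' || !resource) {
        return false;
    }
    const [resourceType, , sessionName] = resource.split('/');
    return resourceType === 'assumed-role'
        && sessionName === 'backbeat-lifecycle';
}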

View File

@@ -267,7 +267,9 @@ describe('versioning helpers', () => {
objMD: {
versionId: 'v1',
},
expectedRes: {},
expectedRes: {
isNull: true,
},
},
{
description: 'delete non-null object version',

View File

@@ -377,6 +377,26 @@ describe('bucket policy authorization', () => {
});
});
it('should allow access to non-object owner for objectHead action with s3:GetObject permission',
function itFn(done) {
const newPolicy = this.test.basePolicy;
newPolicy.Statement[0].Action = ['s3:GetObject'];
bucket.setBucketPolicy(newPolicy);
const allowed = isObjAuthorized(bucket, object, 'objectHead',
altAcctCanonicalId, altAcctAuthInfo, log);
assert.equal(allowed, true);
done();
});
it('should deny access to non-object owner for objectHead action without s3:GetObject permission',
function itFn(done) {
const newPolicy = this.test.basePolicy;
newPolicy.Statement[0].Action = ['s3:PutObject'];
bucket.setBucketPolicy(newPolicy);
const allowed = isObjAuthorized(bucket, object, 'objectHead',
altAcctCanonicalId, altAcctAuthInfo, log);
assert.equal(allowed, false);
done();
});
it('should deny access to non-object owner if two statements apply ' +
'to principal but one denies access', function itFn(done) {
const newPolicy = this.test.basePolicy;

View File

@@ -1641,6 +1641,78 @@ describe('Multipart Upload API', () => {
});
});
it('should leave orphaned data when overwriting an object part during completeMPU',
done => {
const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024);
const overWritePart = Buffer.from('Overwrite content', 'utf8');
let uploadId;
async.waterfall([
next => bucketPut(authInfo, bucketPutRequest, log, next),
(corsHeaders, next) => initiateMultipartUpload(authInfo,
initiateRequest, log, next),
(result, corsHeaders, next) => parseString(result, next),
(json, next) => {
uploadId = json.InitiateMultipartUploadResult.UploadId[0];
const requestObj = {
bucketName,
namespace,
objectKey,
headers: { host: `${bucketName}.s3.amazonaws.com` },
url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
query: {
partNumber: '1',
uploadId,
},
};
const partRequest = new DummyRequest(requestObj, fullSizedPart);
objectPutPart(authInfo, partRequest, undefined, log, (err, partCalculatedHash) => {
assert.deepStrictEqual(err, null);
next(null, requestObj, partCalculatedHash);
});
},
(requestObj, partCalculatedHash, next) => {
assert.deepStrictEqual(ds[1].value, fullSizedPart);
async.parallel([
done => {
const partRequest = new DummyRequest(requestObj, overWritePart);
objectPutPart(authInfo, partRequest, undefined, log, err => {
assert.deepStrictEqual(err, null);
done();
});
},
done => {
const completeBody = '<CompleteMultipartUpload>' +
'<Part>' +
'<PartNumber>1</PartNumber>' +
`<ETag>"${partCalculatedHash}"</ETag>` +
'</Part>' +
'</CompleteMultipartUpload>';
const completeRequest = {
bucketName,
namespace,
objectKey,
parsedHost: 's3.amazonaws.com',
url: `/${objectKey}?uploadId=${uploadId}`,
headers: { host: `${bucketName}.s3.amazonaws.com` },
query: { uploadId },
post: completeBody,
};
completeMultipartUpload(authInfo, completeRequest, log, done);
},
], err => next(err));
},
],
err => {
assert.deepStrictEqual(err, null);
assert.strictEqual(ds[0], undefined);
assert.deepStrictEqual(ds[1].value, fullSizedPart);
assert.deepStrictEqual(ds[2].value, overWritePart);
done();
});
});
it('should throw an error on put of an object part with an invalid ' +
'uploadId', done => {
const testUploadId = 'invalidUploadID';
@@ -1841,12 +1913,22 @@ describe('complete mpu with versioning', () => {
},
(eTag, testUploadId, next) => {
const origPutObject = metadataBackend.putObject;
let callCount = 0;
metadataBackend.putObject =
(bucketName, objName, objVal, params, log, cb) => {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
metadataBackend.putObject(
bucketName, objName, objVal, params, log, cb);
(putBucketName, objName, objVal, params, log, cb) => {
if (callCount === 0) {
// first putObject sets the completeInProgress flag in the overview key
assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
assert.strictEqual(
objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
assert.strictEqual(objVal.completeInProgress, true);
} else {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
}
origPutObject(
putBucketName, objName, objVal, params, log, cb);
callCount += 1;
};
const parts = [{ partNumber: 1, eTag }];
const completeRequest = _createCompleteMpuRequest(testUploadId,
@@ -1903,12 +1985,22 @@ describe('complete mpu with versioning', () => {
},
(eTag, testUploadId, next) => {
const origPutObject = metadataBackend.putObject;
let callCount = 0;
metadataBackend.putObject =
(bucketName, objName, objVal, params, log, cb) => {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
metadataBackend.putObject(
bucketName, objName, objVal, params, log, cb);
(putBucketName, objName, objVal, params, log, cb) => {
if (callCount === 0) {
// first putObject sets the completeInProgress flag in the overview key
assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
assert.strictEqual(
objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
assert.strictEqual(objVal.completeInProgress, true);
} else {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
}
origPutObject(
putBucketName, objName, objVal, params, log, cb);
callCount += 1;
};
const parts = [{ partNumber: 1, eTag }];
const completeRequest = _createCompleteMpuRequest(testUploadId,

View File

@@ -16,8 +16,8 @@ const bucketName = 'bucketname';
const objectName = 'objectName';
const postBody = Buffer.from('I am a body', 'utf8');
const date = new Date();
date.setDate(date.getDate() + 1);
const expectedMode = 'GOVERNANCE';
const expectedDate = moment().add(2, 'days').toISOString();
const bucketPutRequest = {
bucketName,
@@ -36,13 +36,13 @@ const putObjectRequest = new DummyRequest({
const objectRetentionXmlGovernance = '<Retention ' +
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<Mode>GOVERNANCE</Mode>' +
`<RetainUntilDate>${date.toISOString()}</RetainUntilDate>` +
`<RetainUntilDate>${expectedDate}</RetainUntilDate>` +
'</Retention>';
const objectRetentionXmlCompliance = '<Retention ' +
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<Mode>COMPLIANCE</Mode>' +
`<RetainUntilDate>${moment().add(2, 'days').toISOString()}</RetainUntilDate>` +
`<RetainUntilDate>${expectedDate}</RetainUntilDate>` +
'</Retention>';
const objectRetentionXmlGovernanceLonger = '<Retention ' +
@@ -51,6 +51,12 @@ const objectRetentionXmlGovernanceLonger = '<Retention ' +
`<RetainUntilDate>${moment().add(5, 'days').toISOString()}</RetainUntilDate>` +
'</Retention>';
const objectRetentionXmlGovernanceShorter = '<Retention ' +
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<Mode>GOVERNANCE</Mode>' +
`<RetainUntilDate>${moment().add(1, 'days').toISOString()}</RetainUntilDate>` +
'</Retention>';
const objectRetentionXmlComplianceShorter = '<Retention ' +
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<Mode>COMPLIANCE</Mode>' +
@@ -95,8 +101,12 @@ const putObjRetRequestGovernanceLonger = {
post: objectRetentionXmlGovernanceLonger,
};
const expectedMode = 'GOVERNANCE';
const expectedDate = date.toISOString();
const putObjRetRequestGovernanceShorter = {
bucketName,
objectKey: objectName,
headers: { host: `${bucketName}.s3.amazonaws.com` },
post: objectRetentionXmlGovernanceShorter,
};
describe('putObjectRetention API', () => {
before(() => cleanup());
@@ -178,13 +188,24 @@ describe('putObjectRetention API', () => {
+ 'GOVERNANCE mode is enabled', done => {
objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
assert.ifError(err);
return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
return objectPutRetention(authInfo, putObjRetRequestGovernanceShorter, log, err => {
assert.deepStrictEqual(err, errors.AccessDenied);
done();
});
});
});
it('should allow update if the x-amz-bypass-governance-retention header is missing and '
+ 'GOVERNANCE mode is enabled and the same date is used', done => {
objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
assert.ifError(err);
return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
assert.ifError(err);
done();
});
});
});
it('should allow update if the x-amz-bypass-governance-retention header is present and '
+ 'GOVERNANCE mode is enabled', done => {
objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {

View File

@@ -66,7 +66,7 @@ function timeDiff(startTime) {
return milliseconds;
}
function makeAuthInfo(accessKey, userName) {
function makeAuthInfo(accessKey, userName, sessionName) {
const canIdMap = {
accessKey1: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7'
+ 'cd47ef2be',
@@ -94,6 +94,11 @@ function makeAuthInfo(accessKey, userName) {
params.arn = `arn:aws:iam::${shortid}:user/${userName}`;
}
if (sessionName) {
params.IAMdisplayName = `[assumedRole] rolename:${sessionName}`;
params.arn = `arn:aws:sts::${shortid}:assumed-role/rolename/${sessionName}`;
}
return new AuthInfo(params);
}

View File

@@ -466,9 +466,9 @@ arraybuffer.slice@~0.0.7:
optionalDependencies:
ioctl "^2.0.2"
"arsenal@git+https://github.com/scality/Arsenal#7.10.37":
version "7.10.37"
resolved "git+https://github.com/scality/Arsenal#9d614a4ab30a200501681a2e80dc14d758ac9338"
"arsenal@git+https://github.com/scality/Arsenal#7.10.38":
version "7.10.38"
resolved "git+https://github.com/scality/Arsenal#0f9da6a44e21984b463464ee6cdcbfff6c21e9c8"
dependencies:
"@types/async" "^3.2.12"
"@types/utf8" "^3.0.1"
@@ -5208,9 +5208,9 @@ user-home@^2.0.0:
dependencies:
os-homedir "^1.0.0"
"utapi@git+https://github.com/scality/utapi#7.10.7":
version "7.10.7"
resolved "git+https://github.com/scality/utapi#a072535050c84c30719cfe258333940750240614"
"utapi@git+https://github.com/scality/utapi#7.10.7-1":
version "7.10.7-1"
resolved "git+https://github.com/scality/utapi#4cd1b53461129a903b9deba6afb8f9ab19e336d2"
dependencies:
"@hapi/joi" "^17.1.1"
"@senx/warp10" "^1.0.14"