Compare commits

1 commit

Author          SHA1        Message  Date
williamlardier  9fb6e0ee53  wip      2023-05-24 00:28:31 +02:00

271 changed files with 9866 additions and 15079 deletions


@@ -1,8 +1,5 @@
 {
     "extends": "scality",
-    "plugins": [
-        "mocha"
-    ],
     "rules": {
         "import/extensions": "off",
         "lines-around-directive": "off",
@@ -45,8 +42,7 @@
         "no-restricted-properties": "off",
         "new-parens": "off",
         "no-multi-spaces": "off",
-        "quote-props": "off",
-        "mocha/no-exclusive-tests": "error",
+        "quote-props": "off"
     },
     "parserOptions": {
         "ecmaVersion": 2020


@@ -16,28 +16,30 @@ runs:
        run: |-
          set -exu;
          mkdir -p /tmp/artifacts/${JOB_NAME}/;
-    - uses: actions/setup-node@v4
+    - uses: actions/setup-node@v2
      with:
        node-version: '16'
        cache: 'yarn'
    - name: install dependencies
      shell: bash
      run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
-    - uses: actions/cache@v3
+    - uses: actions/cache@v2
      with:
        path: ~/.cache/pip
        key: ${{ runner.os }}-pip
    - uses: actions/setup-python@v4
      with:
-        python-version: 3.9
+        python-version: |
+          2.7
+          3.9
+    - name: Install python deps
+      shell: bash
+      run: pip install docker-compose
    - name: Setup python2 test environment
      shell: bash
      run: |
        sudo apt-get install -y libdigest-hmac-perl
-        pip install 's3cmd==2.3.0'
-    - name: fix sproxyd.conf permissions
-      shell: bash
-      run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
-    - name: ensure fuse kernel module is loaded (for sproxyd)
-      shell: bash
-      run: sudo modprobe fuse
+        pip install virtualenv==20.21.0
+        virtualenv -p $(which python2) ~/.virtualenv/py2
+        source ~/.virtualenv/py2/bin/activate
+        pip install 's3cmd==1.6.1'


@@ -40,11 +40,6 @@ services:
       - DEFAULT_BUCKET_KEY_FORMAT
       - METADATA_MAX_CACHED_BUCKETS
       - ENABLE_NULL_VERSION_COMPAT_MODE
-      - SCUBA_HOST
-      - SCUBA_PORT
-      - SCUBA_HEALTHCHECK_FREQUENCY
-      - S3QUOTA
-      - QUOTA_ENABLE_INFLIGHTS
     env_file:
       - creds.env
     depends_on:
@@ -72,7 +67,7 @@ services:
   pykmip:
     network_mode: "host"
     profiles: ['pykmip']
-    image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
+    image: registry.scality.com/cloudserver-dev/pykmip
     volumes:
       - /tmp/artifacts/${JOB_NAME}:/artifacts
   mongo:
@@ -83,10 +78,3 @@ services:
     network_mode: "host"
     profiles: ['ceph']
     image: ghcr.io/scality/cloudserver/ci-ceph
-  sproxyd:
-    network_mode: "host"
-    profiles: ['sproxyd']
-    image: sproxyd-standalone
-    build: ./sproxyd
-    user: 0:0
-    privileged: yes
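The services in this compose file are started by the CI through compose profiles, as the workflow changes later in this diff show. A quick way to reproduce that locally (the commands and profile names are the ones that appear verbatim in this diff; one side invokes the `docker compose` plugin, the other side still uses the standalone `docker-compose` binary):

```shell
# Run from the repository root; other profiles seen in this diff: mongo, ceph, pykmip.
cd .github/docker
docker compose --profile sproxyd up -d    # one side of this diff
docker-compose --profile mongo up -d      # the other side, using the docker-compose v1 CLI
```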


@@ -1,4 +1,4 @@
-FROM mongo:5.0.21
+FROM mongo:4.2.24
 ENV USER=scality \
     HOME_DIR=/home/scality \


@ -1,3 +0,0 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf


@ -1,26 +0,0 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;


@ -1,88 +0,0 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}


@ -1,12 +0,0 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
},
}


@ -1,43 +0,0 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root


@@ -1,10 +1,7 @@
 name: Test alerts
 on:
-  push:
-    branches-ignore:
-      - 'development/**'
-      - 'q/*/**'
+  push
 jobs:
   run-alert-tests:
@@ -20,16 +17,13 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Render and test ${{ matrix.tests.name }}
-        uses: scality/action-prom-render-test@1.0.3
+        uses: scality/action-prom-render-test@1.0.1
        with:
          alert_file_path: monitoring/alerts.yaml
          test_file_path: ${{ matrix.tests.file }}
-          alert_inputs: |
-            namespace=zenko
-            service=artesca-data-connector-s3api-metrics
-            reportJob=artesca-data-ops-report-handler
-            replicas=3
+          alert_inputs: >-
+            namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
          github_token: ${{ secrets.GITHUB_TOKEN }}


@ -1,25 +0,0 @@
---
name: codeQL
on:
push:
branches: [w/**, q/*]
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
workflow_dispatch:
jobs:
analyze:
name: Static analysis with CodeQL
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v3


@ -1,16 +0,0 @@
---
name: dependency review
on:
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4


@@ -1,6 +1,5 @@
 ---
 name: release
-run-name: release ${{ inputs.tag }}
 on:
   workflow_dispatch:
@@ -10,69 +9,58 @@ on:
        required: true
 env:
+  REGISTRY_NAME: registry.scality.com
   PROJECT_NAME: ${{ github.event.repository.name }}
 jobs:
   build-federation-image:
-    runs-on: ubuntu-20.04
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to GitHub Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
-      - name: Build and push image for federation
-        uses: docker/build-push-action@v5
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+    secrets: inherit
     with:
       push: true
+      registry: registry.scality.com
+      namespace: ${{ github.event.repository.name }}
+      name: ${{ github.event.repository.name }}
      context: .
      file: images/svc-base/Dockerfile
-      tags: |
-        ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
-      cache-from: type=gha,scope=federation
-      cache-to: type=gha,mode=max,scope=federation
+      tag: ${{ github.event.inputs.tag }}-svc-base
   release:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Set up Docker Buildk
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1
       - name: Login to Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1
        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          registry: ${{ env.REGISTRY_NAME }}
+          username: ${{ secrets.REGISTRY_LOGIN }}
+          password: ${{ secrets.REGISTRY_PASSWORD }}
       - name: Push dashboards into the production namespace
         run: |
-          oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
+          oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
            dashboard.json:application/grafana-dashboard+json \
            alerts.yaml:application/prometheus-alerts+yaml
        working-directory: monitoring
       - name: Build and push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v2
        with:
          context: .
          push: true
-          tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
+          tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
       - name: Create Release
-        uses: softprops/action-gh-release@v2
+        uses: softprops/action-gh-release@v1
        env:
-          GITHUB_TOKEN: ${{ github.token }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          name: Release ${{ github.event.inputs.tag }}
          tag_name: ${{ github.event.inputs.tag }}
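The release job also publishes the Grafana dashboard and Prometheus alerts as an ORAS artifact alongside the container image. A hedged sketch of fetching that artifact back, where the exact reference is an assumption pieced together from the push command above rather than something stated in this diff:

```shell
# Pull the dashboards/alerts artifact pushed by the release workflow.
# <tag> is the release tag that was passed to workflow_dispatch.
oras pull ghcr.io/scality/cloudserver/cloudserver-dashboards:<tag> -o ./monitoring-artifacts
ls ./monitoring-artifacts   # expect dashboard.json and alerts.yaml
```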


@ -2,8 +2,6 @@
name: tests name: tests
on: on:
workflow_dispatch:
push: push:
branches-ignore: branches-ignore:
- 'development/**' - 'development/**'
@ -67,24 +65,23 @@ env:
ENABLE_LOCAL_CACHE: "true" ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1" REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1" REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs: jobs:
linting-coverage: linting-coverage:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v2
- uses: actions/setup-node@v4 - uses: actions/setup-node@v2
with: with:
node-version: '16' node-version: '16'
cache: yarn cache: yarn
- name: install dependencies - name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1 run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v5 - uses: actions/setup-python@v4
with: with:
python-version: '3.9' python-version: '3.9'
- uses: actions/cache@v4 - uses: actions/cache@v2
with: with:
path: ~/.cache/pip path: ~/.cache/pip
key: ${{ runner.os }}-pip key: ${{ runner.os }}-pip
@ -117,7 +114,7 @@ jobs:
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";" find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always() if: always()
- name: Upload files to artifacts - name: Upload files to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v2
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@@ -133,83 +130,67 @@ jobs:
      packages: write
    steps:
      - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1.6.0
      - name: Login to GitHub Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1.10.0
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Login to Registry
+        uses: docker/login-action@v1
+        with:
+          registry: registry.scality.com
+          username: ${{ secrets.REGISTRY_LOGIN }}
+          password: ${{ secrets.REGISTRY_PASSWORD }}
      - name: Build and push cloudserver image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v3
        with:
          push: true
          context: .
          provenance: false
          tags: |
-            ghcr.io/${{ github.repository }}:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
+            ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
+            registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
          cache-from: type=gha,scope=cloudserver
          cache-to: type=gha,mode=max,scope=cloudserver
-      - name: Build and push pykmip image
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          context: .github/pykmip
-          tags: |
-            ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
-          cache-from: type=gha,scope=pykmip
-          cache-to: type=gha,mode=max,scope=pykmip
      - name: Build and push MongoDB
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v2
        with:
          push: true
          context: .github/docker/mongodb
          tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-          cache-from: type=gha,scope=mongodb
-          cache-to: type=gha,mode=max,scope=mongodb
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
multiple-backend: multiple-backend:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build needs: build
env: env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple S3DATA: multiple
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker compose --profile sproxyd up -d run: docker-compose up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run multiple backend test - name: Run multiple backend test
run: |- run: |-
set -o pipefail; set -o pipefail;
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env: env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v3
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -229,25 +210,26 @@ jobs:
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0 DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }} MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker compose --profile mongo up -d run: docker-compose --profile mongo up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run functional tests - name: Run functional tests
run: |- run: |-
set -o pipefail; set -o pipefail;
source ~/.virtualenv/py2/bin/activate
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env: env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v3
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -268,26 +250,27 @@ jobs:
DEFAULT_BUCKET_KEY_FORMAT: v1 DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1 METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }} MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker compose --profile mongo up -d run: docker-compose --profile mongo up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run functional tests - name: Run functional tests
run: |- run: |-
set -o pipefail; set -o pipefail;
source ~/.virtualenv/py2/bin/activate
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
env: env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v3
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -307,13 +290,12 @@ jobs:
env: env:
S3BACKEND: file S3BACKEND: file
S3VAULT: mem S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes" MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }} JOB_NAME: ${{ matrix.job-name }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory - name: Setup matrix job artifacts directory
@ -322,15 +304,16 @@ jobs:
set -exu set -exu
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/ mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
- name: Setup CI services - name: Setup CI services
run: docker compose up -d run: docker-compose up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run file ft tests - name: Run file ft tests
run: |- run: |-
set -o pipefail; set -o pipefail;
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
source ~/.virtualenv/py2/bin/activate
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v3
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -346,16 +329,15 @@ jobs:
ENABLE_UTAPI_V2: t ENABLE_UTAPI_V2: t
S3BACKEND: mem S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker compose up -d run: docker-compose up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run file utapi v2 tests - name: Run file utapi v2 tests
run: |- run: |-
@ -363,51 +345,7 @@ jobs:
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -423,20 +361,18 @@ jobs:
S3BACKEND: file S3BACKEND: file
S3VAULT: mem S3VAULT: mem
MPU_TESTING: "yes" MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Copy KMIP certs - name: Copy KMIP certs
run: cp -r ./certs /tmp/ssl-kmip run: cp -r ./certs /tmp/ssl-kmip
working-directory: .github/pykmip working-directory: .github/pykmip
- name: Setup CI services - name: Setup CI services
run: docker compose --profile pykmip up -d run: docker-compose --profile pykmip up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run file KMIP tests - name: Run file KMIP tests
run: |- run: |-
@ -445,7 +381,7 @@ jobs:
bash wait_for_local_port.bash 5696 40 bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v3
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -465,30 +401,30 @@ jobs:
MPU_TESTING: "yes" MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }} MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v3
- name: Login to GitHub Registry - name: Login to GitHub Registry
uses: docker/login-action@v3 uses: docker/login-action@v1.10.0
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ github.token }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1 - uses: ruby/setup-ruby@v1
with: with:
ruby-version: '2.5.9' ruby-version: '2.5.0'
- name: Install Ruby dependencies - name: Install Ruby dependencies
run: | run: |
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5 gem install nokogiri:1.12.5 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
- name: Install Java dependencies - name: Install Java dependencies
run: | run: |
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
- name: Setup CI services - name: Setup CI services
run: docker compose --profile ceph up -d run: docker-compose --profile ceph up -d
working-directory: .github/docker working-directory: .github/docker
env: env:
S3METADATA: mongodb S3METADATA: mongodb
@ -510,11 +446,12 @@ jobs:
- name: Run Ruby tests - name: Run Ruby tests
run: |- run: |-
set -ex -o pipefail; set -ex -o pipefail;
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log rspec tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
working-directory: tests/functional/fog working-directory: tests/functional/fog
- name: Run Javascript AWS SDK tests - name: Run Javascript AWS SDK tests
run: |- run: |-
set -ex -o pipefail; set -ex -o pipefail;
source ~/.virtualenv/py2/bin/activate
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log; yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log; yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
env: env:
@ -523,7 +460,7 @@ jobs:
S3VAULT: mem S3VAULT: mem
S3METADATA: mongodb S3METADATA: mongodb
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v4 uses: scality/action-artifacts@v3
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net


@@ -23,7 +23,6 @@ RUN apt-get update \
 ENV PYTHON=python3
 COPY package.json yarn.lock /usr/src/app/
-RUN npm install typescript -g
 RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
 ################################################################################
@@ -43,7 +42,6 @@ EXPOSE 8002
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
         jq \
-        tini \
     && rm -rf /var/lib/apt/lists/*
 WORKDIR /usr/src/app
@@ -55,6 +53,6 @@ COPY --from=builder /usr/src/app/node_modules ./node_modules/
 VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
-ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
+ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
 CMD [ "yarn", "start" ]

README.md

@@ -1,7 +1,10 @@
-# Zenko CloudServer with Vitastor Backend
+# Zenko CloudServer
 ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
+[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
+[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
 ## Overview
 CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@@ -11,71 +14,137 @@ Scality's Open Source Multi-Cloud Data Controller.
 CloudServer provides a single AWS S3 API interface to access multiple
 backend data storage both on-premise or public in the cloud.
-This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
-backend support.
-## Quick Start with Vitastor
-Vitastor Backend is in experimental status, however you can already try to
-run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
-it works too 😊.
-Installation instructions:
-### Install Vitastor
-Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
-### Install Zenko with Vitastor Backend
-- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
-- Install dependencies: `npm install --omit dev` or just `npm install`
-- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
-- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
-  You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
-- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
-### Install and Configure MongoDB
-Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
-### Setup Zenko
-- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
-- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
-- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
-- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
-- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
-  and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
-  access keys, but it's not published, so let's use a file for now.
-- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
-  You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
-  in this file.
-Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
-instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
-### Start Zenko
-Start the S3 server with: `node index.js`
-If you use default settings, Zenko CloudServer starts on port 8000.
-The default access key is `accessKey1` with a secret key of `verySecretKey1`.
-Now you can access your S3 with `s3cmd` or `geesefs`:
-```
-s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
-```
+CloudServer is useful for Developers, either to run as part of a
+continous integration test environment to emulate the AWS S3 service locally
+or as an abstraction layer to develop object storage enabled
+application on the go.
+## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
+## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
+## Docker
+[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
+## Contributing
+In order to contribute, please follow the
+[Contributing Guidelines](
+https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
+## Installation
+### Dependencies
+Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
+. Up-to-date versions can be found at
+[Nodesource](https://github.com/nodesource/distributions).
+### Clone source code
+```shell
+git clone https://github.com/scality/S3.git
+```
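For the fork's `locationConfig.json` mentioned in the removed setup steps above, only the `pool_id` and `metadata_image` keys are named in this diff; the surrounding entry shape in the sketch below mirrors a generic CloudServer location entry and is an assumption, not the fork's exact schema:

```shell
# Hypothetical locationConfig.json for a Vitastor-backed STANDARD storage class.
cat > locationConfig.json <<'EOF'
{
    "STANDARD": {
        "type": "vitastor",
        "objectId": "std",
        "legacyAwsBehavior": true,
        "details": {
            "pool_id": 3,
            "metadata_image": "s3-volume-meta"
        }
    }
}
EOF
```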
-```
-AWS_ACCESS_KEY_ID=accessKey1 \
-AWS_SECRET_ACCESS_KEY=verySecretKey1 \
-geesefs --endpoint http://localhost:8000 testbucket mountdir
-```
-# Author & License
-- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
-- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
-  (a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
+### Install js dependencies
+Go to the ./S3 folder,
+```shell
+yarn install --frozen-lockfile
+```
+If you get an error regarding installation of the diskUsage module,
+please install g++.
+If you get an error regarding level-down bindings, try clearing your yarn cache:
+```shell
+yarn cache clean
+```
+## Run it with a file backend
+```shell
+yarn start
+```
+This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
+9991 are also open locally for internal transfer of metadata and data,
+respectively.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+By default the metadata files will be saved in the
+localMetadata directory and the data files will be saved
+in the localData directory within the ./S3 directory on your
+machine. These directories have been pre-created within the
+repository. If you would like to save the data or metadata in
+different locations of your choice, you must specify them with absolute paths.
+So, when starting the server:
+```shell
+mkdir -m 700 $(pwd)/myFavoriteDataPath
+mkdir -m 700 $(pwd)/myFavoriteMetadataPath
+export S3DATAPATH="$(pwd)/myFavoriteDataPath"
+export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
+yarn start
+```
+## Run it with multiple data backends
+```shell
+export S3DATA='multiple'
+yarn start
+```
+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+With multiple backends, you have the ability to
+choose where each object will be saved by setting
+the following header with a locationConstraint on
+a PUT request:
+```shell
+'x-amz-meta-scal-location-constraint':'myLocationConstraint'
+```
+If no header is sent with a PUT object request, the
+location constraint of the bucket will determine
+where the data is saved. If the bucket has no location
+constraint, the endpoint of the PUT request will be
+used to determine location.
+See the Configuration section in our documentation
+[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
+to learn how to set location constraints.
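One way to exercise that header against a local CloudServer is sketched below; it assumes the default credentials mentioned above, an already-created bucket named "mybucket", and relies on the AWS CLI turning `--metadata` keys into `x-amz-meta-*` headers:

```shell
# Assumes: aws CLI configured with accessKey1/verySecretKey1 and an existing bucket "mybucket".
aws s3api put-object \
    --endpoint-url http://localhost:8000 \
    --bucket mybucket \
    --key example.txt \
    --body example.txt \
    --metadata scal-location-constraint=myLocationConstraint
```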
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it with Vault user management
Note: Vault is proprietary and must be accessed separately.
```shell
export S3VAULT=vault
yarn start
```
This starts a Zenko CloudServer using Vault for user management.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae

bin/metrics_server.js (new executable file, 46 lines)

@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});
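The script exits immediately unless its three environment variables are present, so a minimal invocation looks like the sketch below; the variable names come from the script above, while the endpoint, instance id, and token values are placeholders only:

```shell
# Placeholder values; only the variable names are taken from the script.
export PUSH_ENDPOINT="https://push.example.com"
export INSTANCE_ID="example-instance-id"
export MANAGEMENT_TOKEN="example-management-token"
node bin/metrics_server.js
```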

bin/secure_channel_proxy.js (new executable file, 46 lines)

@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

cldsrv-10.gzip (new binary file, content not shown)


@@ -4,7 +4,6 @@
     "metricsPort": 8002,
     "metricsListenOn": [],
     "replicationGroupId": "RG001",
-    "workers": 4,
     "restEndpoints": {
         "localhost": "us-east-1",
         "127.0.0.1": "us-east-1",
@@ -102,14 +101,6 @@
         "readPreference": "primary",
         "database": "metadata"
     },
-    "authdata": "authdata.json",
-    "backends": {
-        "auth": "file",
-        "data": "file",
-        "metadata": "mongodb",
-        "kms": "file",
-        "quota": "none"
-    },
     "externalBackends": {
         "aws_s3": {
             "httpAgent": {


@ -1,71 +0,0 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}


@@ -116,8 +116,7 @@ const constants = {
     ],
     // user metadata header to set object locationConstraint
-    objectLocationConstraintHeader: 'x-amz-storage-class',
-    lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
+    objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
     legacyLocations: ['sproxyd', 'legacy'],
     // declare here all existing service accounts and their properties
     // (if any, otherwise an empty object)
@@ -176,8 +175,6 @@ const constants = {
         'objectDeleteTagging',
         'objectGetTagging',
         'objectPutTagging',
-        'objectPutLegalHold',
-        'objectPutRetention',
     ],
     // response header to be sent when there are invalid
     // user metadata in the object's metadata
@@ -198,51 +195,16 @@ const constants = {
         'user',
         'bucket',
     ],
-    arrayOfAllowed: [
-        'objectPutTagging',
-        'objectPutLegalHold',
-        'objectPutRetention',
-    ],
     allowedUtapiEventFilterStates: ['allow', 'deny'],
     allowedRestoreObjectRequestTierValues: ['Standard'],
-    validStorageClasses: [
-        'STANDARD',
-    ],
     lifecycleListing: {
         CURRENT_TYPE: 'current',
         NON_CURRENT_TYPE: 'noncurrent',
         ORPHAN_DM_TYPE: 'orphan',
     },
-    multiObjectDeleteConcurrency: 50,
-    maxScannedLifecycleListingEntries: 10000,
-    overheadField: [
-        'content-length',
-        'owner-id',
-        'versionId',
-        'isNull',
-        'isDeleteMarker',
-    ],
-    unsupportedSignatureChecksums: new Set([
-        'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
-        'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
-        'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
-        'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
-    ]),
-    supportedSignatureChecksums: new Set([
-        'UNSIGNED-PAYLOAD',
-        'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
-    ]),
-    ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
-    ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
-    // The AWS assumed Role resource type
-    assumedRoleArnResourceType: 'assumed-role',
-    // Session name of the backbeat lifecycle assumed role session.
-    backbeatLifecycleSessionName: 'backbeat-lifecycle',
-    actionsToConsiderAsObjectPut: [
-        'initiateMultipartUpload',
-        'objectPutPart',
-        'completeMultipartUpload',
-    ],
-    // if requester is not bucket owner, bucket policy actions should be denied with
-    // MethodNotAllowed error
-    onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
 };
 module.exports = constants;


@@ -199,10 +199,6 @@ if [[ -n "$BUCKET_DENY_FILTER" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
 fi
-if [[ "$TESTING_MODE" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
-fi
 if [[ $JQ_FILTERS_CONFIG != "." ]]; then
     jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
     mv config.json.tmp config.json
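The entrypoint accumulates jq filters into `JQ_FILTERS_CONFIG` and applies them to `config.json` in a single pass; the removed `TESTING_MODE` branch would have contributed a filter equivalent to the standalone one-liner below (shown only as a sketch of what that branch did):

```shell
# Standalone equivalent of the removed TESTING_MODE filter.
jq '.testingMode=true' config.json > config.json.tmp && mv config.json.tmp config.json
```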


@@ -2,12 +2,11 @@
 ## Docker Image Generation
-Docker images are hosted on [ghcri.io](https://github.com/orgs/scality/packages).
-CloudServer has a few images there:
-* Cloudserver container image: ghcr.io/scality/cloudserver
-* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
-* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
+Docker images are hosted on [registry.scality.com](registry.scality.com).
+CloudServer has two namespaces there:
+* Production Namespace: registry.scality.com/cloudserver
+* Dev Namespace: registry.scality.com/cloudserver-dev
 With every CI build, the CI will push images, tagging the
 content with the developer branch's short SHA-1 commit hash.
@@ -19,8 +18,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
 ## How to Pull Docker Images
 ```sh
-docker pull ghcr.io/scality/cloudserver:<commit hash>
-docker pull ghcr.io/scality/cloudserver:<tag>
+docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
+docker pull registry.scality.com/cloudserver/cloudserver:<tag>
 ```
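A quick local smoke test of a pulled image might look like the sketch below; the port mapping follows the README's default of 8000, and the tag is whichever tag was pulled above:

```shell
docker pull ghcr.io/scality/cloudserver:<tag>
docker run -d --name cloudserver -p 8000:8000 ghcr.io/scality/cloudserver:<tag>
```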
## Release Process ## Release Process


@@ -1,4 +1,4 @@
-FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
+FROM registry.scality.com/federation/nodesvc-base:7.10.6.0
 ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
 ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
@@ -14,10 +14,8 @@ RUN rm -f ~/.gitconfig && \
     git config --global --add safe.directory . && \
     git lfs install && \
     GIT_LFS_SKIP_SMUDGE=1 && \
-    yarn global add typescript && \
     yarn install --frozen-lockfile --production --network-concurrency 1 && \
-    yarn cache clean --all && \
-    yarn global remove typescript
+    yarn cache clean --all
 # run symlinking separately to avoid yarn installation errors
 # we might have to check if the symlinking is really needed!


@@ -1,10 +1,3 @@
 'use strict'; // eslint-disable-line strict
-require('werelogs').stderrUtils.catchAndTimestampStderr(
-    undefined,
-    // Do not exit as workers have their own listener that will exit
-    // But primary don't have another listener
-    require('cluster').isPrimary ? 1 : null,
-);
 require('./lib/server.js')();


@@ -8,18 +8,16 @@ const crypto = require('crypto');
 const { v4: uuidv4 } = require('uuid');
 const cronParser = require('cron-parser');
 const joi = require('@hapi/joi');
-const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
-const { isValidBucketName } = s3routes.routesUtils;
-const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig;
+const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
+const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
 const { buildAuthDataAccount } = require('./auth/in_memory/builder');
 const validExternalBackends = require('../constants').externalBackends;
 const { azureAccountNameRegex, base64Regex,
     allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
 } = require('../constants');
 const { utapiVersion } = require('utapi');
-const { scaleMsPerDay } = s3middleware.objectUtils;
-const constants = require('../constants');
// config paths // config paths
const configSearchPaths = [ const configSearchPaths = [
@ -107,47 +105,6 @@ function parseSproxydConfig(configSproxyd) {
return joi.attempt(configSproxyd, joiSchema, 'bad config'); return joi.attempt(configSproxyd, joiSchema, 'bad config');
} }
function parseRedisConfig(redisConfig) {
const joiSchema = joi.object({
password: joi.string().allow(''),
host: joi.string(),
port: joi.number(),
retry: joi.object({
connectBackoff: joi.object({
min: joi.number().required(),
max: joi.number().required(),
jitter: joi.number().required(),
factor: joi.number().required(),
deadline: joi.number().required(),
}),
}),
// sentinel config
sentinels: joi.alternatives().try(
joi.string()
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
.custom(hosts => hosts.split(',').map(item => {
const [host, port] = item.split(':');
return { host, port: Number.parseInt(port, 10) };
})),
joi.array().items(
joi.object({
host: joi.string().required(),
port: joi.number().required(),
})
).min(1),
),
name: joi.string(),
sentinelPassword: joi.string().allow(''),
})
.and('host', 'port')
.and('sentinels', 'name')
.xor('host', 'sentinels')
.without('sentinels', ['host', 'port'])
.without('host', ['sentinels', 'sentinelPassword']);
return joi.attempt(redisConfig, joiSchema, 'bad config');
}
function restEndpointsAssert(restEndpoints, locationConstraints) { function restEndpointsAssert(restEndpoints, locationConstraints) {
assert(typeof restEndpoints === 'object', assert(typeof restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints'); 'bad config: restEndpoints must be an object of endpoints');
@ -377,7 +334,7 @@ function dmfLocationConstraintAssert(locationObj) {
function locationConstraintAssert(locationConstraints) { function locationConstraintAssert(locationConstraints) {
const supportedBackends = const supportedBackends =
['mem', 'file', 'scality', ['mem', 'file', 'scality',
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends)); 'mongodb', 'dmf', 'azure_archive'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object', assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object'); 'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => { Object.keys(locationConstraints).forEach(l => {
@ -502,33 +459,26 @@ function locationConstraintAssert(locationConstraints) {
locationConstraints[l].details.connector.hdclient); locationConstraints[l].details.connector.hdclient);
} }
}); });
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
} }
function parseUtapiReindex(config) { function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
const {
enabled,
schedule,
redis,
bucketd,
onlyCountLatestWhenObjectLocked,
} = config;
assert(typeof enabled === 'boolean', assert(typeof enabled === 'boolean',
'bad config: utapi.reindex.enabled must be a boolean'); 'bad config: utapi.reindex.enabled must be a boolean');
assert(typeof sentinel === 'object',
const parsedRedis = parseRedisConfig(redis); 'bad config: utapi.reindex.sentinel must be an object');
assert(Array.isArray(parsedRedis.sentinels), assert(typeof sentinel.port === 'number',
'bad config: utapi reindex redis config requires a list of sentinels'); 'bad config: utapi.reindex.sentinel.port must be a number');
assert(typeof sentinel.name === 'string',
'bad config: utapi.reindex.sentinel.name must be a string');
assert(typeof bucketd === 'object', assert(typeof bucketd === 'object',
'bad config: utapi.reindex.bucketd must be an object'); 'bad config: utapi.reindex.bucketd must be an object');
assert(typeof bucketd.port === 'number', assert(typeof bucketd.port === 'number',
'bad config: utapi.reindex.bucketd.port must be a number'); 'bad config: utapi.reindex.bucketd.port must be a number');
assert(typeof schedule === 'string', assert(typeof schedule === 'string',
'bad config: utapi.reindex.schedule must be a string'); 'bad config: utapi.reindex.schedule must be a string');
if (onlyCountLatestWhenObjectLocked !== undefined) {
assert(typeof onlyCountLatestWhenObjectLocked === 'boolean',
'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean');
}
try { try {
cronParser.parseExpression(schedule); cronParser.parseExpression(schedule);
} catch (e) { } catch (e) {
@ -536,13 +486,6 @@ function parseUtapiReindex(config) {
'bad config: utapi.reindex.schedule must be a valid ' + 'bad config: utapi.reindex.schedule must be a valid ' +
`cron schedule. ${e.message}.`); `cron schedule. ${e.message}.`);
} }
return {
enabled,
schedule,
redis: parsedRedis,
bucketd,
onlyCountLatestWhenObjectLocked,
};
} }
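// As a rough sketch, a reindex block like the following would pass the checks
// in the variant above that takes a redis sub-config (schedule, host, ports and
// the sentinel group name are placeholders):
parseUtapiReindex({
    enabled: true,
    schedule: '0 0 * * 6',
    redis: {
        name: 'mymaster',
        sentinels: [{ host: 'localhost', port: 16379 }],
    },
    bucketd: { port: 9000 },
    onlyCountLatestWhenObjectLocked: false,
});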
function requestsConfigAssert(requestsConfig) { function requestsConfigAssert(requestsConfig) {
@ -630,6 +573,7 @@ class Config extends EventEmitter {
// Read config automatically // Read config automatically
this._getLocationConfig(); this._getLocationConfig();
this._getConfig(); this._getConfig();
this._configureBackends();
} }
_getLocationConfig() { _getLocationConfig() {
@ -841,11 +785,11 @@ class Config extends EventEmitter {
this.websiteEndpoints = config.websiteEndpoints; this.websiteEndpoints = config.websiteEndpoints;
} }
this.workers = false; this.clusters = false;
if (config.workers !== undefined) { if (config.clusters !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0, assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: workers must be a positive integer'); 'bad config: clusters must be a positive integer');
this.workers = config.workers; this.clusters = config.clusters;
} }
if (config.usEastBehavior !== undefined) { if (config.usEastBehavior !== undefined) {
@ -1083,7 +1027,8 @@ class Config extends EventEmitter {
assert(typeof config.localCache.port === 'number', assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number'); 'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) { if (config.localCache.password !== undefined) {
assert(typeof config.localCache.password === 'string', assert(
this._verifyRedisPassword(config.localCache.password),
'config: bad password for localCache. password must' + 'config: bad password for localCache. password must' +
' be a string'); ' be a string');
} }
@ -1109,46 +1054,56 @@ class Config extends EventEmitter {
} }
if (config.redis) { if (config.redis) {
this.redis = parseRedisConfig(config.redis); if (config.redis.sentinels) {
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');
if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.redis.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.redis.sentinels)) {
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
} }
if (config.scuba) {
this.scuba = {}; if (config.redis.sentinelPassword !== undefined) {
if (config.scuba.host) { assert(
assert(typeof config.scuba.host === 'string', this._verifyRedisPassword(config.redis.sentinelPassword));
'bad config: scuba host must be a string'); this.redis.sentinelPassword = config.redis.sentinelPassword;
this.scuba.host = config.scuba.host;
} }
if (config.scuba.port) { } else {
assert(Number.isInteger(config.scuba.port) // check for standalone configuration
&& config.scuba.port > 0, this.redis = {};
'bad config: scuba port must be a positive integer'); assert(typeof config.redis.host === 'string',
this.scuba.port = config.scuba.port; 'bad config: redis.host must be a string');
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
}
if (config.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.redis.password),
'bad config: invalid password for redis. password must ' +
'be a string');
this.redis.password = config.redis.password;
} }
} }
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
assert(typeof process.env.SCUBA_HOST === 'string',
'bad config: scuba host must be a string');
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
&& Number(process.env.SCUBA_PORT) > 0,
'bad config: scuba port must be a positive integer');
this.scuba = {
host: process.env.SCUBA_HOST,
port: Number(process.env.SCUBA_PORT),
};
}
if (this.scuba) {
this.quotaEnabled = true;
}
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
config.quota?.maxStalenessMS ||
24 * 60 * 60 * 1000;
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
config.quota?.enableInflights || false;
this.quota = {
maxStaleness,
enableInflights,
};
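// Illustration of the block above: running with SCUBA_HOST=localhost,
// SCUBA_PORT=8100 and QUOTA_ENABLE_INFLIGHTS=true (values are placeholders)
// and no quota section in the config file resolves to:
//   this.scuba        -> { host: 'localhost', port: 8100 }
//   this.quotaEnabled -> true
//   this.quota        -> { maxStaleness: 86400000, enableInflights: true }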
if (config.utapi) { if (config.utapi) {
this.utapi = { component: 's3' }; this.utapi = { component: 's3' };
if (config.utapi.host) { if (config.utapi.host) {
@ -1177,8 +1132,50 @@ class Config extends EventEmitter {
assert(config.redis, 'missing required property of utapi ' + assert(config.redis, 'missing required property of utapi ' +
'configuration: redis'); 'configuration: redis');
if (config.utapi.redis) { if (config.utapi.redis) {
this.utapi.redis = parseRedisConfig(config.utapi.redis); if (config.utapi.redis.sentinels) {
if (this.utapi.redis.retry === undefined) { this.utapi.redis = { sentinels: [], name: null };
assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port });
});
} else {
// check for standalone configuration
this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port;
}
if (config.utapi.redis.retry !== undefined) {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}
this.utapi.redis.retry = config.utapi.redis.retry;
} else {
this.utapi.redis.retry = { this.utapi.redis.retry = {
connectBackoff: { connectBackoff: {
min: 10, min: 10,
@ -1189,6 +1186,22 @@ class Config extends EventEmitter {
}, },
}; };
} }
if (config.utapi.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.password = config.utapi.redis.password;
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(
config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
} }
if (config.utapi.metrics) { if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics; this.utapi.metrics = config.utapi.metrics;
@ -1258,7 +1271,8 @@ class Config extends EventEmitter {
} }
if (config.utapi && config.utapi.reindex) { if (config.utapi && config.utapi.reindex) {
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex); parseUtapiReindex(config.utapi.reindex);
this.utapi.reindex = config.utapi.reindex;
} }
} }
@ -1303,8 +1317,6 @@ class Config extends EventEmitter {
} }
} }
this.authdata = config.authdata || 'authdata.json';
this.kms = {}; this.kms = {};
if (config.kms) { if (config.kms) {
assert(typeof config.kms.userName === 'string'); assert(typeof config.kms.userName === 'string');
@ -1524,6 +1536,25 @@ class Config extends EventEmitter {
this.outboundProxy.certs = certObj.certs; this.outboundProxy.certs = certObj.certs;
} }
this.managementAgent = {};
this.managementAgent.port = 8010;
this.managementAgent.host = 'localhost';
if (config.managementAgent !== undefined) {
if (config.managementAgent.port !== undefined) {
assert(Number.isInteger(config.managementAgent.port)
&& config.managementAgent.port > 0,
'bad config: managementAgent port must be a positive ' +
'integer');
this.managementAgent.port = config.managementAgent.port;
}
if (config.managementAgent.host !== undefined) {
assert.strictEqual(typeof config.managementAgent.host, 'string',
'bad config: management agent host must ' +
'be a string');
this.managementAgent.host = config.managementAgent.host;
}
}
// Ephemeral token to protect the reporting endpoint: // Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file, // try inherited from parent first, then hardcoded in conf file,
// then create a fresh one as last resort. // then create a fresh one as last resort.
@ -1588,102 +1619,37 @@ class Config extends EventEmitter {
// Version of the configuration we're running under // Version of the configuration we're running under
this.overlayVersion = config.overlayVersion || 0; this.overlayVersion = config.overlayVersion || 0;
this._setTimeOptions();
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
this.multiObjectDeleteConcurrency = extractedNumber;
}
this.multiObjectDeleteEnableOptimizations = true;
if (config.multiObjectDeleteEnableOptimizations === false) {
this.multiObjectDeleteEnableOptimizations = false;
}
this.testingMode = config.testingMode || false;
this.maxScannedLifecycleListingEntries = constants.maxScannedLifecycleListingEntries;
if (config.maxScannedLifecycleListingEntries !== undefined) {
// maxScannedLifecycleListingEntries > 2 is required as a minimum because we must
// scan at least three entries to determine version eligibility.
// Two entries represent the master key, and the following one represents the non-current version.
assert(Number.isInteger(config.maxScannedLifecycleListingEntries) &&
config.maxScannedLifecycleListingEntries > 2,
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
}
this._configureBackends(config);
}
_setTimeOptions() {
// NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
// TIME_PROGRESSION_FACTOR, which decreases the weight attributed to a day in order to, among other things,
// expedite the lifecycle of objects.
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
// decreases the weight attributed to a day in order to expedite the lifecycle of objects.
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
'"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
// The scaledMsPerDay value is initially set to the number of milliseconds per day
// (24 * 60 * 60 * 1000) as the default value.
// However, during testing, if the timeProgressionFactor is defined and greater than 1,
// the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
// earlier in time.
const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
this.timeOptions = {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
};
}
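// Illustration, assuming scaleMsPerDay simply divides the 24-hour day by the
// factor: with TIME_PROGRESSION_FACTOR=24, scaledMsPerDay drops from 86400000
// to about 3600000, so one lifecycle "day" elapses every wall-clock hour.
// EXPIRE_ONE_DAY_EARLIER / TRANSITION_ONE_DAY_EARLIER must then stay unset,
// per the incompatibility assert above.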
getTimeOptions() {
return this.timeOptions;
} }
_getAuthData() { _getAuthData() {
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' })); return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
} }
_configureBackends(config) { _configureBackends() {
const backends = config.backends || {};
/** /**
* Configure the backends for Authentication, Data and Metadata. * Configure the backends for Authentication, Data and Metadata.
*/ */
let auth = backends.auth || 'mem'; let auth = 'mem';
let data = backends.data || 'multiple'; let data = 'multiple';
let metadata = backends.metadata || 'file'; let metadata = 'file';
let kms = backends.kms || 'file'; let kms = 'file';
let quota = backends.quota || 'none';
if (process.env.S3BACKEND) { if (process.env.S3BACKEND) {
const validBackends = ['mem', 'file', 'scality', 'cdmi']; const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1, assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' + 'bad environment variable: S3BACKEND environment variable ' +
'should be one of mem/file/scality/cdmi' 'should be one of mem/file/scality/cdmi'
); );
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem'; auth = process.env.S3BACKEND;
data = process.env.S3BACKEND; data = process.env.S3BACKEND;
metadata = process.env.S3BACKEND; metadata = process.env.S3BACKEND;
kms = process.env.S3BACKEND; kms = process.env.S3BACKEND;
} }
if (process.env.S3VAULT) { if (process.env.S3VAULT) {
auth = process.env.S3VAULT; auth = process.env.S3VAULT;
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
} }
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') { if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
// Auth only checks for 'mem' since mem === file // Auth only checks for 'mem' since mem === file
auth = 'mem';
let authData; let authData;
if (process.env.SCALITY_ACCESS_KEY_ID && if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) { process.env.SCALITY_SECRET_ACCESS_KEY) {
@ -1712,10 +1678,10 @@ class Config extends EventEmitter {
'should be one of mem/file/scality/multiple' 'should be one of mem/file/scality/multiple'
); );
data = process.env.S3DATA; data = process.env.S3DATA;
}
if (data === 'scality' || data === 'multiple') { if (data === 'scality' || data === 'multiple') {
data = 'multiple'; data = 'multiple';
} }
}
assert(this.locationConstraints !== undefined && assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined, this.restEndpoints !== undefined,
'bad config: locationConstraints and restEndpoints must be set' 'bad config: locationConstraints and restEndpoints must be set'
@ -1727,18 +1693,18 @@ class Config extends EventEmitter {
if (process.env.S3KMS) { if (process.env.S3KMS) {
kms = process.env.S3KMS; kms = process.env.S3KMS;
} }
if (process.env.S3QUOTA) {
quota = process.env.S3QUOTA;
}
this.backends = { this.backends = {
auth, auth,
data, data,
metadata, metadata,
kms, kms,
quota,
}; };
} }
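// For example, with only S3BACKEND=file set in the environment, the variant
// above that includes a quota backend resolves to roughly:
//   this.backends -> { auth: 'mem', data: 'file', metadata: 'file', kms: 'file', quota: 'none' }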
_verifyRedisPassword(password) {
return typeof password === 'string';
}
setAuthDataAccounts(accounts) { setAuthDataAccounts(accounts) {
this.authData.accounts = accounts; this.authData.accounts = accounts;
this.emit('authdata-update'); this.emit('authdata-update');
@ -1861,19 +1827,10 @@ class Config extends EventEmitter {
.update(instanceId) .update(instanceId)
.digest('hex'); .digest('hex');
} }
isQuotaEnabled() {
return !!this.quotaEnabled;
}
isQuotaInflightEnabled() {
return this.quota.enableInflights;
}
} }
module.exports = { module.exports = {
parseSproxydConfig, parseSproxydConfig,
parseRedisConfig,
locationConstraintAssert, locationConstraintAssert,
ConfigObject: Config, ConfigObject: Config,
config: new Config(), config: new Config(),

View File

@ -7,7 +7,6 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite'); const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle'); const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy'); const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet'); const { bucketGet } = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL'); const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors'); const bucketGetCors = require('./bucketGetCors');
@ -18,7 +17,6 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification'); const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock'); const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy'); const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption'); const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead'); const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut'); const { bucketPut } = require('./bucketPut');
@ -35,7 +33,6 @@ const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption'); const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy'); const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock'); const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication'); const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication'); const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight'); const corsPreflight = require('./corsPreflight');
@ -67,7 +64,8 @@ const prepareRequestContexts
= require('./apiUtils/authorization/prepareRequestContexts'); = require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet'); const serviceGet = require('./serviceGet');
const vault = require('../auth/vault'); const vault = require('../auth/vault');
const website = require('./website'); const websiteGet = require('./websiteGet');
const websiteHead = require('./websiteHead');
const writeContinue = require('../utilities/writeContinue'); const writeContinue = require('../utilities/writeContinue');
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders'); const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
const parseCopySource = require('./apiUtils/object/parseCopySource'); const parseCopySource = require('./apiUtils/object/parseCopySource');
@ -85,10 +83,6 @@ const api = {
// Attach the apiMethod method to the request, so it can used by monitoring in the server // Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod; request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod]; const actionLog = monitoringMap[apiMethod];
if (!actionLog && if (!actionLog &&
@ -123,7 +117,6 @@ const api = {
// no need to check auth on website or cors preflight requests // no need to check auth on website or cors preflight requests
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' || if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
apiMethod === 'corsPreflight') { apiMethod === 'corsPreflight') {
request.actionImplicitDenies = false;
return this[apiMethod](request, log, callback); return this[apiMethod](request, log, callback);
} }
@ -146,25 +139,15 @@ const api = {
const requestContexts = prepareRequestContexts(apiMethod, request, const requestContexts = prepareRequestContexts(apiMethod, request,
sourceBucket, sourceObject, sourceVersionId); sourceBucket, sourceObject, sourceVersionId);
// Extract all the _apiMethods and store them in an array
const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : [];
// Attach the names to the current request
// eslint-disable-next-line no-param-reassign
request.apiMethods = apiMethods;
function checkAuthResults(authResults) { function checkAuthResults(authResults) {
let returnTagCount = true; let returnTagCount = true;
const isImplicitDeny = {};
let isOnlyImplicitDeny = true;
if (apiMethod === 'objectGet') { if (apiMethod === 'objectGet') {
// first item checks s3:GetObject(Version) action // first item checks s3:GetObject(Version) action
if (!authResults[0].isAllowed && !authResults[0].isImplicit) { if (!authResults[0].isAllowed) {
log.trace('get object authorization denial from Vault'); log.trace('get object authorization denial from Vault');
return errors.AccessDenied; return errors.AccessDenied;
} }
// TODO add support for returnTagCount in the bucket policy
// checks
isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
// second item checks s3:GetObject(Version)Tagging action // second item checks s3:GetObject(Version)Tagging action
if (!authResults[1].isAllowed) { if (!authResults[1].isAllowed) {
log.trace('get tagging authorization denial ' + log.trace('get tagging authorization denial ' +
@ -173,41 +156,25 @@ const api = {
} }
} else { } else {
for (let i = 0; i < authResults.length; i++) { for (let i = 0; i < authResults.length; i++) {
isImplicitDeny[authResults[i].action] = true; if (!authResults[i].isAllowed) {
if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
// Any explicit deny rejects the current API call
log.trace('authorization denial from Vault'); log.trace('authorization denial from Vault');
return errors.AccessDenied; return errors.AccessDenied;
} }
if (authResults[i].isAllowed) {
// If the action is allowed, the result is not implicit
// Deny.
isImplicitDeny[authResults[i].action] = false;
isOnlyImplicitDeny = false;
} }
} }
} return returnTagCount;
// These two APIs cannot use ACLs or Bucket Policies, hence, any
// implicit deny from vault must be treated as an explicit deny.
if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
return errors.AccessDenied;
}
return { returnTagCount, isImplicitDeny };
} }
return async.waterfall([ return async.waterfall([
next => auth.server.doAuth( next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) { if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err }); log.trace('authentication error', { error: err });
return next(arsenalError); return next(err);
} }
return next(null, userInfo, authorizationResults, streamingV4Params, infos); return next(null, userInfo, authorizationResults, streamingV4Params);
}, 's3', requestContexts), }, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, infos, next) => { (userInfo, authorizationResults, streamingV4Params, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() }; const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) { if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName(); authNames.userName = userInfo.getIAMdisplayName();
@ -217,7 +184,7 @@ const api = {
} }
log.addDefaultFields(authNames); log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params, infos); return next(null, userInfo, authorizationResults, streamingV4Params);
} }
// issue 100 Continue to the client // issue 100 Continue to the client
writeContinue(request, response); writeContinue(request, response);
@ -248,12 +215,12 @@ const api = {
} }
// Convert array of post buffers into one string // Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString(); request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params, infos); return next(null, userInfo, authorizationResults, streamingV4Params);
}); });
return undefined; return undefined;
}, },
// Tag condition keys require information from CloudServer for evaluation // Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth( (userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
authorizationResults, authorizationResults,
request, request,
requestContexts, requestContexts,
@ -264,47 +231,33 @@ const api = {
log.trace('tag authentication error', { error: err }); log.trace('tag authentication error', { error: err });
return next(err); return next(err);
} }
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); return next(null, userInfo, authResultsWithTags, streamingV4Params);
}, },
), ),
], (err, userInfo, authorizationResults, streamingV4Params, infos) => { ], (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) { if (err) {
return callback(err); return callback(err);
} }
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) { if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults); const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) { if (checkedResults instanceof Error) {
return callback(checkedResults); return callback(checkedResults);
} }
returnTagCount = checkedResults.returnTagCount; returnTagCount = checkedResults;
request.actionImplicitDenies = checkedResults.isImplicitDeny;
} else {
// create an object of keys apiMethods with all values to false:
// for backward compatibility, all apiMethods are allowed by default
// thus it is explicitly allowed, so implicit deny is false
request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
acc[curr] = false;
return acc;
}, {});
} }
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response; request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params, return this[apiMethod](userInfo, request, streamingV4Params,
log, methodCallback, authorizationResults); log, callback, authorizationResults);
} }
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket, return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, methodCallback); sourceObject, sourceVersionId, log, callback);
} }
if (apiMethod === 'objectGet') { if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback); return this[apiMethod](userInfo, request, returnTagCount, log, callback);
} }
return this[apiMethod](userInfo, request, log, methodCallback); return this[apiMethod](userInfo, request, log, callback);
}); });
}, },
bucketDelete, bucketDelete,
@ -331,14 +284,11 @@ const api = {
bucketPutReplication, bucketPutReplication,
bucketGetReplication, bucketGetReplication,
bucketDeleteReplication, bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle, bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle, bucketGetLifecycle,
bucketDeleteLifecycle, bucketDeleteLifecycle,
bucketPutPolicy, bucketPutPolicy,
bucketGetPolicy, bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy, bucketDeletePolicy,
bucketPutObjectLock, bucketPutObjectLock,
bucketPutNotification, bucketPutNotification,
@ -370,8 +320,8 @@ const api = {
objectPutRetention, objectPutRetention,
objectRestore, objectRestore,
serviceGet, serviceGet,
websiteGet: website, websiteGet,
websiteHead: website, websiteHead,
}; };
module.exports = api; module.exports = api;

View File

@ -1,23 +1,11 @@
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies; const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
const { errors } = require('arsenal');
const { parseCIDR, isValid } = require('ipaddr.js');
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { config } = require('../../../Config');
const { const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
allAuthedUsersId,
bucketOwnerActions,
logId,
publicId,
arrayOfAllowed,
assumedRoleArnResourceType,
backbeatLifecycleSessionName,
actionsToConsiderAsObjectPut,
} = constants;
// whitelist buckets to allow public read on objects // whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : []; process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
function getServiceAccountProperties(canonicalID) { function getServiceAccountProperties(canonicalID) {
const canonicalIDArray = canonicalID.split('/'); const canonicalIDArray = canonicalID.split('/');
@ -38,41 +26,13 @@ function isRequesterNonAccountUser(authInfo) {
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo); return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
} }
/** function checkBucketAcls(bucket, requestType, canonicalID) {
* Checks the access control for a given bucket based on the request type and user's canonical ID.
*
* @param {Bucket} bucket - The bucket to check access control for.
* @param {string} requestType - The list of s3 actions to check within the API call.
* @param {string} canonicalID - The canonical ID of the user making the request.
* @param {string} mainApiCall - The main API call (first item of the requestType).
*
* @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
*/
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
// Same logic applies on the Versioned APIs, so let's simplify it.
let requestTypeParsed = requestType.endsWith('Version') ?
requestType.slice(0, 'Version'.length * -1) : requestType;
requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
'objectPut' : requestTypeParsed;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
if (bucket.getOwner() === canonicalID) { if (bucket.getOwner() === canonicalID) {
return true; return true;
} }
if (parsedMainApiCall === 'objectGet') {
if (requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (parsedMainApiCall === 'objectPut') {
if (arrayOfAllowed.includes(requestTypeParsed)) {
return true;
}
}
const bucketAcl = bucket.getAcl(); const bucketAcl = bucket.getAcl();
if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') { if (requestType === 'bucketGet' || requestType === 'bucketHead') {
if (bucketAcl.Canned === 'public-read' if (bucketAcl.Canned === 'public-read'
|| bucketAcl.Canned === 'public-read-write' || bucketAcl.Canned === 'public-read-write'
|| (bucketAcl.Canned === 'authenticated-read' || (bucketAcl.Canned === 'authenticated-read'
@ -90,7 +50,7 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
return true; return true;
} }
} }
if (requestTypeParsed === 'bucketGetACL') { if (requestType === 'bucketGetACL') {
if ((bucketAcl.Canned === 'log-delivery-write' if ((bucketAcl.Canned === 'log-delivery-write'
&& canonicalID === logId) && canonicalID === logId)
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -106,7 +66,7 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
} }
} }
if (requestTypeParsed === 'bucketPutACL') { if (requestType === 'bucketPutACL') {
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) { || bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true; return true;
@ -120,7 +80,11 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
} }
} }
if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') { if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) {
return true;
}
if (requestType === 'objectDelete' || requestType === 'objectPut') {
if (bucketAcl.Canned === 'public-read-write' if (bucketAcl.Canned === 'public-read-write'
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) { || bucketAcl.WRITE.indexOf(canonicalID) > -1) {
@ -140,39 +104,25 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket // objectPutACL, objectGetACL, objectHead or objectGet, the bucket
// authorization check should just return true so can move on to check // authorization check should just return true so can move on to check
// rights at the object level. // rights at the object level.
return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL' return (requestType === 'objectPutACL' || requestType === 'objectGetACL' ||
|| requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead'); requestType === 'objectGet' || requestType === 'objectHead');
} }
function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser, function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
isUserUnauthenticated, mainApiCall) {
const bucketOwner = bucket.getOwner(); const bucketOwner = bucket.getOwner();
const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
'objectPut' : requestType;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
// acls don't distinguish between users and accounts, so both should be allowed // acls don't distinguish between users and accounts, so both should be allowed
if (bucketOwnerActions.includes(requestTypeParsed) if (bucketOwnerActions.includes(requestType)
&& (bucketOwner === canonicalID)) { && (bucketOwner === canonicalID)) {
return true; return true;
} }
if (objectMD['owner-id'] === canonicalID) { if (objectMD['owner-id'] === canonicalID) {
return true; return true;
} }
// Backward compatibility
if (parsedMainApiCall === 'objectGet') {
if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
&& requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (!objectMD.acl) { if (!objectMD.acl) {
return false; return false;
} }
if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') { if (requestType === 'objectGet' || requestType === 'objectHead') {
if (objectMD.acl.Canned === 'public-read' if (objectMD.acl.Canned === 'public-read'
|| objectMD.acl.Canned === 'public-read-write' || objectMD.acl.Canned === 'public-read-write'
|| (objectMD.acl.Canned === 'authenticated-read' || (objectMD.acl.Canned === 'authenticated-read'
@ -198,11 +148,11 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
// User is already authorized on the bucket for FULL_CONTROL or WRITE or // User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write // bucket has canned ACL public-read-write
if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') { if (requestType === 'objectPut' || requestType === 'objectDelete') {
return true; return true;
} }
if (requestTypeParsed === 'objectPutACL') { if (requestType === 'objectPutACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control' if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID) && bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -218,7 +168,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
} }
} }
if (requestTypeParsed === 'objectGetACL') { if (requestType === 'objectGetACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control' if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID) && bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -237,9 +187,9 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
// allow public reads on buckets that are whitelisted for anonymous reads // allow public reads on buckets that are whitelisted for anonymous reads
// TODO: remove this after bucket policies are implemented // TODO: remove this after bucket policies are implemented
const bucketAcl = bucket.getAcl(); const bucketAcl = bucket.getAcl();
const allowPublicReads = publicReadBuckets.includes(bucket.getName()) const allowPublicReads = publicReadBuckets.includes(bucket.getName()) &&
&& bucketAcl.Canned === 'public-read' bucketAcl.Canned === 'public-read' &&
&& (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead'); (requestType === 'objectGet' || requestType === 'objectHead');
if (allowPublicReads) { if (allowPublicReads) {
return true; return true;
} }
@ -266,20 +216,6 @@ function _checkBucketPolicyResources(request, resource, log) {
return evaluators.isResourceApplicable(requestContext, resource, log); return evaluators.isResourceApplicable(requestContext, resource, log);
} }
function _checkBucketPolicyConditions(request, conditions, log) {
const ip = request ? requestUtils.getClientIp(request, config) : undefined;
if (!conditions) {
return true;
}
// build request context from the request!
const requestContext = new RequestContext(request.headers, request.query,
request.bucketName, request.objectKey, ip,
request.connection.encrypted, request.resourceType, 's3', null, null,
null, null, null, null, null, null, null, null, null,
request.objectLockRetentionDays);
return evaluators.meetConditions(requestContext, conditions, log);
}
function _getAccountId(arn) { function _getAccountId(arn) {
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc... // account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
return arn.substr(13, 12); return arn.substr(13, 12);
@ -324,11 +260,11 @@ function _checkPrincipals(canonicalID, arn, principal) {
return false; return false;
} }
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) { function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request) {
let permission = 'defaultDeny'; let permission = 'defaultDeny';
// if requester is user within bucket owner account, actions should be // if requester is user within bucket owner account, actions should be
// allowed unless explicitly denied (assumes allowed by IAM policy) // allowed unless explicitly denied (assumes allowed by IAM policy)
if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) { if (bucketOwner === canonicalID) {
permission = 'allow'; permission = 'allow';
} }
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement)); let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
@ -337,13 +273,12 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal); const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log); const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log); const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') { if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Deny') {
// explicit deny trumps any allows, so return immediately // explicit deny trumps any allows, so return immediately
return 'explicitDeny'; return 'explicitDeny';
} }
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') { if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Allow') {
permission = 'allow'; permission = 'allow';
} }
copiedStatement = copiedStatement.splice(1); copiedStatement = copiedStatement.splice(1);
@ -351,37 +286,7 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
return permission; return permission;
} }
function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log, function isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request) {
request, aclPermission, results, actionImplicitDenies) {
const bucketPolicy = bucket.getBucketPolicy();
let processedResult = results[requestType];
if (!bucketPolicy) {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
} else {
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
bucketOwner, log, request, actionImplicitDenies);
if (bucketPolicyPermission === 'explicitDeny') {
processedResult = false;
} else if (bucketPolicyPermission === 'allow') {
processedResult = true;
} else {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
}
}
return processedResult;
}
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const mainApiCall = requestTypes[0];
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
// Check to see if user is authorized to perform a // Check to see if user is authorized to perform a
// particular action on bucket based on ACLs. // particular action on bucket based on ACLs.
// TODO: Add IAM checks // TODO: Add IAM checks
@ -392,100 +297,69 @@ function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, lo
arn = authInfo.getArn(); arn = authInfo.getArn();
} }
// if the bucket owner is an account, users should not have default access // if the bucket owner is an account, users should not have default access
if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) { if (((bucket.getOwner() === canonicalID) && requesterIsNotUser)
results[_requestType] = actionImplicitDenies[_requestType] === false; || isServiceAccount(canonicalID)) {
return results[_requestType]; return true;
} }
const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall); const aclPermission = checkBucketAcls(bucket, requestType, canonicalID);
// In case of error bucket access is checked with bucketGet const bucketPolicy = bucket.getBucketPolicy();
// For website, bucket policy only uses objectGet and ignores bucketGet if (!bucketPolicy) {
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html return aclPermission;
// bucketGet should be used to check acl but switched to objectGet for bucket policy
if (isWebsite && _requestType === 'bucketGet') {
// eslint-disable-next-line no-param-reassign
_requestType = 'objectGet';
actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
} }
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log, const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
request, aclPermission, results, actionImplicitDenies); canonicalID, arn, bucket.getOwner(), log, request);
}); if (bucketPolicyPermission === 'explicitDeny') {
return false;
}
return (aclPermission || (bucketPolicyPermission === 'allow'));
} }
function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {}, function isObjAuthorized(bucket, objectMD, requestType, canonicalID, authInfo, log, request) {
log, request) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
let arn = null;
if (authInfo) {
arn = authInfo.getArn();
}
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
request, true, results, actionImplicitDenies);
});
}
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
const mainApiCall = requestTypes[0];
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
const parsedMethodName = _requestType.endsWith('Version')
? _requestType.slice(0, -7) : _requestType;
const bucketOwner = bucket.getOwner(); const bucketOwner = bucket.getOwner();
if (!objectMD) { if (!objectMD) {
// check bucket has read access
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
let permission = 'bucketGet';
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
permission = 'objectPut';
}
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
actionImplicitDenies, isWebsite);
// User is already authorized on the bucket for FULL_CONTROL or WRITE or // User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write // bucket has canned ACL public-read-write
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete') if (requestType === 'objectPut' || requestType === 'objectDelete') {
&& results[_requestType] === false) { return true;
results[_requestType] = actionImplicitDenies[_requestType] === false;
} }
return results[_requestType]; // check bucket has read access
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
return isBucketAuthorized(bucket, 'bucketGet', canonicalID, authInfo, log, request);
} }
let requesterIsNotUser = true; let requesterIsNotUser = true;
let arn = null; let arn = null;
let isUserUnauthenticated = false;
if (authInfo) { if (authInfo) {
requesterIsNotUser = !isRequesterNonAccountUser(authInfo); requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn(); arn = authInfo.getArn();
isUserUnauthenticated = arn === undefined;
} }
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) { if (objectMD['owner-id'] === canonicalID && requesterIsNotUser) {
results[_requestType] = actionImplicitDenies[_requestType] === false; return true;
return results[_requestType]; }
if (isServiceAccount(canonicalID)) {
return true;
} }
// account is authorized if: // account is authorized if:
// - requesttype is included in bucketOwnerActions and // - requesttype is included in bucketOwnerActions and
// - account is the bucket owner // - account is the bucket owner
// - requester is account, not user // - requester is account, not user
if (bucketOwnerActions.includes(parsedMethodName) if (bucketOwnerActions.includes(requestType)
&& (bucketOwner === canonicalID) && (bucketOwner === canonicalID)
&& requesterIsNotUser) { && requesterIsNotUser) {
results[_requestType] = actionImplicitDenies[_requestType] === false; return true;
return results[_requestType];
} }
const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName, const aclPermission = checkObjectAcls(bucket, objectMD, requestType,
canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall); canonicalID);
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner, const bucketPolicy = bucket.getBucketPolicy();
log, request, aclPermission, results, actionImplicitDenies); if (!bucketPolicy) {
}); return aclPermission;
}
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
canonicalID, arn, bucket.getOwner(), log, request);
if (bucketPolicyPermission === 'explicitDeny') {
return false;
}
return (aclPermission || (bucketPolicyPermission === 'allow'));
} }
function _checkResource(resource, bucketArn) { function _checkResource(resource, bucketArn) {
@ -514,117 +388,6 @@ function validatePolicyResource(bucketName, policy) {
}); });
} }
function checkIp(value) {
const errString = 'Invalid IP address in Conditions';
const values = Array.isArray(value) ? value : [value];
for (let i = 0; i < values.length; i++) {
// these preliminary checks are validating the provided
// ip address against ipaddr.js, the library we use when
// evaluating IP condition keys. It ensures compatibility,
// but additional checks are required to enforce the right
// notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
// we would accept different ip formats, which is not
// standard in an AWS use case.
try {
try {
parseCIDR(values[i]);
} catch (err) {
isValid(values[i]);
}
} catch (err) {
return errString;
}
// Apply the existing IP validation logic to each element
const validateIpRegex = ip => {
if (constants.ipv4Regex.test(ip)) {
return ip.split('.').every(part => parseInt(part, 10) <= 255);
}
if (constants.ipv6Regex.test(ip)) {
return ip.split(':').every(part => part.length <= 4);
}
return false;
};
if (validateIpRegex(values[i]) !== true) {
return errString;
}
}
// If the function hasn't returned by now, all elements are valid
return null;
}
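// Hedged examples of what the helper above accepts and rejects, assuming
// constants.ipv4Regex allows the xxx.xxx.xxx.xxx/xx notation mentioned in the
// comment:
checkIp('192.0.2.0/24');   // null: parses as a CIDR and every octet is <= 255
checkIp('999.0.2.0/24');   // 'Invalid IP address in Conditions': 999 fails the octet check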
// This function checks all bucket policy conditions if the values provided
// are valid for the condition type. If not it returns a relevant Malformed policy error string
function validatePolicyConditions(policy) {
const validConditions = [
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
{ conditionKey: 's3:object-lock-remaining-retention-days' },
];
// keys where value type does not seem to be checked by AWS:
// - s3:object-lock-remaining-retention-days
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
return null;
}
// there can be multiple statements in the policy, each with a Condition enclosure
for (let i = 0; i < policy.Statement.length; i++) {
const s = policy.Statement[i];
if (s.Condition) {
const conditionOperators = Object.keys(s.Condition);
// there can be multiple condition operations in the Condition enclosure
// eslint-disable-next-line no-restricted-syntax
for (const conditionOperator of conditionOperators) {
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
const conditionValue = s.Condition[conditionOperator][conditionKey];
const validCondition = validConditions.find(validCondition =>
validCondition.conditionKey === conditionKey
);
// AWS does not return an error if the condition starts with 'aws:'
// so we reproduce this behaviour
if (!validCondition && !conditionKey.startsWith('aws:')) {
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
}
if (validCondition && validCondition.conditionValueTypeChecker) {
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
if (conditionValueTypeError) {
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
}
}
}
}
}
return null;
}
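// Tying the two helpers together (same ipv4Regex assumption as above; bucket
// name and CIDR are placeholders):
validatePolicyConditions({
    Statement: [{
        Effect: 'Deny',
        Principal: '*',
        Action: 's3:GetObject',
        Resource: 'arn:aws:s3:::examplebucket/*',
        Condition: { IpAddress: { 'aws:SourceIp': '192.0.2.0/24' } },
    }],
}); // null: 'aws:SourceIp' is a known key with a well-formed value
// An unrecognized key that does not start with 'aws:' would instead yield
// MalformedPolicy ('Policy has an invalid condition key').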
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts'
&& resourceType === assumedRoleArnResourceType
&& sessionName === backbeatLifecycleSessionName);
}
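// Using the ARN from the docstring, and assuming the constants carry the values
// it implies ('assumed-role' and 'backbeat-lifecycle'):
isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle'); // true
isLifecycleSession('arn:aws:iam::257038443293:user/someuser'); // false: not an sts assumed-role session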
module.exports = { module.exports = {
isBucketAuthorized, isBucketAuthorized,
isObjAuthorized, isObjAuthorized,
@ -635,7 +398,4 @@ module.exports = {
checkBucketAcls, checkBucketAcls,
checkObjectAcls, checkObjectAcls,
validatePolicyResource, validatePolicyResource,
validatePolicyConditions,
isLifecycleSession,
evaluateBucketPolicyWithIAM,
}; };

View File

@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3'); apiMethod, 's3');
} }
if (apiMethod === 'bucketPut') { if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
return null; return null;
} }
@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = []; const requestContexts = [];
if (apiMethod === 'multiObjectDelete') { if (apiMethodAfterVersionCheck === 'objectCopy'
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
        // But in order to get any relevant information from the authorization service,
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') { || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' : const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet'; 'objectGet';
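// Illustrative sketch (an assumption, not part of the diff): what the caller is expected
// to do with the single 'objectDelete' context pushed for multiObjectDelete above. The
// authorization result for that context is an implicit deny that is deliberately ignored;
// only side information (here a hypothetical `accountQuota` field) is kept.
function pickAccountQuota(authorizationResults) {
    const first = Array.isArray(authorizationResults) ? authorizationResults[0] : null;
    if (!first) {
        return null;
    }
    // `isAllowed === false && isImplicit === true` is the expected (and ignored) outcome here.
    return first.accountQuota || null;
}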

View File

@ -24,7 +24,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
}); });
} }
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) { function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
async.mapLimit(mpus, 1, (mpu, next) => { async.mapLimit(mpus, 1, (mpu, next) => {
const splitterChar = mpu.key.includes(oldSplitter) ? const splitterChar = mpu.key.includes(oldSplitter) ?
oldSplitter : splitter; oldSplitter : splitter;
@ -40,7 +40,7 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log,
byteLength: partSizeSum, byteLength: partSizeSum,
}); });
next(err); next(err);
}, request); });
}, cb); }, cb);
} }
/** /**
@ -49,13 +49,11 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log,
* @param {object} bucketMD - bucket attributes/metadata * @param {object} bucketMD - bucket attributes/metadata
* @param {string} bucketName - bucket in which objectMetadata is stored * @param {string} bucketName - bucket in which objectMetadata is stored
* @param {string} canonicalID - account canonicalID of requester * @param {string} canonicalID - account canonicalID of requester
* @param {object} request - request object given by router
* including normalized headers
* @param {object} log - Werelogs logger * @param {object} log - Werelogs logger
* @param {function} cb - callback from async.waterfall in bucketDelete * @param {function} cb - callback from async.waterfall in bucketDelete
* @return {undefined} * @return {undefined}
*/ */
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) { function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
log.trace('deleting bucket from metadata'); log.trace('deleting bucket from metadata');
assert.strictEqual(typeof bucketName, 'string'); assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof canonicalID, 'string'); assert.strictEqual(typeof canonicalID, 'string');
@ -102,7 +100,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
} }
if (objectsListRes.Contents.length) { if (objectsListRes.Contents.length) {
return _deleteOngoingMPUs(authInfo, bucketName, return _deleteOngoingMPUs(authInfo, bucketName,
bucketMD, objectsListRes.Contents, request, log, err => { bucketMD, objectsListRes.Contents, log, err => {
if (err) { if (err) {
return next(err); return next(err);
} }

View File

@ -30,9 +30,6 @@ function bucketShield(bucket, requestType) {
// Otherwise return an error to the client // Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) && if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' && (requestType !== 'objectPut' &&
requestType !== 'initiateMultipartUpload' &&
requestType !== 'objectPutPart' &&
requestType !== 'completeMultipartUpload' &&
requestType !== 'bucketPutACL' && requestType !== 'bucketPutACL' &&
requestType !== 'bucketDelete')) { requestType !== 'bucketDelete')) {
return true; return true;

View File

@ -3,7 +3,7 @@ const async = require('async');
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper'); const { data } = require('../../../data/wrapper');
const locationConstraintCheck = require('../object/locationConstraintCheck'); const locationConstraintCheck = require('../object/locationConstraintCheck');
const { standardMetadataValidateBucketAndObj } = const { metadataValidateBucketAndObj } =
require('../../../metadata/metadataUtils'); require('../../../metadata/metadataUtils');
const services = require('../../../services'); const services = require('../../../services');
@ -14,7 +14,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
bucketName, bucketName,
objectKey, objectKey,
uploadId, uploadId,
preciseRequestType: request.apiMethods || 'multipartDelete', preciseRequestType: 'multipartDelete',
request, request,
}; };
// For validating the request at the destinationBucket level // For validating the request at the destinationBucket level
@ -22,11 +22,10 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
// but the requestType is the more general 'objectDelete' // but the requestType is the more general 'objectDelete'
const metadataValParams = Object.assign({}, metadataValMPUparams); const metadataValParams = Object.assign({}, metadataValMPUparams);
metadataValParams.requestType = 'objectPut'; metadataValParams.requestType = 'objectPut';
const authzIdentityResult = request ? request.actionImplicitDenies : false;
async.waterfall([ async.waterfall([
function checkDestBucketVal(next) { function checkDestBucketVal(next) {
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log, metadataValidateBucketAndObj(metadataValParams, log,
(err, destinationBucket) => { (err, destinationBucket) => {
if (err) { if (err) {
return next(err, destinationBucket); return next(err, destinationBucket);
@ -57,14 +56,9 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
next) { next) {
const location = mpuOverviewObj.controllingLocationConstraint; const location = mpuOverviewObj.controllingLocationConstraint;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.abortMPU(objectKey, uploadId, location, bucketName, return data.abortMPU(objectKey, uploadId, location, bucketName,
request, destBucket, locationConstraintCheck, log, request, destBucket, locationConstraintCheck, log,
(err, skipDataDelete) => { (err, skipDataDelete) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityAuthzResults;
if (err) { if (err) {
return next(err, destBucket); return next(err, destBucket);
} }

View File

@ -13,12 +13,11 @@ const { maximumMetaHeadersSize,
function checkUserMetadataSize(responseMetadata) { function checkUserMetadataSize(responseMetadata) {
let userMetadataSize = 0; let userMetadataSize = 0;
// collect the user metadata keys from the object metadata // collect the user metadata keys from the object metadata
const userMetadataHeaders = Object.keys(responseMetadata) for (let key in responseMetadata) {
.filter(key => key.startsWith('x-amz-meta-')); if (responseMetadata.hasOwnProperty(key) && key.startsWith('x-amz-meta-')) {
    // compute the size of each user metadata key and its value userMetadataSize += key.length + responseMetadata[key].length;
userMetadataHeaders.forEach(header => { }
userMetadataSize += header.length + responseMetadata[header].length; }
});
// check the size computed against the maximum allowed // check the size computed against the maximum allowed
// if the computed size is greater, then remove all the // if the computed size is greater, then remove all the
// user metadata from the response object // user metadata from the response object
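// Minimal standalone sketch of the size accounting used above: each 'x-amz-meta-*' header
// contributes its key length plus its value length, and the whole set is dropped when the
// sum exceeds the limit. The numeric limit below is an assumption for the sketch; the real
// value comes from `maximumMetaHeadersSize` in the constants module.
const maxUserMetadataSize = 2136;
function userMetadataFits(responseMetadata) {
    const size = Object.keys(responseMetadata)
        .filter(key => key.startsWith('x-amz-meta-'))
        .reduce((sum, key) => sum + key.length + responseMetadata[key].length, 0);
    return size <= maxUserMetadataSize;
}
// Example: 'x-amz-meta-color' (16 characters) with value 'blue' (4 characters) counts as 20.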

View File

@ -2,13 +2,11 @@
* Code based on Yutaka Oishi (Fujifilm) contributions * Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020 * Date: 11 Sep 2020
*/ */
const { ObjectMDArchive } = require('arsenal').models; const ObjectMDArchive = require('arsenal').models.ObjectMDArchive;
const errors = require('arsenal').errors; const errors = require('arsenal').errors;
const { config } = require('../../../Config'); const { config } = require('../../../Config');
const { locationConstraints } = config; const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/** /**
* Get response header "x-amz-restore" * Get response header "x-amz-restore"
* Be called by objectHead.js * Be called by objectHead.js
@ -42,23 +40,6 @@ function getAmzRestoreResHeader(objMD) {
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled * @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/ */
function _validateStartRestore(objectMD, log) { function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold; const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) { if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage, // return InvalidObjectState error if the object is not in cold storage,
@ -70,7 +51,18 @@ function _validateStartRestore(objectMD, log) {
}); });
return errors.InvalidObjectState; return errors.InvalidObjectState;
} }
if (objectMD.archive?.restoreRequestedAt) { if (objectMD.archive?.restoreCompletedAt
&& new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt && !objectMD.archive?.restoreCompletedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored // return RestoreAlreadyInProgress error if the object is currently being restored
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists // check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
log.debug('The object is currently being restored.', log.debug('The object is currently being restored.',
@ -127,36 +119,22 @@ function validatePutVersionId(objMD, versionId, log) {
} }
/** /**
* Check if the object is already restored, and update the expiration date accordingly: * Check if the object is already restored
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
* *
* @param {ObjectMD} objectMD - object metadata * @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger * @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored * @return {boolean} - true if the object is already restored
*/ */
function _updateObjectExpirationDate(objectMD, log) { function isObjectAlreadyRestored(objectMD, log) {
// Check if restoreCompletedAt field exists // check if restoreCompletedAt field exists
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is // and archive.restoreWillExpireAt > current time
// checked earlier in the process, so checking again here would create weird states const isObjectAlreadyRestored = objectMD.archive?.restoreCompletedAt
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt; && new Date(objectMD.archive?.restoreWillExpireAt) >= new Date(Date.now());
log.debug('The restore status of the object.', { log.debug('The restore status of the object.',
{
isObjectAlreadyRestored, isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored' method: 'isObjectAlreadyRestored'
}); });
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored; return isObjectAlreadyRestored;
} }
@ -216,8 +194,7 @@ function startRestore(objectMD, restoreParam, log, cb) {
if (updateResultError) { if (updateResultError) {
return cb(updateResultError); return cb(updateResultError);
} }
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log); return cb(null, isObjectAlreadyRestored(objectMD, log));
return cb(null, isObjectAlreadyRestored);
} }
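// Worked example (illustrative dates, with a plain 24h day standing in for scaledMsPerDay):
// the expiry computed by _updateObjectExpirationDate above is restoreRequestedAt plus
// restoreRequestedDays, so re-issuing a restore request moves the expiration forward.
const msPerDay = 24 * 60 * 60 * 1000;
const restoreRequestedAt = new Date('2023-05-24T00:00:00Z');
const restoreRequestedDays = 2;
const restoreWillExpireAt = new Date(restoreRequestedAt.getTime() + (restoreRequestedDays * msPerDay));
// restoreWillExpireAt.toISOString() === '2023-05-26T00:00:00.000Z'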
/** /**

View File

@ -5,6 +5,7 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper'); const { data } = require('../../../data/wrapper');
const services = require('../../../services'); const services = require('../../../services');
const logger = require('../../../utilities/logger');
const { dataStore } = require('./storeObject'); const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck'); const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning'); const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
@ -20,7 +21,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure or GCP.'; 'a versioned object to a location-constraint of type Azure or GCP.';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle, function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, log, requestMethod, callback) { metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
services.metadataStoreObject(bucketName, dataGetInfo, services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, result) => { cipherBundle, metadataStoreParams, (err, result) => {
if (err) { if (err) {
@ -30,7 +31,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
const newDataStoreName = Array.isArray(dataGetInfo) ? const newDataStoreName = Array.isArray(dataGetInfo) ?
dataGetInfo[0].dataStoreName : null; dataGetInfo[0].dataStoreName : null;
return data.batchDelete(dataToDelete, requestMethod, return data.batchDelete(dataToDelete, requestMethod,
newDataStoreName, log, err => callback(err, result)); newDataStoreName, deleteLog, err => callback(err, result));
} }
return callback(null, result); return callback(null, result);
}); });
@ -50,7 +51,6 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* @param {(object|null)} streamingV4Params - if v4 auth, object containing * @param {(object|null)} streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and * accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable) * credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance * @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation * @param {string} originOp - Origin operation
* @param {function} callback - callback function * @param {function} callback - callback function
@ -60,7 +60,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/ */
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params, canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
overheadField, log, originOp, callback) { log, originOp, callback) {
const putVersionId = request.headers['x-scal-s3-version-id']; const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === ''; const isPutVersion = putVersionId || putVersionId === '';
@ -116,7 +116,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
isDeleteMarker, isDeleteMarker,
replicationInfo: getReplicationInfo( replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo), objectKey, bucketMD, false, size, null, null, authInfo),
overheadField,
log, log,
}; };
@ -197,9 +196,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
const dontSkipBackend = externalBackends; const dontSkipBackend = externalBackends;
/* eslint-enable camelcase */ /* eslint-enable camelcase */
const requestLogger =
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
const mdOnlyHeader = request.headers['x-amz-meta-mdonly']; const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
const mdOnlySize = request.headers['x-amz-meta-size']; const mdOnlySize = request.headers['x-amz-meta-size'];
return async.waterfall([ return async.waterfall([
function storeData(next) { function storeData(next) {
if (size === 0) { if (size === 0) {
@ -294,7 +294,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
} }
return _storeInMDandDeleteData(bucketName, infoArr, return _storeInMDandDeleteData(bucketName, infoArr,
cipherBundle, metadataStoreParams, cipherBundle, metadataStoreParams,
options.dataToDelete, log, requestMethod, next); options.dataToDelete, requestLogger, requestMethod, next);
}, },
], callback); ], callback);
} }

View File

@ -1,18 +0,0 @@
/**
 * _bucketRequiresOplogUpdate - whether deleting an object from this bucket requires an oplog update
* @param {BucketInfo} bucket - bucket object
* @return {boolean} whether objects require oplog updates on deletion, or not
*/
function _bucketRequiresOplogUpdate(bucket) {
// Default behavior is to require an oplog update
if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
return true;
}
// If the bucket has lifecycle configuration or notification configuration
// set, we also require an oplog update
return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
}
module.exports = {
_bucketRequiresOplogUpdate,
};
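// Usage sketch (illustrative stand-ins for BucketInfo, not part of the diff): buckets with
// a lifecycle or notification configuration keep requiring oplog updates on object deletion,
// while a plain bucket does not.
const plainBucket = {
    getLifecycleConfiguration: () => null,
    getNotificationConfiguration: () => null,
};
const lifecycleBucket = {
    getLifecycleConfiguration: () => ({ Rules: [] }),
    getNotificationConfiguration: () => null,
};
// _bucketRequiresOplogUpdate(plainBucket)     -> null (falsy): the oplog update can be skipped
// _bucketRequiresOplogUpdate(lifecycleBucket) -> truthy: the oplog update is still required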

View File

@ -4,25 +4,23 @@ const {
LifecycleDateTime, LifecycleDateTime,
LifecycleUtils, LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers; } = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
const { // moves lifecycle transition deadlines 1 day earlier, mostly for testing
expireOneDayEarlier, const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
transitionOneDayEarlier, // moves lifecycle expiration deadlines 1 day earlier, mostly for testing
timeProgressionFactor, const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({ const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier, transitionOneDayEarlier,
expireOneDayEarlier, expireOneDayEarlier,
timeProgressionFactor,
}); });
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor); const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
function calculateDate(objDate, expDays, datetime) { function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay)); return new Date(datetime.getTimestamp(objDate) + expDays * oneDay);
} }
function formatExpirationHeader(date, id) { function formatExpirationHeader(date, id) {
@ -39,10 +37,8 @@ const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id';
function _generateExpHeadersObjects(rules, params, datetime) { function _generateExpHeadersObjects(rules, params, datetime) {
const tags = { const tags = {
TagSet: params.tags TagSet: Object.keys(params.tags)
? Object.keys(params.tags) .map(key => ({ Key: key, Value: params.tags[key] })),
.map(key => ({ Key: key, Value: params.tags[key] }))
: [],
}; };
const objectInfo = { Key: params.key }; const objectInfo = { Key: params.key };

View File

@ -21,7 +21,6 @@ function processCurrents(bucketName, listParams, isBucketVersioned, list) {
Name: bucketName, Name: bucketName,
Prefix: listParams.prefix, Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys, MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated, IsTruncated: !!list.IsTruncated,
Marker: listParams.marker, Marker: listParams.marker,
BeforeDate: listParams.beforeDate, BeforeDate: listParams.beforeDate,
@ -39,7 +38,7 @@ function processCurrents(bucketName, listParams, isBucketVersioned, list) {
Size: v.Size, Size: v.Size,
Owner: { Owner: {
ID: v.Owner.ID, ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName, DisplayName: v.Owner.DisplayName
}, },
StorageClass: v.StorageClass, StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags), TagSet: _makeTags(v.tags),
@ -78,7 +77,6 @@ function processNonCurrents(bucketName, listParams, list) {
Name: bucketName, Name: bucketName,
Prefix: listParams.prefix, Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys, MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated, IsTruncated: !!list.IsTruncated,
KeyMarker: listParams.keyMarker, KeyMarker: listParams.keyMarker,
VersionIdMarker: versionIdMarker, VersionIdMarker: versionIdMarker,
@ -100,7 +98,7 @@ function processNonCurrents(bucketName, listParams, list) {
Size: v.Size, Size: v.Size,
Owner: { Owner: {
ID: v.Owner.ID, ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName, DisplayName: v.Owner.DisplayName
}, },
StorageClass: v.StorageClass, StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags), TagSet: _makeTags(v.tags),
@ -121,7 +119,6 @@ function processOrphans(bucketName, listParams, list) {
Name: bucketName, Name: bucketName,
Prefix: listParams.prefix, Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys, MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated, IsTruncated: !!list.IsTruncated,
Marker: listParams.marker, Marker: listParams.marker,
BeforeDate: listParams.beforeDate, BeforeDate: listParams.beforeDate,
@ -138,7 +135,7 @@ function processOrphans(bucketName, listParams, list) {
LastModified: v.LastModified, LastModified: v.LastModified,
Owner: { Owner: {
ID: v.Owner.ID, ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName, DisplayName: v.Owner.DisplayName
}, },
VersionId: versionId, VersionId: versionId,
IsLatest: true, // for compatibility with AWS ListObjectVersions. IsLatest: true, // for compatibility with AWS ListObjectVersions.
@ -154,37 +151,9 @@ function getLocationConstraintErrorMessage(locationName) {
`- ${locationName} - is not listed in the locationConstraint config`; `- ${locationName} - is not listed in the locationConstraint config`;
} }
/**
* validateMaxScannedEntries - Validates and returns the maximum scanned entries value.
*
* @param {object} params - Query parameters
* @param {object} config - CloudServer configuration
* @param {number} min - Minimum number of entries to be scanned
* @returns {Object} - An object indicating the validation result:
* - isValid (boolean): Whether the validation is successful.
* - maxScannedLifecycleListingEntries (number): The validated maximum scanned entries value if isValid is true.
*/
function validateMaxScannedEntries(params, config, min) {
let maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
if (params['max-scanned-lifecycle-listing-entries']) {
const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10);
if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min ||
maxEntriesParams > maxScannedLifecycleListingEntries) {
return { isValid: false };
}
maxScannedLifecycleListingEntries = maxEntriesParams;
}
return { isValid: true, maxScannedLifecycleListingEntries };
}
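// Usage sketch (illustrative values, assuming a config object exposing
// maxScannedLifecycleListingEntries as above): the query parameter can only lower the
// configured maximum, and values that are non-numeric, below `min`, or above the
// configured maximum are rejected.
const sketchConfig = { maxScannedLifecycleListingEntries: 10000 };
// validateMaxScannedEntries({}, sketchConfig, 3)
//     => { isValid: true, maxScannedLifecycleListingEntries: 10000 }
// validateMaxScannedEntries({ 'max-scanned-lifecycle-listing-entries': '500' }, sketchConfig, 3)
//     => { isValid: true, maxScannedLifecycleListingEntries: 500 }
// validateMaxScannedEntries({ 'max-scanned-lifecycle-listing-entries': '2' }, sketchConfig, 3)
//     => { isValid: false }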
module.exports = { module.exports = {
processCurrents, processCurrents,
processNonCurrents, processNonCurrents,
processOrphans, processOrphans,
getLocationConstraintErrorMessage, getLocationConstraintErrorMessage,
validateMaxScannedEntries,
}; };

View File

@ -3,9 +3,7 @@ const moment = require('moment');
const { config } = require('../../../Config'); const { config } = require('../../../Config');
const vault = require('../../../auth/vault'); const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/** /**
* Calculates retain until date for the locked object version * Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period * @param {object} retention - includes days or years retention period
@ -21,9 +19,8 @@ function calculateRetainUntilDate(retention) {
const date = moment(); const date = moment();
// Calculate the number of days to retain the lock on the object // Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365; const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate const retainUntilDate
= date.add(retainUntilDaysInMs, 'ms'); = date.add(retainUntilDays, 'days');
return retainUntilDate.toISOString(); return retainUntilDate.toISOString();
} }
/** /**
@ -305,9 +302,7 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
if (err) { if (err) {
return cb(err); return cb(err);
} }
const explicitDenyExists = authorizationResults.some( if (authorizationResults[0].isAllowed !== true) {
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
if (explicitDenyExists) {
log.trace('authorization check failed for user', log.trace('authorization check failed for user',
{ {
'method': 'checkUserPolicyGovernanceBypass', 'method': 'checkUserPolicyGovernanceBypass',
@ -315,25 +310,7 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
}); });
return cb(errors.AccessDenied); return cb(errors.AccessDenied);
} }
// Convert authorization results into an easier to handle format return cb(null);
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
authInfo.getCanonicalID(),
authInfo,
actionImplicitDenies,
log,
request);
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
}); });
} }
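// Standalone sketch (illustrative vault results, not part of the diff) of the two steps
// used above: detect an explicit deny, then fold the results into the actionImplicitDenies
// map consumed by evaluateBucketPolicyWithIAM. The action name is only an example.
const sampleResults = [
    { action: 'bypassGovernanceRetention', isAllowed: false, isImplicit: true },
];
const explicitDenyExists = sampleResults.some(
    res => res.isAllowed === false && !res.isImplicit);
const actionImplicitDenies = sampleResults.reduce((acc, curr) => {
    acc[curr.action] = curr.isImplicit;
    return acc;
}, {});
// explicitDenyExists === false (the deny is implicit), and
// actionImplicitDenies deep-equals { bypassGovernanceRetention: true }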

View File

@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning'); const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore; const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/** /**
* Check if tier is supported * Check if tier is supported
@ -58,22 +58,13 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
bucketName, bucketName,
objectKey, objectKey,
versionId: decodedVidResult, versionId: decodedVidResult,
requestType: request.apiMethods || 'restoreObject', requestType: 'restoreObject',
/**
             * Restoring an object might not have any impact on
             * storage if the object is already restored: in
             * that case, the restore duration is simply extended. We disable the
* quota evaluation and trigger it manually.
*/
checkQuota: false,
request,
}; };
return async.waterfall([ return async.waterfall([
// get metadata of bucket and object // get metadata of bucket and object
function validateBucketAndObject(next) { function validateBucketAndObject(next) {
return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies, return mdUtils.metadataValidateBucketAndObj(mdValueParams, log, (err, bucketMD, objectMD) => {
log, (err, bucketMD, objectMD) => {
if (err) { if (err) {
log.trace('request authorization failed', { method: METHOD, error: err }); log.trace('request authorization failed', { method: METHOD, error: err });
return next(err); return next(err);
@ -124,16 +115,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
return next(err, bucketMD, objectMD); return next(err, bucketMD, objectMD);
}); });
}, },
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) { function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {}; const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params, metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,

View File

@ -1,32 +0,0 @@
const { errors } = require('arsenal');
const { unsupportedSignatureChecksums, supportedSignatureChecksums } = require('../../../../constants');
function validateChecksumHeaders(headers) {
// If the x-amz-trailer header is present the request is using one of the
// trailing checksum algorithms, which are not supported.
if (headers['x-amz-trailer'] !== undefined) {
return errors.BadRequest.customizeDescription('trailing checksum is not supported');
}
const signatureChecksum = headers['x-amz-content-sha256'];
if (signatureChecksum === undefined) {
return null;
}
if (supportedSignatureChecksums.has(signatureChecksum)) {
return null;
}
// If the value is not one of the possible checksum algorithms
// the only other valid value is the actual sha256 checksum of the payload.
// Do a simple sanity check of the length to guard against future algos.
// If the value is an unknown algo, then it will fail checksum validation.
if (!unsupportedSignatureChecksums.has(signatureChecksum) && signatureChecksum.length === 64) {
return null;
}
return errors.BadRequest.customizeDescription('unsupported checksum algorithm');
}
module.exports = validateChecksumHeaders;
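// Usage sketch (illustrative header values; the supported/unsupported checksum sets live in
// the constants module and are not reproduced here, and the require path is hypothetical):
// const validateChecksumHeaders = require('./validateChecksumHeaders');
// validateChecksumHeaders({})                                          => null (nothing to validate)
// validateChecksumHeaders({ 'x-amz-trailer': 'x-amz-checksum-crc32' }) => BadRequest (trailing checksum)
// validateChecksumHeaders({ 'x-amz-content-sha256': 'a'.repeat(64) })  => null (treated as a payload sha256)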

View File

@ -4,7 +4,7 @@ const async = require('async');
const metadata = require('../../../metadata/wrapper'); const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config'); const { config } = require('../../../Config');
const { scaledMsPerDay } = config.getTimeOptions(); const oneDay = 24 * 60 * 60 * 1000;
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata // Use Arsenal function to generate a version ID used internally by metadata
@ -210,7 +210,7 @@ function processVersioningState(mst, vstat, nullVersionCompatMode) {
// null keys are used, which is used as an optimization to // null keys are used, which is used as an optimization to
// avoid having to check the versioned key since there can // avoid having to check the versioned key since there can
// be no more versioned key to clean up // be no more versioned key to clean up
if (mst.isNull && mst.versionId && !mst.isNull2) { if (mst.isNull && !mst.isNull2) {
const delOptions = { versionId: mst.versionId }; const delOptions = { versionId: mst.versionId };
return { options, delOptions }; return { options, delOptions };
} }
@ -241,7 +241,7 @@ function processVersioningState(mst, vstat, nullVersionCompatMode) {
if (masterIsNull) { if (masterIsNull) {
// if master is a null version or a non-versioned key, // if master is a null version or a non-versioned key,
// copy it to a new null key // copy it to a new null key
const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId; const nullVersionId = mst.isNull ? mst.versionId : nonVersionedObjId;
if (nullVersionCompatMode) { if (nullVersionCompatMode) {
options.extraMD = { options.extraMD = {
nullVersionId, nullVersionId,
@ -377,130 +377,63 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
}); });
} }
/** Return options to pass to Metadata layer for version-specific
* operations with the given requested version ID
*
* @param {object} objectMD - object metadata
* @param {boolean} nullVersionCompatMode - if true, behaves in null
* version compatibility mode
* @return {object} options object with params:
* {string} [options.versionId] - specific versionId to update
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
* Metadata backend if we're updating or deleting a new-style null
* version (stored in master or null key), or not a null version.
*/
function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
// Use the internal versionId if it is a "real" null version (not
// non-versioned)
//
// If the target object is non-versioned: do not specify a
// "versionId" attribute nor "isNull"
//
// If the target version is a null version, i.e. has the "isNull"
// attribute:
//
// - send the "isNull=true" param to Metadata if the version is
// already a null key put by a non-compat mode Cloudserver, to
// let Metadata know that the null key is to be updated or
// deleted. This is the case if the "isNull2" metadata attribute
// exists
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that it is a legacy null version
//
// If the target version is not a null version and is versioned:
//
// - send the "isNull=false" param to Metadata in non-compat
// mode (mandatory for v1 format)
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that an existing null version may not be stored in a
// null key
//
//
if (objectMD.versionId === undefined) {
return {};
}
const options = { versionId: objectMD.versionId };
if (objectMD.isNull) {
if (objectMD.isNull2) {
options.isNull = true;
}
} else if (!nullVersionCompatMode) {
options.isNull = false;
}
return options;
}
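// Worked examples (illustrative metadata objects) of the mapping implemented above, with
// nullVersionCompatMode === false:
// getVersionSpecificMetadataOptions({}, false)
//     => {}                                    (non-versioned object: no hint sent)
// getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true, isNull2: true }, false)
//     => { versionId: 'v1', isNull: true }     (new-style null key: tell Metadata explicitly)
// getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true }, false)
//     => { versionId: 'v1' }                   (legacy null version: no isNull hint)
// getVersionSpecificMetadataOptions({ versionId: 'v1' }, false)
//     => { versionId: 'v1', isNull: false }    (regular version in non-compat mode)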
/** preprocessingVersioningDelete - return versioning information for S3 to /** preprocessingVersioningDelete - return versioning information for S3 to
* manage deletion of objects and versions, including creation of delete markers * manage deletion of objects and versions, including creation of delete markers
* @param {string} bucketName - name of bucket * @param {string} bucketName - name of bucket
* @param {object} bucketMD - bucket metadata * @param {object} bucketMD - bucket metadata
* @param {object} objectMD - obj metadata * @param {object} objectMD - obj metadata
* @param {string} [reqVersionId] - specific version ID sent as part of request * @param {string} [reqVersionId] - specific version ID sent as part of request
* @param {boolean} nullVersionCompatMode - if true, behaves in null version compatibility mode * @param {boolean} nullVersionCompatMode - if true, behaves in null
* version compatibility mode and return appropriate values:
* - in normal mode, returns an 'isNull' boolean sent to Metadata (true or false)
* - in compatibility mode, does not return an 'isNull' property
* @return {object} options object with params: * @return {object} options object with params:
* {boolean} [options.deleteData=true|undefined] - whether to delete data (if undefined * {boolean} [options.deleteData=true|undefined] - whether to delete data (if undefined
* means creating a delete marker instead) * means creating a delete marker instead)
* {string} [options.versionId] - specific versionId to delete * {string} [options.versionId] - specific versionId to delete
* {boolean} [options.isNull=true|false|undefined] - if set, tells the * {boolean} [options.isNull=true|false|undefined] - if set, tells the
* Metadata backend if we're deleting a new-style null version (stored * Metadata backend if we're deleting a null version or not a null
* in master or null key), or not a null version. * version. Not set if `nullVersionCompatMode` is true.
*/ */
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId, nullVersionCompatMode) { function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId, nullVersionCompatMode) {
let options = {}; const options = {};
if (bucketMD.getVersioningConfiguration() && reqVersionId) {
options = getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode);
}
if (!bucketMD.getVersioningConfiguration() || reqVersionId) { if (!bucketMD.getVersioningConfiguration() || reqVersionId) {
// delete data if bucket is non-versioned or the request // delete data if bucket is non-versioned or the request
// deletes a specific version // deletes a specific version
options.deleteData = true; options.deleteData = true;
} }
if (bucketMD.getVersioningConfiguration() && reqVersionId) {
if (reqVersionId === 'null') {
// deleting the 'null' version if it exists:
//
// - use its internal versionId if it is a "real" null
// version (not non-versioned)
//
// - send the "isNull" param to Metadata if:
//
// - in non-compat mode (mandatory for v1 format)
//
// - OR if the version is already a null key put by a
// non-compat mode Cloudserver, to let Metadata know that
// the null key is to be deleted. This is the case if the
// "isNull2" param is set.
if (objectMD.versionId !== undefined) {
options.versionId = objectMD.versionId;
if (objectMD.isNull2) {
options.isNull = true;
}
}
} else {
// deleting a specific version
options.versionId = reqVersionId;
if (!nullVersionCompatMode) {
options.isNull = false;
}
}
}
return options; return options;
} }
/**
 * Keep metadata when the object is restored from cold storage
* but remove the specific ones we don't want to keep
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {undefined}
*/
function restoreMetadata(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
// We need to keep user metadata and tags
Object.keys(objMD).forEach(key => {
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
metadataStoreParams.metaHeaders[key] = objMD[key];
}
});
if (objMD['x-amz-website-redirect-location']) {
if (!metadataStoreParams.headers) {
metadataStoreParams.headers = {};
}
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
}
if (objMD.replicationInfo) {
metadataStoreParams.replicationInfo = objMD.replicationInfo;
}
if (objMD.legalHold) {
metadataStoreParams.legalHold = objMD.legalHold;
}
if (objMD.acl) {
metadataStoreParams.acl = objMD.acl;
}
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.taggingCopy = objMD.tags;
}
/** overwritingVersioning - return versioning information for S3 to handle /** overwritingVersioning - return versioning information for S3 to handle
* storing version metadata with a specific version id. * storing version metadata with a specific version id.
* @param {object} objMD - obj metadata * @param {object} objMD - obj metadata
@ -512,8 +445,10 @@ function restoreMetadata(objMD, metadataStoreParams) {
* version id of the null version * version id of the null version
*/ */
function overwritingVersioning(objMD, metadataStoreParams) { function overwritingVersioning(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.updateMicroVersionId = true; metadataStoreParams.updateMicroVersionId = true;
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
// set correct originOp // set correct originOp
metadataStoreParams.originOp = 's3:ObjectRestore:Completed'; metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
@ -526,7 +461,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
restoreRequestedAt: objMD.archive?.restoreRequestedAt, restoreRequestedAt: objMD.archive?.restoreRequestedAt,
restoreRequestedDays: objMD.archive?.restoreRequestedDays, restoreRequestedDays: objMD.archive?.restoreRequestedDays,
restoreCompletedAt: new Date(now), restoreCompletedAt: new Date(now),
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)), restoreWillExpireAt: new Date(now + (days * oneDay)),
}; };
/* eslint-enable no-param-reassign */ /* eslint-enable no-param-reassign */
@ -542,8 +477,6 @@ function overwritingVersioning(objMD, metadataStoreParams) {
}; };
} }
restoreMetadata(objMD, metadataStoreParams);
return options; return options;
} }
@ -554,7 +487,6 @@ module.exports = {
processVersioningState, processVersioningState,
getMasterState, getMasterState,
versioningPreprocessing, versioningPreprocessing,
getVersionSpecificMetadataOptions,
preprocessingVersioningDelete, preprocessingVersioningDelete,
overwritingVersioning, overwritingVersioning,
decodeVID, decodeVID,

View File

@ -101,33 +101,8 @@ function validateWebsiteHeader(header) {
header.startsWith('http://') || header.startsWith('https://')); header.startsWith('http://') || header.startsWith('https://'));
} }
/**
* appendWebsiteIndexDocument - append index to objectKey if necessary
* @param {object} request - normalized request object
* @param {string} indexDocumentSuffix - index document from website config
* @param {boolean} force - flag to force append index
* @return {undefined}
*/
function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
const reqObjectKey = request.objectKey ? request.objectKey : '';
/* eslint-disable no-param-reassign */
// find index document if "directory" sent in request
if (reqObjectKey.endsWith('/')) {
request.objectKey += indexDocumentSuffix;
// find index document if no key provided
} else if (reqObjectKey === '') {
request.objectKey = indexDocumentSuffix;
// force for redirect 302 on folder without trailing / that has an index
} else if (force) {
request.objectKey += `/${indexDocumentSuffix}`;
}
/* eslint-enable no-param-reassign */
}
module.exports = { module.exports = {
findRoutingRule, findRoutingRule,
extractRedirectInfo, extractRedirectInfo,
validateWebsiteHeader, validateWebsiteHeader,
appendWebsiteIndexDocument,
}; };

View File

@ -1,314 +0,0 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
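// Worked examples (illustrative sizes, with minimal stand-ins for BucketInfo):
const versionedBucket = { isVersioningEnabled: () => true };
const nonVersionedBucket = { isVersioningEnabled: () => false };
// Replacing a 100-byte object with a 40-byte one in a non-versioned bucket counts -60:
// processBytesToWrite('objectPut', nonVersionedBucket, undefined, 40, { 'content-length': '100' }) === -60
// Deleting a specific 100-byte version counts -100:
// processBytesToWrite('objectDelete', versionedBucket, 'v1', 0, { 'content-length': '100' }) === -100
// Restoring a 100-byte archived object counts +100:
// processBytesToWrite('objectRestore', nonVersionedBucket, undefined, 0, { 'content-length': '100' }) === 100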
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
/**
 * Evaluates quotas for a bucket and an account and updates the inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
/**
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
 * the incoming bytes, is under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {boolean} - true if the quota is valid, false otherwise
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
        // is checked second. This logic handles these API calls by
        // ensuring the bytes are positive (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes are compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// the sum of the current utilization metrics only are compared with the
// quota. They include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};
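// Illustrative summary (not part of the diff) of the two inflight modes handled in
// validateQuotas above, for a 100-byte PUT checked against a 1000-byte quota:
// - inflights enabled:  the 100 bytes are sent to the quota service as `inflight`, and the
//   returned bytesTotal (which already includes them) is compared with 1000.
// - inflights disabled: nothing is sent, and bytesTotal + 100 is compared with 1000 locally.
const sketchInflightEnabled = true; // stands in for config.isQuotaInflightEnabled()
const sketchInflight = 100;
const sentToService = sketchInflightEnabled ? sketchInflight : undefined; // `_inflights` above
const addedLocally = sketchInflightEnabled ? 0 : sketchInflight;          // `inflightForCheck` above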

View File

@ -1,12 +1,11 @@
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const constants = require('../../../constants'); const constants = require('../../../constants');
const services = require('../../services'); const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils'); const { metadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities'); const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler'); const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processCurrents, const { getLocationConstraintErrorMessage, processCurrents } = require('../apiUtils/object/lifecycle');
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo, function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, isBucketVersioned, log, callback) { bucketName, list, isBucketVersioned, log, callback) {
@ -44,14 +43,6 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call
} }
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys); const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
const minEntriesToBeScanned = 1;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name']; const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) { if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName); const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
@ -74,10 +65,9 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call
beforeDate: params['before-date'], beforeDate: params['before-date'],
marker: params.marker, marker: params.marker,
excludedDataStoreName, excludedDataStoreName,
maxScannedLifecycleListingEntries,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
if (err) { if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err }); log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics( monitoring.promMetrics(

View File

@ -1,13 +1,11 @@
const { errors, versioning } = require('arsenal'); const { errors, versioning } = require('arsenal');
const constants = require('../../../constants'); const constants = require('../../../constants');
const services = require('../../services'); const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils'); const { metadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities'); const { pushMetric } = require('../../utapi/utilities');
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
const monitoring = require('../../utilities/monitoringHandler'); const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processNonCurrents, const { getLocationConstraintErrorMessage, processNonCurrents } = require('../apiUtils/object/lifecycle');
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo, function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) { bucketName, list, log, callback) {
@ -45,16 +43,6 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c
} }
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys); const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is the required minimum because at least three entries must be scanned to determine version eligibility:
// two entries for the current version (the master key and its version key) and the following one for the first non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name']; const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) { if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName); const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
@ -77,13 +65,12 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c
beforeDate: params['before-date'], beforeDate: params['before-date'],
keyMarker: params['key-marker'], keyMarker: params['key-marker'],
excludedDataStoreName, excludedDataStoreName,
maxScannedLifecycleListingEntries,
}; };
listParams.versionIdMarker = params['version-id-marker'] ? listParams.versionIdMarker = params['version-id-marker'] ?
versionIdUtils.decode(params['version-id-marker']) : undefined; versionIdUtils.decode(params['version-id-marker']) : undefined;
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
if (err) { if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err }); log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics( monitoring.promMetrics(
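To make the scanning-minimum comment above concrete, here is a hypothetical versioned-listing layout; the key name and version-id separator are illustrative and not taken from this diff:

// Entries scanned for key "photo.jpg" in a versioned bucket (hypothetical):
//   photo.jpg                      -> master key (current version)
//   photo.jpg\u0000<versionId-A>   -> version key of that same current version
//   photo.jpg\u0000<versionId-B>   -> first non-current version
// Only once the third entry is read can the listing classify <versionId-B> as
// non-current, hence the minimum of 3 scanned entries.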

View File

@ -1,11 +1,10 @@
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const constants = require('../../../constants'); const constants = require('../../../constants');
const services = require('../../services'); const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils'); const { metadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities'); const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler'); const monitoring = require('../../utilities/monitoringHandler');
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle'); const { processOrphans } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo, function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) { bucketName, list, log, callback) {
@ -43,16 +42,6 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request
} }
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys); const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is the required minimum because at least three entries must be scanned to determine version eligibility:
// two entries for the current version (the master key and its version key) and the following one for the first non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
return callback(errors.InvalidArgument);
}
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
@ -65,10 +54,9 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request
prefix: params.prefix, prefix: params.prefix,
beforeDate: params['before-date'], beforeDate: params['before-date'],
marker: params.marker, marker: params.marker,
maxScannedLifecycleListingEntries,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
if (err) { if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err }); log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics( monitoring.promMetrics(

View File

@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const deleteBucket = require('./apiUtils/bucket/bucketDeletion'); const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -34,7 +34,7 @@ function bucketDelete(authInfo, request, log, cb) {
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, return metadataValidateBucket(metadataValParams, log,
(err, bucketMD) => { (err, bucketMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucketMD); request.method, bucketMD);
@ -48,7 +48,7 @@ function bucketDelete(authInfo, request, log, cb) {
log.trace('passed checks', log.trace('passed checks',
{ method: 'metadataValidateBucket' }); { method: 'metadataValidateBucket' });
return deleteBucket(authInfo, bucketMD, bucketName, return deleteBucket(authInfo, bucketMD, bucketName,
authInfo.getCanonicalID(), request, log, err => { authInfo.getCanonicalID(), log, err => {
if (err) { if (err) {
monitoring.promMetrics( monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucket'); 'DELETE', bucketName, err.code, 'deleteBucket');

View File

@ -38,8 +38,7 @@ function bucketDeleteCors(authInfo, request, log, callback) {
} }
log.trace('found bucket in metadata'); log.trace('found bucket in metadata');
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { log.debug('access denied for user on bucket', {
requestType, requestType,
method: 'bucketDeleteCors', method: 'bucketDeleteCors',

View File

@ -1,7 +1,7 @@
const async = require('async'); const async = require('async');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner'); const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
@ -21,12 +21,12 @@ function bucketDeleteEncryption(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketDeleteEncryption', requestType: 'bucketDeleteEncryption',
request, request,
}; };
return async.waterfall([ return async.waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), next => metadataValidateBucket(metadataValParams, log, next),
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
(bucket, next) => { (bucket, next) => {
const sseConfig = bucket.getServerSideEncryption(); const sseConfig = bucket.getServerSideEncryption();

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketDeleteLifecycle', requestType: 'bucketDeleteLifecycle',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/** /**
@ -16,10 +16,10 @@ function bucketDeletePolicy(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketDeletePolicy', requestType: 'bucketDeletePolicy',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -1,58 +0,0 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
 * Bucket Delete Quota - Delete (reset to unlimited) the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketDeleteQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketDeleteQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
request,
};
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket)),
(bucket, next) => {
bucket.setQuota(0);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketDeleteQuota'
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}
module.exports = bucketDeleteQuota;
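Functionally, the handler in this deleted file boils down to clearing the quota on the bucket metadata and persisting it. A minimal sketch of that core step, using only the calls that appear above (the helper name is hypothetical):

const metadata = require('../metadata/wrapper');

// Hypothetical helper (illustration only): a quota of 0 means no quota is enforced.
function clearBucketQuota(bucket, log, cb) {
    bucket.setQuota(0);
    return metadata.updateBucket(bucket.getName(), bucket, log, cb);
}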

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketDeleteReplication', requestType: 'bucketDeleteReplication',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -1,6 +1,6 @@
const { waterfall } = require('async'); const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
@ -20,20 +20,16 @@ function bucketDeleteTagging(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketDeleteTagging', requestType: 'bucketDeleteTagging',
request,
}; };
let bucket = null; let bucket = null;
return waterfall([ return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next => metadataValidateBucket(metadataValParams, log,
(err, b) => { (err, b) => {
if (err) {
return next(err);
}
bucket = b; bucket = b;
bucket.setTags([]); bucket.setTags([]);
return next(); return next(err);
}), }),
next => metadata.updateBucket(bucket.getName(), bucket, log, next), next => metadata.updateBucket(bucket.getName(), bucket, log, next),
], err => { ], err => {

View File

@ -30,8 +30,7 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
} }
log.trace('found bucket in metadata'); log.trace('found bucket in metadata');
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { log.debug('access denied for user on bucket', {
requestType, requestType,
method: 'bucketDeleteWebsite', method: 'bucketDeleteWebsite',

View File

@ -2,7 +2,7 @@ const querystring = require('querystring');
const { errors, versioning, s3middleware } = require('arsenal'); const { errors, versioning, s3middleware } = require('arsenal');
const constants = require('../../constants'); const constants = require('../../constants');
const services = require('../services'); const services = require('../services');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const escapeForXml = s3middleware.escapeForXml; const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
@ -322,7 +322,7 @@ function bucketGet(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGet', requestType: 'bucketGet',
request, request,
}; };
const listParams = { const listParams = {
@ -345,7 +345,7 @@ function bucketGet(authInfo, request, log, callback) {
listParams.marker = params.marker; listParams.marker = params.marker;
} }
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (err) { if (err) {

View File

@ -1,5 +1,5 @@
const aclUtils = require('../utilities/aclUtils'); const aclUtils = require('../utilities/aclUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault'); const vault = require('../auth/vault');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
@ -44,7 +44,7 @@ function bucketGetACL(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetACL', requestType: 'bucketGetACL',
request, request,
}; };
const grantInfo = { const grantInfo = {
@ -55,7 +55,7 @@ function bucketGetACL(authInfo, request, log, callback) {
}, },
}; };
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (err) { if (err) {

View File

@ -39,8 +39,7 @@ function bucketGetCors(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { log.debug('access denied for user on bucket', {
requestType, requestType,
method: 'bucketGetCors', method: 'bucketGetCors',

View File

@ -4,7 +4,7 @@ const async = require('async');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner'); const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml; const escapeForXml = s3middleware.escapeForXml;
/** /**
@ -22,12 +22,12 @@ function bucketGetEncryption(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetEncryption', requestType: 'bucketGetEncryption',
request, request,
}; };
return async.waterfall([ return async.waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), next => metadataValidateBucket(metadataValParams, log, next),
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
(bucket, next) => { (bucket, next) => {
// If sseInfo is present but the `mandatory` flag is not set // If sseInfo is present but the `mandatory` flag is not set

View File

@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const LifecycleConfiguration = const LifecycleConfiguration =
require('arsenal').models.LifecycleConfiguration; require('arsenal').models.LifecycleConfiguration;
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -21,10 +21,10 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetLifecycle', requestType: 'bucketGetLifecycle',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -41,8 +41,7 @@ function bucketGetLocation(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for account on bucket', { log.debug('access denied for account on bucket', {
requestType, requestType,
method: 'bucketGetLocation', method: 'bucketGetLocation',

View File

@ -1,4 +1,4 @@
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { NotificationConfiguration } = require('arsenal').models; const { NotificationConfiguration } = require('arsenal').models;
@ -37,11 +37,11 @@ function bucketGetNotification(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetNotification', requestType: 'bucketGetNotification',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -1,5 +1,5 @@
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const ObjectLockConfiguration = const ObjectLockConfiguration =
@ -33,10 +33,10 @@ function bucketGetObjectLock(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetObjectLock', requestType: 'bucketGetObjectLock',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -1,6 +1,6 @@
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/** /**
@ -17,11 +17,11 @@ function bucketGetPolicy(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetPolicy', requestType: 'bucketGetPolicy',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -1,58 +0,0 @@
const { errors } = require('arsenal');
const { pushMetric } = require('../utapi/utilities');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
* bucketGetQuota - Get the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketGetQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGetQuota' });
const { bucketName, headers, method } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketGetQuota',
request,
};
const xml = [];
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketGetQuota',
});
return callback(err, null, corsHeaders);
}
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<GetBucketQuota>',
'<Name>', bucket.getName(), '</Name>',
);
const bucketQuota = bucket.getQuota();
if (!bucketQuota) {
log.debug('bucket has no quota', {
method: 'bucketGetQuota',
});
return callback(errors.NoSuchQuota, null,
corsHeaders);
}
xml.push('<Quota>', bucketQuota, '</Quota>',
'</GetBucketQuota>');
pushMetric('getBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, xml.join(''), corsHeaders);
});
}
module.exports = bucketGetQuota;
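For reference, a successful response from the handler in this deleted file carries an XML body shaped as follows; the bucket name and quota value are hypothetical, and the handler emits it without the pretty-printing shown here:

<?xml version="1.0" encoding="UTF-8"?>
<GetBucketQuota>
  <Name>example-bucket</Name>
  <Quota>10000</Quota>
</GetBucketQuota>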

View File

@ -1,6 +1,6 @@
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const { getReplicationConfigurationXML } = const { getReplicationConfigurationXML } =
require('./apiUtils/bucket/getReplicationConfiguration'); require('./apiUtils/bucket/getReplicationConfiguration');
@ -21,10 +21,10 @@ function bucketGetReplication(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetReplication', requestType: 'bucketGetReplication',
request, request,
}; };
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket); const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) { if (err) {
log.debug('error processing request', { log.debug('error processing request', {

View File

@ -1,4 +1,4 @@
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner'); const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
@ -67,7 +67,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetTagging', requestType: 'bucketGetTagging',
request, request,
}; };
let bucket = null; let bucket = null;
@ -75,7 +75,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
let tags = null; let tags = null;
return waterfall([ return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next => metadataValidateBucket(metadataValParams, log,
(err, b) => { (err, b) => {
bucket = b; bucket = b;
return next(err); return next(err);

View File

@ -1,4 +1,4 @@
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -54,11 +54,11 @@ function bucketGetVersioning(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketGetVersioning', requestType: 'bucketGetVersioning',
request, request,
}; };
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (err) { if (err) {

View File

@ -39,8 +39,7 @@ function bucketGetWebsite(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { log.debug('access denied for user on bucket', {
requestType, requestType,
method: 'bucketGetWebsite', method: 'bucketGetWebsite',

View File

@ -1,5 +1,5 @@
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -19,10 +19,10 @@ function bucketHead(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketHead', requestType: 'bucketHead',
request, request,
}; };
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (err) { if (err) {

View File

@ -45,8 +45,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
} else if (parsedHost && restEndpoints[parsedHost]) { } else if (parsedHost && restEndpoints[parsedHost]) {
locationConstraintChecked = restEndpoints[parsedHost]; locationConstraintChecked = restEndpoints[parsedHost];
} else { } else {
locationConstraintChecked = Object.keys(locationConstraints)[0]; log.trace('no location constraint provided on bucket put;' +
log.trace('no location constraint provided on bucket put; setting ' + locationConstraintChecked); 'setting us-east-1');
locationConstraintChecked = 'us-east-1';
} }
if (!locationConstraints[locationConstraintChecked]) { if (!locationConstraints[locationConstraintChecked]) {

View File

@ -6,7 +6,7 @@ const aclUtils = require('../utilities/aclUtils');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation'); const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants'); const constants = require('../../constants');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault'); const vault = require('../auth/vault');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -44,7 +44,7 @@ const monitoring = require('../utilities/monitoringHandler');
function bucketPutACL(authInfo, request, log, callback) { function bucketPutACL(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutACL' }); log.debug('processing request', { method: 'bucketPutACL' });
const { bucketName } = request; const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID(); const canonicalID = authInfo.getCanonicalID();
const newCannedACL = request.headers['x-amz-acl']; const newCannedACL = request.headers['x-amz-acl'];
const possibleCannedACL = [ const possibleCannedACL = [
@ -54,6 +54,19 @@ function bucketPutACL(authInfo, request, log, callback) {
'authenticated-read', 'authenticated-read',
'log-delivery-write', 'log-delivery-write',
]; ];
if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) {
log.trace('invalid canned acl argument', {
acl: newCannedACL,
method: 'bucketPutACL',
});
monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
return callback(errors.InvalidArgument);
}
if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
log.trace('invalid acl header');
monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
return callback(errors.InvalidArgument);
}
const possibleGroups = [constants.allAuthedUsersId, const possibleGroups = [constants.allAuthedUsersId,
constants.publicId, constants.publicId,
constants.logId, constants.logId,
@ -61,7 +74,7 @@ function bucketPutACL(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutACL', requestType: 'bucketPutACL',
request, request,
}; };
const possibleGrants = ['FULL_CONTROL', 'WRITE', const possibleGrants = ['FULL_CONTROL', 'WRITE',
@ -92,7 +105,7 @@ function bucketPutACL(authInfo, request, log, callback) {
return async.waterfall([ return async.waterfall([
function waterfall1(next) { function waterfall1(next) {
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, metadataValidateBucket(metadataValParams, log,
(err, bucket) => { (err, bucket) => {
if (err) { if (err) {
log.trace('request authorization failed', { log.trace('request authorization failed', {
@ -101,18 +114,6 @@ function bucketPutACL(authInfo, request, log, callback) {
}); });
return next(err, bucket); return next(err, bucket);
} }
// if the API call is allowed, ensure that the parameters are valid
if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) {
log.trace('invalid canned acl argument', {
acl: newCannedACL,
method: 'bucketPutACL',
});
return next(errors.InvalidArgument);
}
if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
log.trace('invalid acl header');
return next(errors.InvalidArgument);
}
return next(null, bucket); return next(null, bucket);
}); });
}, },

View File

@ -23,7 +23,7 @@ const requestType = 'bucketPutCors';
*/ */
function bucketPutCors(authInfo, request, log, callback) { function bucketPutCors(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutCors' }); log.debug('processing request', { method: 'bucketPutCors' });
const { bucketName } = request; const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID(); const canonicalID = authInfo.getCanonicalID();
if (!request.post) { if (!request.post) {
@ -70,8 +70,7 @@ function bucketPutCors(authInfo, request, log, callback) {
}); });
}, },
function validateBucketAuthorization(bucket, rules, corsHeaders, next) { function validateBucketAuthorization(bucket, rules, corsHeaders, next) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for account on bucket', { log.debug('access denied for account on bucket', {
requestType, requestType,
}); });

View File

@ -3,7 +3,7 @@ const async = require('async');
const { parseEncryptionXml } = require('./apiUtils/bucket/bucketEncryption'); const { parseEncryptionXml } = require('./apiUtils/bucket/bucketEncryption');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner'); const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const kms = require('../kms/wrapper'); const kms = require('../kms/wrapper');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -18,17 +18,17 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
*/ */
function bucketPutEncryption(authInfo, request, log, callback) { function bucketPutEncryption(authInfo, request, log, callback) {
const { bucketName } = request; const bucketName = request.bucketName;
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutEncryption', requestType: 'bucketPutEncryption',
request, request,
}; };
return async.waterfall([ return async.waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next), next => metadataValidateBucket(metadataValParams, log, next),
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)), (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
(bucket, next) => { (bucket, next) => {
log.trace('parsing encryption config', { method: 'bucketPutEncryption' }); log.trace('parsing encryption config', { method: 'bucketPutEncryption' });

View File

@ -7,7 +7,7 @@ const config = require('../Config').config;
const parseXML = require('../utilities/parseXML'); const parseXML = require('../utilities/parseXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -23,11 +23,11 @@ const monitoring = require('../utilities/monitoringHandler');
function bucketPutLifecycle(authInfo, request, log, callback) { function bucketPutLifecycle(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutLifecycle' }); log.debug('processing request', { method: 'bucketPutLifecycle' });
const { bucketName } = request; const bucketName = request.bucketName;
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutLifecycle', requestType: 'bucketPutLifecycle',
request, request,
}; };
return waterfall([ return waterfall([
@ -45,7 +45,7 @@ function bucketPutLifecycle(authInfo, request, log, callback) {
return next(null, configObj); return next(null, configObj);
}); });
}, },
(lcConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (lcConfig, next) => metadataValidateBucket(metadataValParams, log,
(err, bucket) => { (err, bucket) => {
if (err) { if (err) {
return next(err, bucket); return next(err, bucket);

View File

@ -4,7 +4,7 @@ const parseXML = require('../utilities/parseXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const getNotificationConfiguration = require('./apiUtils/bucket/getNotificationConfiguration'); const getNotificationConfiguration = require('./apiUtils/bucket/getNotificationConfiguration');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
/** /**
@ -19,11 +19,11 @@ const { pushMetric } = require('../utapi/utilities');
function bucketPutNotification(authInfo, request, log, callback) { function bucketPutNotification(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutNotification' }); log.debug('processing request', { method: 'bucketPutNotification' });
const { bucketName } = request; const bucketName = request.bucketName;
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutNotification', requestType: 'bucketPutNotification',
request, request,
}; };
@ -34,7 +34,7 @@ function bucketPutNotification(authInfo, request, log, callback) {
const notifConfig = notificationConfig.error ? undefined : notificationConfig; const notifConfig = notificationConfig.error ? undefined : notificationConfig;
process.nextTick(() => next(notificationConfig.error, notifConfig)); process.nextTick(() => next(notificationConfig.error, notifConfig));
}, },
(notifConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (notifConfig, next) => metadataValidateBucket(metadataValParams, log,
(err, bucket) => next(err, bucket, notifConfig)), (err, bucket) => next(err, bucket, notifConfig)),
(bucket, notifConfig, next) => { (bucket, notifConfig, next) => {
bucket.setNotificationConfiguration(notifConfig); bucket.setNotificationConfiguration(notifConfig);

View File

@ -1,13 +1,13 @@
const { waterfall } = require('async'); const { waterfall } = require('async');
const arsenal = require('arsenal'); const arsenal = require('arsenal');
const { errors } = arsenal; const errors = arsenal.errors;
const { models: { ObjectLockConfiguration } } = arsenal; const ObjectLockConfiguration = arsenal.models.ObjectLockConfiguration;
const parseXML = require('../utilities/parseXML'); const parseXML = require('../utilities/parseXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
/** /**
@ -26,7 +26,7 @@ function bucketPutObjectLock(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutObjectLock', requestType: 'bucketPutObjectLock',
request, request,
}; };
return waterfall([ return waterfall([
@ -41,7 +41,7 @@ function bucketPutObjectLock(authInfo, request, log, callback) {
return next(configObj.error || null, configObj); return next(configObj.error || null, configObj);
}); });
}, },
(objectLockConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, (objectLockConfig, next) => metadataValidateBucket(metadataValParams,
log, (err, bucket) => { log, (err, bucket) => {
if (err) { if (err) {
return next(err, bucket); return next(err, bucket);

View File

@ -1,9 +1,10 @@
const async = require('async'); const async = require('async');
const { errors, models } = require('arsenal'); const { errors, models } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { validatePolicyResource, validatePolicyConditions } = const { validatePolicyResource } =
require('./apiUtils/authorization/permissionChecks'); require('./apiUtils/authorization/permissionChecks');
const { BucketPolicy } = models; const { BucketPolicy } = models;
@ -16,7 +17,8 @@ const { BucketPolicy } = models;
function _checkNotImplementedPolicy(policyString) { function _checkNotImplementedPolicy(policyString) {
// bucket names and key names cannot include "", so including those // bucket names and key names cannot include "", so including those
// isolates not implemented keys // isolates not implemented keys
return policyString.includes('"Service"') return policyString.includes('"Condition"')
|| policyString.includes('"Service"')
|| policyString.includes('"Federated"'); || policyString.includes('"Federated"');
} }
@ -35,7 +37,7 @@ function bucketPutPolicy(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutPolicy', requestType: 'bucketPutPolicy',
request, request,
}; };
@ -65,10 +67,10 @@ function bucketPutPolicy(authInfo, request, log, callback) {
return next(errors.MalformedPolicy.customizeDescription( return next(errors.MalformedPolicy.customizeDescription(
'Policy has invalid resource')); 'Policy has invalid resource'));
} }
return next(validatePolicyConditions(bucketPolicy), bucketPolicy); return next(null, bucketPolicy);
}); });
}, },
(bucketPolicy, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (bucketPolicy, next) => metadataValidateBucket(metadataValParams, log,
(err, bucket) => { (err, bucket) => {
if (err) { if (err) {
return next(err, bucket); return next(err, bucket);

View File

@ -2,7 +2,7 @@ const { waterfall } = require('async');
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const { getReplicationConfiguration } = const { getReplicationConfiguration } =
require('./apiUtils/bucket/getReplicationConfiguration'); require('./apiUtils/bucket/getReplicationConfiguration');
@ -30,7 +30,7 @@ function bucketPutReplication(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutReplication', requestType: 'bucketPutReplication',
request, request,
}; };
return waterfall([ return waterfall([
@ -39,7 +39,7 @@ function bucketPutReplication(authInfo, request, log, callback) {
// Check bucket user privileges and ensure versioning is 'Enabled'. // Check bucket user privileges and ensure versioning is 'Enabled'.
(config, next) => (config, next) =>
// TODO: Validate that destination bucket exists and has versioning. // TODO: Validate that destination bucket exists and has versioning.
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { metadataValidateBucket(metadataValParams, log, (err, bucket) => {
if (err) { if (err) {
return next(err); return next(err);
} }

View File

@ -3,7 +3,7 @@ const { s3middleware } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner'); const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
@ -38,12 +38,11 @@ function bucketPutTagging(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutTagging', requestType: 'bucketPutTagging',
request,
}; };
let bucket = null; let bucket = null;
return waterfall([ return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next => metadataValidateBucket(metadataValParams, log,
(err, b) => { (err, b) => {
bucket = b; bucket = b;
return next(err); return next(err);

View File

@ -4,7 +4,7 @@ const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const versioningNotImplBackends = const versioningNotImplBackends =
require('../../constants').versioningNotImplBackends; require('../../constants').versioningNotImplBackends;
@ -119,12 +119,12 @@ function bucketPutVersioning(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'bucketPutVersioning', requestType: 'bucketPutVersioning',
request, request,
}; };
return waterfall([ return waterfall([
next => _parseXML(request, log, next), next => _parseXML(request, log, next),
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next => metadataValidateBucket(metadataValParams, log,
(err, bucket) => next(err, bucket)), // ignore extra null object, (err, bucket) => next(err, bucket)), // ignore extra null object,
(bucket, next) => parseString(request.post, (err, result) => { (bucket, next) => parseString(request.post, (err, result) => {
// just for linting; there should not be any parsing error here // just for linting; there should not be any parsing error here

View File

@ -22,7 +22,7 @@ const requestType = 'bucketPutWebsite';
*/ */
function bucketPutWebsite(authInfo, request, log, callback) { function bucketPutWebsite(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutWebsite' }); log.debug('processing request', { method: 'bucketPutWebsite' });
const { bucketName } = request; const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID(); const canonicalID = authInfo.getCanonicalID();
if (!request.post) { if (!request.post) {
@ -49,8 +49,7 @@ function bucketPutWebsite(authInfo, request, log, callback) {
}); });
}, },
function validateBucketAuthorization(bucket, config, next) { function validateBucketAuthorization(bucket, config, next) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { log.debug('access denied for user on bucket', {
requestType, requestType,
method: 'bucketPutWebsite', method: 'bucketPutWebsite',

View File

@ -1,85 +0,0 @@
const { waterfall } = require('async');
const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const { parseString } = require('xml2js');
function validateBucketQuotaProperty(requestBody, next) {
const quota = requestBody.quota;
const quotaValue = parseInt(quota, 10);
if (Number.isNaN(quotaValue)) {
return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
}
if (quotaValue <= 0) {
return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
}
return next(null, quotaValue);
}
function parseRequestBody(requestBody, next) {
try {
const jsonData = JSON.parse(requestBody);
if (typeof jsonData !== 'object') {
throw new Error('Invalid JSON');
}
return next(null, jsonData);
} catch (jsonError) {
return parseString(requestBody, (xmlError, xmlData) => {
if (xmlError) {
return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
}
return next(null, xmlData);
});
}
}
function bucketUpdateQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketUpdateQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketUpdateQuota',
request,
};
let bucket = null;
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
bucket = b;
return next(err, bucket);
}),
(bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
(bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
next(err, bucket, quotaValue)),
(bucket, quotaValue, next) => {
bucket.setQuota(quotaValue);
return metadata.updateBucket(bucket.getName(), bucket, log, next);
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketUpdateQuota'
});
monitoring.promMetrics('PUT', bucketName, err.code,
'updateBucketQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'PUT', bucketName, '200', 'updateBucketQuota');
pushMetric('updateBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, corsHeaders);
});
}
module.exports = bucketUpdateQuota;
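For reference, a small usage sketch of the two helpers defined in the file removed above (parseRequestBody and validateBucketQuotaProperty); the request bodies are made up and the helpers are assumed to be in scope:

    const { waterfall } = require('async');

    // JSON body: parsed directly, then validated as a positive integer quota.
    waterfall([
        next => parseRequestBody('{"quota": "1000"}', next),
        (body, next) => validateBucketQuotaProperty(body, next),
    ], (err, quotaValue) => {
        // err === null, quotaValue === 1000
    });

    // Non-positive quota: surfaces InvalidArgument from validateBucketQuotaProperty.
    waterfall([
        next => parseRequestBody('{"quota": "-5"}', next),
        (body, next) => validateBucketQuotaProperty(body, next),
    ], err => {
        // err.is.InvalidArgument === true
    });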

View File

@ -12,7 +12,7 @@ const constants = require('../../constants');
const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning } const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning }
= require('./apiUtils/object/versioning'); = require('./apiUtils/object/versioning');
const services = require('../services'); const services = require('../services');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const locationConstraintCheck const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck'); = require('./apiUtils/object/locationConstraintCheck');
const { skipMpuPartProcessing } = storage.data.external.backendUtils; const { skipMpuPartProcessing } = storage.data.external.backendUtils;
@ -21,6 +21,8 @@ const { validateAndFilterMpuParts, generateMpuPartStorageInfo } =
const locationKeysHaveChanged const locationKeysHaveChanged
= require('./apiUtils/object/locationKeysHaveChanged'); = require('./apiUtils/object/locationKeysHaveChanged');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const logger = require('../utilities/logger');
const { validatePutVersionId } = require('./apiUtils/object/coldStorage'); const { validatePutVersionId } = require('./apiUtils/object/coldStorage');
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
@ -80,7 +82,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
uploadId, uploadId,
// Note: permissions for completing a multipart upload are the // Note: permissions for completing a multipart upload are the
// same as putting a part. // same as putting a part.
requestType: request.apiMethods || 'putPart or complete', requestType: 'putPart or complete',
log, log,
request, request,
}; };
@ -131,11 +133,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
bucketName, bucketName,
// Required permissions for this action // Required permissions for this action
// at the destinationBucket level are same as objectPut // at the destinationBucket level are same as objectPut
requestType: request.apiMethods || 'completeMultipartUpload', requestType: 'objectPut',
versionId, versionId,
request,
}; };
standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next); metadataValidateBucketAndObj(metadataValParams, log, next);
}, },
function validateMultipart(destBucket, objMD, next) { function validateMultipart(destBucket, objMD, next) {
if (objMD) { if (objMD) {
@ -213,14 +214,9 @@ function completeMultipartUpload(authInfo, request, log, callback) {
const mdInfo = { storedParts, mpuOverviewKey, splitter }; const mdInfo = { storedParts, mpuOverviewKey, splitter };
const mpuInfo = const mpuInfo =
{ objectKey, uploadId, jsonList, bucketName, destBucket }; { objectKey, uploadId, jsonList, bucketName, destBucket };
const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.completeMPU(request, mpuInfo, mdInfo, location, return data.completeMPU(request, mpuInfo, mdInfo, location,
null, null, null, locationConstraintCheck, log, null, null, null, locationConstraintCheck, log,
(err, completeObjData) => { (err, completeObjData) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityImpDenies;
if (err) { if (err) {
return next(err, destBucket); return next(err, destBucket);
} }
@ -329,7 +325,6 @@ function completeMultipartUpload(authInfo, request, log, callback) {
replicationInfo: getReplicationInfo(objectKey, destBucket, replicationInfo: getReplicationInfo(objectKey, destBucket,
false, calculatedSize, REPLICATION_ACTION), false, calculatedSize, REPLICATION_ACTION),
originOp: 's3:ObjectCreated:CompleteMultipartUpload', originOp: 's3:ObjectCreated:CompleteMultipartUpload',
overheadField: constants.overheadField,
log, log,
}; };
// If key already exists // If key already exists
@ -474,9 +469,12 @@ function completeMultipartUpload(authInfo, request, log, callback) {
const newDataStoreName = const newDataStoreName =
Array.isArray(dataLocations) && dataLocations[0] ? Array.isArray(dataLocations) && dataLocations[0] ?
dataLocations[0].dataStoreName : null; dataLocations[0].dataStoreName : null;
const delLog =
logger.newRequestLoggerFromSerializedUids(log
.getSerializedUids());
return data.batchDelete(dataToDelete, return data.batchDelete(dataToDelete,
request.method, request.method,
newDataStoreName, log, err => { newDataStoreName, delLog, err => {
if (err) { if (err) {
return next(err); return next(err);
} }
@ -499,8 +497,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
function batchDeleteExtraParts(extraPartLocations, destinationBucket, function batchDeleteExtraParts(extraPartLocations, destinationBucket,
aggregateETag, generatedVersionId, next) { aggregateETag, generatedVersionId, next) {
if (extraPartLocations && extraPartLocations.length > 0) { if (extraPartLocations && extraPartLocations.length > 0) {
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(extraPartLocations, request.method, return data.batchDelete(extraPartLocations, request.method,
null, log, err => { null, delLog, err => {
if (err) { if (err) {
return next(err); return next(err);
} }
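The right side of this file introduces a dedicated request logger for the background batch deletes. A minimal sketch of that pattern, assuming `logger` is lib/utilities/logger as required above and `data.batchDelete` keeps the signature shown in the hunks; batchDeleteWithOwnLogger is an illustrative wrapper, not an existing function:

    const logger = require('../utilities/logger');

    // Derive a fresh request logger carrying the same serialized UIDs as the API
    // request, so the asynchronous batch delete remains traceable to that request.
    function batchDeleteWithOwnLogger(data, locations, method, dataStoreName, log, cb) {
        const delLog = logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
        return data.batchDelete(locations, method, dataStoreName, delLog, cb);
    }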

View File

@ -6,11 +6,10 @@ const convertToXml = s3middleware.convertToXml;
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { hasNonPrintables } = require('../utilities/stringChecks'); const { hasNonPrintables } = require('../utilities/stringChecks');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation'); const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants'); const constants = require('../../constants');
const services = require('../services'); const services = require('../services');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const locationConstraintCheck const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck'); = require('./apiUtils/object/locationConstraintCheck');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing') const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
@ -66,7 +65,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
const websiteRedirectHeader = const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location']; request.headers['x-amz-website-redirect-location'];
if (request.headers['x-amz-storage-class'] && if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) { !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header'); log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', bucketName, monitoring.promMetrics('PUT', bucketName,
errors.InvalidStorageClass.code, 'initiateMultipartUpload'); errors.InvalidStorageClass.code, 'initiateMultipartUpload');
@ -82,7 +81,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
if (metaHeaders instanceof Error) { if (metaHeaders instanceof Error) {
log.debug('user metadata validation failed', { log.debug('user metadata validation failed', {
error: metaHeaders, error: metaHeaders,
method: 'initiateMultipartUpload', method: 'createAndStoreObject',
}); });
return process.nextTick(() => callback(metaHeaders)); return process.nextTick(() => callback(metaHeaders));
} }
@ -106,7 +105,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
authInfo, authInfo,
bucketName, bucketName,
// Required permissions for this action are same as objectPut // Required permissions for this action are same as objectPut
requestType: request.apiMethods || 'initiateMultipartUpload', requestType: 'objectPut',
request, request,
}; };
const accountCanonicalID = authInfo.getCanonicalID(); const accountCanonicalID = authInfo.getCanonicalID();
@ -275,7 +274,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
} }
async.waterfall([ async.waterfall([
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next => metadataValidateBucketAndObj(metadataValParams, log,
(error, destinationBucket) => { (error, destinationBucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket); const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket);
if (error) { if (error) {
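A side-by-side restatement of the two x-amz-storage-class checks in the hunk above; config and constants are the modules already required in this file, and the ok* names are illustrative:

    const storageClass = request.headers['x-amz-storage-class'];

    // Left side: the header must name a configured location constraint.
    const okPerConfig = !storageClass || Boolean(config.locationConstraints[storageClass]);

    // Right side: the header must be one of the statically known storage classes.
    const okPerConstants = !storageClass || constants.validStorageClasses.includes(storageClass);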

View File

@ -6,7 +6,7 @@ const convertToXml = s3middleware.convertToXml;
const constants = require('../../constants'); const constants = require('../../constants');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const services = require('../services'); const services = require('../services');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -96,8 +96,8 @@ function listMultipartUploads(authInfo, request, log, callback) {
// to list the multipart uploads so we have provided here that // to list the multipart uploads so we have provided here that
// the authorization to list multipart uploads is the same // the authorization to list multipart uploads is the same
// as listing objects in a bucket. // as listing objects in a bucket.
requestType: request.apiMethods || 'bucketGet', requestType: 'bucketGet',
preciseRequestType: request.apiMethods || 'listMultipartUploads', preciseRequestType: 'listMultipartUploads',
request, request,
}; };
@ -105,7 +105,7 @@ function listMultipartUploads(authInfo, request, log, callback) {
function waterfall1(next) { function waterfall1(next) {
// Check final destination bucket for authorization rather // Check final destination bucket for authorization rather
// than multipart upload bucket // than multipart upload bucket
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, metadataValidateBucket(metadataValParams, log,
(err, bucket) => next(err, bucket)); (err, bucket) => next(err, bucket));
}, },
function getMPUBucket(bucket, next) { function getMPUBucket(bucket, next) {

View File

@ -8,7 +8,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const locationConstraintCheck = const locationConstraintCheck =
require('./apiUtils/object/locationConstraintCheck'); require('./apiUtils/object/locationConstraintCheck');
const services = require('../services'); const services = require('../services');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml; const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -97,7 +97,7 @@ function listParts(authInfo, request, log, callback) {
bucketName, bucketName,
objectKey, objectKey,
uploadId, uploadId,
preciseRequestType: request.apiMethods || 'listParts', preciseRequestType: 'listParts',
request, request,
}; };
// For validating the request at the destinationBucket level // For validating the request at the destinationBucket level
@ -114,7 +114,7 @@ function listParts(authInfo, request, log, callback) {
async.waterfall([ async.waterfall([
function checkDestBucketVal(next) { function checkDestBucketVal(next) {
standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, metadataValidateBucketAndObj(metadataValParams, log,
(err, destinationBucket) => { (err, destinationBucket) => {
if (err) { if (err) {
return next(err, destinationBucket, null); return next(err, destinationBucket, null);
@ -152,13 +152,8 @@ function listParts(authInfo, request, log, callback) {
mpuOverviewObj, mpuOverviewObj,
destBucket, destBucket,
}; };
const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.listParts(mpuInfo, request, locationConstraintCheck, return data.listParts(mpuInfo, request, locationConstraintCheck,
log, (err, backendPartList) => { log, (err, backendPartList) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityImpDenies;
if (err) { if (err) {
return next(err, destBucket); return next(err, destBucket);
} }
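The left side strips request.actionImplicitDenies before handing the request to the data layer and restores it once the backend call returns. A generic sketch of that save/strip/restore pattern; withPropertyRemoved is a hypothetical helper, not part of the codebase:

    // Temporarily remove a property from an object for the duration of an async call.
    function withPropertyRemoved(obj, prop, fn, cb) {
        const saved = obj[prop];
        // eslint-disable-next-line no-param-reassign
        delete obj[prop];
        return fn((...args) => {
            // eslint-disable-next-line no-param-reassign
            obj[prop] = saved;
            return cb(...args);
        });
    }

    // Usage mirroring the hunk above:
    // withPropertyRemoved(request, 'actionImplicitDenies',
    //     done => data.listParts(mpuInfo, request, locationConstraintCheck, log, done),
    //     (err, backendPartList) => { /* ... */ });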

View File

@ -1,7 +1,7 @@
const { errors, versioning } = require('arsenal'); const { errors, versioning } = require('arsenal');
const constants = require('../../constants'); const constants = require('../../constants');
const services = require('../services'); const services = require('../services');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const validateSearchParams = require('../api/apiUtils/bucket/validateSearch'); const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
@ -71,7 +71,7 @@ function metadataSearch(authInfo, request, log, callback) {
const metadataValParams = { const metadataValParams = {
authInfo, authInfo,
bucketName, bucketName,
requestType: request.apiMethods || 'metadataSearch', requestType: 'metadataSearch',
request, request,
}; };
const listParams = { const listParams = {
@ -103,7 +103,7 @@ function metadataSearch(authInfo, request, log, callback) {
listParams.marker = params.marker; listParams.marker = params.marker;
} }
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => { metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
if (err) { if (err) {

View File

@ -11,13 +11,13 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const services = require('../services'); const services = require('../services');
const vault = require('../auth/vault'); const vault = require('../auth/vault');
const { isBucketAuthorized, evaluateBucketPolicyWithIAM } = const { isBucketAuthorized } =
require('./apiUtils/authorization/permissionChecks'); require('./apiUtils/authorization/permissionChecks');
const { preprocessingVersioningDelete } const { preprocessingVersioningDelete }
= require('./apiUtils/object/versioning'); = require('./apiUtils/object/versioning');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject'); const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
const metadataUtils = require('../metadata/metadataUtils'); const { metadataGetObject } = require('../metadata/metadataUtils');
const { config } = require('../Config'); const { config } = require('../Config');
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks'); const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo } const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
@ -25,13 +25,9 @@ const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
const requestUtils = policies.requestUtils; const requestUtils = policies.requestUtils;
const { validObjectKeys } = require('../routes/routeVeeam'); const { validObjectKeys } = require('../routes/routeVeeam');
const { deleteVeeamCapabilities } = require('../routes/veeam/delete'); const { deleteVeeamCapabilities } = require('../routes/veeam/delete');
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
const { overheadField } = require('../../constants');
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
/* /*
Format of xml request: Format of xml request:
@ -173,63 +169,6 @@ function _parseXml(xmlToParse, next) {
}); });
} }
/**
* decodeObjectVersion - decode object version to be deleted
* @param {object} entry - entry from data model
* @return {Array} [error, decodedVersionId] pair; error is null on success
**/
function decodeObjectVersion(entry) {
let decodedVersionId;
if (entry.versionId) {
decodedVersionId = entry.versionId === 'null' ?
'null' : versionIdUtils.decode(entry.versionId);
}
if (decodedVersionId instanceof Error) {
return [errors.NoSuchVersion];
}
return [null, decodedVersionId];
}
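// Quick illustration of the return contract above: a [error, decodedVersionId] pair rather
// than a callback, which the waterfall further down spreads directly into its step callback
// via callback(...decodeObjectVersion(entry, bucketName)). Sample entries are made up:
//     decodeObjectVersion({ versionId: 'null' })           -> [null, 'null']
//     decodeObjectVersion({})                              -> [null, undefined]
//     decodeObjectVersion({ versionId: 'not-a-valid-id' }) -> [errors.NoSuchVersion]
//         (assuming versionIdUtils.decode returns an Error for the malformed id)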
/**
* Initialization function for the MultiObjectDelete API that will, based on the
* current metadata backend, assess if metadata READ batching is supported. If
* yes, the initialization step will call the metadataGetObjects function from
* the MetadataWrapper.
* @param {string} bucketName - bucket name
* @param {object[]} inPlay - list of object entries (key and optional versionId) still in play
* @param {object} log - logger object
* @param {function} callback - callback to call with error or a cache of object metadata
* @return {undefined}
*/
function initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback) {
if (config.multiObjectDeleteEnableOptimizations === false) {
return callback(null, {});
}
// If the backend supports batching, we want to optimize the API latency by
// first getting all the objects metadata, stored in memory, for later use
// in the API. This approach does not change the API architecture; it only
// adds a read-only pre-fetch step that can greatly reduce API latency when
// the database supports batching.
const objectKeys = Object.values(inPlay).map(entry => {
const [err, versionId] = decodeObjectVersion(entry, bucketName);
if (err) {
return null;
}
return {
versionId,
inPlay: entry,
};
});
return metadataUtils.metadataGetObjects(bucketName, objectKeys, log, (err, cache) => {
// This optional step is read-only, so any error can be safely ignored
if (err) {
return callback(null, {});
}
return callback(null, cache);
});
}
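// The cache passed to the callback above is consumed later by
// metadataUtils.metadataGetObject(bucketName, entry.key, versionId, cache, log, ...), so a
// single batched read can serve every object still in play. The exact key layout belongs to
// the MetadataWrapper and is not visible in this diff; roughly, it maps each requested
// key/version to its object metadata, e.g. (illustrative only):
//     { 'photo.jpg': { 'content-length': 1024 }, 'doc.txt': { 'content-length': 2048 } }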
/** /**
* gets object metadata and deletes object * gets object metadata and deletes object
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@ -255,18 +194,34 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
let numOfObjectsRemoved = 0; let numOfObjectsRemoved = 0;
const skipError = new Error('skip'); const skipError = new Error('skip');
const objectLockedError = new Error('object locked'); const objectLockedError = new Error('object locked');
let deleteFromStorage = [];
return async.waterfall([ // doing 5 requests at a time. note that the data wrapper
callback => initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback), // will do 5 parallel requests to data backend to delete parts
(cache, callback) => async.forEachLimit(inPlay, config.multiObjectDeleteConcurrency, (entry, moveOn) => { return async.forEachLimit(inPlay, Number(process.env.OPTIM_WORKERS) || 25, (entry, moveOn) => {
async.waterfall([ async.waterfall([
callback => callback(...decodeObjectVersion(entry, bucketName)), callback => {
let decodedVersionId;
if (entry.versionId) {
decodedVersionId = entry.versionId === 'null' ?
'null' : versionIdUtils.decode(entry.versionId);
}
if (decodedVersionId instanceof Error) {
monitoring.promMetrics('DELETE', bucketName, 404,
'multiObjectDelete');
return callback(errors.NoSuchVersion);
}
return callback(null, decodedVersionId);
},
// for obj deletes, no need to check acl's at object level // for obj deletes, no need to check acl's at object level
// (authority is at the bucket level for obj deletes) // (authority is at the bucket level for obj deletes)
(versionId, callback) => metadataUtils.metadataGetObject(bucketName, entry.key, (versionId, callback) => metadataGetObject(bucketName, entry.key,
versionId, cache, log, (err, objMD) => callback(err, objMD, versionId)), versionId, log, (err, objMD) => {
(objMD, versionId, callback) => { // if general error from metadata return error
if (err) {
monitoring.promMetrics('DELETE', bucketName, err.code,
'multiObjectDelete');
return callback(err);
}
if (!objMD) { if (!objMD) {
const verCfg = bucket.getVersioningConfiguration(); const verCfg = bucket.getVersioningConfiguration();
// To adhere to AWS behavior, create a delete marker // To adhere to AWS behavior, create a delete marker
@ -290,7 +245,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
objMD.location[0].deleteVersion = true; objMD.location[0].deleteVersion = true;
} }
return callback(null, objMD, versionId); return callback(null, objMD, versionId);
}, }),
(objMD, versionId, callback) => { (objMD, versionId, callback) => {
// AWS only returns an object lock error if a version id // AWS only returns an object lock error if a version id
// is specified, else continue to create a delete marker // is specified, else continue to create a delete marker
@ -301,8 +256,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) { if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) {
return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => { return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
if (error && error.is.AccessDenied) { if (error && error.is.AccessDenied) {
log.debug('user does not have BypassGovernanceRetention and object is locked', log.debug('user does not have BypassGovernanceRetention and object is locked', { error });
{ error });
return callback(objectLockedError); return callback(objectLockedError);
} }
if (error) { if (error) {
@ -333,41 +287,26 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
return callback(null, objMD, versionId); return callback(null, objMD, versionId);
}, },
(objMD, versionId, callback) => validateQuotas(
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
(objMD, versionId, callback) => { (objMD, versionId, callback) => {
const options = preprocessingVersioningDelete( const options = preprocessingVersioningDelete(
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode); bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
const deleteInfo = {}; const deleteInfo = {};
if (options && options.deleteData) { if (options && options.deleteData) {
options.overheadField = overheadField;
deleteInfo.deleted = true; deleteInfo.deleted = true;
if (!_bucketRequiresOplogUpdate(bucket)) {
options.doesNotNeedOpogUpdate = true;
}
if (objMD.uploadId) { if (objMD.uploadId) {
// eslint-disable-next-line // eslint-disable-next-line
options.replayId = objMD.uploadId; options.replayId = objMD.uploadId;
} }
return services.deleteObject(bucketName, objMD, return services.deleteObject(bucketName, objMD,
entry.key, options, config.multiObjectDeleteEnableOptimizations, log, entry.key, options, log, 's3:ObjectRemoved:Delete', err =>
's3:ObjectRemoved:Delete', (err, toDelete) => { callback(err, objMD, deleteInfo));
if (err) {
return callback(err);
}
if (toDelete) {
deleteFromStorage = deleteFromStorage.concat(toDelete);
}
return callback(null, objMD, deleteInfo);
});
} }
deleteInfo.newDeleteMarker = true; deleteInfo.newDeleteMarker = true;
// This call will create a delete-marker // This call will create a delete-marker
return createAndStoreObject(bucketName, bucket, entry.key, return createAndStoreObject(bucketName, bucket, entry.key,
objMD, authInfo, canonicalID, null, request, objMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, overheadField, log, deleteInfo.newDeleteMarker, null, log, 's3:ObjectRemoved:DeleteMarkerCreated',
's3:ObjectRemoved:DeleteMarkerCreated', (err, result) => (err, result) =>
callback(err, objMD, deleteInfo, result.versionId)); callback(err, objMD, deleteInfo, result.versionId));
}, },
], (err, objMD, deleteInfo, versionId) => { ], (err, objMD, deleteInfo, versionId) => {
@ -404,51 +343,17 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
isDeleteMarker = true; isDeleteMarker = true;
deleteMarkerVersionId = entry.versionId; deleteMarkerVersionId = entry.versionId;
} }
successfullyDeleted.push({ successfullyDeleted.push({ entry, isDeleteMarker,
entry, isDeleteMarker, deleteMarkerVersionId });
deleteMarkerVersionId,
});
return moveOn(); return moveOn();
}); });
}, },
// end of forEach func // end of forEach func
err => { err => {
// Batch delete all objects log.trace('finished deleting objects', { numOfObjectsRemoved });
const onDone = () => callback(err, quietSetting, errorResults, numOfObjectsRemoved, console.log('end of multi delete object');
return next(err, quietSetting, errorResults, numOfObjectsRemoved,
successfullyDeleted, totalContentLengthDeleted, bucket); successfullyDeleted, totalContentLengthDeleted, bucket);
if (err && deleteFromStorage.length === 0) {
log.trace('no objects to delete from data backend');
return onDone();
}
// If error but we have objects in the list, delete them to ensure
// consistent state.
log.trace('deleting objects from data backend');
// Split the array into chunks
const chunks = [];
while (deleteFromStorage.length > 0) {
chunks.push(deleteFromStorage.splice(0, config.multiObjectDeleteConcurrency));
}
return async.each(chunks, (chunk, done) => data.batchDelete(chunk, null, null,
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids()), done),
err => {
if (err) {
log.error('error deleting objects from data backend', { error: err });
return onDone(err);
}
return onDone();
});
}),
], (err, ...results) => {
// if general error from metadata return error
if (err) {
monitoring.promMetrics('DELETE', bucketName, err.code,
'multiObjectDelete');
return next(err);
}
return next(null, ...results);
}); });
} }
@ -496,47 +401,15 @@ function multiObjectDelete(authInfo, request, log, callback) {
return next(null, quietSetting, objects); return next(null, quietSetting, objects);
}); });
}, },
function checkBucketMetadata(quietSetting, objects, next) { function checkPolicies(quietSetting, objects, next) {
const errorResults = [];
return metadata.getBucket(bucketName, log, (err, bucketMD) => {
if (err) {
log.trace('error retrieving bucket metadata',
{ error: err });
return next(err);
}
// check whether bucket has transient or deleted flag
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
// affects the objects.
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results
objects.forEach(entry => {
errorResults.push({
entry,
error: errors.AccessDenied,
});
});
// by sending an empty array as the objects array
// async.forEachLimit below will not actually
// make any calls to metadata or data but will continue on
// to the next step to build xml
return next(null, quietSetting, errorResults, [], bucketMD);
}
return next(null, quietSetting, errorResults, objects, bucketMD);
});
},
function checkPolicies(quietSetting, errorResults, objects, bucketMD, next) {
// track keys that are still on track to be deleted // track keys that are still on track to be deleted
const inPlay = []; const inPlay = [];
const errorResults = [];
// if request from account, no need to check policies // if request from account, no need to check policies
// all objects are inPlay so send array of object keys // all objects are inPlay so send array of object keys
// as inPlay argument // as inPlay argument
if (!isRequesterNonAccountUser(authInfo)) { if (!isRequesterNonAccountUser(authInfo)) {
return next(null, quietSetting, errorResults, objects, bucketMD); return next(null, quietSetting, errorResults, objects);
} }
// TODO: once arsenal's extractParams is separated from doAuth // TODO: once arsenal's extractParams is separated from doAuth
@ -580,7 +453,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
error: errors.AccessDenied }); error: errors.AccessDenied });
}); });
// send empty array for inPlay // send empty array for inPlay
return next(null, quietSetting, errorResults, [], bucketMD); return next(null, quietSetting, errorResults, []);
} }
if (err) { if (err) {
log.trace('error checking policies', { log.trace('error checking policies', {
@ -598,13 +471,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
}); });
return next(errors.InternalError); return next(errors.InternalError);
} }
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
for (let i = 0; i < authorizationResults.length; i++) { for (let i = 0; i < authorizationResults.length; i++) {
const result = authorizationResults[i]; const result = authorizationResults[i];
// result is { isAllowed: true, // result is { isAllowed: true,
@ -620,26 +486,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
key: result.arn.slice(slashIndex + 1), key: result.arn.slice(slashIndex + 1),
versionId: result.versionId, versionId: result.versionId,
}; };
// Deny immediately if there is an explicit deny if (result.isAllowed) {
if (!result.isImplicit && !result.isAllowed) {
errorResults.push({
entry,
error: errors.AccessDenied,
});
continue;
}
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
canonicalID,
authInfo,
actionImplicitDenies,
log,
request);
if (areAllActionsAllowed) {
if (validObjectKeys.includes(entry.key)) { if (validObjectKeys.includes(entry.key)) {
inPlayInternal.push(entry.key); inPlayInternal.push(entry.key);
} else { } else {
@ -652,6 +499,47 @@ function multiObjectDelete(authInfo, request, log, callback) {
}); });
} }
} }
return next(null, quietSetting, errorResults, inPlay);
});
},
function checkBucketMetadata(quietSetting, errorResults, inPlay, next) {
// if no objects in play, no need to check ACLs / get metadata,
// just move on if there is no Origin header
if (inPlay.length === 0 && !request.headers.origin) {
return next(null, quietSetting, errorResults, inPlay,
undefined);
}
return metadata.getBucket(bucketName, log, (err, bucketMD) => {
if (err) {
log.trace('error retrieving bucket metadata',
{ error: err });
return next(err);
}
// check whether bucket has transient or deleted flag
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
// if no objects in play, no need to check ACLs
if (inPlay.length === 0) {
return next(null, quietSetting, errorResults, inPlay,
bucketMD);
}
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results
inPlay.forEach(entry => {
errorResults.push({
entry,
error: errors.AccessDenied,
});
});
// by sending an empty array as the inPlay array
// async.forEachLimit below will not actually
// make any calls to metadata or data but will continue on
// to the next step to build xml
return next(null, quietSetting, errorResults, [], bucketMD);
}
return next(null, quietSetting, errorResults, inPlay, bucketMD); return next(null, quietSetting, errorResults, inPlay, bucketMD);
}); });
}, },
@ -702,6 +590,4 @@ function multiObjectDelete(authInfo, request, log, callback) {
module.exports = { module.exports = {
getObjMetadataAndDelete, getObjMetadataAndDelete,
multiObjectDelete, multiObjectDelete,
decodeObjectVersion,
initializeMultiObjectDeleteWithBatchingSupport,
}; };
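The left side of the checkPolicies step folds Vault's authorizationResults into a per-action implicit-deny map before evaluating bucket policies; the right side drops that conversion. A minimal sketch of the conversion; the field names follow what the loop above reads, while the concrete actions, ARNs and values are made up:

    const authorizationResults = [
        { action: 'objectDelete', isAllowed: false, isImplicit: true,
          arn: 'arn:aws:s3:::examplebucket/key1', versionId: undefined },
        { action: 'objectDeleteVersion', isAllowed: true, isImplicit: false,
          arn: 'arn:aws:s3:::examplebucket/key2', versionId: 'v1' },
    ];

    const actionImplicitDenies = authorizationResults.reduce((acc, curr) => {
        // eslint-disable-next-line no-param-reassign
        acc[curr.action] = curr.isImplicit;
        return acc;
    }, {});
    // -> { objectDelete: true, objectDeleteVersion: false }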

View File

@ -12,10 +12,11 @@ const { checkQueryVersionId, versioningPreprocessing }
= require('./apiUtils/object/versioning'); = require('./apiUtils/object/versioning');
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo'); const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
const { data } = require('../data/wrapper'); const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const services = require('../services'); const services = require('../services');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const removeAWSChunked = require('./apiUtils/object/removeAWSChunked'); const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing') const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
.validateWebsiteHeader; .validateWebsiteHeader;
const { config } = require('../Config'); const { config } = require('../Config');
@ -220,14 +221,6 @@ function objectCopy(authInfo, request, sourceBucket,
versionId: sourceVersionId, versionId: sourceVersionId,
getDeleteMarker: true, getDeleteMarker: true,
requestType: 'objectGet', requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request, request,
}; };
const valPutParams = { const valPutParams = {
@ -235,7 +228,6 @@ function objectCopy(authInfo, request, sourceBucket,
bucketName: destBucketName, bucketName: destBucketName,
objectKey: destObjectKey, objectKey: destObjectKey,
requestType: 'objectPut', requestType: 'objectPut',
checkQuota: false,
request, request,
}; };
const dataStoreContext = { const dataStoreContext = {
@ -249,7 +241,7 @@ function objectCopy(authInfo, request, sourceBucket,
const responseHeaders = {}; const responseHeaders = {};
if (request.headers['x-amz-storage-class'] && if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) { !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header'); log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', destBucketName, monitoring.promMetrics('PUT', destBucketName,
errors.InvalidStorageClass.code, 'copyObject'); errors.InvalidStorageClass.code, 'copyObject');
@ -269,7 +261,7 @@ function objectCopy(authInfo, request, sourceBucket,
} }
return async.waterfall([ return async.waterfall([
function checkDestAuth(next) { function checkDestAuth(next) {
return standardMetadataValidateBucketAndObj(valPutParams, request.actionImplicitDenies, log, return metadataValidateBucketAndObj(valPutParams, log,
(err, destBucketMD, destObjMD) => { (err, destBucketMD, destObjMD) => {
if (err) { if (err) {
log.debug('error validating put part of request', log.debug('error validating put part of request',
@ -287,10 +279,7 @@ function objectCopy(authInfo, request, sourceBucket,
}); });
}, },
function checkSourceAuthorization(destBucketMD, destObjMD, next) { function checkSourceAuthorization(destBucketMD, destObjMD, next) {
return standardMetadataValidateBucketAndObj({ return metadataValidateBucketAndObj(valGetParams, log,
...valGetParams,
destObjMD,
}, request.actionImplicitDenies, log,
(err, sourceBucketMD, sourceObjMD) => { (err, sourceBucketMD, sourceObjMD) => {
if (err) { if (err) {
log.debug('error validating get part of request', log.debug('error validating get part of request',
@ -342,10 +331,6 @@ function objectCopy(authInfo, request, sourceBucket,
dataStoreContext.metaHeaders = dataStoreContext.metaHeaders =
storeMetadataParams.metaHeaders; storeMetadataParams.metaHeaders;
} }
// eslint-disable-next-line no-param-reassign
storeMetadataParams.overheadField = constants.overheadField;
let dataLocator; let dataLocator;
// If 0 byte object just set dataLocator to empty array // If 0 byte object just set dataLocator to empty array
if (!sourceObjMD.location) { if (!sourceObjMD.location) {
@ -461,15 +446,10 @@ function objectCopy(authInfo, request, sourceBucket,
return next(null, storeMetadataParams, dataLocator, destObjMD, return next(null, storeMetadataParams, dataLocator, destObjMD,
serverSideEncryption, destBucketMD); serverSideEncryption, destBucketMD);
} }
const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.copyObject(request, sourceLocationConstraintName, return data.copyObject(request, sourceLocationConstraintName,
storeMetadataParams, dataLocator, dataStoreContext, storeMetadataParams, dataLocator, dataStoreContext,
backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log, backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
(err, results) => { (err, results) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityImpDenies;
if (err) { if (err) {
return next(err, destBucketMD); return next(err, destBucketMD);
} }
@ -544,8 +524,10 @@ function objectCopy(authInfo, request, sourceBucket,
// the same as the destination // the same as the destination
if (!sourceIsDestination && dataToDelete) { if (!sourceIsDestination && dataToDelete) {
const newDataStoreName = storeMetadataParams.dataStoreName; const newDataStoreName = storeMetadataParams.dataStoreName;
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(dataToDelete, request.method, return data.batchDelete(dataToDelete, request.method,
newDataStoreName, log, err => { newDataStoreName, delLog, err => {
if (err) { if (err) {
// if error, log the error and move on as it is not // if error, log the error and move on as it is not
// relevant to the client as the client's // relevant to the client as the client's

View File

@ -8,17 +8,15 @@ const { pushMetric } = require('../utapi/utilities');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject'); const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { decodeVersionId, preprocessingVersioningDelete } const { decodeVersionId, preprocessingVersioningDelete }
= require('./apiUtils/object/versioning'); = require('./apiUtils/object/versioning');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo } const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
= require('./apiUtils/object/objectLockHelpers'); = require('./apiUtils/object/objectLockHelpers');
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks'); const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
const { config } = require('../Config'); const { config } = require('../Config');
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
const objectLockedError = new Error('object locked'); const objectLockedError = new Error('object locked');
const { overheadField } = require('../../constants');
/** /**
* objectDeleteInternal - DELETE an object from a bucket * objectDeleteInternal - DELETE an object from a bucket
@ -56,14 +54,14 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
bucketName, bucketName,
objectKey, objectKey,
versionId: reqVersionId, versionId: reqVersionId,
requestType: request.apiMethods || 'objectDelete', requestType: 'objectDelete',
request, request,
}; };
const canonicalID = authInfo.getCanonicalID(); const canonicalID = authInfo.getCanonicalID();
return async.waterfall([ return async.waterfall([
function validateBucketAndObj(next) { function validateBucketAndObj(next) {
return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log, return metadataValidateBucketAndObj(valParams, log,
(err, bucketMD, objMD) => { (err, bucketMD, objMD) => {
if (err) { if (err) {
return next(err, bucketMD); return next(err, bucketMD);
@ -180,7 +178,7 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
deleteInfo.removeDeleteMarker = true; deleteInfo.removeDeleteMarker = true;
} }
return services.deleteObject(bucketName, objectMD, return services.deleteObject(bucketName, objectMD,
objectKey, delOptions, false, log, isExpiration ? objectKey, delOptions, log, isExpiration ?
's3:LifecycleExpiration:Delete' : 's3:LifecycleExpiration:Delete' :
's3:ObjectRemoved:Delete', 's3:ObjectRemoved:Delete',
(err, delResult) => (err, delResult) =>
@ -188,7 +186,6 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
}); });
} }
if (delOptions && delOptions.deleteData) { if (delOptions && delOptions.deleteData) {
delOptions.overheadField = overheadField;
if (objectMD.isDeleteMarker) { if (objectMD.isDeleteMarker) {
// record that we deleted a delete marker to set // record that we deleted a delete marker to set
// response headers accordingly // response headers accordingly
@ -200,12 +197,8 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
delOptions.replayId = objectMD.uploadId; delOptions.replayId = objectMD.uploadId;
} }
if (!_bucketRequiresOplogUpdate(bucketMD)) {
delOptions.doesNotNeedOpogUpdate = true;
}
return services.deleteObject(bucketName, objectMD, objectKey, return services.deleteObject(bucketName, objectMD, objectKey,
delOptions, false, log, isExpiration ? delOptions, log, isExpiration ?
's3:LifecycleExpiration:Delete' : 's3:LifecycleExpiration:Delete' :
's3:ObjectRemoved:Delete', 's3:ObjectRemoved:Delete',
(err, delResult) => next(err, bucketMD, (err, delResult) => next(err, bucketMD,
@ -215,7 +208,7 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
deleteInfo.newDeleteMarker = true; deleteInfo.newDeleteMarker = true;
return createAndStoreObject(bucketName, bucketMD, return createAndStoreObject(bucketName, bucketMD,
objectKey, objectMD, authInfo, canonicalID, null, request, objectKey, objectMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ? deleteInfo.newDeleteMarker, null, log, isExpiration ?
's3:LifecycleExpiration:DeleteMarkerCreated' : 's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRemoved:DeleteMarkerCreated',
(err, newDelMarkerRes) => { (err, newDelMarkerRes) => {

View File

@ -1,10 +1,10 @@
const async = require('async'); const async = require('async');
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions } const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning'); = require('./apiUtils/object/versioning');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -44,12 +44,12 @@ function objectDeleteTagging(authInfo, request, log, callback) {
objectKey, objectKey,
versionId: reqVersionId, versionId: reqVersionId,
getDeleteMarker: true, getDeleteMarker: true,
requestType: request.apiMethods || 'objectDeleteTagging', requestType: 'objectDeleteTagging',
request, request,
}; };
return async.waterfall([ return async.waterfall([
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next => metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => { (err, bucket, objectMD) => {
if (err) { if (err) {
log.trace('request authorization failed', log.trace('request authorization failed',
@ -75,7 +75,13 @@ function objectDeleteTagging(authInfo, request, log, callback) {
(bucket, objectMD, next) => { (bucket, objectMD, next) => {
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
objectMD.tags = {}; objectMD.tags = {};
const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode); const params = {};
if (objectMD.versionId) {
params.versionId = objectMD.versionId;
if (!config.nullVersionCompatMode) {
params.isNull = objectMD.isNull || false;
}
}
const replicationInfo = getReplicationInfo(objectKey, bucket, true, const replicationInfo = getReplicationInfo(objectKey, bucket, true,
0, REPLICATION_ACTION, objectMD); 0, REPLICATION_ACTION, objectMD);
if (replicationInfo) { if (replicationInfo) {

View File

@ -15,7 +15,7 @@ const getReplicationBackendDataLocator =
require('./apiUtils/object/getReplicationBackendDataLocator'); require('./apiUtils/object/getReplicationBackendDataLocator');
const checkReadLocation = require('./apiUtils/object/checkReadLocation'); const checkReadLocation = require('./apiUtils/object/checkReadLocation');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { config } = require('../Config'); const { config } = require('../Config');
const { locationConstraints } = config; const { locationConstraints } = config;
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
@ -66,11 +66,11 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
objectKey, objectKey,
versionId, versionId,
getDeleteMarker: true, getDeleteMarker: true,
requestType: request.apiMethods || 'objectGet', requestType: 'objectGet',
request, request,
}; };
return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log, return metadataValidateBucketAndObj(mdValParams, log,
(err, bucket, objMD) => { (err, bucket, objMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket); request.method, bucket);
@ -305,17 +305,16 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
'GET', bucketName, err.code, 'getObject'); 'GET', bucketName, err.code, 'getObject');
return callback(err); return callback(err);
} }
const contentLength = Number.parseInt(responseMetaHeaders['Content-Length'], 10);
pushMetric('getObject', log, { pushMetric('getObject', log, {
authInfo, authInfo,
bucket: bucketName, bucket: bucketName,
keys: [objectKey], keys: [objectKey],
newByteLength: newByteLength: contentLength,
Number.parseInt(responseMetaHeaders['Content-Length'], 10),
versionId: objMD.versionId, versionId: objMD.versionId,
location: objMD.dataStoreName, location: objMD.dataStoreName,
}); });
monitoring.promMetrics('GET', bucketName, '200', 'getObject', monitoring.promMetrics('GET', bucketName, '200', 'getObject', contentLength);
Number.parseInt(responseMetaHeaders['Content-Length'], 10));
return callback(null, dataLocator, responseMetaHeaders, return callback(null, dataLocator, responseMetaHeaders,
byteRange); byteRange);
}); });

View File

@ -7,7 +7,7 @@ const { pushMetric } = require('../utapi/utilities');
const { decodeVersionId, getVersionIdResHeader } const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning'); = require('./apiUtils/object/versioning');
const vault = require('../auth/vault'); const vault = require('../auth/vault');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
// Sample XML response: // Sample XML response:
@ -61,7 +61,7 @@ function objectGetACL(authInfo, request, log, callback) {
bucketName, bucketName,
objectKey, objectKey,
versionId, versionId,
requestType: request.apiMethods || 'objectGetACL', requestType: 'objectGetACL',
request, request,
}; };
const grantInfo = { const grantInfo = {
@ -74,7 +74,7 @@ function objectGetACL(authInfo, request, log, callback) {
return async.waterfall([ return async.waterfall([
function validateBucketAndObj(next) { function validateBucketAndObj(next) {
return standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, return metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => { (err, bucket, objectMD) => {
if (err) { if (err) {
log.trace('request authorization failed', log.trace('request authorization failed',

View File

@@ -4,7 +4,7 @@
 const { errors, s3middleware } = require('arsenal');
 const { decodeVersionId, getVersionIdResHeader }
     = require('./apiUtils/object/versioning');
-const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -40,12 +40,12 @@ function objectGetLegalHold(authInfo, request, log, callback) {
         bucketName,
         objectKey,
         versionId,
-        requestType: request.apiMethods || 'objectGetLegalHold',
+        requestType: 'objectGetLegalHold',
         request,
     };
     return async.waterfall([
-        next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
+        next => metadataValidateBucketAndObj(metadataValParams, log,
             (err, bucket, objectMD) => {
                 if (err) {
                     log.trace('request authorization failed',
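
Note that objectGetLegalHold (and the two files below) wire the same validation step into async.waterfall as an arrow-function task, whereas objectGetACL above uses a named function; the two forms are interchangeable as waterfall tasks. A small runnable illustration with an invented stub in place of the validator:

const async = require('async');

const validate = (params, log, cb) => cb(null, 'bucket', 'objectMD'); // invented stub

// Named-function task (objectGetACL style):
async.waterfall([
    function validateBucketAndObj(next) { return validate({}, console, next); },
], (err, bucket, objectMD) => console.log('named:', bucket, objectMD));

// Arrow-function task (objectGetLegalHold style):
async.waterfall([
    next => validate({}, console, next),
], (err, bucket, objectMD) => console.log('arrow:', bucket, objectMD));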

View File

@@ -4,7 +4,7 @@
 const { errors, s3middleware } = require('arsenal');
 const { decodeVersionId, getVersionIdResHeader }
     = require('./apiUtils/object/versioning');
-const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -40,12 +40,12 @@ function objectGetRetention(authInfo, request, log, callback) {
         bucketName,
         objectKey,
         versionId: reqVersionId,
-        requestType: request.apiMethods || 'objectGetRetention',
+        requestType: 'objectGetRetention',
         request,
     };
     return async.waterfall([
-        next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
+        next => metadataValidateBucketAndObj(metadataValParams, log,
             (err, bucket, objectMD) => {
                 if (err) {
                     log.trace('request authorization failed',

View File

@@ -4,7 +4,7 @@
 const { errors, s3middleware } = require('arsenal');
 const { decodeVersionId, getVersionIdResHeader }
     = require('./apiUtils/object/versioning');
-const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const { convertToXml } = s3middleware.tagging;
@@ -41,12 +41,12 @@ function objectGetTagging(authInfo, request, log, callback) {
         bucketName,
         objectKey,
         versionId: reqVersionId,
-        requestType: request.apiMethods || 'objectGetTagging',
+        requestType: 'objectGetTagging',
         request,
     };
     return async.waterfall([
-        next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
+        next => metadataValidateBucketAndObj(metadataValParams, log,
             (err, bucket, objectMD) => {
                 if (err) {
                     log.trace('request authorization failed',
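
Every file in this group makes the same substitution on requestType: the left-hand side prefers an action name already attached to the request (request.apiMethods) and only falls back to the fixed string when none is set, while the right-hand side always uses the fixed string. A tiny runnable illustration of that fallback (the apiMethods value is a placeholder; its real shape is not visible in this diff):

const fixedAction = 'objectGetTagging';

// Placeholder value: the real content of request.apiMethods is not shown in this diff.
const requestWithApiMethods = { apiMethods: 'objectGetVersionTagging' };
console.log(requestWithApiMethods.apiMethods || fixedAction); // objectGetVersionTagging

const bareRequest = {};
console.log(bareRequest.apiMethods || fixedAction); // objectGetTagging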

Some files were not shown because too many files have changed in this diff.