Compare commits
3 Commits
developmen
...
list-lifec
Author | SHA1 | Date |
---|---|---|
Nicolas Humbert | 41af186cf7 | |
Nicolas Humbert | 7c049b1329 | |
Nicolas Humbert | 371648ec0a |
|
@ -1,8 +1,5 @@
|
||||||
{
|
{
|
||||||
"extends": "scality",
|
"extends": "scality",
|
||||||
"plugins": [
|
|
||||||
"mocha"
|
|
||||||
],
|
|
||||||
"rules": {
|
"rules": {
|
||||||
"import/extensions": "off",
|
"import/extensions": "off",
|
||||||
"lines-around-directive": "off",
|
"lines-around-directive": "off",
|
||||||
|
@ -45,8 +42,7 @@
|
||||||
"no-restricted-properties": "off",
|
"no-restricted-properties": "off",
|
||||||
"new-parens": "off",
|
"new-parens": "off",
|
||||||
"no-multi-spaces": "off",
|
"no-multi-spaces": "off",
|
||||||
"quote-props": "off",
|
"quote-props": "off"
|
||||||
"mocha/no-exclusive-tests": "error",
|
|
||||||
},
|
},
|
||||||
"parserOptions": {
|
"parserOptions": {
|
||||||
"ecmaVersion": 2020
|
"ecmaVersion": 2020
|
||||||
|
|
|
@ -16,28 +16,30 @@ runs:
|
||||||
run: |-
|
run: |-
|
||||||
set -exu;
|
set -exu;
|
||||||
mkdir -p /tmp/artifacts/${JOB_NAME}/;
|
mkdir -p /tmp/artifacts/${JOB_NAME}/;
|
||||||
- uses: actions/setup-node@v4
|
- uses: actions/setup-node@v2
|
||||||
with:
|
with:
|
||||||
node-version: '16'
|
node-version: '16'
|
||||||
cache: 'yarn'
|
cache: 'yarn'
|
||||||
- name: install dependencies
|
- name: install dependencies
|
||||||
shell: bash
|
shell: bash
|
||||||
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
|
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
|
||||||
- uses: actions/cache@v3
|
- uses: actions/cache@v2
|
||||||
with:
|
with:
|
||||||
path: ~/.cache/pip
|
path: ~/.cache/pip
|
||||||
key: ${{ runner.os }}-pip
|
key: ${{ runner.os }}-pip
|
||||||
- uses: actions/setup-python@v4
|
- uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: 3.9
|
python-version: |
|
||||||
|
2.7
|
||||||
|
3.9
|
||||||
|
- name: Install python deps
|
||||||
|
shell: bash
|
||||||
|
run: pip install docker-compose
|
||||||
- name: Setup python2 test environment
|
- name: Setup python2 test environment
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get install -y libdigest-hmac-perl
|
sudo apt-get install -y libdigest-hmac-perl
|
||||||
pip install 's3cmd==2.3.0'
|
pip install virtualenv
|
||||||
- name: fix sproxyd.conf permissions
|
virtualenv -p $(which python2) ~/.virtualenv/py2
|
||||||
shell: bash
|
source ~/.virtualenv/py2/bin/activate
|
||||||
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
|
pip install 's3cmd==1.6.1'
|
||||||
- name: ensure fuse kernel module is loaded (for sproxyd)
|
|
||||||
shell: bash
|
|
||||||
run: sudo modprobe fuse
|
|
||||||
|
|
|
@ -39,12 +39,6 @@ services:
|
||||||
- MONGODB_RS=rs0
|
- MONGODB_RS=rs0
|
||||||
- DEFAULT_BUCKET_KEY_FORMAT
|
- DEFAULT_BUCKET_KEY_FORMAT
|
||||||
- METADATA_MAX_CACHED_BUCKETS
|
- METADATA_MAX_CACHED_BUCKETS
|
||||||
- ENABLE_NULL_VERSION_COMPAT_MODE
|
|
||||||
- SCUBA_HOST
|
|
||||||
- SCUBA_PORT
|
|
||||||
- SCUBA_HEALTHCHECK_FREQUENCY
|
|
||||||
- S3QUOTA
|
|
||||||
- QUOTA_ENABLE_INFLIGHTS
|
|
||||||
env_file:
|
env_file:
|
||||||
- creds.env
|
- creds.env
|
||||||
depends_on:
|
depends_on:
|
||||||
|
@ -72,21 +66,14 @@ services:
|
||||||
pykmip:
|
pykmip:
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
profiles: ['pykmip']
|
profiles: ['pykmip']
|
||||||
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
|
image: registry.scality.com/cloudserver-dev/pykmip
|
||||||
volumes:
|
volumes:
|
||||||
- /tmp/artifacts/${JOB_NAME}:/artifacts
|
- /tmp/artifacts/${JOB_NAME}:/artifacts
|
||||||
mongo:
|
mongo:
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
profiles: ['mongo', 'ceph']
|
profiles: ['mongo', 'ceph']
|
||||||
image: ${MONGODB_IMAGE}
|
image: scality/ci-mongo:3.6.8
|
||||||
ceph:
|
ceph:
|
||||||
network_mode: "host"
|
network_mode: "host"
|
||||||
profiles: ['ceph']
|
profiles: ['ceph']
|
||||||
image: ghcr.io/scality/cloudserver/ci-ceph
|
image: ghcr.io/scality/cloudserver/ci-ceph
|
||||||
sproxyd:
|
|
||||||
network_mode: "host"
|
|
||||||
profiles: ['sproxyd']
|
|
||||||
image: sproxyd-standalone
|
|
||||||
build: ./sproxyd
|
|
||||||
user: 0:0
|
|
||||||
privileged: yes
|
|
||||||
|
|
|
@ -1,28 +0,0 @@
|
||||||
FROM mongo:5.0.21
|
|
||||||
|
|
||||||
ENV USER=scality \
|
|
||||||
HOME_DIR=/home/scality \
|
|
||||||
CONF_DIR=/conf \
|
|
||||||
DATA_DIR=/data
|
|
||||||
|
|
||||||
# Set up directories and permissions
|
|
||||||
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
|
|
||||||
mkdir /logs; \
|
|
||||||
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
|
|
||||||
|
|
||||||
# Set up environment variables and directories for scality user
|
|
||||||
RUN mkdir ${CONF_DIR} && \
|
|
||||||
chown -R ${USER} ${CONF_DIR} && \
|
|
||||||
chown -R ${USER} ${DATA_DIR}
|
|
||||||
|
|
||||||
# copy the mongo config file
|
|
||||||
COPY /conf/mongod.conf /conf/mongod.conf
|
|
||||||
COPY /conf/mongo-run.sh /conf/mongo-run.sh
|
|
||||||
COPY /conf/initReplicaSet /conf/initReplicaSet.js
|
|
||||||
|
|
||||||
EXPOSE 27017/tcp
|
|
||||||
EXPOSE 27018
|
|
||||||
|
|
||||||
# Set up CMD
|
|
||||||
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
|
|
||||||
CMD ["bash", "/conf/mongo-run.sh"]
|
|
|
@ -1,4 +0,0 @@
|
||||||
rs.initiate({
|
|
||||||
_id: "rs0",
|
|
||||||
members: [{ _id: 0, host: "127.0.0.1:27018" }]
|
|
||||||
});
|
|
|
@ -1,10 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
set -exo pipefail
|
|
||||||
|
|
||||||
init_RS() {
|
|
||||||
sleep 5
|
|
||||||
mongo --port 27018 /conf/initReplicaSet.js
|
|
||||||
}
|
|
||||||
init_RS &
|
|
||||||
|
|
||||||
mongod --bind_ip_all --config=/conf/mongod.conf
|
|
|
@ -1,15 +0,0 @@
|
||||||
storage:
|
|
||||||
journal:
|
|
||||||
enabled: true
|
|
||||||
engine: wiredTiger
|
|
||||||
dbPath: "/data/db"
|
|
||||||
processManagement:
|
|
||||||
fork: false
|
|
||||||
net:
|
|
||||||
port: 27018
|
|
||||||
bindIp: 0.0.0.0
|
|
||||||
replication:
|
|
||||||
replSetName: "rs0"
|
|
||||||
enableMajorityReadConcern: true
|
|
||||||
security:
|
|
||||||
authorization: disabled
|
|
|
@ -1,3 +0,0 @@
|
||||||
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
|
|
||||||
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
|
|
||||||
RUN chown root:root /conf/sproxyd0.conf
|
|
|
@ -1,26 +0,0 @@
|
||||||
fastcgi_param QUERY_STRING $query_string;
|
|
||||||
fastcgi_param REQUEST_METHOD $request_method;
|
|
||||||
fastcgi_param CONTENT_TYPE $content_type;
|
|
||||||
fastcgi_param CONTENT_LENGTH $content_length;
|
|
||||||
|
|
||||||
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
|
||||||
fastcgi_param SCRIPT_NAME /var/www;
|
|
||||||
fastcgi_param PATH_INFO $document_uri;
|
|
||||||
|
|
||||||
fastcgi_param REQUEST_URI $request_uri;
|
|
||||||
fastcgi_param DOCUMENT_URI $document_uri;
|
|
||||||
fastcgi_param DOCUMENT_ROOT $document_root;
|
|
||||||
fastcgi_param SERVER_PROTOCOL $server_protocol;
|
|
||||||
fastcgi_param HTTPS $https if_not_empty;
|
|
||||||
|
|
||||||
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
|
|
||||||
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
|
|
||||||
|
|
||||||
fastcgi_param REMOTE_ADDR $remote_addr;
|
|
||||||
fastcgi_param REMOTE_PORT $remote_port;
|
|
||||||
fastcgi_param SERVER_ADDR $server_addr;
|
|
||||||
fastcgi_param SERVER_PORT $server_port;
|
|
||||||
fastcgi_param SERVER_NAME $server_name;
|
|
||||||
|
|
||||||
# PHP only, required if PHP was built with --enable-force-cgi-redirect
|
|
||||||
fastcgi_param REDIRECT_STATUS 200;
|
|
|
@ -1,88 +0,0 @@
|
||||||
worker_processes 1;
|
|
||||||
error_log /logs/error.log;
|
|
||||||
user root root;
|
|
||||||
events {
|
|
||||||
worker_connections 1000;
|
|
||||||
reuse_port on;
|
|
||||||
multi_accept on;
|
|
||||||
}
|
|
||||||
worker_rlimit_nofile 20000;
|
|
||||||
http {
|
|
||||||
root /var/www/;
|
|
||||||
upstream sproxyds {
|
|
||||||
least_conn;
|
|
||||||
keepalive 40;
|
|
||||||
server 127.0.0.1:20000;
|
|
||||||
}
|
|
||||||
server {
|
|
||||||
client_max_body_size 0;
|
|
||||||
client_body_timeout 150;
|
|
||||||
client_header_timeout 150;
|
|
||||||
postpone_output 0;
|
|
||||||
client_body_postpone_size 0;
|
|
||||||
keepalive_requests 1100;
|
|
||||||
keepalive_timeout 300s;
|
|
||||||
server_tokens off;
|
|
||||||
default_type application/octet-stream;
|
|
||||||
gzip off;
|
|
||||||
tcp_nodelay on;
|
|
||||||
tcp_nopush on;
|
|
||||||
sendfile on;
|
|
||||||
listen 81;
|
|
||||||
server_name localhost;
|
|
||||||
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
|
|
||||||
location ~* ^/proxy/(.*)$ {
|
|
||||||
rewrite ^/proxy/(.*)$ /$1 last;
|
|
||||||
}
|
|
||||||
allow 127.0.0.1;
|
|
||||||
|
|
||||||
deny all;
|
|
||||||
set $usermd '-';
|
|
||||||
set $sentusermd '-';
|
|
||||||
set $elapsed_ms '-';
|
|
||||||
set $now '-';
|
|
||||||
log_by_lua '
|
|
||||||
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
|
|
||||||
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
|
|
||||||
end
|
|
||||||
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
|
|
||||||
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
|
|
||||||
end
|
|
||||||
local elapsed_ms = tonumber(ngx.var.request_time)
|
|
||||||
if not ( elapsed_ms == nil) then
|
|
||||||
elapsed_ms = elapsed_ms * 1000
|
|
||||||
ngx.var.elapsed_ms = tostring(elapsed_ms)
|
|
||||||
end
|
|
||||||
local time = tonumber(ngx.var.msec) * 1000
|
|
||||||
ngx.var.now = time
|
|
||||||
';
|
|
||||||
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
|
|
||||||
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
|
|
||||||
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
|
|
||||||
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
|
|
||||||
'"contentType":"$content_type","s3Address":"$remote_addr",'
|
|
||||||
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
|
|
||||||
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
|
|
||||||
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
|
|
||||||
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
|
|
||||||
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
|
|
||||||
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
|
|
||||||
access_log /dev/stdout irm;
|
|
||||||
error_log /dev/stdout error;
|
|
||||||
location / {
|
|
||||||
proxy_request_buffering off;
|
|
||||||
fastcgi_request_buffering off;
|
|
||||||
fastcgi_no_cache 1;
|
|
||||||
fastcgi_cache_bypass 1;
|
|
||||||
fastcgi_buffering off;
|
|
||||||
fastcgi_ignore_client_abort on;
|
|
||||||
fastcgi_keep_conn on;
|
|
||||||
include fastcgi_params;
|
|
||||||
fastcgi_pass sproxyds;
|
|
||||||
fastcgi_next_upstream error timeout;
|
|
||||||
fastcgi_send_timeout 285s;
|
|
||||||
fastcgi_read_timeout 285s;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,12 +0,0 @@
|
||||||
{
|
|
||||||
"general": {
|
|
||||||
"ring": "DATA",
|
|
||||||
"port": 20000,
|
|
||||||
"syslog_facility": "local0"
|
|
||||||
},
|
|
||||||
"ring_driver:0": {
|
|
||||||
"alias": "dc1",
|
|
||||||
"type": "local",
|
|
||||||
"queue_path": "/tmp/ring-objs"
|
|
||||||
},
|
|
||||||
}
|
|
|
@ -1,43 +0,0 @@
|
||||||
[supervisord]
|
|
||||||
nodaemon = true
|
|
||||||
loglevel = info
|
|
||||||
logfile = %(ENV_LOG_DIR)s/supervisord.log
|
|
||||||
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
|
|
||||||
logfile_maxbytes = 20MB
|
|
||||||
logfile_backups = 2
|
|
||||||
|
|
||||||
[unix_http_server]
|
|
||||||
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
|
|
||||||
|
|
||||||
[rpcinterface:supervisor]
|
|
||||||
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
|
|
||||||
|
|
||||||
[supervisorctl]
|
|
||||||
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
|
|
||||||
|
|
||||||
[program:nginx]
|
|
||||||
directory=%(ENV_SUP_RUN_DIR)s
|
|
||||||
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
|
|
||||||
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
|
|
||||||
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
|
|
||||||
stdout_logfile_maxbytes=100MB
|
|
||||||
stdout_logfile_backups=7
|
|
||||||
stderr_logfile_maxbytes=100MB
|
|
||||||
stderr_logfile_backups=7
|
|
||||||
autorestart=true
|
|
||||||
autostart=true
|
|
||||||
user=root
|
|
||||||
|
|
||||||
[program:sproxyd]
|
|
||||||
directory=%(ENV_SUP_RUN_DIR)s
|
|
||||||
process_name=%(program_name)s-%(process_num)s
|
|
||||||
numprocs=1
|
|
||||||
numprocs_start=0
|
|
||||||
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
|
|
||||||
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
|
|
||||||
stdout_logfile_maxbytes=100MB
|
|
||||||
stdout_logfile_backups=7
|
|
||||||
redirect_stderr=true
|
|
||||||
autorestart=true
|
|
||||||
autostart=true
|
|
||||||
user=root
|
|
|
@ -1,10 +1,7 @@
|
||||||
name: Test alerts
|
name: Test alerts
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push
|
||||||
branches-ignore:
|
|
||||||
- 'development/**'
|
|
||||||
- 'q/*/**'
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
run-alert-tests:
|
run-alert-tests:
|
||||||
|
@ -20,16 +17,13 @@ jobs:
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
- name: Render and test ${{ matrix.tests.name }}
|
- name: Render and test ${{ matrix.tests.name }}
|
||||||
uses: scality/action-prom-render-test@1.0.3
|
uses: scality/action-prom-render-test@1.0.1
|
||||||
with:
|
with:
|
||||||
alert_file_path: monitoring/alerts.yaml
|
alert_file_path: monitoring/alerts.yaml
|
||||||
test_file_path: ${{ matrix.tests.file }}
|
test_file_path: ${{ matrix.tests.file }}
|
||||||
alert_inputs: |
|
alert_inputs: >-
|
||||||
namespace=zenko
|
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
|
||||||
service=artesca-data-connector-s3api-metrics
|
|
||||||
reportJob=artesca-data-ops-report-handler
|
|
||||||
replicas=3
|
|
||||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
|
@ -1,25 +0,0 @@
|
||||||
---
|
|
||||||
name: codeQL
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [w/**, q/*]
|
|
||||||
pull_request:
|
|
||||||
branches: [development/*, stabilization/*, hotfix/*]
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
analyze:
|
|
||||||
name: Static analysis with CodeQL
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
|
||||||
uses: github/codeql-action/init@v3
|
|
||||||
with:
|
|
||||||
languages: javascript, python, ruby
|
|
||||||
|
|
||||||
- name: Build and analyze
|
|
||||||
uses: github/codeql-action/analyze@v3
|
|
|
@ -1,16 +0,0 @@
|
||||||
---
|
|
||||||
name: dependency review
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches: [development/*, stabilization/*, hotfix/*]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
dependency-review:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: 'Checkout Repository'
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: 'Dependency Review'
|
|
||||||
uses: actions/dependency-review-action@v4
|
|
|
@ -1,6 +1,5 @@
|
||||||
---
|
---
|
||||||
name: release
|
name: release
|
||||||
run-name: release ${{ inputs.tag }}
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
@ -10,69 +9,58 @@ on:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
env:
|
env:
|
||||||
|
REGISTRY_NAME: registry.scality.com
|
||||||
PROJECT_NAME: ${{ github.event.repository.name }}
|
PROJECT_NAME: ${{ github.event.repository.name }}
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build-federation-image:
|
build-federation-image:
|
||||||
runs-on: ubuntu-20.04
|
uses: scality/workflows/.github/workflows/docker-build.yaml@v1
|
||||||
steps:
|
secrets: inherit
|
||||||
- name: Checkout
|
with:
|
||||||
uses: actions/checkout@v4
|
push: true
|
||||||
- name: Set up Docker Buildx
|
registry: registry.scality.com
|
||||||
uses: docker/setup-buildx-action@v3
|
namespace: ${{ github.event.repository.name }}
|
||||||
- name: Login to GitHub Registry
|
name: ${{ github.event.repository.name }}
|
||||||
uses: docker/login-action@v3
|
context: .
|
||||||
with:
|
file: images/svc-base/Dockerfile
|
||||||
registry: ghcr.io
|
tag: ${{ github.event.inputs.tag }}-svc-base
|
||||||
username: ${{ github.repository_owner }}
|
|
||||||
password: ${{ github.token }}
|
|
||||||
- name: Build and push image for federation
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
context: .
|
|
||||||
file: images/svc-base/Dockerfile
|
|
||||||
tags: |
|
|
||||||
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
|
|
||||||
cache-from: type=gha,scope=federation
|
|
||||||
cache-to: type=gha,mode=max,scope=federation
|
|
||||||
|
|
||||||
release:
|
release:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
- name: Set up Docker Buildk
|
- name: Set up Docker Buildk
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v1
|
||||||
|
|
||||||
- name: Login to Registry
|
- name: Login to Registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v1
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ${{ env.REGISTRY_NAME }}
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ secrets.REGISTRY_LOGIN }}
|
||||||
password: ${{ github.token }}
|
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||||
|
|
||||||
- name: Push dashboards into the production namespace
|
- name: Push dashboards into the production namespace
|
||||||
run: |
|
run: |
|
||||||
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
|
oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
|
||||||
dashboard.json:application/grafana-dashboard+json \
|
dashboard.json:application/grafana-dashboard+json \
|
||||||
alerts.yaml:application/prometheus-alerts+yaml
|
alerts.yaml:application/prometheus-alerts+yaml
|
||||||
working-directory: monitoring
|
working-directory: monitoring
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v5
|
uses: docker/build-push-action@v2
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
push: true
|
push: true
|
||||||
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
|
tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }}
|
||||||
cache-from: type=gha
|
cache-from: type=gha
|
||||||
cache-to: type=gha,mode=max
|
cache-to: type=gha,mode=max
|
||||||
|
|
||||||
- name: Create Release
|
- name: Create Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v1
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ github.token }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
name: Release ${{ github.event.inputs.tag }}
|
name: Release ${{ github.event.inputs.tag }}
|
||||||
tag_name: ${{ github.event.inputs.tag }}
|
tag_name: ${{ github.event.inputs.tag }}
|
||||||
|
|
|
@ -2,8 +2,6 @@
|
||||||
name: tests
|
name: tests
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
push:
|
push:
|
||||||
branches-ignore:
|
branches-ignore:
|
||||||
- 'development/**'
|
- 'development/**'
|
||||||
|
@ -67,24 +65,23 @@ env:
|
||||||
ENABLE_LOCAL_CACHE: "true"
|
ENABLE_LOCAL_CACHE: "true"
|
||||||
REPORT_TOKEN: "report-token-1"
|
REPORT_TOKEN: "report-token-1"
|
||||||
REMOTE_MANAGEMENT_DISABLE: "1"
|
REMOTE_MANAGEMENT_DISABLE: "1"
|
||||||
# https://github.com/git-lfs/git-lfs/issues/5749
|
|
||||||
GIT_CLONE_PROTECTION_ACTIVE: 'false'
|
|
||||||
jobs:
|
jobs:
|
||||||
linting-coverage:
|
linting-coverage:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
- uses: actions/setup-node@v4
|
- uses: actions/setup-node@v2
|
||||||
with:
|
with:
|
||||||
node-version: '16'
|
node-version: '16'
|
||||||
cache: yarn
|
cache: yarn
|
||||||
- name: install dependencies
|
- name: install dependencies
|
||||||
run: yarn install --frozen-lockfile --network-concurrency 1
|
run: yarn install --frozen-lockfile --network-concurrency 1
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: '3.9'
|
python-version: '3.9'
|
||||||
- uses: actions/cache@v4
|
- uses: actions/cache@v2
|
||||||
with:
|
with:
|
||||||
path: ~/.cache/pip
|
path: ~/.cache/pip
|
||||||
key: ${{ runner.os }}-pip
|
key: ${{ runner.os }}-pip
|
||||||
|
@ -117,7 +114,7 @@ jobs:
|
||||||
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
|
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
|
||||||
if: always()
|
if: always()
|
||||||
- name: Upload files to artifacts
|
- name: Upload files to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v2
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -128,88 +125,61 @@ jobs:
|
||||||
|
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v2
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v1.6.0
|
||||||
- name: Login to GitHub Registry
|
- name: Login to GitHub Registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v1.10.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ github.token }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
- name: Login to Registry
|
||||||
|
uses: docker/login-action@v1
|
||||||
|
with:
|
||||||
|
registry: registry.scality.com
|
||||||
|
username: ${{ secrets.REGISTRY_LOGIN }}
|
||||||
|
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||||
- name: Build and push cloudserver image
|
- name: Build and push cloudserver image
|
||||||
uses: docker/build-push-action@v5
|
uses: docker/build-push-action@v3
|
||||||
with:
|
with:
|
||||||
push: true
|
push: true
|
||||||
context: .
|
context: .
|
||||||
provenance: false
|
provenance: false
|
||||||
tags: |
|
tags: |
|
||||||
ghcr.io/${{ github.repository }}:${{ github.sha }}
|
ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
labels: |
|
registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
|
||||||
git.repository=${{ github.repository }}
|
|
||||||
git.commit-sha=${{ github.sha }}
|
|
||||||
cache-from: type=gha,scope=cloudserver
|
cache-from: type=gha,scope=cloudserver
|
||||||
cache-to: type=gha,mode=max,scope=cloudserver
|
cache-to: type=gha,mode=max,scope=cloudserver
|
||||||
- name: Build and push pykmip image
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
context: .github/pykmip
|
|
||||||
tags: |
|
|
||||||
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
|
|
||||||
labels: |
|
|
||||||
git.repository=${{ github.repository }}
|
|
||||||
git.commit-sha=${{ github.sha }}
|
|
||||||
cache-from: type=gha,scope=pykmip
|
|
||||||
cache-to: type=gha,mode=max,scope=pykmip
|
|
||||||
- name: Build and push MongoDB
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
context: .github/docker/mongodb
|
|
||||||
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
|
||||||
cache-from: type=gha,scope=mongodb
|
|
||||||
cache-to: type=gha,mode=max,scope=mongodb
|
|
||||||
|
|
||||||
multiple-backend:
|
multiple-backend:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build
|
needs: build
|
||||||
env:
|
env:
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
|
||||||
S3BACKEND: mem
|
S3BACKEND: mem
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
S3DATA: multiple
|
S3DATA: multiple
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Login to Registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.repository_owner }}
|
|
||||||
password: ${{ github.token }}
|
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose --profile sproxyd up -d
|
run: docker-compose up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
- name: Run multiple backend test
|
- name: Run multiple backend test
|
||||||
run: |-
|
run: |-
|
||||||
set -o pipefail;
|
set -o pipefail;
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
bash wait_for_local_port.bash 81 40
|
|
||||||
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
env:
|
env:
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -228,26 +198,26 @@ jobs:
|
||||||
S3KMS: file
|
S3KMS: file
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: v0
|
DEFAULT_BUCKET_KEY_FORMAT: v0
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose --profile mongo up -d
|
run: docker-compose --profile mongo up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
- name: Run functional tests
|
- name: Run functional tests
|
||||||
run: |-
|
run: |-
|
||||||
set -o pipefail;
|
set -o pipefail;
|
||||||
|
source ~/.virtualenv/py2/bin/activate
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
env:
|
env:
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -267,27 +237,27 @@ jobs:
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
|
||||||
DEFAULT_BUCKET_KEY_FORMAT: v1
|
DEFAULT_BUCKET_KEY_FORMAT: v1
|
||||||
METADATA_MAX_CACHED_BUCKETS: 1
|
METADATA_MAX_CACHED_BUCKETS: 1
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose --profile mongo up -d
|
run: docker-compose --profile mongo up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
- name: Run functional tests
|
- name: Run functional tests
|
||||||
run: |-
|
run: |-
|
||||||
set -o pipefail;
|
set -o pipefail;
|
||||||
|
source ~/.virtualenv/py2/bin/activate
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
|
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
|
||||||
env:
|
env:
|
||||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -297,40 +267,30 @@ jobs:
|
||||||
if: always()
|
if: always()
|
||||||
|
|
||||||
file-ft-tests:
|
file-ft-tests:
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- job-name: file-ft-tests
|
|
||||||
name: ${{ matrix.job-name }}
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build
|
needs: build
|
||||||
env:
|
env:
|
||||||
S3BACKEND: file
|
S3BACKEND: file
|
||||||
S3VAULT: mem
|
S3VAULT: mem
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
|
||||||
MPU_TESTING: "yes"
|
MPU_TESTING: "yes"
|
||||||
JOB_NAME: ${{ matrix.job-name }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup matrix job artifacts directory
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -exu
|
|
||||||
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
|
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose up -d
|
run: docker-compose up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
- name: Run file ft tests
|
- name: Run file ft tests
|
||||||
run: |-
|
run: |-
|
||||||
set -o pipefail;
|
set -o pipefail;
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
|
source ~/.virtualenv/py2/bin/activate
|
||||||
|
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -344,18 +304,17 @@ jobs:
|
||||||
needs: build
|
needs: build
|
||||||
env:
|
env:
|
||||||
ENABLE_UTAPI_V2: t
|
ENABLE_UTAPI_V2: t
|
||||||
S3BACKEND: mem
|
S3BACKEND: mem
|
||||||
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
|
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose up -d
|
run: docker-compose up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
- name: Run file utapi v2 tests
|
- name: Run file utapi v2 tests
|
||||||
run: |-
|
run: |-
|
||||||
|
@ -363,51 +322,7 @@ jobs:
|
||||||
bash wait_for_local_port.bash 8000 40
|
bash wait_for_local_port.bash 8000 40
|
||||||
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
|
||||||
method: upload
|
|
||||||
url: https://artifacts.scality.net
|
|
||||||
user: ${{ secrets.ARTIFACTS_USER }}
|
|
||||||
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
|
||||||
source: /tmp/artifacts
|
|
||||||
if: always()
|
|
||||||
|
|
||||||
quota-tests:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: build
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
inflights:
|
|
||||||
- name: "With Inflights"
|
|
||||||
value: "true"
|
|
||||||
- name: "Without Inflights"
|
|
||||||
value: "false"
|
|
||||||
env:
|
|
||||||
S3METADATA: mongodb
|
|
||||||
S3BACKEND: mem
|
|
||||||
S3QUOTA: scuba
|
|
||||||
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
|
|
||||||
SCUBA_HOST: localhost
|
|
||||||
SCUBA_PORT: 8100
|
|
||||||
SCUBA_HEALTHCHECK_FREQUENCY: 100
|
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
|
||||||
JOB_NAME: ${{ github.job }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
- name: Setup CI environment
|
|
||||||
uses: ./.github/actions/setup-ci
|
|
||||||
- name: Setup CI services
|
|
||||||
run: docker compose --profile mongo up -d
|
|
||||||
working-directory: .github/docker
|
|
||||||
- name: Run quota tests
|
|
||||||
run: |-
|
|
||||||
set -ex -o pipefail;
|
|
||||||
bash wait_for_local_port.bash 8000 40
|
|
||||||
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
|
|
||||||
- name: Upload logs to artifacts
|
|
||||||
uses: scality/action-artifacts@v4
|
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -423,20 +338,18 @@ jobs:
|
||||||
S3BACKEND: file
|
S3BACKEND: file
|
||||||
S3VAULT: mem
|
S3VAULT: mem
|
||||||
MPU_TESTING: "yes"
|
MPU_TESTING: "yes"
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
|
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- name: Copy KMIP certs
|
- name: Copy KMIP certs
|
||||||
run: cp -r ./certs /tmp/ssl-kmip
|
run: cp -r ./certs /tmp/ssl-kmip
|
||||||
working-directory: .github/pykmip
|
working-directory: .github/pykmip
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose --profile pykmip up -d
|
run: docker-compose --profile pykmip up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
- name: Run file KMIP tests
|
- name: Run file KMIP tests
|
||||||
run: |-
|
run: |-
|
||||||
|
@ -445,7 +358,7 @@ jobs:
|
||||||
bash wait_for_local_port.bash 5696 40
|
bash wait_for_local_port.bash 5696 40
|
||||||
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
|
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
@ -453,7 +366,7 @@ jobs:
|
||||||
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||||
source: /tmp/artifacts
|
source: /tmp/artifacts
|
||||||
if: always()
|
if: always()
|
||||||
|
|
||||||
ceph-backend-test:
|
ceph-backend-test:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: build
|
needs: build
|
||||||
|
@ -464,31 +377,30 @@ jobs:
|
||||||
CI_CEPH: 'true'
|
CI_CEPH: 'true'
|
||||||
MPU_TESTING: "yes"
|
MPU_TESTING: "yes"
|
||||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
|
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
|
||||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
|
||||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
|
||||||
JOB_NAME: ${{ github.job }}
|
JOB_NAME: ${{ github.job }}
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- name: Login to GitHub Registry
|
- name: Login to GitHub Registry
|
||||||
uses: docker/login-action@v3
|
uses: docker/login-action@v1.10.0
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ github.token }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
- name: Setup CI environment
|
- name: Setup CI environment
|
||||||
uses: ./.github/actions/setup-ci
|
uses: ./.github/actions/setup-ci
|
||||||
- uses: ruby/setup-ruby@v1
|
- uses: ruby/setup-ruby@v1
|
||||||
with:
|
with:
|
||||||
ruby-version: '2.5.9'
|
ruby-version: '2.5.0'
|
||||||
- name: Install Ruby dependencies
|
- name: Install Ruby dependencies
|
||||||
run: |
|
run: |
|
||||||
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
|
gem install nokogiri:1.12.5 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
|
||||||
- name: Install Java dependencies
|
- name: Install Java dependencies
|
||||||
run: |
|
run: |
|
||||||
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
|
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
|
||||||
- name: Setup CI services
|
- name: Setup CI services
|
||||||
run: docker compose --profile ceph up -d
|
run: docker-compose --profile ceph up -d
|
||||||
working-directory: .github/docker
|
working-directory: .github/docker
|
||||||
env:
|
env:
|
||||||
S3METADATA: mongodb
|
S3METADATA: mongodb
|
||||||
|
@ -510,11 +422,12 @@ jobs:
|
||||||
- name: Run Ruby tests
|
- name: Run Ruby tests
|
||||||
run: |-
|
run: |-
|
||||||
set -ex -o pipefail;
|
set -ex -o pipefail;
|
||||||
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
|
rspec tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
|
||||||
working-directory: tests/functional/fog
|
working-directory: tests/functional/fog
|
||||||
- name: Run Javascript AWS SDK tests
|
- name: Run Javascript AWS SDK tests
|
||||||
run: |-
|
run: |-
|
||||||
set -ex -o pipefail;
|
set -ex -o pipefail;
|
||||||
|
source ~/.virtualenv/py2/bin/activate
|
||||||
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
|
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
|
||||||
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
|
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
|
||||||
env:
|
env:
|
||||||
|
@ -523,7 +436,7 @@ jobs:
|
||||||
S3VAULT: mem
|
S3VAULT: mem
|
||||||
S3METADATA: mongodb
|
S3METADATA: mongodb
|
||||||
- name: Upload logs to artifacts
|
- name: Upload logs to artifacts
|
||||||
uses: scality/action-artifacts@v4
|
uses: scality/action-artifacts@v3
|
||||||
with:
|
with:
|
||||||
method: upload
|
method: upload
|
||||||
url: https://artifacts.scality.net
|
url: https://artifacts.scality.net
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
ARG NODE_VERSION=16.20-bullseye-slim
|
ARG NODE_VERSION=16.17.1-bullseye-slim
|
||||||
|
|
||||||
FROM node:${NODE_VERSION} as builder
|
FROM node:${NODE_VERSION} as builder
|
||||||
|
|
||||||
|
@ -23,7 +23,6 @@ RUN apt-get update \
|
||||||
|
|
||||||
ENV PYTHON=python3
|
ENV PYTHON=python3
|
||||||
COPY package.json yarn.lock /usr/src/app/
|
COPY package.json yarn.lock /usr/src/app/
|
||||||
RUN npm install typescript -g
|
|
||||||
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
|
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
|
||||||
|
|
||||||
################################################################################
|
################################################################################
|
||||||
|
@ -43,7 +42,6 @@ EXPOSE 8002
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y --no-install-recommends \
|
apt-get install -y --no-install-recommends \
|
||||||
jq \
|
jq \
|
||||||
tini \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
WORKDIR /usr/src/app
|
WORKDIR /usr/src/app
|
||||||
|
@ -55,6 +53,6 @@ COPY --from=builder /usr/src/app/node_modules ./node_modules/
|
||||||
|
|
||||||
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
|
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
|
||||||
|
|
||||||
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
|
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
|
||||||
|
|
||||||
CMD [ "yarn", "start" ]
|
CMD [ "yarn", "start" ]
|
||||||
|
|
175
README.md
175
README.md
|
@ -1,7 +1,10 @@
|
||||||
# Zenko CloudServer with Vitastor Backend
|
# Zenko CloudServer
|
||||||
|
|
||||||
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
|
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
|
||||||
|
|
||||||
|
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
|
||||||
|
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
|
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
|
||||||
|
@ -11,71 +14,137 @@ Scality’s Open Source Multi-Cloud Data Controller.
|
||||||
CloudServer provides a single AWS S3 API interface to access multiple
|
CloudServer provides a single AWS S3 API interface to access multiple
|
||||||
backend data storage both on-premise or public in the cloud.
|
backend data storage both on-premise or public in the cloud.
|
||||||
|
|
||||||
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
|
CloudServer is useful for Developers, either to run as part of a
|
||||||
backend support.
|
continous integration test environment to emulate the AWS S3 service locally
|
||||||
|
or as an abstraction layer to develop object storage enabled
|
||||||
|
application on the go.
|
||||||
|
|
||||||
## Quick Start with Vitastor
|
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
|
||||||
|
|
||||||
Vitastor Backend is in experimental status, however you can already try to
|
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
|
||||||
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
|
|
||||||
it works too 😊.
|
|
||||||
|
|
||||||
Installation instructions:
|
## Docker
|
||||||
|
|
||||||
### Install Vitastor
|
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
|
||||||
|
|
||||||
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
|
## Contributing
|
||||||
|
|
||||||
### Install Zenko with Vitastor Backend
|
In order to contribute, please follow the
|
||||||
|
[Contributing Guidelines](
|
||||||
|
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
|
||||||
|
|
||||||
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
|
## Installation
|
||||||
- Install dependencies: `npm install --omit dev` or just `npm install`
|
|
||||||
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
|
|
||||||
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
|
|
||||||
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
|
|
||||||
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
|
|
||||||
|
|
||||||
### Install and Configure MongoDB
|
### Dependencies
|
||||||
|
|
||||||
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
|
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
|
||||||
|
. Up-to-date versions can be found at
|
||||||
|
[Nodesource](https://github.com/nodesource/distributions).
|
||||||
|
|
||||||
### Setup Zenko
|
### Clone source code
|
||||||
|
|
||||||
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
|
```shell
|
||||||
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
|
git clone https://github.com/scality/S3.git
|
||||||
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
|
|
||||||
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
|
|
||||||
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
|
|
||||||
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
|
|
||||||
access keys, but it's not published, so let's use a file for now.
|
|
||||||
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
|
|
||||||
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
|
|
||||||
in this file.
|
|
||||||
|
|
||||||
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
|
|
||||||
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
|
|
||||||
|
|
||||||
### Start Zenko
|
|
||||||
|
|
||||||
Start the S3 server with: `node index.js`
|
|
||||||
|
|
||||||
If you use default settings, Zenko CloudServer starts on port 8000.
|
|
||||||
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
|
|
||||||
|
|
||||||
Now you can access your S3 with `s3cmd` or `geesefs`:
|
|
||||||
|
|
||||||
```
|
|
||||||
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
|
|
||||||
```
|
```
|
||||||
|
|
||||||
```
|
### Install js dependencies
|
||||||
AWS_ACCESS_KEY_ID=accessKey1 \
|
|
||||||
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
|
Go to the ./S3 folder,
|
||||||
geesefs --endpoint http://localhost:8000 testbucket mountdir
|
|
||||||
|
```shell
|
||||||
|
yarn install --frozen-lockfile
|
||||||
```
|
```
|
||||||
|
|
||||||
# Author & License
|
If you get an error regarding installation of the diskUsage module,
|
||||||
|
please install g++.
|
||||||
|
|
||||||
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
|
If you get an error regarding level-down bindings, try clearing your yarn cache:
|
||||||
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
|
|
||||||
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
|
```shell
|
||||||
|
yarn cache clean
|
||||||
|
```
|
||||||
|
|
||||||
|
## Run it with a file backend
|
||||||
|
|
||||||
|
```shell
|
||||||
|
yarn start
|
||||||
|
```
|
||||||
|
|
||||||
|
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
|
||||||
|
9991 are also open locally for internal transfer of metadata and data,
|
||||||
|
respectively.
|
||||||
|
|
||||||
|
The default access key is accessKey1 with
|
||||||
|
a secret key of verySecretKey1.
|
||||||
|
|
||||||
|
By default the metadata files will be saved in the
|
||||||
|
localMetadata directory and the data files will be saved
|
||||||
|
in the localData directory within the ./S3 directory on your
|
||||||
|
machine. These directories have been pre-created within the
|
||||||
|
repository. If you would like to save the data or metadata in
|
||||||
|
different locations of your choice, you must specify them with absolute paths.
|
||||||
|
So, when starting the server:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
mkdir -m 700 $(pwd)/myFavoriteDataPath
|
||||||
|
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
|
||||||
|
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
|
||||||
|
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
|
||||||
|
yarn start
|
||||||
|
```
|
||||||
|
|
||||||
|
## Run it with multiple data backends
|
||||||
|
|
||||||
|
```shell
|
||||||
|
export S3DATA='multiple'
|
||||||
|
yarn start
|
||||||
|
```
|
||||||
|
|
||||||
|
This starts a Zenko CloudServer on port 8000.
|
||||||
|
The default access key is accessKey1 with
|
||||||
|
a secret key of verySecretKey1.
|
||||||
|
|
||||||
|
With multiple backends, you have the ability to
|
||||||
|
choose where each object will be saved by setting
|
||||||
|
the following header with a locationConstraint on
|
||||||
|
a PUT request:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
|
||||||
|
```
|
||||||
|
|
||||||
|
If no header is sent with a PUT object request, the
|
||||||
|
location constraint of the bucket will determine
|
||||||
|
where the data is saved. If the bucket has no location
|
||||||
|
constraint, the endpoint of the PUT request will be
|
||||||
|
used to determine location.
|
||||||
|
|
||||||
|
See the Configuration section in our documentation
|
||||||
|
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
|
||||||
|
to learn how to set location constraints.
|
||||||
|
|
||||||
|
## Run it with an in-memory backend
|
||||||
|
|
||||||
|
```shell
|
||||||
|
yarn run mem_backend
|
||||||
|
```
|
||||||
|
|
||||||
|
This starts a Zenko CloudServer on port 8000.
|
||||||
|
The default access key is accessKey1 with
|
||||||
|
a secret key of verySecretKey1.
|
||||||
|
|
||||||
|
## Run it with Vault user management
|
||||||
|
|
||||||
|
Note: Vault is proprietary and must be accessed separately.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
export S3VAULT=vault
|
||||||
|
yarn start
|
||||||
|
```
|
||||||
|
|
||||||
|
This starts a Zenko CloudServer using Vault for user management.
|
||||||
|
|
||||||
|
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
|
||||||
|
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
|
||||||
|
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
|
||||||
|
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
|
||||||
|
|
|
@ -0,0 +1,46 @@
|
||||||
|
#!/usr/bin/env node
|
||||||
|
'use strict'; // eslint-disable-line strict
|
||||||
|
|
||||||
|
const {
|
||||||
|
startWSManagementClient,
|
||||||
|
startPushConnectionHealthCheckServer,
|
||||||
|
} = require('../lib/management/push');
|
||||||
|
|
||||||
|
const logger = require('../lib/utilities/logger');
|
||||||
|
|
||||||
|
const {
|
||||||
|
PUSH_ENDPOINT: pushEndpoint,
|
||||||
|
INSTANCE_ID: instanceId,
|
||||||
|
MANAGEMENT_TOKEN: managementToken,
|
||||||
|
} = process.env;
|
||||||
|
|
||||||
|
if (!pushEndpoint) {
|
||||||
|
logger.error('missing push endpoint env var');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!instanceId) {
|
||||||
|
logger.error('missing instance id env var');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!managementToken) {
|
||||||
|
logger.error('missing management token env var');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
startPushConnectionHealthCheckServer(err => {
|
||||||
|
if (err) {
|
||||||
|
logger.error('could not start healthcheck server', { error: err });
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
|
||||||
|
startWSManagementClient(url, managementToken, err => {
|
||||||
|
if (err) {
|
||||||
|
logger.error('connection failed, exiting', { error: err });
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
logger.info('no more connection, exiting');
|
||||||
|
process.exit(0);
|
||||||
|
});
|
||||||
|
});
|
|
@ -0,0 +1,46 @@
|
||||||
|
#!/usr/bin/env node
|
||||||
|
'use strict'; // eslint-disable-line strict
|
||||||
|
|
||||||
|
const {
|
||||||
|
startWSManagementClient,
|
||||||
|
startPushConnectionHealthCheckServer,
|
||||||
|
} = require('../lib/management/push');
|
||||||
|
|
||||||
|
const logger = require('../lib/utilities/logger');
|
||||||
|
|
||||||
|
const {
|
||||||
|
PUSH_ENDPOINT: pushEndpoint,
|
||||||
|
INSTANCE_ID: instanceId,
|
||||||
|
MANAGEMENT_TOKEN: managementToken,
|
||||||
|
} = process.env;
|
||||||
|
|
||||||
|
if (!pushEndpoint) {
|
||||||
|
logger.error('missing push endpoint env var');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!instanceId) {
|
||||||
|
logger.error('missing instance id env var');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!managementToken) {
|
||||||
|
logger.error('missing management token env var');
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
startPushConnectionHealthCheckServer(err => {
|
||||||
|
if (err) {
|
||||||
|
logger.error('could not start healthcheck server', { error: err });
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
|
||||||
|
startWSManagementClient(url, managementToken, err => {
|
||||||
|
if (err) {
|
||||||
|
logger.error('connection failed, exiting', { error: err });
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
logger.info('no more connection, exiting');
|
||||||
|
process.exit(0);
|
||||||
|
});
|
||||||
|
});
|
|
@ -4,7 +4,6 @@
|
||||||
"metricsPort": 8002,
|
"metricsPort": 8002,
|
||||||
"metricsListenOn": [],
|
"metricsListenOn": [],
|
||||||
"replicationGroupId": "RG001",
|
"replicationGroupId": "RG001",
|
||||||
"workers": 4,
|
|
||||||
"restEndpoints": {
|
"restEndpoints": {
|
||||||
"localhost": "us-east-1",
|
"localhost": "us-east-1",
|
||||||
"127.0.0.1": "us-east-1",
|
"127.0.0.1": "us-east-1",
|
||||||
|
@ -37,6 +36,12 @@
|
||||||
}, {
|
}, {
|
||||||
"site": "us-east-2",
|
"site": "us-east-2",
|
||||||
"type": "aws_s3"
|
"type": "aws_s3"
|
||||||
|
}, {
|
||||||
|
"site": "aws-location",
|
||||||
|
"type": "aws_s3"
|
||||||
|
}, {
|
||||||
|
"site": "location-dmf-v1",
|
||||||
|
"type": "dmf"
|
||||||
}],
|
}],
|
||||||
"backbeat": {
|
"backbeat": {
|
||||||
"host": "localhost",
|
"host": "localhost",
|
||||||
|
@ -102,14 +107,6 @@
|
||||||
"readPreference": "primary",
|
"readPreference": "primary",
|
||||||
"database": "metadata"
|
"database": "metadata"
|
||||||
},
|
},
|
||||||
"authdata": "authdata.json",
|
|
||||||
"backends": {
|
|
||||||
"auth": "file",
|
|
||||||
"data": "file",
|
|
||||||
"metadata": "mongodb",
|
|
||||||
"kms": "file",
|
|
||||||
"quota": "none"
|
|
||||||
},
|
|
||||||
"externalBackends": {
|
"externalBackends": {
|
||||||
"aws_s3": {
|
"aws_s3": {
|
||||||
"httpAgent": {
|
"httpAgent": {
|
|
@ -1,71 +0,0 @@
|
||||||
{
|
|
||||||
"port": 8000,
|
|
||||||
"listenOn": [],
|
|
||||||
"metricsPort": 8002,
|
|
||||||
"metricsListenOn": [],
|
|
||||||
"replicationGroupId": "RG001",
|
|
||||||
"restEndpoints": {
|
|
||||||
"localhost": "STANDARD",
|
|
||||||
"127.0.0.1": "STANDARD",
|
|
||||||
"yourhostname.ru": "STANDARD"
|
|
||||||
},
|
|
||||||
"websiteEndpoints": [
|
|
||||||
"static.yourhostname.ru"
|
|
||||||
],
|
|
||||||
"replicationEndpoints": [ {
|
|
||||||
"site": "zenko",
|
|
||||||
"servers": ["127.0.0.1:8000"],
|
|
||||||
"default": true
|
|
||||||
} ],
|
|
||||||
"log": {
|
|
||||||
"logLevel": "info",
|
|
||||||
"dumpLevel": "error"
|
|
||||||
},
|
|
||||||
"healthChecks": {
|
|
||||||
"allowFrom": ["127.0.0.1/8", "::1"]
|
|
||||||
},
|
|
||||||
"backends": {
|
|
||||||
"metadata": "mongodb"
|
|
||||||
},
|
|
||||||
"mongodb": {
|
|
||||||
"replicaSetHosts": "127.0.0.1:27017",
|
|
||||||
"writeConcern": "majority",
|
|
||||||
"replicaSet": "rs0",
|
|
||||||
"readPreference": "primary",
|
|
||||||
"database": "s3",
|
|
||||||
"authCredentials": {
|
|
||||||
"username": "s3",
|
|
||||||
"password": ""
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"externalBackends": {
|
|
||||||
"aws_s3": {
|
|
||||||
"httpAgent": {
|
|
||||||
"keepAlive": false,
|
|
||||||
"keepAliveMsecs": 1000,
|
|
||||||
"maxFreeSockets": 256,
|
|
||||||
"maxSockets": null
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"gcp": {
|
|
||||||
"httpAgent": {
|
|
||||||
"keepAlive": true,
|
|
||||||
"keepAliveMsecs": 1000,
|
|
||||||
"maxFreeSockets": 256,
|
|
||||||
"maxSockets": null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"requests": {
|
|
||||||
"viaProxy": false,
|
|
||||||
"trustedProxyCIDRs": [],
|
|
||||||
"extractClientIPFromHeader": ""
|
|
||||||
},
|
|
||||||
"bucketNotificationDestinations": [
|
|
||||||
{
|
|
||||||
"resource": "target1",
|
|
||||||
"type": "dummy",
|
|
||||||
"host": "localhost:6000"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
51
constants.js
51
constants.js
|
@ -116,8 +116,7 @@ const constants = {
|
||||||
],
|
],
|
||||||
|
|
||||||
// user metadata header to set object locationConstraint
|
// user metadata header to set object locationConstraint
|
||||||
objectLocationConstraintHeader: 'x-amz-storage-class',
|
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
|
||||||
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
|
|
||||||
legacyLocations: ['sproxyd', 'legacy'],
|
legacyLocations: ['sproxyd', 'legacy'],
|
||||||
// declare here all existing service accounts and their properties
|
// declare here all existing service accounts and their properties
|
||||||
// (if any, otherwise an empty object)
|
// (if any, otherwise an empty object)
|
||||||
|
@ -130,7 +129,7 @@ const constants = {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
/* eslint-disable camelcase */
|
/* eslint-disable camelcase */
|
||||||
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
|
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true },
|
||||||
// some of the available data backends (if called directly rather
|
// some of the available data backends (if called directly rather
|
||||||
// than through the multiple backend gateway) need a key provided
|
// than through the multiple backend gateway) need a key provided
|
||||||
// as a string as first parameter of the get/delete methods.
|
// as a string as first parameter of the get/delete methods.
|
||||||
|
@ -176,8 +175,6 @@ const constants = {
|
||||||
'objectDeleteTagging',
|
'objectDeleteTagging',
|
||||||
'objectGetTagging',
|
'objectGetTagging',
|
||||||
'objectPutTagging',
|
'objectPutTagging',
|
||||||
'objectPutLegalHold',
|
|
||||||
'objectPutRetention',
|
|
||||||
],
|
],
|
||||||
// response header to be sent when there are invalid
|
// response header to be sent when there are invalid
|
||||||
// user metadata in the object's metadata
|
// user metadata in the object's metadata
|
||||||
|
@ -198,51 +195,11 @@ const constants = {
|
||||||
'user',
|
'user',
|
||||||
'bucket',
|
'bucket',
|
||||||
],
|
],
|
||||||
arrayOfAllowed: [
|
|
||||||
'objectPutTagging',
|
|
||||||
'objectPutLegalHold',
|
|
||||||
'objectPutRetention',
|
|
||||||
],
|
|
||||||
allowedUtapiEventFilterStates: ['allow', 'deny'],
|
allowedUtapiEventFilterStates: ['allow', 'deny'],
|
||||||
allowedRestoreObjectRequestTierValues: ['Standard'],
|
allowedRestoreObjectRequestTierValues: ['Standard'],
|
||||||
lifecycleListing: {
|
validStorageClasses: [
|
||||||
CURRENT_TYPE: 'current',
|
'STANDARD',
|
||||||
NON_CURRENT_TYPE: 'noncurrent',
|
|
||||||
ORPHAN_DM_TYPE: 'orphan',
|
|
||||||
},
|
|
||||||
multiObjectDeleteConcurrency: 50,
|
|
||||||
maxScannedLifecycleListingEntries: 10000,
|
|
||||||
overheadField: [
|
|
||||||
'content-length',
|
|
||||||
'owner-id',
|
|
||||||
'versionId',
|
|
||||||
'isNull',
|
|
||||||
'isDeleteMarker',
|
|
||||||
],
|
],
|
||||||
unsupportedSignatureChecksums: new Set([
|
|
||||||
'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
|
|
||||||
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
|
|
||||||
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
|
|
||||||
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
|
|
||||||
]),
|
|
||||||
supportedSignatureChecksums: new Set([
|
|
||||||
'UNSIGNED-PAYLOAD',
|
|
||||||
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
|
|
||||||
]),
|
|
||||||
ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
|
|
||||||
ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
|
|
||||||
// The AWS assumed Role resource type
|
|
||||||
assumedRoleArnResourceType: 'assumed-role',
|
|
||||||
// Session name of the backbeat lifecycle assumed role session.
|
|
||||||
backbeatLifecycleSessionName: 'backbeat-lifecycle',
|
|
||||||
actionsToConsiderAsObjectPut: [
|
|
||||||
'initiateMultipartUpload',
|
|
||||||
'objectPutPart',
|
|
||||||
'completeMultipartUpload',
|
|
||||||
],
|
|
||||||
// if requester is not bucket owner, bucket policy actions should be denied with
|
|
||||||
// MethodNotAllowed error
|
|
||||||
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
|
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = constants;
|
module.exports = constants;
|
||||||
|
|
|
@ -199,10 +199,6 @@ if [[ -n "$BUCKET_DENY_FILTER" ]]; then
|
||||||
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
|
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "$TESTING_MODE" ]]; then
|
|
||||||
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
|
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
|
||||||
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
|
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
|
||||||
mv config.json.tmp config.json
|
mv config.json.tmp config.json
|
||||||
|
|
|
@ -2,12 +2,11 @@
|
||||||
|
|
||||||
## Docker Image Generation
|
## Docker Image Generation
|
||||||
|
|
||||||
Docker images are hosted on [ghcri.io](https://github.com/orgs/scality/packages).
|
Docker images are hosted on [registry.scality.com](registry.scality.com).
|
||||||
CloudServer has a few images there:
|
CloudServer has two namespaces there:
|
||||||
|
|
||||||
* Cloudserver container image: ghcr.io/scality/cloudserver
|
* Production Namespace: registry.scality.com/cloudserver
|
||||||
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
|
* Dev Namespace: registry.scality.com/cloudserver-dev
|
||||||
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
|
|
||||||
|
|
||||||
With every CI build, the CI will push images, tagging the
|
With every CI build, the CI will push images, tagging the
|
||||||
content with the developer branch's short SHA-1 commit hash.
|
content with the developer branch's short SHA-1 commit hash.
|
||||||
|
@ -19,8 +18,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
|
||||||
## How to Pull Docker Images
|
## How to Pull Docker Images
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
docker pull ghcr.io/scality/cloudserver:<commit hash>
|
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
|
||||||
docker pull ghcr.io/scality/cloudserver:<tag>
|
docker pull registry.scality.com/cloudserver/cloudserver:<tag>
|
||||||
```
|
```
|
||||||
|
|
||||||
## Release Process
|
## Release Process
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
|
FROM registry.scality.com/federation/nodesvc-base:7.10.6.0
|
||||||
|
|
||||||
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
|
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
|
||||||
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
|
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
|
||||||
|
@ -14,10 +14,8 @@ RUN rm -f ~/.gitconfig && \
|
||||||
git config --global --add safe.directory . && \
|
git config --global --add safe.directory . && \
|
||||||
git lfs install && \
|
git lfs install && \
|
||||||
GIT_LFS_SKIP_SMUDGE=1 && \
|
GIT_LFS_SKIP_SMUDGE=1 && \
|
||||||
yarn global add typescript && \
|
|
||||||
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
||||||
yarn cache clean --all && \
|
yarn cache clean --all
|
||||||
yarn global remove typescript
|
|
||||||
|
|
||||||
# run symlinking separately to avoid yarn installation errors
|
# run symlinking separately to avoid yarn installation errors
|
||||||
# we might have to check if the symlinking is really needed!
|
# we might have to check if the symlinking is really needed!
|
||||||
|
|
7
index.js
7
index.js
|
@ -1,10 +1,3 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
'use strict'; // eslint-disable-line strict
|
||||||
|
|
||||||
require('werelogs').stderrUtils.catchAndTimestampStderr(
|
|
||||||
undefined,
|
|
||||||
// Do not exit as workers have their own listener that will exit
|
|
||||||
// But primary don't have another listener
|
|
||||||
require('cluster').isPrimary ? 1 : null,
|
|
||||||
);
|
|
||||||
|
|
||||||
require('./lib/server.js')();
|
require('./lib/server.js')();
|
||||||
|
|
445
lib/Config.js
445
lib/Config.js
|
@ -8,18 +8,16 @@ const crypto = require('crypto');
|
||||||
const { v4: uuidv4 } = require('uuid');
|
const { v4: uuidv4 } = require('uuid');
|
||||||
const cronParser = require('cron-parser');
|
const cronParser = require('cron-parser');
|
||||||
const joi = require('@hapi/joi');
|
const joi = require('@hapi/joi');
|
||||||
const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
|
|
||||||
const { isValidBucketName } = s3routes.routesUtils;
|
const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
|
||||||
const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig;
|
const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
|
||||||
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
|
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
|
||||||
const validExternalBackends = require('../constants').externalBackends;
|
const validExternalBackends = require('../constants').externalBackends;
|
||||||
const { azureAccountNameRegex, base64Regex,
|
const { azureAccountNameRegex, base64Regex,
|
||||||
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
|
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
|
||||||
} = require('../constants');
|
} = require('../constants');
|
||||||
const { utapiVersion } = require('utapi');
|
const { utapiVersion } = require('utapi');
|
||||||
const { scaleMsPerDay } = s3middleware.objectUtils;
|
|
||||||
|
|
||||||
const constants = require('../constants');
|
|
||||||
|
|
||||||
// config paths
|
// config paths
|
||||||
const configSearchPaths = [
|
const configSearchPaths = [
|
||||||
|
@ -107,47 +105,6 @@ function parseSproxydConfig(configSproxyd) {
|
||||||
return joi.attempt(configSproxyd, joiSchema, 'bad config');
|
return joi.attempt(configSproxyd, joiSchema, 'bad config');
|
||||||
}
|
}
|
||||||
|
|
||||||
function parseRedisConfig(redisConfig) {
|
|
||||||
const joiSchema = joi.object({
|
|
||||||
password: joi.string().allow(''),
|
|
||||||
host: joi.string(),
|
|
||||||
port: joi.number(),
|
|
||||||
retry: joi.object({
|
|
||||||
connectBackoff: joi.object({
|
|
||||||
min: joi.number().required(),
|
|
||||||
max: joi.number().required(),
|
|
||||||
jitter: joi.number().required(),
|
|
||||||
factor: joi.number().required(),
|
|
||||||
deadline: joi.number().required(),
|
|
||||||
}),
|
|
||||||
}),
|
|
||||||
// sentinel config
|
|
||||||
sentinels: joi.alternatives().try(
|
|
||||||
joi.string()
|
|
||||||
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
|
|
||||||
.custom(hosts => hosts.split(',').map(item => {
|
|
||||||
const [host, port] = item.split(':');
|
|
||||||
return { host, port: Number.parseInt(port, 10) };
|
|
||||||
})),
|
|
||||||
joi.array().items(
|
|
||||||
joi.object({
|
|
||||||
host: joi.string().required(),
|
|
||||||
port: joi.number().required(),
|
|
||||||
})
|
|
||||||
).min(1),
|
|
||||||
),
|
|
||||||
name: joi.string(),
|
|
||||||
sentinelPassword: joi.string().allow(''),
|
|
||||||
})
|
|
||||||
.and('host', 'port')
|
|
||||||
.and('sentinels', 'name')
|
|
||||||
.xor('host', 'sentinels')
|
|
||||||
.without('sentinels', ['host', 'port'])
|
|
||||||
.without('host', ['sentinels', 'sentinelPassword']);
|
|
||||||
|
|
||||||
return joi.attempt(redisConfig, joiSchema, 'bad config');
|
|
||||||
}
|
|
||||||
|
|
||||||
function restEndpointsAssert(restEndpoints, locationConstraints) {
|
function restEndpointsAssert(restEndpoints, locationConstraints) {
|
||||||
assert(typeof restEndpoints === 'object',
|
assert(typeof restEndpoints === 'object',
|
||||||
'bad config: restEndpoints must be an object of endpoints');
|
'bad config: restEndpoints must be an object of endpoints');
|
||||||
|
@ -280,60 +237,6 @@ function hdClientLocationConstraintAssert(configHd) {
|
||||||
return hdclientFields;
|
return hdclientFields;
|
||||||
}
|
}
|
||||||
|
|
||||||
function azureArchiveLocationConstraintAssert(locationObj) {
|
|
||||||
const checkedFields = [
|
|
||||||
'azureContainerName',
|
|
||||||
'azureStorageEndpoint',
|
|
||||||
];
|
|
||||||
if (Object.keys(locationObj.details).length === 0 ||
|
|
||||||
!checkedFields.every(field => field in locationObj.details)) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
const {
|
|
||||||
azureContainerName,
|
|
||||||
azureStorageEndpoint,
|
|
||||||
} = locationObj.details;
|
|
||||||
const stringFields = [
|
|
||||||
azureContainerName,
|
|
||||||
azureStorageEndpoint,
|
|
||||||
];
|
|
||||||
stringFields.forEach(field => {
|
|
||||||
assert(typeof field === 'string',
|
|
||||||
`bad config: ${field} must be a string`);
|
|
||||||
});
|
|
||||||
|
|
||||||
let hasAuthMethod = false;
|
|
||||||
if (locationObj.details.sasToken !== undefined) {
|
|
||||||
assert(typeof locationObj.details.sasToken === 'string',
|
|
||||||
`bad config: ${locationObj.details.sasToken} must be a string`);
|
|
||||||
hasAuthMethod = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (locationObj.details.azureStorageAccountName !== undefined &&
|
|
||||||
locationObj.details.azureStorageAccessKey !== undefined) {
|
|
||||||
assert(typeof locationObj.details.azureStorageAccountName === 'string',
|
|
||||||
`bad config: ${locationObj.details.azureStorageAccountName} must be a string`);
|
|
||||||
assert(typeof locationObj.details.azureStorageAccessKey === 'string',
|
|
||||||
`bad config: ${locationObj.details.azureStorageAccessKey} must be a string`);
|
|
||||||
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
|
|
||||||
hasAuthMethod = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (locationObj.details.tenantId !== undefined &&
|
|
||||||
locationObj.details.clientId !== undefined &&
|
|
||||||
locationObj.details.clientKey !== undefined) {
|
|
||||||
assert(typeof locationObj.details.tenantId === 'string',
|
|
||||||
`bad config: ${locationObj.details.tenantId} must be a string`);
|
|
||||||
assert(typeof locationObj.details.clientId === 'string',
|
|
||||||
`bad config: ${locationObj.details.clientId} must be a string`);
|
|
||||||
assert(typeof locationObj.details.clientKey === 'string',
|
|
||||||
`bad config: ${locationObj.details.clientKey} must be a string`);
|
|
||||||
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
|
|
||||||
hasAuthMethod = true;
|
|
||||||
}
|
|
||||||
assert(hasAuthMethod, 'Missing authentication method');
|
|
||||||
}
|
|
||||||
|
|
||||||
function dmfLocationConstraintAssert(locationObj) {
|
function dmfLocationConstraintAssert(locationObj) {
|
||||||
const checkedFields = [
|
const checkedFields = [
|
||||||
'endpoint',
|
'endpoint',
|
||||||
|
@ -377,7 +280,7 @@ function dmfLocationConstraintAssert(locationObj) {
|
||||||
function locationConstraintAssert(locationConstraints) {
|
function locationConstraintAssert(locationConstraints) {
|
||||||
const supportedBackends =
|
const supportedBackends =
|
||||||
['mem', 'file', 'scality',
|
['mem', 'file', 'scality',
|
||||||
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
|
'mongodb', 'dmf'].concat(Object.keys(validExternalBackends));
|
||||||
assert(typeof locationConstraints === 'object',
|
assert(typeof locationConstraints === 'object',
|
||||||
'bad config: locationConstraints must be an object');
|
'bad config: locationConstraints must be an object');
|
||||||
Object.keys(locationConstraints).forEach(l => {
|
Object.keys(locationConstraints).forEach(l => {
|
||||||
|
@ -488,9 +391,6 @@ function locationConstraintAssert(locationConstraints) {
|
||||||
if (locationConstraints[l].type === 'dmf') {
|
if (locationConstraints[l].type === 'dmf') {
|
||||||
dmfLocationConstraintAssert(locationConstraints[l]);
|
dmfLocationConstraintAssert(locationConstraints[l]);
|
||||||
}
|
}
|
||||||
if (locationConstraints[l].type === 'azure_archive') {
|
|
||||||
azureArchiveLocationConstraintAssert(locationConstraints[l]);
|
|
||||||
}
|
|
||||||
if (locationConstraints[l].type === 'pfs') {
|
if (locationConstraints[l].type === 'pfs') {
|
||||||
assert(typeof details.pfsDaemonEndpoint === 'object',
|
assert(typeof details.pfsDaemonEndpoint === 'object',
|
||||||
'bad config: pfsDaemonEndpoint is mandatory and must be an object');
|
'bad config: pfsDaemonEndpoint is mandatory and must be an object');
|
||||||
|
@ -502,33 +402,26 @@ function locationConstraintAssert(locationConstraints) {
|
||||||
locationConstraints[l].details.connector.hdclient);
|
locationConstraints[l].details.connector.hdclient);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
assert(Object.keys(locationConstraints)
|
||||||
|
.includes('us-east-1'), 'bad locationConfig: must ' +
|
||||||
|
'include us-east-1 as a locationConstraint');
|
||||||
}
|
}
|
||||||
|
|
||||||
function parseUtapiReindex(config) {
|
function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
|
||||||
const {
|
|
||||||
enabled,
|
|
||||||
schedule,
|
|
||||||
redis,
|
|
||||||
bucketd,
|
|
||||||
onlyCountLatestWhenObjectLocked,
|
|
||||||
} = config;
|
|
||||||
assert(typeof enabled === 'boolean',
|
assert(typeof enabled === 'boolean',
|
||||||
'bad config: utapi.reindex.enabled must be a boolean');
|
'bad config: utapi.reindex.enabled must be a boolean');
|
||||||
|
assert(typeof sentinel === 'object',
|
||||||
const parsedRedis = parseRedisConfig(redis);
|
'bad config: utapi.reindex.sentinel must be an object');
|
||||||
assert(Array.isArray(parsedRedis.sentinels),
|
assert(typeof sentinel.port === 'number',
|
||||||
'bad config: utapi reindex redis config requires a list of sentinels');
|
'bad config: utapi.reindex.sentinel.port must be a number');
|
||||||
|
assert(typeof sentinel.name === 'string',
|
||||||
|
'bad config: utapi.reindex.sentinel.name must be a string');
|
||||||
assert(typeof bucketd === 'object',
|
assert(typeof bucketd === 'object',
|
||||||
'bad config: utapi.reindex.bucketd must be an object');
|
'bad config: utapi.reindex.bucketd must be an object');
|
||||||
assert(typeof bucketd.port === 'number',
|
assert(typeof bucketd.port === 'number',
|
||||||
'bad config: utapi.reindex.bucketd.port must be a number');
|
'bad config: utapi.reindex.bucketd.port must be a number');
|
||||||
assert(typeof schedule === 'string',
|
assert(typeof schedule === 'string',
|
||||||
'bad config: utapi.reindex.schedule must be a string');
|
'bad config: utapi.reindex.schedule must be a string');
|
||||||
if (onlyCountLatestWhenObjectLocked !== undefined) {
|
|
||||||
assert(typeof onlyCountLatestWhenObjectLocked === 'boolean',
|
|
||||||
'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean');
|
|
||||||
}
|
|
||||||
try {
|
try {
|
||||||
cronParser.parseExpression(schedule);
|
cronParser.parseExpression(schedule);
|
||||||
} catch (e) {
|
} catch (e) {
|
||||||
|
@ -536,13 +429,6 @@ function parseUtapiReindex(config) {
|
||||||
'bad config: utapi.reindex.schedule must be a valid ' +
|
'bad config: utapi.reindex.schedule must be a valid ' +
|
||||||
`cron schedule. ${e.message}.`);
|
`cron schedule. ${e.message}.`);
|
||||||
}
|
}
|
||||||
return {
|
|
||||||
enabled,
|
|
||||||
schedule,
|
|
||||||
redis: parsedRedis,
|
|
||||||
bucketd,
|
|
||||||
onlyCountLatestWhenObjectLocked,
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function requestsConfigAssert(requestsConfig) {
|
function requestsConfigAssert(requestsConfig) {
|
||||||
|
@ -630,6 +516,7 @@ class Config extends EventEmitter {
|
||||||
// Read config automatically
|
// Read config automatically
|
||||||
this._getLocationConfig();
|
this._getLocationConfig();
|
||||||
this._getConfig();
|
this._getConfig();
|
||||||
|
this._configureBackends();
|
||||||
}
|
}
|
||||||
|
|
||||||
_getLocationConfig() {
|
_getLocationConfig() {
|
||||||
|
@ -841,11 +728,11 @@ class Config extends EventEmitter {
|
||||||
this.websiteEndpoints = config.websiteEndpoints;
|
this.websiteEndpoints = config.websiteEndpoints;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.workers = false;
|
this.clusters = false;
|
||||||
if (config.workers !== undefined) {
|
if (config.clusters !== undefined) {
|
||||||
assert(Number.isInteger(config.workers) && config.workers > 0,
|
assert(Number.isInteger(config.clusters) && config.clusters > 0,
|
||||||
'bad config: workers must be a positive integer');
|
'bad config: clusters must be a positive integer');
|
||||||
this.workers = config.workers;
|
this.clusters = config.clusters;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.usEastBehavior !== undefined) {
|
if (config.usEastBehavior !== undefined) {
|
||||||
|
@ -1083,7 +970,8 @@ class Config extends EventEmitter {
|
||||||
assert(typeof config.localCache.port === 'number',
|
assert(typeof config.localCache.port === 'number',
|
||||||
'config: bad port for localCache. port must be a number');
|
'config: bad port for localCache. port must be a number');
|
||||||
if (config.localCache.password !== undefined) {
|
if (config.localCache.password !== undefined) {
|
||||||
assert(typeof config.localCache.password === 'string',
|
assert(
|
||||||
|
this._verifyRedisPassword(config.localCache.password),
|
||||||
'config: vad password for localCache. password must' +
|
'config: vad password for localCache. password must' +
|
||||||
' be a string');
|
' be a string');
|
||||||
}
|
}
|
||||||
|
@ -1109,46 +997,56 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.redis) {
|
if (config.redis) {
|
||||||
this.redis = parseRedisConfig(config.redis);
|
if (config.redis.sentinels) {
|
||||||
}
|
this.redis = { sentinels: [], name: null };
|
||||||
if (config.scuba) {
|
|
||||||
this.scuba = {};
|
assert(typeof config.redis.name === 'string',
|
||||||
if (config.scuba.host) {
|
'bad config: redis sentinel name must be a string');
|
||||||
assert(typeof config.scuba.host === 'string',
|
this.redis.name = config.redis.name;
|
||||||
'bad config: scuba host must be a string');
|
assert(Array.isArray(config.redis.sentinels) ||
|
||||||
this.scuba.host = config.scuba.host;
|
typeof config.redis.sentinels === 'string',
|
||||||
|
'bad config: redis sentinels must be an array or string');
|
||||||
|
|
||||||
|
if (typeof config.redis.sentinels === 'string') {
|
||||||
|
config.redis.sentinels.split(',').forEach(item => {
|
||||||
|
const [host, port] = item.split(':');
|
||||||
|
this.redis.sentinels.push({ host,
|
||||||
|
port: Number.parseInt(port, 10) });
|
||||||
|
});
|
||||||
|
} else if (Array.isArray(config.redis.sentinels)) {
|
||||||
|
config.redis.sentinels.forEach(item => {
|
||||||
|
const { host, port } = item;
|
||||||
|
assert(typeof host === 'string',
|
||||||
|
'bad config: redis sentinel host must be a string');
|
||||||
|
assert(typeof port === 'number',
|
||||||
|
'bad config: redis sentinel port must be a number');
|
||||||
|
this.redis.sentinels.push({ host, port });
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
if (config.redis.sentinelPassword !== undefined) {
|
||||||
|
assert(
|
||||||
|
this._verifyRedisPassword(config.redis.sentinelPassword));
|
||||||
|
this.redis.sentinelPassword = config.redis.sentinelPassword;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// check for standalone configuration
|
||||||
|
this.redis = {};
|
||||||
|
assert(typeof config.redis.host === 'string',
|
||||||
|
'bad config: redis.host must be a string');
|
||||||
|
assert(typeof config.redis.port === 'number',
|
||||||
|
'bad config: redis.port must be a number');
|
||||||
|
this.redis.host = config.redis.host;
|
||||||
|
this.redis.port = config.redis.port;
|
||||||
}
|
}
|
||||||
if (config.scuba.port) {
|
if (config.redis.password !== undefined) {
|
||||||
assert(Number.isInteger(config.scuba.port)
|
assert(
|
||||||
&& config.scuba.port > 0,
|
this._verifyRedisPassword(config.redis.password),
|
||||||
'bad config: scuba port must be a positive integer');
|
'bad config: invalid password for redis. password must ' +
|
||||||
this.scuba.port = config.scuba.port;
|
'be a string');
|
||||||
|
this.redis.password = config.redis.password;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
|
|
||||||
assert(typeof process.env.SCUBA_HOST === 'string',
|
|
||||||
'bad config: scuba host must be a string');
|
|
||||||
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
|
|
||||||
&& Number(process.env.SCUBA_PORT) > 0,
|
|
||||||
'bad config: scuba port must be a positive integer');
|
|
||||||
this.scuba = {
|
|
||||||
host: process.env.SCUBA_HOST,
|
|
||||||
port: Number(process.env.SCUBA_PORT),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if (this.scuba) {
|
|
||||||
this.quotaEnabled = true;
|
|
||||||
}
|
|
||||||
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
|
|
||||||
config.quota?.maxStatenessMS ||
|
|
||||||
24 * 60 * 60 * 1000;
|
|
||||||
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
|
|
||||||
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
|
|
||||||
config.quota?.enableInflights || false;
|
|
||||||
this.quota = {
|
|
||||||
maxStaleness,
|
|
||||||
enableInflights,
|
|
||||||
};
|
|
||||||
if (config.utapi) {
|
if (config.utapi) {
|
||||||
this.utapi = { component: 's3' };
|
this.utapi = { component: 's3' };
|
||||||
if (config.utapi.host) {
|
if (config.utapi.host) {
|
||||||
|
@ -1177,8 +1075,50 @@ class Config extends EventEmitter {
|
||||||
assert(config.redis, 'missing required property of utapi ' +
|
assert(config.redis, 'missing required property of utapi ' +
|
||||||
'configuration: redis');
|
'configuration: redis');
|
||||||
if (config.utapi.redis) {
|
if (config.utapi.redis) {
|
||||||
this.utapi.redis = parseRedisConfig(config.utapi.redis);
|
if (config.utapi.redis.sentinels) {
|
||||||
if (this.utapi.redis.retry === undefined) {
|
this.utapi.redis = { sentinels: [], name: null };
|
||||||
|
|
||||||
|
assert(typeof config.utapi.redis.name === 'string',
|
||||||
|
'bad config: redis sentinel name must be a string');
|
||||||
|
this.utapi.redis.name = config.utapi.redis.name;
|
||||||
|
|
||||||
|
assert(Array.isArray(config.utapi.redis.sentinels),
|
||||||
|
'bad config: redis sentinels must be an array');
|
||||||
|
config.utapi.redis.sentinels.forEach(item => {
|
||||||
|
const { host, port } = item;
|
||||||
|
assert(typeof host === 'string',
|
||||||
|
'bad config: redis sentinel host must be a string');
|
||||||
|
assert(typeof port === 'number',
|
||||||
|
'bad config: redis sentinel port must be a number');
|
||||||
|
this.utapi.redis.sentinels.push({ host, port });
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
// check for standalone configuration
|
||||||
|
this.utapi.redis = {};
|
||||||
|
assert(typeof config.utapi.redis.host === 'string',
|
||||||
|
'bad config: redis.host must be a string');
|
||||||
|
assert(typeof config.utapi.redis.port === 'number',
|
||||||
|
'bad config: redis.port must be a number');
|
||||||
|
this.utapi.redis.host = config.utapi.redis.host;
|
||||||
|
this.utapi.redis.port = config.utapi.redis.port;
|
||||||
|
}
|
||||||
|
if (config.utapi.redis.retry !== undefined) {
|
||||||
|
if (config.utapi.redis.retry.connectBackoff !== undefined) {
|
||||||
|
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
|
||||||
|
assert.strictEqual(typeof min, 'number',
|
||||||
|
'utapi.redis.retry.connectBackoff: min must be a number');
|
||||||
|
assert.strictEqual(typeof max, 'number',
|
||||||
|
'utapi.redis.retry.connectBackoff: max must be a number');
|
||||||
|
assert.strictEqual(typeof jitter, 'number',
|
||||||
|
'utapi.redis.retry.connectBackoff: jitter must be a number');
|
||||||
|
assert.strictEqual(typeof factor, 'number',
|
||||||
|
'utapi.redis.retry.connectBackoff: factor must be a number');
|
||||||
|
assert.strictEqual(typeof deadline, 'number',
|
||||||
|
'utapi.redis.retry.connectBackoff: deadline must be a number');
|
||||||
|
}
|
||||||
|
|
||||||
|
this.utapi.redis.retry = config.utapi.redis.retry;
|
||||||
|
} else {
|
||||||
this.utapi.redis.retry = {
|
this.utapi.redis.retry = {
|
||||||
connectBackoff: {
|
connectBackoff: {
|
||||||
min: 10,
|
min: 10,
|
||||||
|
@ -1189,6 +1129,22 @@ class Config extends EventEmitter {
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
if (config.utapi.redis.password !== undefined) {
|
||||||
|
assert(
|
||||||
|
this._verifyRedisPassword(config.utapi.redis.password),
|
||||||
|
'config: invalid password for utapi redis. password' +
|
||||||
|
' must be a string');
|
||||||
|
this.utapi.redis.password = config.utapi.redis.password;
|
||||||
|
}
|
||||||
|
if (config.utapi.redis.sentinelPassword !== undefined) {
|
||||||
|
assert(
|
||||||
|
this._verifyRedisPassword(
|
||||||
|
config.utapi.redis.sentinelPassword),
|
||||||
|
'config: invalid password for utapi redis. password' +
|
||||||
|
' must be a string');
|
||||||
|
this.utapi.redis.sentinelPassword =
|
||||||
|
config.utapi.redis.sentinelPassword;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if (config.utapi.metrics) {
|
if (config.utapi.metrics) {
|
||||||
this.utapi.metrics = config.utapi.metrics;
|
this.utapi.metrics = config.utapi.metrics;
|
||||||
|
@ -1258,7 +1214,8 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (config.utapi && config.utapi.reindex) {
|
if (config.utapi && config.utapi.reindex) {
|
||||||
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
|
parseUtapiReindex(config.utapi.reindex);
|
||||||
|
this.utapi.reindex = config.utapi.reindex;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1303,8 +1260,6 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
this.authdata = config.authdata || 'authdata.json';
|
|
||||||
|
|
||||||
this.kms = {};
|
this.kms = {};
|
||||||
if (config.kms) {
|
if (config.kms) {
|
||||||
assert(typeof config.kms.userName === 'string');
|
assert(typeof config.kms.userName === 'string');
|
||||||
|
@ -1524,6 +1479,25 @@ class Config extends EventEmitter {
|
||||||
this.outboundProxy.certs = certObj.certs;
|
this.outboundProxy.certs = certObj.certs;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.managementAgent = {};
|
||||||
|
this.managementAgent.port = 8010;
|
||||||
|
this.managementAgent.host = 'localhost';
|
||||||
|
if (config.managementAgent !== undefined) {
|
||||||
|
if (config.managementAgent.port !== undefined) {
|
||||||
|
assert(Number.isInteger(config.managementAgent.port)
|
||||||
|
&& config.managementAgent.port > 0,
|
||||||
|
'bad config: managementAgent port must be a positive ' +
|
||||||
|
'integer');
|
||||||
|
this.managementAgent.port = config.managementAgent.port;
|
||||||
|
}
|
||||||
|
if (config.managementAgent.host !== undefined) {
|
||||||
|
assert.strictEqual(typeof config.managementAgent.host, 'string',
|
||||||
|
'bad config: management agent host must ' +
|
||||||
|
'be a string');
|
||||||
|
this.managementAgent.host = config.managementAgent.host;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Ephemeral token to protect the reporting endpoint:
|
// Ephemeral token to protect the reporting endpoint:
|
||||||
// try inherited from parent first, then hardcoded in conf file,
|
// try inherited from parent first, then hardcoded in conf file,
|
||||||
// then create a fresh one as last resort.
|
// then create a fresh one as last resort.
|
||||||
|
@ -1576,10 +1550,6 @@ class Config extends EventEmitter {
|
||||||
requestsConfigAssert(config.requests);
|
requestsConfigAssert(config.requests);
|
||||||
this.requests = config.requests;
|
this.requests = config.requests;
|
||||||
}
|
}
|
||||||
// CLDSRV-378: on 8.x branches, null version compatibility
|
|
||||||
// mode is enforced because null keys are not supported by the
|
|
||||||
// MongoDB backend.
|
|
||||||
this.nullVersionCompatMode = true;
|
|
||||||
if (config.bucketNotificationDestinations) {
|
if (config.bucketNotificationDestinations) {
|
||||||
this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations);
|
this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations);
|
||||||
}
|
}
|
||||||
|
@ -1588,108 +1558,43 @@ class Config extends EventEmitter {
|
||||||
|
|
||||||
// Version of the configuration we're running under
|
// Version of the configuration we're running under
|
||||||
this.overlayVersion = config.overlayVersion || 0;
|
this.overlayVersion = config.overlayVersion || 0;
|
||||||
|
|
||||||
this._setTimeOptions();
|
|
||||||
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
|
|
||||||
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
|
|
||||||
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
|
|
||||||
this.multiObjectDeleteConcurrency = extractedNumber;
|
|
||||||
}
|
|
||||||
|
|
||||||
this.multiObjectDeleteEnableOptimizations = true;
|
|
||||||
if (config.multiObjectDeleteEnableOptimizations === false) {
|
|
||||||
this.multiObjectDeleteEnableOptimizations = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
this.testingMode = config.testingMode || false;
|
|
||||||
|
|
||||||
this.maxScannedLifecycleListingEntries = constants.maxScannedLifecycleListingEntries;
|
|
||||||
if (config.maxScannedLifecycleListingEntries !== undefined) {
|
|
||||||
// maxScannedLifecycleListingEntries > 2 is required as a minimum because we must
|
|
||||||
// scan at least three entries to determine version eligibility.
|
|
||||||
// Two entries representing the master key and the following one representing the non-current version.
|
|
||||||
assert(Number.isInteger(config.maxScannedLifecycleListingEntries) &&
|
|
||||||
config.maxScannedLifecycleListingEntries > 2,
|
|
||||||
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
|
|
||||||
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
|
|
||||||
}
|
|
||||||
|
|
||||||
this._configureBackends(config);
|
|
||||||
}
|
|
||||||
|
|
||||||
_setTimeOptions() {
|
|
||||||
// NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
|
|
||||||
// TIME_PROGRESSION_FACTOR which decreases the weight attributed to a day in order to among other things
|
|
||||||
// expedite the lifecycle of objects.
|
|
||||||
|
|
||||||
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
|
|
||||||
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
|
|
||||||
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
|
|
||||||
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
|
|
||||||
// decreases the weight attributed to a day in order to expedite the lifecycle of objects.
|
|
||||||
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
|
|
||||||
|
|
||||||
const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
|
|
||||||
assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
|
|
||||||
'"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
|
|
||||||
|
|
||||||
// The scaledMsPerDay value is initially set to the number of milliseconds per day
|
|
||||||
// (24 * 60 * 60 * 1000) as the default value.
|
|
||||||
// However, during testing, if the timeProgressionFactor is defined and greater than 1,
|
|
||||||
// the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
|
|
||||||
// earlier in time.
|
|
||||||
const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
|
|
||||||
|
|
||||||
this.timeOptions = {
|
|
||||||
expireOneDayEarlier,
|
|
||||||
transitionOneDayEarlier,
|
|
||||||
timeProgressionFactor,
|
|
||||||
scaledMsPerDay,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
getTimeOptions() {
|
|
||||||
return this.timeOptions;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_getAuthData() {
|
_getAuthData() {
|
||||||
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
|
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
|
||||||
}
|
}
|
||||||
|
|
||||||
_configureBackends(config) {
|
_configureBackends() {
|
||||||
const backends = config.backends || {};
|
|
||||||
/**
|
/**
|
||||||
* Configure the backends for Authentication, Data and Metadata.
|
* Configure the backends for Authentication, Data and Metadata.
|
||||||
*/
|
*/
|
||||||
let auth = backends.auth || 'mem';
|
let auth = 'mem';
|
||||||
let data = backends.data || 'multiple';
|
let data = 'multiple';
|
||||||
let metadata = backends.metadata || 'file';
|
let metadata = 'file';
|
||||||
let kms = backends.kms || 'file';
|
let kms = 'file';
|
||||||
let quota = backends.quota || 'none';
|
|
||||||
if (process.env.S3BACKEND) {
|
if (process.env.S3BACKEND) {
|
||||||
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
||||||
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
||||||
'bad environment variable: S3BACKEND environment variable ' +
|
'bad environment variable: S3BACKEND environment variable ' +
|
||||||
'should be one of mem/file/scality/cdmi'
|
'should be one of mem/file/scality/cdmi'
|
||||||
);
|
);
|
||||||
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
|
auth = process.env.S3BACKEND;
|
||||||
data = process.env.S3BACKEND;
|
data = process.env.S3BACKEND;
|
||||||
metadata = process.env.S3BACKEND;
|
metadata = process.env.S3BACKEND;
|
||||||
kms = process.env.S3BACKEND;
|
kms = process.env.S3BACKEND;
|
||||||
}
|
}
|
||||||
if (process.env.S3VAULT) {
|
if (process.env.S3VAULT) {
|
||||||
auth = process.env.S3VAULT;
|
auth = process.env.S3VAULT;
|
||||||
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
||||||
// Auth only checks for 'mem' since mem === file
|
// Auth only checks for 'mem' since mem === file
|
||||||
|
auth = 'mem';
|
||||||
let authData;
|
let authData;
|
||||||
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
||||||
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
||||||
authData = buildAuthDataAccount(
|
authData = buildAuthDataAccount(
|
||||||
process.env.SCALITY_ACCESS_KEY_ID,
|
process.env.SCALITY_ACCESS_KEY_ID,
|
||||||
process.env.SCALITY_SECRET_ACCESS_KEY);
|
process.env.SCALITY_SECRET_ACCESS_KEY);
|
||||||
} else {
|
} else {
|
||||||
authData = this._getAuthData();
|
authData = this._getAuthData();
|
||||||
}
|
}
|
||||||
|
@ -1697,7 +1602,7 @@ class Config extends EventEmitter {
|
||||||
throw new Error('bad config: invalid auth config file.');
|
throw new Error('bad config: invalid auth config file.');
|
||||||
}
|
}
|
||||||
this.authData = authData;
|
this.authData = authData;
|
||||||
} else if (auth === 'multiple') {
|
} else if (auth === 'multiple') {
|
||||||
const authData = this._getAuthData();
|
const authData = this._getAuthData();
|
||||||
if (validateAuthConfig(authData)) {
|
if (validateAuthConfig(authData)) {
|
||||||
throw new Error('bad config: invalid auth config file.');
|
throw new Error('bad config: invalid auth config file.');
|
||||||
|
@ -1712,9 +1617,9 @@ class Config extends EventEmitter {
|
||||||
'should be one of mem/file/scality/multiple'
|
'should be one of mem/file/scality/multiple'
|
||||||
);
|
);
|
||||||
data = process.env.S3DATA;
|
data = process.env.S3DATA;
|
||||||
if (data === 'scality' || data === 'multiple') {
|
}
|
||||||
data = 'multiple';
|
if (data === 'scality' || data === 'multiple') {
|
||||||
}
|
data = 'multiple';
|
||||||
}
|
}
|
||||||
assert(this.locationConstraints !== undefined &&
|
assert(this.locationConstraints !== undefined &&
|
||||||
this.restEndpoints !== undefined,
|
this.restEndpoints !== undefined,
|
||||||
|
@ -1727,18 +1632,18 @@ class Config extends EventEmitter {
|
||||||
if (process.env.S3KMS) {
|
if (process.env.S3KMS) {
|
||||||
kms = process.env.S3KMS;
|
kms = process.env.S3KMS;
|
||||||
}
|
}
|
||||||
if (process.env.S3QUOTA) {
|
|
||||||
quota = process.env.S3QUOTA;
|
|
||||||
}
|
|
||||||
this.backends = {
|
this.backends = {
|
||||||
auth,
|
auth,
|
||||||
data,
|
data,
|
||||||
metadata,
|
metadata,
|
||||||
kms,
|
kms,
|
||||||
quota,
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_verifyRedisPassword(password) {
|
||||||
|
return typeof password === 'string';
|
||||||
|
}
|
||||||
|
|
||||||
setAuthDataAccounts(accounts) {
|
setAuthDataAccounts(accounts) {
|
||||||
this.authData.accounts = accounts;
|
this.authData.accounts = accounts;
|
||||||
this.emit('authdata-update');
|
this.emit('authdata-update');
|
||||||
|
@ -1861,19 +1766,10 @@ class Config extends EventEmitter {
|
||||||
.update(instanceId)
|
.update(instanceId)
|
||||||
.digest('hex');
|
.digest('hex');
|
||||||
}
|
}
|
||||||
|
|
||||||
isQuotaEnabled() {
|
|
||||||
return !!this.quotaEnabled;
|
|
||||||
}
|
|
||||||
|
|
||||||
isQuotaInflightEnabled() {
|
|
||||||
return this.quota.enableInflights;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
parseSproxydConfig,
|
parseSproxydConfig,
|
||||||
parseRedisConfig,
|
|
||||||
locationConstraintAssert,
|
locationConstraintAssert,
|
||||||
ConfigObject: Config,
|
ConfigObject: Config,
|
||||||
config: new Config(),
|
config: new Config(),
|
||||||
|
@ -1881,5 +1777,4 @@ module.exports = {
|
||||||
bucketNotifAssert,
|
bucketNotifAssert,
|
||||||
azureGetStorageAccountName,
|
azureGetStorageAccountName,
|
||||||
azureGetLocationCredentials,
|
azureGetLocationCredentials,
|
||||||
azureArchiveLocationConstraintAssert,
|
|
||||||
};
|
};
|
||||||
|
|
|
@ -7,7 +7,6 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
|
||||||
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
|
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
|
||||||
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
|
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
|
||||||
const bucketDeletePolicy = require('./bucketDeletePolicy');
|
const bucketDeletePolicy = require('./bucketDeletePolicy');
|
||||||
const bucketDeleteQuota = require('./bucketDeleteQuota');
|
|
||||||
const { bucketGet } = require('./bucketGet');
|
const { bucketGet } = require('./bucketGet');
|
||||||
const bucketGetACL = require('./bucketGetACL');
|
const bucketGetACL = require('./bucketGetACL');
|
||||||
const bucketGetCors = require('./bucketGetCors');
|
const bucketGetCors = require('./bucketGetCors');
|
||||||
|
@ -18,7 +17,6 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
|
||||||
const bucketGetNotification = require('./bucketGetNotification');
|
const bucketGetNotification = require('./bucketGetNotification');
|
||||||
const bucketGetObjectLock = require('./bucketGetObjectLock');
|
const bucketGetObjectLock = require('./bucketGetObjectLock');
|
||||||
const bucketGetPolicy = require('./bucketGetPolicy');
|
const bucketGetPolicy = require('./bucketGetPolicy');
|
||||||
const bucketGetQuota = require('./bucketGetQuota');
|
|
||||||
const bucketGetEncryption = require('./bucketGetEncryption');
|
const bucketGetEncryption = require('./bucketGetEncryption');
|
||||||
const bucketHead = require('./bucketHead');
|
const bucketHead = require('./bucketHead');
|
||||||
const { bucketPut } = require('./bucketPut');
|
const { bucketPut } = require('./bucketPut');
|
||||||
|
@ -35,7 +33,6 @@ const bucketPutNotification = require('./bucketPutNotification');
|
||||||
const bucketPutEncryption = require('./bucketPutEncryption');
|
const bucketPutEncryption = require('./bucketPutEncryption');
|
||||||
const bucketPutPolicy = require('./bucketPutPolicy');
|
const bucketPutPolicy = require('./bucketPutPolicy');
|
||||||
const bucketPutObjectLock = require('./bucketPutObjectLock');
|
const bucketPutObjectLock = require('./bucketPutObjectLock');
|
||||||
const bucketUpdateQuota = require('./bucketUpdateQuota');
|
|
||||||
const bucketGetReplication = require('./bucketGetReplication');
|
const bucketGetReplication = require('./bucketGetReplication');
|
||||||
const bucketDeleteReplication = require('./bucketDeleteReplication');
|
const bucketDeleteReplication = require('./bucketDeleteReplication');
|
||||||
const corsPreflight = require('./corsPreflight');
|
const corsPreflight = require('./corsPreflight');
|
||||||
|
@ -47,7 +44,7 @@ const metadataSearch = require('./metadataSearch');
|
||||||
const { multiObjectDelete } = require('./multiObjectDelete');
|
const { multiObjectDelete } = require('./multiObjectDelete');
|
||||||
const multipartDelete = require('./multipartDelete');
|
const multipartDelete = require('./multipartDelete');
|
||||||
const objectCopy = require('./objectCopy');
|
const objectCopy = require('./objectCopy');
|
||||||
const { objectDelete } = require('./objectDelete');
|
const objectDelete = require('./objectDelete');
|
||||||
const objectDeleteTagging = require('./objectDeleteTagging');
|
const objectDeleteTagging = require('./objectDeleteTagging');
|
||||||
const objectGet = require('./objectGet');
|
const objectGet = require('./objectGet');
|
||||||
const objectGetACL = require('./objectGetACL');
|
const objectGetACL = require('./objectGetACL');
|
||||||
|
@ -67,7 +64,8 @@ const prepareRequestContexts
|
||||||
= require('./apiUtils/authorization/prepareRequestContexts');
|
= require('./apiUtils/authorization/prepareRequestContexts');
|
||||||
const serviceGet = require('./serviceGet');
|
const serviceGet = require('./serviceGet');
|
||||||
const vault = require('../auth/vault');
|
const vault = require('../auth/vault');
|
||||||
const website = require('./website');
|
const websiteGet = require('./websiteGet');
|
||||||
|
const websiteHead = require('./websiteHead');
|
||||||
const writeContinue = require('../utilities/writeContinue');
|
const writeContinue = require('../utilities/writeContinue');
|
||||||
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
|
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
|
||||||
const parseCopySource = require('./apiUtils/object/parseCopySource');
|
const parseCopySource = require('./apiUtils/object/parseCopySource');
|
||||||
|
@ -85,10 +83,6 @@ const api = {
|
||||||
// Attach the apiMethod method to the request, so it can used by monitoring in the server
|
// Attach the apiMethod method to the request, so it can used by monitoring in the server
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
request.apiMethod = apiMethod;
|
request.apiMethod = apiMethod;
|
||||||
// Array of end of API callbacks, used to perform some logic
|
|
||||||
// at the end of an API.
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
request.finalizerHooks = [];
|
|
||||||
|
|
||||||
const actionLog = monitoringMap[apiMethod];
|
const actionLog = monitoringMap[apiMethod];
|
||||||
if (!actionLog &&
|
if (!actionLog &&
|
||||||
|
@ -123,7 +117,6 @@ const api = {
|
||||||
// no need to check auth on website or cors preflight requests
|
// no need to check auth on website or cors preflight requests
|
||||||
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
|
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
|
||||||
apiMethod === 'corsPreflight') {
|
apiMethod === 'corsPreflight') {
|
||||||
request.actionImplicitDenies = false;
|
|
||||||
return this[apiMethod](request, log, callback);
|
return this[apiMethod](request, log, callback);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -146,25 +139,15 @@ const api = {
|
||||||
|
|
||||||
const requestContexts = prepareRequestContexts(apiMethod, request,
|
const requestContexts = prepareRequestContexts(apiMethod, request,
|
||||||
sourceBucket, sourceObject, sourceVersionId);
|
sourceBucket, sourceObject, sourceVersionId);
|
||||||
// Extract all the _apiMethods and store them in an array
|
|
||||||
const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : [];
|
|
||||||
// Attach the names to the current request
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
request.apiMethods = apiMethods;
|
|
||||||
|
|
||||||
function checkAuthResults(authResults) {
|
function checkAuthResults(authResults) {
|
||||||
let returnTagCount = true;
|
let returnTagCount = true;
|
||||||
const isImplicitDeny = {};
|
|
||||||
let isOnlyImplicitDeny = true;
|
|
||||||
if (apiMethod === 'objectGet') {
|
if (apiMethod === 'objectGet') {
|
||||||
// first item checks s3:GetObject(Version) action
|
// first item checks s3:GetObject(Version) action
|
||||||
if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
|
if (!authResults[0].isAllowed) {
|
||||||
log.trace('get object authorization denial from Vault');
|
log.trace('get object authorization denial from Vault');
|
||||||
return errors.AccessDenied;
|
return errors.AccessDenied;
|
||||||
}
|
}
|
||||||
// TODO add support for returnTagCount in the bucket policy
|
|
||||||
// checks
|
|
||||||
isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
|
|
||||||
// second item checks s3:GetObject(Version)Tagging action
|
// second item checks s3:GetObject(Version)Tagging action
|
||||||
if (!authResults[1].isAllowed) {
|
if (!authResults[1].isAllowed) {
|
||||||
log.trace('get tagging authorization denial ' +
|
log.trace('get tagging authorization denial ' +
|
||||||
|
@ -173,41 +156,25 @@ const api = {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
for (let i = 0; i < authResults.length; i++) {
|
for (let i = 0; i < authResults.length; i++) {
|
||||||
isImplicitDeny[authResults[i].action] = true;
|
if (!authResults[i].isAllowed) {
|
||||||
if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
|
|
||||||
// Any explicit deny rejects the current API call
|
|
||||||
log.trace('authorization denial from Vault');
|
log.trace('authorization denial from Vault');
|
||||||
return errors.AccessDenied;
|
return errors.AccessDenied;
|
||||||
}
|
}
|
||||||
if (authResults[i].isAllowed) {
|
|
||||||
// If the action is allowed, the result is not implicit
|
|
||||||
// Deny.
|
|
||||||
isImplicitDeny[authResults[i].action] = false;
|
|
||||||
isOnlyImplicitDeny = false;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// These two APIs cannot use ACLs or Bucket Policies, hence, any
|
return returnTagCount;
|
||||||
// implicit deny from vault must be treated as an explicit deny.
|
|
||||||
if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
|
|
||||||
return errors.AccessDenied;
|
|
||||||
}
|
|
||||||
return { returnTagCount, isImplicitDeny };
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => auth.server.doAuth(
|
next => auth.server.doAuth(
|
||||||
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
|
request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
// VaultClient returns standard errors, but the route requires
|
|
||||||
// Arsenal errors
|
|
||||||
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
|
|
||||||
log.trace('authentication error', { error: err });
|
log.trace('authentication error', { error: err });
|
||||||
return next(arsenalError);
|
return next(err);
|
||||||
}
|
}
|
||||||
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
return next(null, userInfo, authorizationResults, streamingV4Params);
|
||||||
}, 's3', requestContexts),
|
}, 's3', requestContexts),
|
||||||
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
|
(userInfo, authorizationResults, streamingV4Params, next) => {
|
||||||
const authNames = { accountName: userInfo.getAccountDisplayName() };
|
const authNames = { accountName: userInfo.getAccountDisplayName() };
|
||||||
if (userInfo.isRequesterAnIAMUser()) {
|
if (userInfo.isRequesterAnIAMUser()) {
|
||||||
authNames.userName = userInfo.getIAMdisplayName();
|
authNames.userName = userInfo.getIAMdisplayName();
|
||||||
|
@ -217,7 +184,7 @@ const api = {
|
||||||
}
|
}
|
||||||
log.addDefaultFields(authNames);
|
log.addDefaultFields(authNames);
|
||||||
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
||||||
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
return next(null, userInfo, authorizationResults, streamingV4Params);
|
||||||
}
|
}
|
||||||
// issue 100 Continue to the client
|
// issue 100 Continue to the client
|
||||||
writeContinue(request, response);
|
writeContinue(request, response);
|
||||||
|
@ -248,12 +215,12 @@ const api = {
|
||||||
}
|
}
|
||||||
// Convert array of post buffers into one string
|
// Convert array of post buffers into one string
|
||||||
request.post = Buffer.concat(post, postLength).toString();
|
request.post = Buffer.concat(post, postLength).toString();
|
||||||
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
return next(null, userInfo, authorizationResults, streamingV4Params);
|
||||||
});
|
});
|
||||||
return undefined;
|
return undefined;
|
||||||
},
|
},
|
||||||
// Tag condition keys require information from CloudServer for evaluation
|
// Tag condition keys require information from CloudServer for evaluation
|
||||||
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
|
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
|
||||||
authorizationResults,
|
authorizationResults,
|
||||||
request,
|
request,
|
||||||
requestContexts,
|
requestContexts,
|
||||||
|
@ -264,47 +231,33 @@ const api = {
|
||||||
log.trace('tag authentication error', { error: err });
|
log.trace('tag authentication error', { error: err });
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
|
return next(null, userInfo, authResultsWithTags, streamingV4Params);
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
|
], (err, userInfo, authorizationResults, streamingV4Params) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return callback(err);
|
return callback(err);
|
||||||
}
|
}
|
||||||
request.accountQuotas = infos?.accountQuota;
|
|
||||||
if (authorizationResults) {
|
if (authorizationResults) {
|
||||||
const checkedResults = checkAuthResults(authorizationResults);
|
const checkedResults = checkAuthResults(authorizationResults);
|
||||||
if (checkedResults instanceof Error) {
|
if (checkedResults instanceof Error) {
|
||||||
return callback(checkedResults);
|
return callback(checkedResults);
|
||||||
}
|
}
|
||||||
returnTagCount = checkedResults.returnTagCount;
|
returnTagCount = checkedResults;
|
||||||
request.actionImplicitDenies = checkedResults.isImplicitDeny;
|
|
||||||
} else {
|
|
||||||
// create an object of keys apiMethods with all values to false:
|
|
||||||
// for backward compatibility, all apiMethods are allowed by default
|
|
||||||
// thus it is explicitly allowed, so implicit deny is false
|
|
||||||
request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
|
|
||||||
acc[curr] = false;
|
|
||||||
return acc;
|
|
||||||
}, {});
|
|
||||||
}
|
}
|
||||||
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
|
|
||||||
(hook, done) => hook(err, done),
|
|
||||||
() => callback(err, ...results));
|
|
||||||
|
|
||||||
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
||||||
request._response = response;
|
request._response = response;
|
||||||
return this[apiMethod](userInfo, request, streamingV4Params,
|
return this[apiMethod](userInfo, request, streamingV4Params,
|
||||||
log, methodCallback, authorizationResults);
|
log, callback, authorizationResults);
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
||||||
return this[apiMethod](userInfo, request, sourceBucket,
|
return this[apiMethod](userInfo, request, sourceBucket,
|
||||||
sourceObject, sourceVersionId, log, methodCallback);
|
sourceObject, sourceVersionId, log, callback);
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectGet') {
|
if (apiMethod === 'objectGet') {
|
||||||
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
|
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
|
||||||
}
|
}
|
||||||
return this[apiMethod](userInfo, request, log, methodCallback);
|
return this[apiMethod](userInfo, request, log, callback);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
bucketDelete,
|
bucketDelete,
|
||||||
|
@ -331,14 +284,11 @@ const api = {
|
||||||
bucketPutReplication,
|
bucketPutReplication,
|
||||||
bucketGetReplication,
|
bucketGetReplication,
|
||||||
bucketDeleteReplication,
|
bucketDeleteReplication,
|
||||||
bucketDeleteQuota,
|
|
||||||
bucketPutLifecycle,
|
bucketPutLifecycle,
|
||||||
bucketUpdateQuota,
|
|
||||||
bucketGetLifecycle,
|
bucketGetLifecycle,
|
||||||
bucketDeleteLifecycle,
|
bucketDeleteLifecycle,
|
||||||
bucketPutPolicy,
|
bucketPutPolicy,
|
||||||
bucketGetPolicy,
|
bucketGetPolicy,
|
||||||
bucketGetQuota,
|
|
||||||
bucketDeletePolicy,
|
bucketDeletePolicy,
|
||||||
bucketPutObjectLock,
|
bucketPutObjectLock,
|
||||||
bucketPutNotification,
|
bucketPutNotification,
|
||||||
|
@ -370,8 +320,8 @@ const api = {
|
||||||
objectPutRetention,
|
objectPutRetention,
|
||||||
objectRestore,
|
objectRestore,
|
||||||
serviceGet,
|
serviceGet,
|
||||||
websiteGet: website,
|
websiteGet,
|
||||||
websiteHead: website,
|
websiteHead,
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = api;
|
module.exports = api;
|
||||||
|
|
|
@ -1,23 +1,11 @@
|
||||||
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
|
const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
|
||||||
const { errors } = require('arsenal');
|
|
||||||
const { parseCIDR, isValid } = require('ipaddr.js');
|
|
||||||
const constants = require('../../../../constants');
|
const constants = require('../../../../constants');
|
||||||
const { config } = require('../../../Config');
|
|
||||||
|
|
||||||
const {
|
const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
|
||||||
allAuthedUsersId,
|
|
||||||
bucketOwnerActions,
|
|
||||||
logId,
|
|
||||||
publicId,
|
|
||||||
arrayOfAllowed,
|
|
||||||
assumedRoleArnResourceType,
|
|
||||||
backbeatLifecycleSessionName,
|
|
||||||
actionsToConsiderAsObjectPut,
|
|
||||||
} = constants;
|
|
||||||
|
|
||||||
// whitelist buckets to allow public read on objects
|
// whitelist buckets to allow public read on objects
|
||||||
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
|
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
|
||||||
? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
|
process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
|
||||||
|
|
||||||
function getServiceAccountProperties(canonicalID) {
|
function getServiceAccountProperties(canonicalID) {
|
||||||
const canonicalIDArray = canonicalID.split('/');
|
const canonicalIDArray = canonicalID.split('/');
|
||||||
|
@ -38,41 +26,13 @@ function isRequesterNonAccountUser(authInfo) {
|
||||||
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
|
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
function checkBucketAcls(bucket, requestType, canonicalID) {
|
||||||
* Checks the access control for a given bucket based on the request type and user's canonical ID.
|
|
||||||
*
|
|
||||||
* @param {Bucket} bucket - The bucket to check access control for.
|
|
||||||
* @param {string} requestType - The list of s3 actions to check within the API call.
|
|
||||||
* @param {string} canonicalID - The canonical ID of the user making the request.
|
|
||||||
* @param {string} mainApiCall - The main API call (first item of the requestType).
|
|
||||||
*
|
|
||||||
* @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
|
|
||||||
*/
|
|
||||||
|
|
||||||
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
|
|
||||||
// Same logic applies on the Versioned APIs, so let's simplify it.
|
|
||||||
let requestTypeParsed = requestType.endsWith('Version') ?
|
|
||||||
requestType.slice(0, 'Version'.length * -1) : requestType;
|
|
||||||
requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
|
|
||||||
'objectPut' : requestTypeParsed;
|
|
||||||
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
|
|
||||||
'objectPut' : mainApiCall;
|
|
||||||
if (bucket.getOwner() === canonicalID) {
|
if (bucket.getOwner() === canonicalID) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (parsedMainApiCall === 'objectGet') {
|
|
||||||
if (requestTypeParsed === 'objectGetTagging') {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (parsedMainApiCall === 'objectPut') {
|
|
||||||
if (arrayOfAllowed.includes(requestTypeParsed)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const bucketAcl = bucket.getAcl();
|
const bucketAcl = bucket.getAcl();
|
||||||
if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') {
|
if (requestType === 'bucketGet' || requestType === 'bucketHead') {
|
||||||
if (bucketAcl.Canned === 'public-read'
|
if (bucketAcl.Canned === 'public-read'
|
||||||
|| bucketAcl.Canned === 'public-read-write'
|
|| bucketAcl.Canned === 'public-read-write'
|
||||||
|| (bucketAcl.Canned === 'authenticated-read'
|
|| (bucketAcl.Canned === 'authenticated-read'
|
||||||
|
@ -90,7 +50,7 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (requestTypeParsed === 'bucketGetACL') {
|
if (requestType === 'bucketGetACL') {
|
||||||
if ((bucketAcl.Canned === 'log-delivery-write'
|
if ((bucketAcl.Canned === 'log-delivery-write'
|
||||||
&& canonicalID === logId)
|
&& canonicalID === logId)
|
||||||
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|
@ -106,7 +66,7 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestTypeParsed === 'bucketPutACL') {
|
if (requestType === 'bucketPutACL') {
|
||||||
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
|
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
|
||||||
return true;
|
return true;
|
||||||
|
@ -120,7 +80,11 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') {
|
if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (requestType === 'objectDelete' || requestType === 'objectPut') {
|
||||||
if (bucketAcl.Canned === 'public-read-write'
|
if (bucketAcl.Canned === 'public-read-write'
|
||||||
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) {
|
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) {
|
||||||
|
@ -140,39 +104,25 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
|
||||||
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket
|
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket
|
||||||
// authorization check should just return true so can move on to check
|
// authorization check should just return true so can move on to check
|
||||||
// rights at the object level.
|
// rights at the object level.
|
||||||
return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
|
return (requestType === 'objectPutACL' || requestType === 'objectGetACL' ||
|
||||||
|| requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
|
requestType === 'objectGet' || requestType === 'objectHead');
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
|
function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
|
||||||
isUserUnauthenticated, mainApiCall) {
|
|
||||||
const bucketOwner = bucket.getOwner();
|
const bucketOwner = bucket.getOwner();
|
||||||
const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
|
|
||||||
'objectPut' : requestType;
|
|
||||||
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
|
|
||||||
'objectPut' : mainApiCall;
|
|
||||||
// acls don't distinguish between users and accounts, so both should be allowed
|
// acls don't distinguish between users and accounts, so both should be allowed
|
||||||
if (bucketOwnerActions.includes(requestTypeParsed)
|
if (bucketOwnerActions.includes(requestType)
|
||||||
&& (bucketOwner === canonicalID)) {
|
&& (bucketOwner === canonicalID)) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (objectMD['owner-id'] === canonicalID) {
|
if (objectMD['owner-id'] === canonicalID) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Backward compatibility
|
|
||||||
if (parsedMainApiCall === 'objectGet') {
|
|
||||||
if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
|
|
||||||
&& requestTypeParsed === 'objectGetTagging') {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!objectMD.acl) {
|
if (!objectMD.acl) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
|
if (requestType === 'objectGet' || requestType === 'objectHead') {
|
||||||
if (objectMD.acl.Canned === 'public-read'
|
if (objectMD.acl.Canned === 'public-read'
|
||||||
|| objectMD.acl.Canned === 'public-read-write'
|
|| objectMD.acl.Canned === 'public-read-write'
|
||||||
|| (objectMD.acl.Canned === 'authenticated-read'
|
|| (objectMD.acl.Canned === 'authenticated-read'
|
||||||
|
@ -198,11 +148,11 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
|
||||||
|
|
||||||
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
||||||
// bucket has canned ACL public-read-write
|
// bucket has canned ACL public-read-write
|
||||||
if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
|
if (requestType === 'objectPut' || requestType === 'objectDelete') {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestTypeParsed === 'objectPutACL') {
|
if (requestType === 'objectPutACL') {
|
||||||
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
||||||
&& bucketOwner === canonicalID)
|
&& bucketOwner === canonicalID)
|
||||||
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|
@ -218,7 +168,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (requestTypeParsed === 'objectGetACL') {
|
if (requestType === 'objectGetACL') {
|
||||||
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
|
||||||
&& bucketOwner === canonicalID)
|
&& bucketOwner === canonicalID)
|
||||||
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|
||||||
|
@ -237,9 +187,9 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
|
||||||
// allow public reads on buckets that are whitelisted for anonymous reads
|
// allow public reads on buckets that are whitelisted for anonymous reads
|
||||||
// TODO: remove this after bucket policies are implemented
|
// TODO: remove this after bucket policies are implemented
|
||||||
const bucketAcl = bucket.getAcl();
|
const bucketAcl = bucket.getAcl();
|
||||||
const allowPublicReads = publicReadBuckets.includes(bucket.getName())
|
const allowPublicReads = publicReadBuckets.includes(bucket.getName()) &&
|
||||||
&& bucketAcl.Canned === 'public-read'
|
bucketAcl.Canned === 'public-read' &&
|
||||||
&& (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
|
(requestType === 'objectGet' || requestType === 'objectHead');
|
||||||
if (allowPublicReads) {
|
if (allowPublicReads) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -266,20 +216,6 @@ function _checkBucketPolicyResources(request, resource, log) {
|
||||||
return evaluators.isResourceApplicable(requestContext, resource, log);
|
return evaluators.isResourceApplicable(requestContext, resource, log);
|
||||||
}
|
}
|
||||||
|
|
||||||
function _checkBucketPolicyConditions(request, conditions, log) {
|
|
||||||
const ip = request ? requestUtils.getClientIp(request, config) : undefined;
|
|
||||||
if (!conditions) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
// build request context from the request!
|
|
||||||
const requestContext = new RequestContext(request.headers, request.query,
|
|
||||||
request.bucketName, request.objectKey, ip,
|
|
||||||
request.connection.encrypted, request.resourceType, 's3', null, null,
|
|
||||||
null, null, null, null, null, null, null, null, null,
|
|
||||||
request.objectLockRetentionDays);
|
|
||||||
return evaluators.meetConditions(requestContext, conditions, log);
|
|
||||||
}
|
|
||||||
|
|
||||||
function _getAccountId(arn) {
|
function _getAccountId(arn) {
|
||||||
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
|
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
|
||||||
return arn.substr(13, 12);
|
return arn.substr(13, 12);
|
||||||
|
@ -324,11 +260,11 @@ function _checkPrincipals(canonicalID, arn, principal) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
|
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request) {
|
||||||
let permission = 'defaultDeny';
|
let permission = 'defaultDeny';
|
||||||
// if requester is user within bucket owner account, actions should be
|
// if requester is user within bucket owner account, actions should be
|
||||||
// allowed unless explicitly denied (assumes allowed by IAM policy)
|
// allowed unless explicitly denied (assumes allowed by IAM policy)
|
||||||
if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
|
if (bucketOwner === canonicalID) {
|
||||||
permission = 'allow';
|
permission = 'allow';
|
||||||
}
|
}
|
||||||
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
|
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
|
||||||
|
@ -337,13 +273,12 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
|
||||||
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
|
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
|
||||||
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
|
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
|
||||||
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
|
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
|
||||||
const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
|
|
||||||
|
|
||||||
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
|
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Deny') {
|
||||||
// explicit deny trumps any allows, so return immediately
|
// explicit deny trumps any allows, so return immediately
|
||||||
return 'explicitDeny';
|
return 'explicitDeny';
|
||||||
}
|
}
|
||||||
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
|
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Allow') {
|
||||||
permission = 'allow';
|
permission = 'allow';
|
||||||
}
|
}
|
||||||
copiedStatement = copiedStatement.splice(1);
|
copiedStatement = copiedStatement.splice(1);
|
||||||
|
@ -351,141 +286,80 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
|
||||||
return permission;
|
return permission;
|
||||||
}
|
}
|
||||||
|
|
||||||
function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log,
|
function isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request) {
|
||||||
request, aclPermission, results, actionImplicitDenies) {
|
// Check to see if user is authorized to perform a
|
||||||
const bucketPolicy = bucket.getBucketPolicy();
|
// particular action on bucket based on ACLs.
|
||||||
let processedResult = results[requestType];
|
// TODO: Add IAM checks
|
||||||
if (!bucketPolicy) {
|
let requesterIsNotUser = true;
|
||||||
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
|
let arn = null;
|
||||||
} else {
|
if (authInfo) {
|
||||||
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
|
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
|
||||||
bucketOwner, log, request, actionImplicitDenies);
|
arn = authInfo.getArn();
|
||||||
|
|
||||||
if (bucketPolicyPermission === 'explicitDeny') {
|
|
||||||
processedResult = false;
|
|
||||||
} else if (bucketPolicyPermission === 'allow') {
|
|
||||||
processedResult = true;
|
|
||||||
} else {
|
|
||||||
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return processedResult;
|
// if the bucket owner is an account, users should not have default access
|
||||||
|
if (((bucket.getOwner() === canonicalID) && requesterIsNotUser)
|
||||||
|
|| isServiceAccount(canonicalID)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
const aclPermission = checkBucketAcls(bucket, requestType, canonicalID);
|
||||||
|
const bucketPolicy = bucket.getBucketPolicy();
|
||||||
|
if (!bucketPolicy) {
|
||||||
|
return aclPermission;
|
||||||
|
}
|
||||||
|
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
|
||||||
|
canonicalID, arn, bucket.getOwner(), log, request);
|
||||||
|
if (bucketPolicyPermission === 'explicitDeny') {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return (aclPermission || (bucketPolicyPermission === 'allow'));
|
||||||
}
|
}
|
||||||
|
|
||||||
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
|
function isObjAuthorized(bucket, objectMD, requestType, canonicalID, authInfo, log, request) {
|
||||||
actionImplicitDeniesInput = {}, isWebsite = false) {
|
const bucketOwner = bucket.getOwner();
|
||||||
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
|
if (!objectMD) {
|
||||||
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
|
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
||||||
const mainApiCall = requestTypes[0];
|
// bucket has canned ACL public-read-write
|
||||||
const results = {};
|
if (requestType === 'objectPut' || requestType === 'objectDelete') {
|
||||||
return requestTypes.every(_requestType => {
|
return true;
|
||||||
// By default, all missing actions are defined as allowed from IAM, to be
|
|
||||||
// backward compatible
|
|
||||||
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
|
|
||||||
// Check to see if user is authorized to perform a
|
|
||||||
// particular action on bucket based on ACLs.
|
|
||||||
// TODO: Add IAM checks
|
|
||||||
let requesterIsNotUser = true;
|
|
||||||
let arn = null;
|
|
||||||
if (authInfo) {
|
|
||||||
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
|
|
||||||
arn = authInfo.getArn();
|
|
||||||
}
|
}
|
||||||
// if the bucket owner is an account, users should not have default access
|
// check bucket has read access
|
||||||
if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
|
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
|
||||||
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
return isBucketAuthorized(bucket, 'bucketGet', canonicalID, authInfo, log, request);
|
||||||
return results[_requestType];
|
}
|
||||||
}
|
let requesterIsNotUser = true;
|
||||||
const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
|
let arn = null;
|
||||||
// In case of error bucket access is checked with bucketGet
|
if (authInfo) {
|
||||||
// For website, bucket policy only uses objectGet and ignores bucketGet
|
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
|
||||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
|
arn = authInfo.getArn();
|
||||||
// bucketGet should be used to check acl but switched to objectGet for bucket policy
|
}
|
||||||
if (isWebsite && _requestType === 'bucketGet') {
|
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser) {
|
||||||
// eslint-disable-next-line no-param-reassign
|
return true;
|
||||||
_requestType = 'objectGet';
|
}
|
||||||
actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
|
|
||||||
}
|
|
||||||
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
|
|
||||||
request, aclPermission, results, actionImplicitDenies);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {},
|
if (isServiceAccount(canonicalID)) {
|
||||||
log, request) {
|
return true;
|
||||||
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
|
}
|
||||||
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
|
// account is authorized if:
|
||||||
const results = {};
|
// - requesttype is included in bucketOwnerActions and
|
||||||
return requestTypes.every(_requestType => {
|
// - account is the bucket owner
|
||||||
// By default, all missing actions are defined as allowed from IAM, to be
|
// - requester is account, not user
|
||||||
// backward compatible
|
if (bucketOwnerActions.includes(requestType)
|
||||||
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
|
|
||||||
let arn = null;
|
|
||||||
if (authInfo) {
|
|
||||||
arn = authInfo.getArn();
|
|
||||||
}
|
|
||||||
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
|
|
||||||
request, true, results, actionImplicitDenies);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
|
|
||||||
actionImplicitDeniesInput = {}, isWebsite = false) {
|
|
||||||
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
|
|
||||||
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
|
|
||||||
const results = {};
|
|
||||||
const mainApiCall = requestTypes[0];
|
|
||||||
return requestTypes.every(_requestType => {
|
|
||||||
// By default, all missing actions are defined as allowed from IAM, to be
|
|
||||||
// backward compatible
|
|
||||||
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
|
|
||||||
const parsedMethodName = _requestType.endsWith('Version')
|
|
||||||
? _requestType.slice(0, -7) : _requestType;
|
|
||||||
const bucketOwner = bucket.getOwner();
|
|
||||||
if (!objectMD) {
|
|
||||||
// check bucket has read access
|
|
||||||
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
|
|
||||||
let permission = 'bucketGet';
|
|
||||||
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
|
|
||||||
permission = 'objectPut';
|
|
||||||
}
|
|
||||||
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
|
|
||||||
actionImplicitDenies, isWebsite);
|
|
||||||
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
|
|
||||||
// bucket has canned ACL public-read-write
|
|
||||||
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
|
|
||||||
&& results[_requestType] === false) {
|
|
||||||
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
|
||||||
}
|
|
||||||
return results[_requestType];
|
|
||||||
}
|
|
||||||
let requesterIsNotUser = true;
|
|
||||||
let arn = null;
|
|
||||||
let isUserUnauthenticated = false;
|
|
||||||
if (authInfo) {
|
|
||||||
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
|
|
||||||
arn = authInfo.getArn();
|
|
||||||
isUserUnauthenticated = arn === undefined;
|
|
||||||
}
|
|
||||||
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
|
|
||||||
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
|
||||||
return results[_requestType];
|
|
||||||
}
|
|
||||||
// account is authorized if:
|
|
||||||
// - requesttype is included in bucketOwnerActions and
|
|
||||||
// - account is the bucket owner
|
|
||||||
// - requester is account, not user
|
|
||||||
if (bucketOwnerActions.includes(parsedMethodName)
|
|
||||||
&& (bucketOwner === canonicalID)
|
&& (bucketOwner === canonicalID)
|
||||||
&& requesterIsNotUser) {
|
&& requesterIsNotUser) {
|
||||||
results[_requestType] = actionImplicitDenies[_requestType] === false;
|
return true;
|
||||||
return results[_requestType];
|
}
|
||||||
}
|
const aclPermission = checkObjectAcls(bucket, objectMD, requestType,
|
||||||
const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName,
|
canonicalID);
|
||||||
canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall);
|
const bucketPolicy = bucket.getBucketPolicy();
|
||||||
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner,
|
if (!bucketPolicy) {
|
||||||
log, request, aclPermission, results, actionImplicitDenies);
|
return aclPermission;
|
||||||
});
|
}
|
||||||
|
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
|
||||||
|
canonicalID, arn, bucket.getOwner(), log, request);
|
||||||
|
if (bucketPolicyPermission === 'explicitDeny') {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return (aclPermission || (bucketPolicyPermission === 'allow'));
|
||||||
}
|
}
|
||||||
|
|
||||||
function _checkResource(resource, bucketArn) {
|
function _checkResource(resource, bucketArn) {
|
||||||
|
@ -514,117 +388,6 @@ function validatePolicyResource(bucketName, policy) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkIp(value) {
|
|
||||||
const errString = 'Invalid IP address in Conditions';
|
|
||||||
|
|
||||||
const values = Array.isArray(value) ? value : [value];
|
|
||||||
|
|
||||||
for (let i = 0; i < values.length; i++) {
|
|
||||||
// these preliminary checks are validating the provided
|
|
||||||
// ip address against ipaddr.js, the library we use when
|
|
||||||
// evaluating IP condition keys. It ensures compatibility,
|
|
||||||
// but additional checks are required to enforce the right
|
|
||||||
// notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
|
|
||||||
// we would accept different ip formats, which is not
|
|
||||||
// standard in an AWS use case.
|
|
||||||
try {
|
|
||||||
try {
|
|
||||||
parseCIDR(values[i]);
|
|
||||||
} catch (err) {
|
|
||||||
isValid(values[i]);
|
|
||||||
}
|
|
||||||
} catch (err) {
|
|
||||||
return errString;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Apply the existing IP validation logic to each element
|
|
||||||
const validateIpRegex = ip => {
|
|
||||||
if (constants.ipv4Regex.test(ip)) {
|
|
||||||
return ip.split('.').every(part => parseInt(part, 10) <= 255);
|
|
||||||
}
|
|
||||||
if (constants.ipv6Regex.test(ip)) {
|
|
||||||
return ip.split(':').every(part => part.length <= 4);
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
};
|
|
||||||
|
|
||||||
if (validateIpRegex(values[i]) !== true) {
|
|
||||||
return errString;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the function hasn't returned by now, all elements are valid
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function checks all bucket policy conditions if the values provided
|
|
||||||
// are valid for the condition type. If not it returns a relevant Malformed policy error string
|
|
||||||
function validatePolicyConditions(policy) {
|
|
||||||
const validConditions = [
|
|
||||||
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
|
|
||||||
{ conditionKey: 's3:object-lock-remaining-retention-days' },
|
|
||||||
];
|
|
||||||
// keys where value type does not seem to be checked by AWS:
|
|
||||||
// - s3:object-lock-remaining-retention-days
|
|
||||||
|
|
||||||
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// there can be multiple statements in the policy, each with a Condition enclosure
|
|
||||||
for (let i = 0; i < policy.Statement.length; i++) {
|
|
||||||
const s = policy.Statement[i];
|
|
||||||
if (s.Condition) {
|
|
||||||
const conditionOperators = Object.keys(s.Condition);
|
|
||||||
// there can be multiple condition operations in the Condition enclosure
|
|
||||||
// eslint-disable-next-line no-restricted-syntax
|
|
||||||
for (const conditionOperator of conditionOperators) {
|
|
||||||
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
|
|
||||||
const conditionValue = s.Condition[conditionOperator][conditionKey];
|
|
||||||
const validCondition = validConditions.find(validCondition =>
|
|
||||||
validCondition.conditionKey === conditionKey
|
|
||||||
);
|
|
||||||
// AWS returns does not return an error if the condition starts with 'aws:'
|
|
||||||
// so we reproduce this behaviour
|
|
||||||
if (!validCondition && !conditionKey.startsWith('aws:')) {
|
|
||||||
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
|
|
||||||
}
|
|
||||||
if (validCondition && validCondition.conditionValueTypeChecker) {
|
|
||||||
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
|
|
||||||
if (conditionValueTypeError) {
|
|
||||||
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
|
|
||||||
* @param {string} arn - Amazon resource name - example:
|
|
||||||
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
|
|
||||||
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
|
|
||||||
*/
|
|
||||||
function isLifecycleSession(arn) {
|
|
||||||
if (!arn) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
const arnSplits = arn.split(':');
|
|
||||||
const service = arnSplits[2];
|
|
||||||
|
|
||||||
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
|
|
||||||
|
|
||||||
const resourceType = resourceNames[0];
|
|
||||||
const sessionName = resourceNames[resourceNames.length - 1];
|
|
||||||
|
|
||||||
return (service === 'sts'
|
|
||||||
&& resourceType === assumedRoleArnResourceType
|
|
||||||
&& sessionName === backbeatLifecycleSessionName);
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
isBucketAuthorized,
|
isBucketAuthorized,
|
||||||
isObjAuthorized,
|
isObjAuthorized,
|
||||||
|
@ -635,7 +398,4 @@ module.exports = {
|
||||||
checkBucketAcls,
|
checkBucketAcls,
|
||||||
checkObjectAcls,
|
checkObjectAcls,
|
||||||
validatePolicyResource,
|
validatePolicyResource,
|
||||||
validatePolicyConditions,
|
|
||||||
isLifecycleSession,
|
|
||||||
evaluateBucketPolicyWithIAM,
|
|
||||||
};
|
};
|
||||||
|
|
|
@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
apiMethod, 's3');
|
apiMethod, 's3');
|
||||||
}
|
}
|
||||||
|
|
||||||
if (apiMethod === 'bucketPut') {
|
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|
||||||
|
|
||||||
const requestContexts = [];
|
const requestContexts = [];
|
||||||
|
|
||||||
if (apiMethod === 'multiObjectDelete') {
|
if (apiMethodAfterVersionCheck === 'objectCopy'
|
||||||
// MultiObjectDelete does not require any authorization when evaluating
|
|
||||||
// the API. Instead, we authorize each object passed.
|
|
||||||
// But in order to get any relevant information from the authorization service
|
|
||||||
// for example, the account quota, we must send a request context object
|
|
||||||
// with no `specificResource`. We expect the result to be an implicit deny.
|
|
||||||
// In the API, we then ignore these authorization results, and we can use
|
|
||||||
// any information returned, e.g., the quota.
|
|
||||||
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
|
|
||||||
requestContexts.push(requestContextMultiObjectDelete);
|
|
||||||
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|
|
||||||
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
|
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
|
||||||
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
|
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
|
||||||
'objectGet';
|
'objectGet';
|
||||||
|
|
|
@ -24,7 +24,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) {
|
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
|
||||||
async.mapLimit(mpus, 1, (mpu, next) => {
|
async.mapLimit(mpus, 1, (mpu, next) => {
|
||||||
const splitterChar = mpu.key.includes(oldSplitter) ?
|
const splitterChar = mpu.key.includes(oldSplitter) ?
|
||||||
oldSplitter : splitter;
|
oldSplitter : splitter;
|
||||||
|
@ -40,7 +40,7 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log,
|
||||||
byteLength: partSizeSum,
|
byteLength: partSizeSum,
|
||||||
});
|
});
|
||||||
next(err);
|
next(err);
|
||||||
}, request);
|
});
|
||||||
}, cb);
|
}, cb);
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
|
@ -49,13 +49,11 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log,
|
||||||
* @param {object} bucketMD - bucket attributes/metadata
|
* @param {object} bucketMD - bucket attributes/metadata
|
||||||
* @param {string} bucketName - bucket in which objectMetadata is stored
|
* @param {string} bucketName - bucket in which objectMetadata is stored
|
||||||
* @param {string} canonicalID - account canonicalID of requester
|
* @param {string} canonicalID - account canonicalID of requester
|
||||||
* @param {object} request - request object given by router
|
|
||||||
* including normalized headers
|
|
||||||
* @param {object} log - Werelogs logger
|
* @param {object} log - Werelogs logger
|
||||||
* @param {function} cb - callback from async.waterfall in bucketDelete
|
* @param {function} cb - callback from async.waterfall in bucketDelete
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) {
|
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
|
||||||
log.trace('deleting bucket from metadata');
|
log.trace('deleting bucket from metadata');
|
||||||
assert.strictEqual(typeof bucketName, 'string');
|
assert.strictEqual(typeof bucketName, 'string');
|
||||||
assert.strictEqual(typeof canonicalID, 'string');
|
assert.strictEqual(typeof canonicalID, 'string');
|
||||||
|
@ -102,7 +100,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
|
||||||
}
|
}
|
||||||
if (objectsListRes.Contents.length) {
|
if (objectsListRes.Contents.length) {
|
||||||
return _deleteOngoingMPUs(authInfo, bucketName,
|
return _deleteOngoingMPUs(authInfo, bucketName,
|
||||||
bucketMD, objectsListRes.Contents, request, log, err => {
|
bucketMD, objectsListRes.Contents, log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,9 +30,6 @@ function bucketShield(bucket, requestType) {
|
||||||
// Otherwise return an error to the client
|
// Otherwise return an error to the client
|
||||||
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
|
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
|
||||||
(requestType !== 'objectPut' &&
|
(requestType !== 'objectPut' &&
|
||||||
requestType !== 'initiateMultipartUpload' &&
|
|
||||||
requestType !== 'objectPutPart' &&
|
|
||||||
requestType !== 'completeMultipartUpload' &&
|
|
||||||
requestType !== 'bucketPutACL' &&
|
requestType !== 'bucketPutACL' &&
|
||||||
requestType !== 'bucketDelete')) {
|
requestType !== 'bucketDelete')) {
|
||||||
return true;
|
return true;
|
||||||
|
|
|
@ -3,7 +3,7 @@ const async = require('async');
|
||||||
const constants = require('../../../../constants');
|
const constants = require('../../../../constants');
|
||||||
const { data } = require('../../../data/wrapper');
|
const { data } = require('../../../data/wrapper');
|
||||||
const locationConstraintCheck = require('../object/locationConstraintCheck');
|
const locationConstraintCheck = require('../object/locationConstraintCheck');
|
||||||
const { standardMetadataValidateBucketAndObj } =
|
const { metadataValidateBucketAndObj } =
|
||||||
require('../../../metadata/metadataUtils');
|
require('../../../metadata/metadataUtils');
|
||||||
const services = require('../../../services');
|
const services = require('../../../services');
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
uploadId,
|
uploadId,
|
||||||
preciseRequestType: request.apiMethods || 'multipartDelete',
|
preciseRequestType: 'multipartDelete',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
// For validating the request at the destinationBucket level
|
// For validating the request at the destinationBucket level
|
||||||
|
@ -22,11 +22,10 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
|
||||||
// but the requestType is the more general 'objectDelete'
|
// but the requestType is the more general 'objectDelete'
|
||||||
const metadataValParams = Object.assign({}, metadataValMPUparams);
|
const metadataValParams = Object.assign({}, metadataValMPUparams);
|
||||||
metadataValParams.requestType = 'objectPut';
|
metadataValParams.requestType = 'objectPut';
|
||||||
const authzIdentityResult = request ? request.actionImplicitDenies : false;
|
|
||||||
|
|
||||||
async.waterfall([
|
async.waterfall([
|
||||||
function checkDestBucketVal(next) {
|
function checkDestBucketVal(next) {
|
||||||
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
|
metadataValidateBucketAndObj(metadataValParams, log,
|
||||||
(err, destinationBucket) => {
|
(err, destinationBucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destinationBucket);
|
return next(err, destinationBucket);
|
||||||
|
@ -57,14 +56,9 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
|
||||||
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
|
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
|
||||||
next) {
|
next) {
|
||||||
const location = mpuOverviewObj.controllingLocationConstraint;
|
const location = mpuOverviewObj.controllingLocationConstraint;
|
||||||
const originalIdentityAuthzResults = request.actionImplicitDenies;
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
delete request.actionImplicitDenies;
|
|
||||||
return data.abortMPU(objectKey, uploadId, location, bucketName,
|
return data.abortMPU(objectKey, uploadId, location, bucketName,
|
||||||
request, destBucket, locationConstraintCheck, log,
|
request, destBucket, locationConstraintCheck, log,
|
||||||
(err, skipDataDelete) => {
|
(err, skipDataDelete) => {
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
request.actionImplicitDenies = originalIdentityAuthzResults;
|
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destBucket);
|
return next(err, destBucket);
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,13 +2,11 @@
|
||||||
* Code based on Yutaka Oishi (Fujifilm) contributions
|
* Code based on Yutaka Oishi (Fujifilm) contributions
|
||||||
* Date: 11 Sep 2020
|
* Date: 11 Sep 2020
|
||||||
*/
|
*/
|
||||||
const { ObjectMDArchive } = require('arsenal').models;
|
const ObjectMDArchive = require('arsenal').models.ObjectMDArchive;
|
||||||
const errors = require('arsenal').errors;
|
const errors = require('arsenal').errors;
|
||||||
const { config } = require('../../../Config');
|
const { config } = require('../../../Config');
|
||||||
const { locationConstraints } = config;
|
const { locationConstraints } = config;
|
||||||
|
|
||||||
const { scaledMsPerDay } = config.getTimeOptions();
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get response header "x-amz-restore"
|
* Get response header "x-amz-restore"
|
||||||
* Be called by objectHead.js
|
* Be called by objectHead.js
|
||||||
|
@ -34,6 +32,7 @@ function getAmzRestoreResHeader(objMD) {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if restore can be done.
|
* Check if restore can be done.
|
||||||
*
|
*
|
||||||
|
@ -42,23 +41,6 @@ function getAmzRestoreResHeader(objMD) {
|
||||||
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
|
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
|
||||||
*/
|
*/
|
||||||
function _validateStartRestore(objectMD, log) {
|
function _validateStartRestore(objectMD, log) {
|
||||||
if (objectMD.archive?.restoreCompletedAt) {
|
|
||||||
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
|
|
||||||
// return InvalidObjectState error if the restored object is expired
|
|
||||||
// but restore info md of this object has not yet been cleared
|
|
||||||
log.debug('The restored object already expired.',
|
|
||||||
{
|
|
||||||
archive: objectMD.archive,
|
|
||||||
method: '_validateStartRestore',
|
|
||||||
});
|
|
||||||
return errors.InvalidObjectState;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If object is already restored, no further check is needed
|
|
||||||
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
|
|
||||||
// been reset.
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
|
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
|
||||||
if (!isLocationCold) {
|
if (!isLocationCold) {
|
||||||
// return InvalidObjectState error if the object is not in cold storage,
|
// return InvalidObjectState error if the object is not in cold storage,
|
||||||
|
@ -70,7 +52,18 @@ function _validateStartRestore(objectMD, log) {
|
||||||
});
|
});
|
||||||
return errors.InvalidObjectState;
|
return errors.InvalidObjectState;
|
||||||
}
|
}
|
||||||
if (objectMD.archive?.restoreRequestedAt) {
|
if (objectMD.archive?.restoreCompletedAt
|
||||||
|
&& new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
|
||||||
|
// return InvalidObjectState error if the restored object is expired
|
||||||
|
// but restore info md of this object has not yet been cleared
|
||||||
|
log.debug('The restored object already expired.',
|
||||||
|
{
|
||||||
|
archive: objectMD.archive,
|
||||||
|
method: '_validateStartRestore',
|
||||||
|
});
|
||||||
|
return errors.InvalidObjectState;
|
||||||
|
}
|
||||||
|
if (objectMD.archive?.restoreRequestedAt && !objectMD.archive?.restoreCompletedAt) {
|
||||||
// return RestoreAlreadyInProgress error if the object is currently being restored
|
// return RestoreAlreadyInProgress error if the object is currently being restored
|
||||||
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
|
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
|
||||||
log.debug('The object is currently being restored.',
|
log.debug('The object is currently being restored.',
|
||||||
|
@ -127,36 +120,22 @@ function validatePutVersionId(objMD, versionId, log) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if the object is already restored, and update the expiration date accordingly:
|
* Check if the object is already restored
|
||||||
* > After restoring an archived object, you can update the restoration period by reissuing the
|
|
||||||
* > request with a new period. Amazon S3 updates the restoration period relative to the current
|
|
||||||
* > time.
|
|
||||||
*
|
*
|
||||||
* @param {ObjectMD} objectMD - object metadata
|
* @param {ObjectMD} objectMD - object metadata
|
||||||
* @param {object} log - werelogs logger
|
* @param {object} log - werelogs logger
|
||||||
* @return {boolean} - true if the object is already restored
|
* @return {boolean} - true if the object is already restored
|
||||||
*/
|
*/
|
||||||
function _updateObjectExpirationDate(objectMD, log) {
|
function isObjectAlreadyRestored(objectMD, log) {
|
||||||
// Check if restoreCompletedAt field exists
|
// check if restoreCompletedAt field exists
|
||||||
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
|
// and archive.restoreWillExpireAt > current time
|
||||||
// checked earlier in the process, so checking again here would create weird states
|
const isObjectAlreadyRestored = objectMD.archive?.restoreCompletedAt
|
||||||
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
|
&& new Date(objectMD.archive?.restoreWillExpireAt) >= new Date(Date.now());
|
||||||
log.debug('The restore status of the object.', {
|
log.debug('The restore status of the object.',
|
||||||
isObjectAlreadyRestored,
|
{
|
||||||
method: 'isObjectAlreadyRestored'
|
isObjectAlreadyRestored,
|
||||||
});
|
method: 'isObjectAlreadyRestored'
|
||||||
if (isObjectAlreadyRestored) {
|
});
|
||||||
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
|
|
||||||
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
|
|
||||||
|
|
||||||
/* eslint-disable no-param-reassign */
|
|
||||||
objectMD.archive.restoreWillExpireAt = expiryDate;
|
|
||||||
objectMD['x-amz-restore'] = {
|
|
||||||
'ongoing-request': false,
|
|
||||||
'expiry-date': expiryDate,
|
|
||||||
};
|
|
||||||
/* eslint-enable no-param-reassign */
|
|
||||||
}
|
|
||||||
return isObjectAlreadyRestored;
|
return isObjectAlreadyRestored;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -216,32 +195,12 @@ function startRestore(objectMD, restoreParam, log, cb) {
|
||||||
if (updateResultError) {
|
if (updateResultError) {
|
||||||
return cb(updateResultError);
|
return cb(updateResultError);
|
||||||
}
|
}
|
||||||
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
|
return cb(null, isObjectAlreadyRestored(objectMD, log));
|
||||||
return cb(null, isObjectAlreadyRestored);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* checks if object data is available or if it's in cold storage
|
|
||||||
* @param {ObjectMD} objMD Object metadata
|
|
||||||
* @returns {ArsenalError|null} error if object data is not available
|
|
||||||
*/
|
|
||||||
function verifyColdObjectAvailable(objMD) {
|
|
||||||
// return error when object is cold
|
|
||||||
if (objMD.archive &&
|
|
||||||
// Object is in cold backend
|
|
||||||
(!objMD.archive.restoreRequestedAt ||
|
|
||||||
// Object is being restored
|
|
||||||
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
|
|
||||||
const err = errors.InvalidObjectState
|
|
||||||
.customizeDescription('The operation is not valid for the object\'s storage class');
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
startRestore,
|
startRestore,
|
||||||
getAmzRestoreResHeader,
|
getAmzRestoreResHeader,
|
||||||
validatePutVersionId,
|
validatePutVersionId,
|
||||||
verifyColdObjectAvailable,
|
|
||||||
};
|
};
|
||||||
|
|
|
@ -5,6 +5,7 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
|
||||||
const constants = require('../../../../constants');
|
const constants = require('../../../../constants');
|
||||||
const { data } = require('../../../data/wrapper');
|
const { data } = require('../../../data/wrapper');
|
||||||
const services = require('../../../services');
|
const services = require('../../../services');
|
||||||
|
const logger = require('../../../utilities/logger');
|
||||||
const { dataStore } = require('./storeObject');
|
const { dataStore } = require('./storeObject');
|
||||||
const locationConstraintCheck = require('./locationConstraintCheck');
|
const locationConstraintCheck = require('./locationConstraintCheck');
|
||||||
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
|
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
|
||||||
|
@ -20,7 +21,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
|
||||||
'a versioned object to a location-constraint of type Azure or GCP.';
|
'a versioned object to a location-constraint of type Azure or GCP.';
|
||||||
|
|
||||||
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
metadataStoreParams, dataToDelete, log, requestMethod, callback) {
|
metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
|
||||||
services.metadataStoreObject(bucketName, dataGetInfo,
|
services.metadataStoreObject(bucketName, dataGetInfo,
|
||||||
cipherBundle, metadataStoreParams, (err, result) => {
|
cipherBundle, metadataStoreParams, (err, result) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -30,7 +31,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
const newDataStoreName = Array.isArray(dataGetInfo) ?
|
const newDataStoreName = Array.isArray(dataGetInfo) ?
|
||||||
dataGetInfo[0].dataStoreName : null;
|
dataGetInfo[0].dataStoreName : null;
|
||||||
return data.batchDelete(dataToDelete, requestMethod,
|
return data.batchDelete(dataToDelete, requestMethod,
|
||||||
newDataStoreName, log, err => callback(err, result));
|
newDataStoreName, deleteLog, err => callback(err, result));
|
||||||
}
|
}
|
||||||
return callback(null, result);
|
return callback(null, result);
|
||||||
});
|
});
|
||||||
|
@ -50,9 +51,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
|
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
|
||||||
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
|
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
|
||||||
* credentialScope (to be used for streaming v4 auth if applicable)
|
* credentialScope (to be used for streaming v4 auth if applicable)
|
||||||
* @param {(object|null)} overheadField - fields to be included in metadata overhead
|
|
||||||
* @param {RequestLogger} log - logger instance
|
* @param {RequestLogger} log - logger instance
|
||||||
* @param {string} originOp - Origin operation
|
|
||||||
* @param {function} callback - callback function
|
* @param {function} callback - callback function
|
||||||
* @return {undefined} and call callback with (err, result) -
|
* @return {undefined} and call callback with (err, result) -
|
||||||
* result.contentMD5 - content md5 of new object or version
|
* result.contentMD5 - content md5 of new object or version
|
||||||
|
@ -60,7 +59,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
*/
|
*/
|
||||||
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
||||||
overheadField, log, originOp, callback) {
|
log, callback) {
|
||||||
const putVersionId = request.headers['x-scal-s3-version-id'];
|
const putVersionId = request.headers['x-scal-s3-version-id'];
|
||||||
const isPutVersion = putVersionId || putVersionId === '';
|
const isPutVersion = putVersionId || putVersionId === '';
|
||||||
|
|
||||||
|
@ -116,7 +115,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
isDeleteMarker,
|
isDeleteMarker,
|
||||||
replicationInfo: getReplicationInfo(
|
replicationInfo: getReplicationInfo(
|
||||||
objectKey, bucketMD, false, size, null, null, authInfo),
|
objectKey, bucketMD, false, size, null, null, authInfo),
|
||||||
overheadField,
|
|
||||||
log,
|
log,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -143,7 +141,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
removeAWSChunked(request.headers['content-encoding']);
|
removeAWSChunked(request.headers['content-encoding']);
|
||||||
metadataStoreParams.expires = request.headers.expires;
|
metadataStoreParams.expires = request.headers.expires;
|
||||||
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
|
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
|
||||||
metadataStoreParams.originOp = originOp;
|
metadataStoreParams.originOp = 's3:ObjectCreated:Put';
|
||||||
const defaultObjectLockConfiguration
|
const defaultObjectLockConfiguration
|
||||||
= bucketMD.getObjectLockConfiguration();
|
= bucketMD.getObjectLockConfiguration();
|
||||||
if (defaultObjectLockConfiguration) {
|
if (defaultObjectLockConfiguration) {
|
||||||
|
@ -158,7 +156,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
request.headers[constants.objectLocationConstraintHeader] =
|
request.headers[constants.objectLocationConstraintHeader] =
|
||||||
objMD[constants.objectLocationConstraintHeader];
|
objMD[constants.objectLocationConstraintHeader];
|
||||||
metadataStoreParams.originOp = originOp;
|
metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated';
|
||||||
}
|
}
|
||||||
|
|
||||||
const backendInfoObj =
|
const backendInfoObj =
|
||||||
|
@ -197,9 +195,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
const dontSkipBackend = externalBackends;
|
const dontSkipBackend = externalBackends;
|
||||||
/* eslint-enable camelcase */
|
/* eslint-enable camelcase */
|
||||||
|
|
||||||
|
const requestLogger =
|
||||||
|
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
|
||||||
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
|
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
|
||||||
const mdOnlySize = request.headers['x-amz-meta-size'];
|
const mdOnlySize = request.headers['x-amz-meta-size'];
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
function storeData(next) {
|
function storeData(next) {
|
||||||
if (size === 0) {
|
if (size === 0) {
|
||||||
|
@ -288,13 +287,11 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
metadataStoreParams.versionId = options.versionId;
|
metadataStoreParams.versionId = options.versionId;
|
||||||
metadataStoreParams.versioning = options.versioning;
|
metadataStoreParams.versioning = options.versioning;
|
||||||
metadataStoreParams.isNull = options.isNull;
|
metadataStoreParams.isNull = options.isNull;
|
||||||
metadataStoreParams.deleteNullKey = options.deleteNullKey;
|
metadataStoreParams.nullVersionId = options.nullVersionId;
|
||||||
if (options.extraMD) {
|
metadataStoreParams.nullUploadId = options.nullUploadId;
|
||||||
Object.assign(metadataStoreParams, options.extraMD);
|
|
||||||
}
|
|
||||||
return _storeInMDandDeleteData(bucketName, infoArr,
|
return _storeInMDandDeleteData(bucketName, infoArr,
|
||||||
cipherBundle, metadataStoreParams,
|
cipherBundle, metadataStoreParams,
|
||||||
options.dataToDelete, log, requestMethod, next);
|
options.dataToDelete, requestLogger, requestMethod, next);
|
||||||
},
|
},
|
||||||
], callback);
|
], callback);
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,18 +0,0 @@
|
||||||
/**
|
|
||||||
* _bucketRequiresOplogUpdate - DELETE an object from a bucket
|
|
||||||
* @param {BucketInfo} bucket - bucket object
|
|
||||||
* @return {boolean} whether objects require oplog updates on deletion, or not
|
|
||||||
*/
|
|
||||||
function _bucketRequiresOplogUpdate(bucket) {
|
|
||||||
// Default behavior is to require an oplog update
|
|
||||||
if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
// If the bucket has lifecycle configuration or notification configuration
|
|
||||||
// set, we also require an oplog update
|
|
||||||
return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
_bucketRequiresOplogUpdate,
|
|
||||||
};
|
|
|
@ -4,25 +4,23 @@ const {
|
||||||
LifecycleDateTime,
|
LifecycleDateTime,
|
||||||
LifecycleUtils,
|
LifecycleUtils,
|
||||||
} = require('arsenal').s3middleware.lifecycleHelpers;
|
} = require('arsenal').s3middleware.lifecycleHelpers;
|
||||||
const { config } = require('../../../Config');
|
|
||||||
|
|
||||||
const {
|
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
|
||||||
expireOneDayEarlier,
|
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
|
||||||
transitionOneDayEarlier,
|
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
|
||||||
timeProgressionFactor,
|
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
|
||||||
scaledMsPerDay,
|
|
||||||
} = config.getTimeOptions();
|
|
||||||
|
|
||||||
const lifecycleDateTime = new LifecycleDateTime({
|
const lifecycleDateTime = new LifecycleDateTime({
|
||||||
transitionOneDayEarlier,
|
transitionOneDayEarlier,
|
||||||
expireOneDayEarlier,
|
expireOneDayEarlier,
|
||||||
timeProgressionFactor,
|
|
||||||
});
|
});
|
||||||
|
|
||||||
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
|
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
|
||||||
|
|
||||||
|
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
|
||||||
|
|
||||||
function calculateDate(objDate, expDays, datetime) {
|
function calculateDate(objDate, expDays, datetime) {
|
||||||
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
|
return new Date(datetime.getTimestamp(objDate) + expDays * oneDay);
|
||||||
}
|
}
|
||||||
|
|
||||||
function formatExpirationHeader(date, id) {
|
function formatExpirationHeader(date, id) {
|
||||||
|
@ -39,10 +37,8 @@ const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id';
|
||||||
|
|
||||||
function _generateExpHeadersObjects(rules, params, datetime) {
|
function _generateExpHeadersObjects(rules, params, datetime) {
|
||||||
const tags = {
|
const tags = {
|
||||||
TagSet: params.tags
|
TagSet: Object.keys(params.tags)
|
||||||
? Object.keys(params.tags)
|
.map(key => ({ Key: key, Value: params.tags[key] })),
|
||||||
.map(key => ({ Key: key, Value: params.tags[key] }))
|
|
||||||
: [],
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const objectInfo = { Key: params.key };
|
const objectInfo = { Key: params.key };
|
||||||
|
|
|
@ -1,8 +1,9 @@
|
||||||
const { versioning } = require('arsenal');
|
const { versioning } = require('arsenal');
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
|
|
||||||
const { lifecycleListing } = require('../../../../constants');
|
const CURRENT_TYPE = 'current';
|
||||||
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = lifecycleListing;
|
const NON_CURRENT_TYPE = 'noncurrent';
|
||||||
|
const ORPHAN_TYPE = 'orphan';
|
||||||
|
|
||||||
function _makeTags(tags) {
|
function _makeTags(tags) {
|
||||||
const res = [];
|
const res = [];
|
||||||
|
@ -16,16 +17,15 @@ function _makeTags(tags) {
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
function processCurrents(bucketName, listParams, isBucketVersioned, list) {
|
function processCurrents(bucketName, listParams, list) {
|
||||||
const data = {
|
const data = {
|
||||||
Name: bucketName,
|
Name: bucketName,
|
||||||
Prefix: listParams.prefix,
|
Prefix: listParams.prefix,
|
||||||
MaxKeys: listParams.maxKeys,
|
MaxKeys: listParams.maxKeys,
|
||||||
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
|
|
||||||
IsTruncated: !!list.IsTruncated,
|
IsTruncated: !!list.IsTruncated,
|
||||||
Marker: listParams.marker,
|
KeyMarker: listParams.marker,
|
||||||
BeforeDate: listParams.beforeDate,
|
BeforeDate: listParams.beforeDate,
|
||||||
NextMarker: list.NextMarker,
|
NextKeyMarker: list.NextKeyMarker,
|
||||||
Contents: [],
|
Contents: [],
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -35,50 +35,39 @@ function processCurrents(bucketName, listParams, isBucketVersioned, list) {
|
||||||
const content = {
|
const content = {
|
||||||
Key: item.key,
|
Key: item.key,
|
||||||
LastModified: v.LastModified,
|
LastModified: v.LastModified,
|
||||||
ETag: `"${v.ETag}"`,
|
Etag: v.ETag,
|
||||||
Size: v.Size,
|
Size: v.Size,
|
||||||
Owner: {
|
Owner: {
|
||||||
ID: v.Owner.ID,
|
ID: v.Owner.ID,
|
||||||
DisplayName: v.Owner.DisplayName,
|
DisplayName: v.Owner.DisplayName
|
||||||
},
|
},
|
||||||
StorageClass: v.StorageClass,
|
StorageClass: v.StorageClass,
|
||||||
TagSet: _makeTags(v.tags),
|
TagSet: _makeTags(v.tags),
|
||||||
IsLatest: true, // for compatibility with AWS ListObjectVersions.
|
IsLatest: true, // for compatibily
|
||||||
DataStoreName: v.dataStoreName,
|
DataStoreName: v.dataStoreName,
|
||||||
ListType: CURRENT_TYPE,
|
ListType: CURRENT_TYPE,
|
||||||
};
|
};
|
||||||
|
|
||||||
// NOTE: The current versions listed to be lifecycle should include version id
|
|
||||||
// if the bucket is versioned.
|
|
||||||
if (isBucketVersioned) {
|
|
||||||
const versionId = (v.IsNull || v.VersionId === undefined) ?
|
|
||||||
'null' : versionIdUtils.encode(v.VersionId);
|
|
||||||
content.VersionId = versionId;
|
|
||||||
}
|
|
||||||
|
|
||||||
data.Contents.push(content);
|
data.Contents.push(content);
|
||||||
});
|
});
|
||||||
|
|
||||||
return data;
|
return data;
|
||||||
}
|
}
|
||||||
|
|
||||||
function _encodeVersionId(vid) {
|
|
||||||
let versionId = vid;
|
|
||||||
if (versionId && versionId !== 'null') {
|
|
||||||
versionId = versionIdUtils.encode(versionId);
|
|
||||||
}
|
|
||||||
return versionId;
|
|
||||||
}
|
|
||||||
|
|
||||||
function processNonCurrents(bucketName, listParams, list) {
|
function processNonCurrents(bucketName, listParams, list) {
|
||||||
const nextVersionIdMarker = _encodeVersionId(list.NextVersionIdMarker);
|
let nextVersionIdMarker = list.NextVersionIdMarker;
|
||||||
const versionIdMarker = _encodeVersionId(listParams.versionIdMarker);
|
if (nextVersionIdMarker && nextVersionIdMarker !== 'null') {
|
||||||
|
nextVersionIdMarker = versionIdUtils.encode(nextVersionIdMarker);
|
||||||
|
}
|
||||||
|
|
||||||
|
let versionIdMarker = listParams.versionIdMarker;
|
||||||
|
if (versionIdMarker && versionIdMarker !== 'null') {
|
||||||
|
versionIdMarker = versionIdUtils.encode(versionIdMarker);
|
||||||
|
}
|
||||||
|
|
||||||
const data = {
|
const data = {
|
||||||
Name: bucketName,
|
Name: bucketName,
|
||||||
Prefix: listParams.prefix,
|
Prefix: listParams.prefix,
|
||||||
MaxKeys: listParams.maxKeys,
|
MaxKeys: listParams.maxKeys,
|
||||||
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
|
|
||||||
IsTruncated: !!list.IsTruncated,
|
IsTruncated: !!list.IsTruncated,
|
||||||
KeyMarker: listParams.keyMarker,
|
KeyMarker: listParams.keyMarker,
|
||||||
VersionIdMarker: versionIdMarker,
|
VersionIdMarker: versionIdMarker,
|
||||||
|
@ -96,11 +85,11 @@ function processNonCurrents(bucketName, listParams, list) {
|
||||||
const content = {
|
const content = {
|
||||||
Key: item.key,
|
Key: item.key,
|
||||||
LastModified: v.LastModified,
|
LastModified: v.LastModified,
|
||||||
ETag: `"${v.ETag}"`,
|
Etag: v.ETag,
|
||||||
Size: v.Size,
|
Size: v.Size,
|
||||||
Owner: {
|
Owner: {
|
||||||
ID: v.Owner.ID,
|
ID: v.Owner.ID,
|
||||||
DisplayName: v.Owner.DisplayName,
|
DisplayName: v.Owner.DisplayName
|
||||||
},
|
},
|
||||||
StorageClass: v.StorageClass,
|
StorageClass: v.StorageClass,
|
||||||
TagSet: _makeTags(v.tags),
|
TagSet: _makeTags(v.tags),
|
||||||
|
@ -121,11 +110,10 @@ function processOrphans(bucketName, listParams, list) {
|
||||||
Name: bucketName,
|
Name: bucketName,
|
||||||
Prefix: listParams.prefix,
|
Prefix: listParams.prefix,
|
||||||
MaxKeys: listParams.maxKeys,
|
MaxKeys: listParams.maxKeys,
|
||||||
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
|
|
||||||
IsTruncated: !!list.IsTruncated,
|
IsTruncated: !!list.IsTruncated,
|
||||||
Marker: listParams.marker,
|
KeyMarker: listParams.keyMarker,
|
||||||
BeforeDate: listParams.beforeDate,
|
BeforeDate: listParams.beforeDate,
|
||||||
NextMarker: list.NextMarker,
|
NextKeyMarker: list.NextKeyMarker,
|
||||||
Contents: [],
|
Contents: [],
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -136,55 +124,25 @@ function processOrphans(bucketName, listParams, list) {
|
||||||
data.Contents.push({
|
data.Contents.push({
|
||||||
Key: item.key,
|
Key: item.key,
|
||||||
LastModified: v.LastModified,
|
LastModified: v.LastModified,
|
||||||
|
Etag: v.ETag,
|
||||||
|
Size: v.Size,
|
||||||
Owner: {
|
Owner: {
|
||||||
ID: v.Owner.ID,
|
ID: v.Owner.ID,
|
||||||
DisplayName: v.Owner.DisplayName,
|
DisplayName: v.Owner.DisplayName
|
||||||
},
|
},
|
||||||
|
StorageClass: v.StorageClass,
|
||||||
VersionId: versionId,
|
VersionId: versionId,
|
||||||
IsLatest: true, // for compatibility with AWS ListObjectVersions.
|
IsLatest: true, // for compatibily
|
||||||
ListType: ORPHAN_DM_TYPE,
|
DataStoreName: v.dataStoreName,
|
||||||
|
ListType: ORPHAN_TYPE,
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
return data;
|
return data;
|
||||||
}
|
}
|
||||||
|
|
||||||
function getLocationConstraintErrorMessage(locationName) {
|
|
||||||
return 'value of the location you are attempting to set ' +
|
|
||||||
`- ${locationName} - is not listed in the locationConstraint config`;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* validateMaxScannedEntries - Validates and returns the maximum scanned entries value.
|
|
||||||
*
|
|
||||||
* @param {object} params - Query parameters
|
|
||||||
* @param {object} config - CloudServer configuration
|
|
||||||
* @param {number} min - Minimum number of entries to be scanned
|
|
||||||
* @returns {Object} - An object indicating the validation result:
|
|
||||||
* - isValid (boolean): Whether the validation is successful.
|
|
||||||
* - maxScannedLifecycleListingEntries (number): The validated maximum scanned entries value if isValid is true.
|
|
||||||
*/
|
|
||||||
function validateMaxScannedEntries(params, config, min) {
|
|
||||||
let maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
|
|
||||||
|
|
||||||
if (params['max-scanned-lifecycle-listing-entries']) {
|
|
||||||
const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10);
|
|
||||||
|
|
||||||
if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min ||
|
|
||||||
maxEntriesParams > maxScannedLifecycleListingEntries) {
|
|
||||||
return { isValid: false };
|
|
||||||
}
|
|
||||||
|
|
||||||
maxScannedLifecycleListingEntries = maxEntriesParams;
|
|
||||||
}
|
|
||||||
|
|
||||||
return { isValid: true, maxScannedLifecycleListingEntries };
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
processCurrents,
|
processCurrents,
|
||||||
processNonCurrents,
|
processNonCurrents,
|
||||||
processOrphans,
|
processOrphans,
|
||||||
getLocationConstraintErrorMessage,
|
|
||||||
validateMaxScannedEntries,
|
|
||||||
};
|
};
|
||||||
|
|
|
@ -3,9 +3,7 @@ const moment = require('moment');
|
||||||
|
|
||||||
const { config } = require('../../../Config');
|
const { config } = require('../../../Config');
|
||||||
const vault = require('../../../auth/vault');
|
const vault = require('../../../auth/vault');
|
||||||
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
|
|
||||||
|
|
||||||
const { scaledMsPerDay } = config.getTimeOptions();
|
|
||||||
/**
|
/**
|
||||||
* Calculates retain until date for the locked object version
|
* Calculates retain until date for the locked object version
|
||||||
* @param {object} retention - includes days or years retention period
|
* @param {object} retention - includes days or years retention period
|
||||||
|
@ -21,9 +19,8 @@ function calculateRetainUntilDate(retention) {
|
||||||
const date = moment();
|
const date = moment();
|
||||||
// Calculate the number of days to retain the lock on the object
|
// Calculate the number of days to retain the lock on the object
|
||||||
const retainUntilDays = days || years * 365;
|
const retainUntilDays = days || years * 365;
|
||||||
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
|
|
||||||
const retainUntilDate
|
const retainUntilDate
|
||||||
= date.add(retainUntilDaysInMs, 'ms');
|
= date.add(retainUntilDays, 'days');
|
||||||
return retainUntilDate.toISOString();
|
return retainUntilDate.toISOString();
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
|
@ -205,13 +202,7 @@ class ObjectLockInfo {
|
||||||
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
|
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
|
||||||
*/
|
*/
|
||||||
canModifyObject(hasGovernanceBypass) {
|
canModifyObject(hasGovernanceBypass) {
|
||||||
// can modify object if object is not locked
|
return !this.isLocked() || (this.isGovernanceMode() && !!hasGovernanceBypass);
|
||||||
// cannot modify object in any cases if legal hold is enabled
|
|
||||||
// if no legal hold, can only modify object if bypassing governance when locked
|
|
||||||
if (!this.isLocked()) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return !this.legalHold && this.isGovernanceMode() && !!hasGovernanceBypass;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -305,9 +296,7 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
|
||||||
if (err) {
|
if (err) {
|
||||||
return cb(err);
|
return cb(err);
|
||||||
}
|
}
|
||||||
const explicitDenyExists = authorizationResults.some(
|
if (authorizationResults[0].isAllowed !== true) {
|
||||||
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
|
|
||||||
if (explicitDenyExists) {
|
|
||||||
log.trace('authorization check failed for user',
|
log.trace('authorization check failed for user',
|
||||||
{
|
{
|
||||||
'method': 'checkUserPolicyGovernanceBypass',
|
'method': 'checkUserPolicyGovernanceBypass',
|
||||||
|
@ -315,25 +304,7 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
|
||||||
});
|
});
|
||||||
return cb(errors.AccessDenied);
|
return cb(errors.AccessDenied);
|
||||||
}
|
}
|
||||||
// Convert authorization results into an easier to handle format
|
return cb(null);
|
||||||
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
|
|
||||||
const apiMethod = authorizationResults[idx].action;
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
acc[apiMethod] = curr.isImplicit;
|
|
||||||
return acc;
|
|
||||||
}, {});
|
|
||||||
|
|
||||||
// Evaluate against the bucket policies
|
|
||||||
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
|
|
||||||
bucketMD,
|
|
||||||
Object.keys(actionImplicitDenies),
|
|
||||||
authInfo.getCanonicalID(),
|
|
||||||
authInfo,
|
|
||||||
actionImplicitDenies,
|
|
||||||
log,
|
|
||||||
request);
|
|
||||||
|
|
||||||
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
|
||||||
const { decodeVersionId } = require('./versioning');
|
const { decodeVersionId } = require('./versioning');
|
||||||
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
|
||||||
const { parseRestoreRequestXml } = s3middleware.objectRestore;
|
const { parseRestoreRequestXml } = s3middleware.objectRestore;
|
||||||
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if tier is supported
|
* Check if tier is supported
|
||||||
|
@ -58,22 +58,13 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
versionId: decodedVidResult,
|
versionId: decodedVidResult,
|
||||||
requestType: request.apiMethods || 'restoreObject',
|
requestType: 'restoreObject',
|
||||||
/**
|
|
||||||
* Restoring an object might not cause any impact on
|
|
||||||
* the storage, if the object is already restored: in
|
|
||||||
* this case, the duration is extended. We disable the
|
|
||||||
* quota evaluation and trigger it manually.
|
|
||||||
*/
|
|
||||||
checkQuota: false,
|
|
||||||
request,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
// get metadata of bucket and object
|
// get metadata of bucket and object
|
||||||
function validateBucketAndObject(next) {
|
function validateBucketAndObject(next) {
|
||||||
return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
|
return mdUtils.metadataValidateBucketAndObj(mdValueParams, log, (err, bucketMD, objectMD) => {
|
||||||
log, (err, bucketMD, objectMD) => {
|
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('request authorization failed', { method: METHOD, error: err });
|
log.trace('request authorization failed', { method: METHOD, error: err });
|
||||||
return next(err);
|
return next(err);
|
||||||
|
@ -124,16 +115,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
|
||||||
return next(err, bucketMD, objectMD);
|
return next(err, bucketMD, objectMD);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function evaluateQuotas(bucketMD, objectMD, next) {
|
|
||||||
if (isObjectRestored) {
|
|
||||||
return next(null, bucketMD, objectMD);
|
|
||||||
}
|
|
||||||
const actions = Array.isArray(mdValueParams.requestType) ?
|
|
||||||
mdValueParams.requestType : [mdValueParams.requestType];
|
|
||||||
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
|
|
||||||
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
|
|
||||||
false, log, err => next(err, bucketMD, objectMD));
|
|
||||||
},
|
|
||||||
function updateObjectMD(bucketMD, objectMD, next) {
|
function updateObjectMD(bucketMD, objectMD, next) {
|
||||||
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
|
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
|
||||||
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
|
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
|
||||||
|
|
|
@ -1,32 +0,0 @@
|
||||||
const { errors } = require('arsenal');
|
|
||||||
|
|
||||||
const { unsupportedSignatureChecksums, supportedSignatureChecksums } = require('../../../../constants');
|
|
||||||
|
|
||||||
function validateChecksumHeaders(headers) {
|
|
||||||
// If the x-amz-trailer header is present the request is using one of the
|
|
||||||
// trailing checksum algorithms, which are not supported.
|
|
||||||
if (headers['x-amz-trailer'] !== undefined) {
|
|
||||||
return errors.BadRequest.customizeDescription('trailing checksum is not supported');
|
|
||||||
}
|
|
||||||
|
|
||||||
const signatureChecksum = headers['x-amz-content-sha256'];
|
|
||||||
if (signatureChecksum === undefined) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (supportedSignatureChecksums.has(signatureChecksum)) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the value is not one of the possible checksum algorithms
|
|
||||||
// the only other valid value is the actual sha256 checksum of the payload.
|
|
||||||
// Do a simple sanity check of the length to guard against future algos.
|
|
||||||
// If the value is an unknown algo, then it will fail checksum validation.
|
|
||||||
if (!unsupportedSignatureChecksums.has(signatureChecksum) && signatureChecksum.length === 64) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.BadRequest.customizeDescription('unsupported checksum algorithm');
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = validateChecksumHeaders;
|
|
|
@ -4,7 +4,7 @@ const async = require('async');
|
||||||
const metadata = require('../../../metadata/wrapper');
|
const metadata = require('../../../metadata/wrapper');
|
||||||
const { config } = require('../../../Config');
|
const { config } = require('../../../Config');
|
||||||
|
|
||||||
const { scaledMsPerDay } = config.getTimeOptions();
|
const oneDay = 24 * 60 * 60 * 1000;
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
// Use Arsenal function to generate a version ID used internally by metadata
|
// Use Arsenal function to generate a version ID used internally by metadata
|
||||||
|
@ -58,7 +58,7 @@ function decodeVersionId(reqQuery) {
|
||||||
*/
|
*/
|
||||||
function getVersionIdResHeader(verCfg, objectMD) {
|
function getVersionIdResHeader(verCfg, objectMD) {
|
||||||
if (verCfg) {
|
if (verCfg) {
|
||||||
if (objectMD.isNull || !objectMD.versionId) {
|
if (objectMD.isNull || (objectMD && !objectMD.versionId)) {
|
||||||
return 'null';
|
return 'null';
|
||||||
}
|
}
|
||||||
return versionIdUtils.encode(objectMD.versionId);
|
return versionIdUtils.encode(objectMD.versionId);
|
||||||
|
@ -79,34 +79,17 @@ function checkQueryVersionId(query) {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb) {
|
function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
|
||||||
// In compatibility mode, create null versioned keys instead of null keys
|
metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
|
||||||
let versionId;
|
|
||||||
let nullVersionMD;
|
|
||||||
if (config.nullVersionCompatMode) {
|
|
||||||
versionId = nullVersionId;
|
|
||||||
nullVersionMD = Object.assign({}, objMD, {
|
|
||||||
versionId: nullVersionId,
|
|
||||||
isNull: true,
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
versionId = 'null';
|
|
||||||
nullVersionMD = Object.assign({}, objMD, {
|
|
||||||
versionId: nullVersionId,
|
|
||||||
isNull: true,
|
|
||||||
isNull2: true,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
metadata.putObjectMD(bucketName, objKey, nullVersionMD, { versionId }, log, err => {
|
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error from metadata storing null version as new version',
|
log.debug('error from metadata storing null version as new version',
|
||||||
{ error: err });
|
{ error: err });
|
||||||
}
|
}
|
||||||
cb(err);
|
cb(err, options);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/** check existence and get location of null version data for deletion
|
/** get location of null version data for deletion
|
||||||
* @param {string} bucketName - name of bucket
|
* @param {string} bucketName - name of bucket
|
||||||
* @param {string} objKey - name of object key
|
* @param {string} objKey - name of object key
|
||||||
* @param {object} options - metadata options for getting object MD
|
* @param {object} options - metadata options for getting object MD
|
||||||
|
@ -117,56 +100,50 @@ function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb)
|
||||||
* @param {function} cb - callback
|
* @param {function} cb - callback
|
||||||
* @return {undefined} - and call callback with (err, dataToDelete)
|
* @return {undefined} - and call callback with (err, dataToDelete)
|
||||||
*/
|
*/
|
||||||
function _prepareNullVersionDeletion(bucketName, objKey, options, mst, log, cb) {
|
function _getNullVersionsToDelete(bucketName, objKey, options, mst, log, cb) {
|
||||||
const nullOptions = {};
|
|
||||||
if (!options.deleteData) {
|
|
||||||
return process.nextTick(cb, null, nullOptions);
|
|
||||||
}
|
|
||||||
if (options.versionId === mst.versionId) {
|
if (options.versionId === mst.versionId) {
|
||||||
// no need to get another key as the master is the target
|
// no need to get delete location, we already have the master's metadata
|
||||||
nullOptions.dataToDelete = mst.objLocation;
|
const dataToDelete = mst.objLocation;
|
||||||
return process.nextTick(cb, null, nullOptions);
|
return process.nextTick(cb, null, dataToDelete);
|
||||||
}
|
|
||||||
if (options.versionId === 'null') {
|
|
||||||
// deletion of the null key will be done by the main metadata
|
|
||||||
// PUT via this option
|
|
||||||
nullOptions.deleteNullKey = true;
|
|
||||||
}
|
}
|
||||||
return metadata.getObjectMD(bucketName, objKey, options, log,
|
return metadata.getObjectMD(bucketName, objKey, options, log,
|
||||||
(err, versionMD) => {
|
(err, versionMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
// the null key may not exist, hence it's a normal
|
log.debug('err from metadata getting specified version', {
|
||||||
// situation to have a NoSuchKey error, in which case
|
error: err,
|
||||||
// there is nothing to delete
|
method: '_getNullVersionsToDelete',
|
||||||
if (err.is.NoSuchKey) {
|
});
|
||||||
log.debug('null version does not exist', {
|
|
||||||
method: '_prepareNullVersionDeletion',
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
log.warn('could not get null version metadata', {
|
|
||||||
error: err,
|
|
||||||
method: '_prepareNullVersionDeletion',
|
|
||||||
});
|
|
||||||
}
|
|
||||||
return cb(err);
|
return cb(err);
|
||||||
}
|
}
|
||||||
if (versionMD.location) {
|
if (!versionMD.location) {
|
||||||
const dataToDelete = Array.isArray(versionMD.location) ?
|
return cb();
|
||||||
versionMD.location : [versionMD.location];
|
|
||||||
nullOptions.dataToDelete = dataToDelete;
|
|
||||||
}
|
}
|
||||||
return cb(null, nullOptions);
|
const dataToDelete = Array.isArray(versionMD.location) ?
|
||||||
|
versionMD.location : [versionMD.location];
|
||||||
|
return cb(null, dataToDelete);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
|
function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
|
||||||
return metadata.deleteObjectMD(bucketName, objKey, options, log, err => {
|
return _getNullVersionsToDelete(bucketName, objKey, options, mst, log,
|
||||||
if (err) {
|
(err, nullDataToDelete) => {
|
||||||
log.warn('metadata error deleting null versioned key',
|
if (err) {
|
||||||
{ bucketName, objKey, error: err, method: '_deleteNullVersionMD' });
|
log.warn('could not find null version metadata', {
|
||||||
}
|
error: err,
|
||||||
return cb(err);
|
method: '_deleteNullVersionMD',
|
||||||
});
|
});
|
||||||
|
return cb(err);
|
||||||
|
}
|
||||||
|
return metadata.deleteObjectMD(bucketName, objKey, options, log,
|
||||||
|
err => {
|
||||||
|
if (err) {
|
||||||
|
log.warn('metadata error deleting null version',
|
||||||
|
{ error: err, method: '_deleteNullVersionMD' });
|
||||||
|
return cb(err);
|
||||||
|
}
|
||||||
|
return cb(null, nullDataToDelete);
|
||||||
|
});
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -176,103 +153,73 @@ function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
|
||||||
* @param {object} mst - state of master version, as returned by
|
* @param {object} mst - state of master version, as returned by
|
||||||
* getMasterState()
|
* getMasterState()
|
||||||
* @param {string} vstat - bucket versioning status: 'Enabled' or 'Suspended'
|
* @param {string} vstat - bucket versioning status: 'Enabled' or 'Suspended'
|
||||||
* @param {boolean} nullVersionCompatMode - if true, behaves in null
|
|
||||||
* version compatibility mode and return appropriate values: this mode
|
|
||||||
* does not attempt to create null keys but create null versioned keys
|
|
||||||
* instead
|
|
||||||
*
|
*
|
||||||
* @return {object} result object with the following attributes:
|
* @return {object} result object with the following attributes:
|
||||||
* - {object} options: versioning-related options to pass to the
|
* - {object} options: versioning-related options to pass to the
|
||||||
services.metadataStoreObject() call
|
services.metadataStoreObject() call
|
||||||
* - {object} [options.extraMD]: extra attributes to set in object metadata
|
* - {object} [storeOptions]: options for metadata to create a new
|
||||||
* - {string} [nullVersionId]: null version key to create, if needed
|
null version key, if needed
|
||||||
* - {object} [delOptions]: options for metadata to delete the null
|
* - {object} [delOptions]: options for metadata to delete the null
|
||||||
version key, if needed
|
version key, if needed
|
||||||
*/
|
*/
|
||||||
function processVersioningState(mst, vstat, nullVersionCompatMode) {
|
function processVersioningState(mst, vstat) {
|
||||||
const versioningSuspended = (vstat === 'Suspended');
|
const options = {};
|
||||||
const masterIsNull = mst.exists && (mst.isNull || !mst.versionId);
|
const storeOptions = {};
|
||||||
|
const delOptions = {};
|
||||||
if (versioningSuspended) {
|
// object does not exist or is not versioned (before versioning)
|
||||||
// versioning is suspended: overwrite the existing null version
|
if (mst.versionId === undefined || mst.isNull) {
|
||||||
const options = { versionId: '', isNull: true };
|
// versioning is suspended, overwrite existing master version
|
||||||
if (masterIsNull) {
|
if (vstat === 'Suspended') {
|
||||||
// if the null version exists, clean it up prior to put
|
options.versionId = '';
|
||||||
if (mst.objLocation) {
|
options.isNull = true;
|
||||||
options.dataToDelete = mst.objLocation;
|
options.dataToDelete = mst.objLocation;
|
||||||
}
|
// if null version exists, clean it up prior to put
|
||||||
// backward-compat: a null version key may exist even with
|
if (mst.isNull) {
|
||||||
// a null master (due to S3C-7526), if so, delete it (its
|
delOptions.versionId = mst.versionId;
|
||||||
// data will be deleted as part of the master cleanup, so
|
if (mst.uploadId) {
|
||||||
// no "deleteData" param is needed)
|
delOptions.replayId = mst.uploadId;
|
||||||
//
|
}
|
||||||
// "isNull2" attribute is set in master metadata when
|
|
||||||
// null keys are used, which is used as an optimization to
|
|
||||||
// avoid having to check the versioned key since there can
|
|
||||||
// be no more versioned key to clean up
|
|
||||||
if (mst.isNull && mst.versionId && !mst.isNull2) {
|
|
||||||
const delOptions = { versionId: mst.versionId };
|
|
||||||
return { options, delOptions };
|
return { options, delOptions };
|
||||||
}
|
}
|
||||||
return { options };
|
return { options };
|
||||||
}
|
}
|
||||||
if (mst.nullVersionId) {
|
// versioning is enabled, create a new version
|
||||||
// backward-compat: delete the null versioned key and data
|
options.versioning = true;
|
||||||
const delOptions = { versionId: mst.nullVersionId, deleteData: true };
|
if (mst.exists) {
|
||||||
if (mst.nullUploadId) {
|
// store master version in a new key
|
||||||
delOptions.replayId = mst.nullUploadId;
|
const versionId = mst.isNull ? mst.versionId : nonVersionedObjId;
|
||||||
|
storeOptions.versionId = versionId;
|
||||||
|
storeOptions.isNull = true;
|
||||||
|
options.nullVersionId = versionId;
|
||||||
|
// non-versioned (non-null) MPU objects don't have a
|
||||||
|
// replay ID, so don't reference their uploadId
|
||||||
|
if (mst.isNull && mst.uploadId) {
|
||||||
|
options.nullUploadId = mst.uploadId;
|
||||||
}
|
}
|
||||||
return { options, delOptions };
|
return { options, storeOptions };
|
||||||
|
}
|
||||||
|
return { options };
|
||||||
|
}
|
||||||
|
// master is versioned and is not a null version
|
||||||
|
const nullVersionId = mst.nullVersionId;
|
||||||
|
if (vstat === 'Suspended') {
|
||||||
|
// versioning is suspended, overwrite the existing master version
|
||||||
|
options.versionId = '';
|
||||||
|
options.isNull = true;
|
||||||
|
if (nullVersionId === undefined) {
|
||||||
|
return { options };
|
||||||
|
}
|
||||||
|
delOptions.versionId = nullVersionId;
|
||||||
|
if (mst.nullUploadId) {
|
||||||
|
delOptions.replayId = mst.nullUploadId;
|
||||||
}
|
}
|
||||||
// clean up the eventual null key's location data prior to put
|
|
||||||
|
|
||||||
// NOTE: due to metadata v1 internal format, we cannot guess
|
|
||||||
// from the master key whether there is an associated null
|
|
||||||
// key, because the master key may be removed whenever the
|
|
||||||
// latest version becomes a delete marker. Hence we need to
|
|
||||||
// pessimistically try to get the null key metadata and delete
|
|
||||||
// it if it exists.
|
|
||||||
const delOptions = { versionId: 'null', deleteData: true };
|
|
||||||
return { options, delOptions };
|
return { options, delOptions };
|
||||||
}
|
}
|
||||||
|
// versioning is enabled, put the new version
|
||||||
// versioning is enabled: create a new version
|
options.versioning = true;
|
||||||
const options = { versioning: true };
|
options.nullVersionId = nullVersionId;
|
||||||
if (masterIsNull) {
|
if (mst.nullUploadId) {
|
||||||
// if master is a null version or a non-versioned key,
|
options.nullUploadId = mst.nullUploadId;
|
||||||
// copy it to a new null key
|
|
||||||
const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId;
|
|
||||||
if (nullVersionCompatMode) {
|
|
||||||
options.extraMD = {
|
|
||||||
nullVersionId,
|
|
||||||
};
|
|
||||||
if (mst.uploadId) {
|
|
||||||
options.extraMD.nullUploadId = mst.uploadId;
|
|
||||||
}
|
|
||||||
return { options, nullVersionId };
|
|
||||||
}
|
|
||||||
if (mst.isNull && !mst.isNull2) {
|
|
||||||
// if master null version was put with an older
|
|
||||||
// Cloudserver (or in compat mode), there is a
|
|
||||||
// possibility that it also has a null versioned key
|
|
||||||
// associated, so we need to delete it as we write the
|
|
||||||
// null key
|
|
||||||
const delOptions = {
|
|
||||||
versionId: nullVersionId,
|
|
||||||
};
|
|
||||||
return { options, nullVersionId, delOptions };
|
|
||||||
}
|
|
||||||
return { options, nullVersionId };
|
|
||||||
}
|
|
||||||
// backward-compat: keep a reference to the existing null
|
|
||||||
// versioned key
|
|
||||||
if (mst.nullVersionId) {
|
|
||||||
options.extraMD = {
|
|
||||||
nullVersionId: mst.nullVersionId,
|
|
||||||
};
|
|
||||||
if (mst.nullUploadId) {
|
|
||||||
options.extraMD.nullUploadId = mst.nullUploadId;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return { options };
|
return { options };
|
||||||
}
|
}
|
||||||
|
@ -299,7 +246,6 @@ function getMasterState(objMD) {
|
||||||
versionId: objMD.versionId,
|
versionId: objMD.versionId,
|
||||||
uploadId: objMD.uploadId,
|
uploadId: objMD.uploadId,
|
||||||
isNull: objMD.isNull,
|
isNull: objMD.isNull,
|
||||||
isNull2: objMD.isNull2,
|
|
||||||
nullVersionId: objMD.nullVersionId,
|
nullVersionId: objMD.nullVersionId,
|
||||||
nullUploadId: objMD.nullUploadId,
|
nullUploadId: objMD.nullUploadId,
|
||||||
};
|
};
|
||||||
|
@ -323,6 +269,9 @@ function getMasterState(objMD) {
|
||||||
* ('' overwrites the master version)
|
* ('' overwrites the master version)
|
||||||
* options.versioning - (true/undefined) metadata instruction to create new ver
|
* options.versioning - (true/undefined) metadata instruction to create new ver
|
||||||
* options.isNull - (true/undefined) whether new version is null or not
|
* options.isNull - (true/undefined) whether new version is null or not
|
||||||
|
* options.nullVersionId - if storing a null version in version history, the
|
||||||
|
* version id of the null version
|
||||||
|
* options.deleteNullVersionData - whether to delete the data of the null ver
|
||||||
*/
|
*/
|
||||||
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
||||||
log, callback) {
|
log, callback) {
|
||||||
|
@ -334,102 +283,42 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
||||||
return process.nextTick(callback, null, options);
|
return process.nextTick(callback, null, options);
|
||||||
}
|
}
|
||||||
// bucket is versioning configured
|
// bucket is versioning configured
|
||||||
const { options, nullVersionId, delOptions } =
|
const { options, storeOptions, delOptions } =
|
||||||
processVersioningState(mst, vCfg.Status, config.nullVersionCompatMode);
|
processVersioningState(mst, vCfg.Status);
|
||||||
return async.series([
|
return async.series([
|
||||||
function storeNullVersionMD(next) {
|
function storeVersion(next) {
|
||||||
if (!nullVersionId) {
|
if (!storeOptions) {
|
||||||
return process.nextTick(next);
|
return process.nextTick(next);
|
||||||
}
|
}
|
||||||
return _storeNullVersionMD(bucketName, objectKey, nullVersionId, objMD, log, next);
|
const versionMD = Object.assign({}, objMD, storeOptions);
|
||||||
|
const params = { versionId: storeOptions.versionId };
|
||||||
|
return _storeNullVersionMD(bucketName, objectKey, versionMD,
|
||||||
|
params, log, next);
|
||||||
},
|
},
|
||||||
function prepareNullVersionDeletion(next) {
|
function deleteNullVersion(next) {
|
||||||
if (!delOptions) {
|
if (!delOptions) {
|
||||||
return process.nextTick(next);
|
return process.nextTick(next);
|
||||||
}
|
}
|
||||||
return _prepareNullVersionDeletion(
|
return _deleteNullVersionMD(bucketName, objectKey, delOptions, mst,
|
||||||
bucketName, objectKey, delOptions, mst, log,
|
log, (err, nullDataToDelete) => {
|
||||||
(err, nullOptions) => {
|
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
log.warn('unexpected error deleting null version md', {
|
||||||
|
error: err,
|
||||||
|
method: 'versioningPreprocessing',
|
||||||
|
});
|
||||||
|
// it's possible there was a concurrent request to
|
||||||
|
// delete the null version, so proceed with putting a
|
||||||
|
// new version
|
||||||
|
if (err.is.NoSuchKey) {
|
||||||
|
return next(null, options);
|
||||||
|
}
|
||||||
|
return next(errors.InternalError);
|
||||||
}
|
}
|
||||||
Object.assign(options, nullOptions);
|
Object.assign(options, { dataToDelete: nullDataToDelete });
|
||||||
return next();
|
return next();
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function deleteNullVersionMD(next) {
|
], err => callback(err, options));
|
||||||
if (delOptions &&
|
|
||||||
delOptions.versionId &&
|
|
||||||
delOptions.versionId !== 'null') {
|
|
||||||
// backward-compat: delete old null versioned key
|
|
||||||
return _deleteNullVersionMD(
|
|
||||||
bucketName, objectKey, { versionId: delOptions.versionId }, log, next);
|
|
||||||
}
|
|
||||||
return process.nextTick(next);
|
|
||||||
},
|
|
||||||
], err => {
|
|
||||||
// it's possible there was a prior request that deleted the
|
|
||||||
// null version, so proceed with putting a new version
|
|
||||||
if (err && err.is.NoSuchKey) {
|
|
||||||
return callback(null, options);
|
|
||||||
}
|
|
||||||
return callback(err, options);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Return options to pass to Metadata layer for version-specific
|
|
||||||
* operations with the given requested version ID
|
|
||||||
*
|
|
||||||
* @param {object} objectMD - object metadata
|
|
||||||
* @param {boolean} nullVersionCompatMode - if true, behaves in null
|
|
||||||
* version compatibility mode
|
|
||||||
* @return {object} options object with params:
|
|
||||||
* {string} [options.versionId] - specific versionId to update
|
|
||||||
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
|
|
||||||
* Metadata backend if we're updating or deleting a new-style null
|
|
||||||
* version (stored in master or null key), or not a null version.
|
|
||||||
*/
|
|
||||||
function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
|
|
||||||
// Use the internal versionId if it is a "real" null version (not
|
|
||||||
// non-versioned)
|
|
||||||
//
|
|
||||||
// If the target object is non-versioned: do not specify a
|
|
||||||
// "versionId" attribute nor "isNull"
|
|
||||||
//
|
|
||||||
// If the target version is a null version, i.e. has the "isNull"
|
|
||||||
// attribute:
|
|
||||||
//
|
|
||||||
// - send the "isNull=true" param to Metadata if the version is
|
|
||||||
// already a null key put by a non-compat mode Cloudserver, to
|
|
||||||
// let Metadata know that the null key is to be updated or
|
|
||||||
// deleted. This is the case if the "isNull2" metadata attribute
|
|
||||||
// exists
|
|
||||||
//
|
|
||||||
// - otherwise, do not send the "isNull" parameter to hint
|
|
||||||
// Metadata that it is a legacy null version
|
|
||||||
//
|
|
||||||
// If the target version is not a null version and is versioned:
|
|
||||||
//
|
|
||||||
// - send the "isNull=false" param to Metadata in non-compat
|
|
||||||
// mode (mandatory for v1 format)
|
|
||||||
//
|
|
||||||
// - otherwise, do not send the "isNull" parameter to hint
|
|
||||||
// Metadata that an existing null version may not be stored in a
|
|
||||||
// null key
|
|
||||||
//
|
|
||||||
//
|
|
||||||
if (objectMD.versionId === undefined) {
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
const options = { versionId: objectMD.versionId };
|
|
||||||
if (objectMD.isNull) {
|
|
||||||
if (objectMD.isNull2) {
|
|
||||||
options.isNull = true;
|
|
||||||
}
|
|
||||||
} else if (!nullVersionCompatMode) {
|
|
||||||
options.isNull = false;
|
|
||||||
}
|
|
||||||
return options;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/** preprocessingVersioningDelete - return versioning information for S3 to
|
/** preprocessingVersioningDelete - return versioning information for S3 to
|
||||||
|
@ -438,69 +327,33 @@ function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
|
||||||
* @param {object} bucketMD - bucket metadata
|
* @param {object} bucketMD - bucket metadata
|
||||||
* @param {object} objectMD - obj metadata
|
* @param {object} objectMD - obj metadata
|
||||||
* @param {string} [reqVersionId] - specific version ID sent as part of request
|
* @param {string} [reqVersionId] - specific version ID sent as part of request
|
||||||
* @param {boolean} nullVersionCompatMode - if true, behaves in null version compatibility mode
|
|
||||||
* @return {object} options object with params:
|
* @return {object} options object with params:
|
||||||
* {boolean} [options.deleteData=true|undefined] - whether to delete data (if undefined
|
* options.deleteData - (true/undefined) whether to delete data (if undefined
|
||||||
* means creating a delete marker instead)
|
* means creating a delete marker instead)
|
||||||
* {string} [options.versionId] - specific versionId to delete
|
* options.versionId - specific versionId to delete
|
||||||
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
|
|
||||||
* Metadata backend if we're deleting a new-style null version (stored
|
|
||||||
* in master or null key), or not a null version.
|
|
||||||
*/
|
*/
|
||||||
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId, nullVersionCompatMode) {
|
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId) {
|
||||||
let options = {};
|
const options = {};
|
||||||
if (bucketMD.getVersioningConfiguration() && reqVersionId) {
|
|
||||||
options = getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode);
|
|
||||||
}
|
|
||||||
if (!bucketMD.getVersioningConfiguration() || reqVersionId) {
|
if (!bucketMD.getVersioningConfiguration() || reqVersionId) {
|
||||||
// delete data if bucket is non-versioned or the request
|
// delete data if bucket is non-versioned or the request
|
||||||
// deletes a specific version
|
// deletes a specific version
|
||||||
options.deleteData = true;
|
options.deleteData = true;
|
||||||
}
|
}
|
||||||
|
if (bucketMD.getVersioningConfiguration() && reqVersionId) {
|
||||||
|
if (reqVersionId === 'null') {
|
||||||
|
// deleting the 'null' version if it exists: use its
|
||||||
|
// internal versionId if it exists
|
||||||
|
if (objectMD.versionId !== undefined) {
|
||||||
|
options.versionId = objectMD.versionId;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// deleting a specific version
|
||||||
|
options.versionId = reqVersionId;
|
||||||
|
}
|
||||||
|
}
|
||||||
return options;
|
return options;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Keep metadatas when the object is restored from cold storage
|
|
||||||
* but remove the specific ones we don't want to keep
|
|
||||||
* @param {object} objMD - obj metadata
|
|
||||||
* @param {object} metadataStoreParams - custom built object containing resource details.
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
function restoreMetadata(objMD, metadataStoreParams) {
|
|
||||||
/* eslint-disable no-param-reassign */
|
|
||||||
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
|
|
||||||
// We need to keep user metadata and tags
|
|
||||||
Object.keys(objMD).forEach(key => {
|
|
||||||
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
|
|
||||||
metadataStoreParams.metaHeaders[key] = objMD[key];
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
if (objMD['x-amz-website-redirect-location']) {
|
|
||||||
if (!metadataStoreParams.headers) {
|
|
||||||
metadataStoreParams.headers = {};
|
|
||||||
}
|
|
||||||
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
|
|
||||||
}
|
|
||||||
|
|
||||||
if (objMD.replicationInfo) {
|
|
||||||
metadataStoreParams.replicationInfo = objMD.replicationInfo;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (objMD.legalHold) {
|
|
||||||
metadataStoreParams.legalHold = objMD.legalHold;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (objMD.acl) {
|
|
||||||
metadataStoreParams.acl = objMD.acl;
|
|
||||||
}
|
|
||||||
|
|
||||||
metadataStoreParams.creationTime = objMD['creation-time'];
|
|
||||||
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
|
|
||||||
metadataStoreParams.taggingCopy = objMD.tags;
|
|
||||||
}
|
|
||||||
|
|
||||||
/** overwritingVersioning - return versioning information for S3 to handle
|
/** overwritingVersioning - return versioning information for S3 to handle
|
||||||
* storing version metadata with a specific version id.
|
* storing version metadata with a specific version id.
|
||||||
* @param {object} objMD - obj metadata
|
* @param {object} objMD - obj metadata
|
||||||
|
@ -512,8 +365,10 @@ function restoreMetadata(objMD, metadataStoreParams) {
|
||||||
* version id of the null version
|
* version id of the null version
|
||||||
*/
|
*/
|
||||||
function overwritingVersioning(objMD, metadataStoreParams) {
|
function overwritingVersioning(objMD, metadataStoreParams) {
|
||||||
|
/* eslint-disable no-param-reassign */
|
||||||
|
metadataStoreParams.creationTime = objMD['creation-time'];
|
||||||
|
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
|
||||||
metadataStoreParams.updateMicroVersionId = true;
|
metadataStoreParams.updateMicroVersionId = true;
|
||||||
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
|
|
||||||
|
|
||||||
// set correct originOp
|
// set correct originOp
|
||||||
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
|
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
|
||||||
|
@ -526,7 +381,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
|
||||||
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
|
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
|
||||||
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
|
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
|
||||||
restoreCompletedAt: new Date(now),
|
restoreCompletedAt: new Date(now),
|
||||||
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
|
restoreWillExpireAt: new Date(now + (days * oneDay)),
|
||||||
};
|
};
|
||||||
|
|
||||||
/* eslint-enable no-param-reassign */
|
/* eslint-enable no-param-reassign */
|
||||||
|
@ -535,14 +390,8 @@ function overwritingVersioning(objMD, metadataStoreParams) {
|
||||||
const options = {
|
const options = {
|
||||||
versionId,
|
versionId,
|
||||||
isNull: objMD.isNull,
|
isNull: objMD.isNull,
|
||||||
|
nullVersionId: objMD.nullVersionId,
|
||||||
};
|
};
|
||||||
if (objMD.nullVersionId) {
|
|
||||||
options.extraMD = {
|
|
||||||
nullVersionId: objMD.nullVersionId,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
restoreMetadata(objMD, metadataStoreParams);
|
|
||||||
|
|
||||||
return options;
|
return options;
|
||||||
}
|
}
|
||||||
|
@ -554,7 +403,6 @@ module.exports = {
|
||||||
processVersioningState,
|
processVersioningState,
|
||||||
getMasterState,
|
getMasterState,
|
||||||
versioningPreprocessing,
|
versioningPreprocessing,
|
||||||
getVersionSpecificMetadataOptions,
|
|
||||||
preprocessingVersioningDelete,
|
preprocessingVersioningDelete,
|
||||||
overwritingVersioning,
|
overwritingVersioning,
|
||||||
decodeVID,
|
decodeVID,
|
||||||
|
|
|
@ -101,33 +101,8 @@ function validateWebsiteHeader(header) {
|
||||||
header.startsWith('http://') || header.startsWith('https://'));
|
header.startsWith('http://') || header.startsWith('https://'));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
 * appendWebsiteIndexDocument - resolve the effective object key by appending
 * the website index document when the request targets a "directory".
 * @param {object} request - normalized request object (objectKey is mutated)
 * @param {string} indexDocumentSuffix - index document from website config
 * @param {boolean} [force=false] - append the index even without a trailing
 *  slash (used for the 302 redirect on a folder that has an index)
 * @return {undefined}
 */
function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
    /* eslint-disable no-param-reassign */
    const currentKey = request.objectKey || '';
    if (currentKey === '') {
        // No key provided: serve the index document itself.
        request.objectKey = indexDocumentSuffix;
    } else if (currentKey.endsWith('/')) {
        // "Directory" request: serve that directory's index document.
        request.objectKey = `${currentKey}${indexDocumentSuffix}`;
    } else if (force) {
        // Folder without a trailing slash that has an index (302 redirect case).
        request.objectKey = `${currentKey}/${indexDocumentSuffix}`;
    }
    /* eslint-enable no-param-reassign */
}
|
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
findRoutingRule,
|
findRoutingRule,
|
||||||
extractRedirectInfo,
|
extractRedirectInfo,
|
||||||
validateWebsiteHeader,
|
validateWebsiteHeader,
|
||||||
appendWebsiteIndexDocument,
|
|
||||||
};
|
};
|
||||||
|
|
|
@ -1,314 +0,0 @@
|
||||||
const async = require('async');
|
|
||||||
const { errors } = require('arsenal');
|
|
||||||
const monitoring = require('../../../utilities/monitoringHandler');
|
|
||||||
const {
|
|
||||||
actionNeedQuotaCheckCopy,
|
|
||||||
actionNeedQuotaCheck,
|
|
||||||
actionWithDataDeletion,
|
|
||||||
} = require('arsenal').policies;
|
|
||||||
const { config } = require('../../../Config');
|
|
||||||
const QuotaService = require('../../../quotas/quotas');
|
|
||||||
|
|
||||||
/**
 * Derive the number of bytes a request adds to (or removes from) the
 * utilization metrics, based on the API method and the object metadata.
 * @param {string} apiMethod - api method
 * @param {BucketInfo} bucket - bucket info
 * @param {string} versionId - version id of the object
 * @param {number} contentLength - content length of the object
 * @param {object} objMD - object metadata
 * @param {object} destObjMD - destination object metadata (copy operations)
 * @return {number} processed content length (negative for deletions, 0 if unknown)
 */
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
    const sizeOf = md => Number.parseInt(md['content-length'], 10);
    let bytes = contentLength;

    if (apiMethod === 'objectRestore') {
        // Restore writes the full archived object back.
        bytes = sizeOf(objMD);
    } else if (!bytes && objMD?.['content-length']) {
        if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
            if (!destObjMD || bucket.isVersioningEnabled()) {
                // Plain copy: the whole source size is added.
                bytes = sizeOf(objMD);
            } else {
                // Copy replacing the target in a non-versioned bucket:
                // only the size difference counts.
                bytes = sizeOf(objMD) - sizeOf(destObjMD);
            }
        } else if (!bucket.isVersioningEnabled() || versionId) {
            // Deletion (or version-specific deletion) frees the object size.
            bytes = -sizeOf(objMD);
        }
    } else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
        // Replacement in a non-versioned bucket: account only for the diff.
        bytes -= sizeOf(objMD);
    }

    return bytes || 0;
}
|
|
||||||
|
|
||||||
/**
 * Tells whether a utilization metric is too old to be trusted. A stale
 * metric is logged, counted in monitoring, and the request is allowed.
 *
 * @param {Object} metric - The metric object to check.
 * @param {string} resourceType - The type of the resource.
 * @param {string} resourceName - The name of the resource.
 * @param {string} action - The action being performed.
 * @param {number} inflight - The number of inflight requests.
 * @param {Object} log - The logger object.
 * @returns {boolean} true if the metric is stale, false otherwise.
 */
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
    const hasTimestamp = Boolean(metric.date);
    const isStale = hasTimestamp
        && Date.now() - new Date(metric.date).getTime() > QuotaService.maxStaleness;
    if (!isStale) {
        return false;
    }
    log.warn('Stale metrics from the quota service, allowing the request', {
        resourceType,
        resourceName,
        action,
        inflight,
    });
    monitoring.requestWithQuotaMetricsUnavailable.inc();
    return true;
}
|
|
||||||
|
|
||||||
/**
 * Evaluates the bucket and the account quotas in parallel, passing the
 * inflight count to the quota service, and reports which quota (if any)
 * is exceeded once the utilization metrics are known.
 *
 * @param {number} bucketQuota - The quota limit for the bucket.
 * @param {number} accountQuota - The quota limit for the account.
 * @param {object} bucket - The bucket object.
 * @param {object} account - The account object.
 * @param {number} inflight - The number of inflight requests.
 * @param {number} inflightForCheck - The number of inflight requests for checking quotas.
 * @param {string} action - The action being performed.
 * @param {object} log - The logger object.
 * @param {function} callback - Called with (err, bucketQuotaExceeded, accountQuotaExceeded).
 * @returns {object} - The result of the parallel evaluation.
 */
function _evaluateQuotas(
    bucketQuota,
    accountQuota,
    bucket,
    account,
    inflight,
    inflightForCheck,
    action,
    log,
    callback,
) {
    let bucketExceeded = false;
    let accountExceeded = false;
    const createdAtMs = new Date(bucket.getCreationDate()).getTime();

    // Bucket-level check: only runs when a positive bucket quota is set.
    const checkBucket = next => {
        if (!(bucketQuota > 0)) {
            return next();
        }
        const metricsKey = `${bucket.getName()}_${createdAtMs}`;
        return QuotaService.getUtilizationMetrics('bucket', metricsKey, null, {
            action,
            inflight,
        }, (err, metrics) => {
            if (err || inflight < 0) {
                return next(err);
            }
            const stale = isMetricStale(metrics, 'bucket', bucket.getName(), action, inflight, log);
            if (!stale && metrics.bytesTotal + inflightForCheck > bucketQuota) {
                log.debug('Bucket quota exceeded', {
                    bucket: bucket.getName(),
                    action,
                    inflight,
                    quota: bucketQuota,
                    bytesTotal: metrics.bytesTotal,
                });
                bucketExceeded = true;
            }
            return next();
        });
    };

    // Account-level check: needs both a positive quota and an account id.
    const checkAccount = next => {
        if (!(accountQuota > 0 && account?.account)) {
            return next();
        }
        return QuotaService.getUtilizationMetrics('account', account.account, null, {
            action,
            inflight,
        }, (err, metrics) => {
            if (err || inflight < 0) {
                return next(err);
            }
            const stale = isMetricStale(metrics, 'account', account.account, action, inflight, log);
            if (!stale && metrics.bytesTotal + inflightForCheck > accountQuota) {
                log.debug('Account quota exceeded', {
                    accountId: account.account,
                    action,
                    inflight,
                    quota: accountQuota,
                    bytesTotal: metrics.bytesTotal,
                });
                accountExceeded = true;
            }
            return next();
        });
    };

    return async.parallel({
        bucketQuota: checkBucket,
        accountQuota: checkAccount,
    }, err => {
        if (err) {
            log.warn('Error evaluating quotas', {
                error: err.name,
                description: err.message,
                isInflightDeletion: inflight < 0,
            });
        }
        return callback(err, bucketExceeded, accountExceeded);
    });
}
|
|
||||||
|
|
||||||
/**
 * Records how long quota evaluation took for a given API method.
 *
 * @param {string} apiMethod - The name of the API method being monitored.
 * @param {string} type - The type of quota being evaluated.
 * @param {string} code - The code associated with the quota being evaluated.
 * @param {number} duration - Duration of the evaluation, in nanoseconds.
 * @returns {undefined} - Returns nothing.
 */
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
    const labels = { action: apiMethod, type, code };
    // The histogram is observed in seconds; the duration arrives in ns.
    monitoring.quotaEvaluationDuration.labels(labels).observe(duration / 1e9);
}
|
|
||||||
|
|
||||||
/**
 * validateQuotas - checks the bucket and account quotas for every requested
 * action, and registers a finalizer hook on the request that rolls back the
 * inflight bytes when the API call ultimately fails.
 *
 * @param {Request} request - request object
 * @param {BucketInfo} bucket - bucket object
 * @param {Account} account - account object
 * @param {array} apiNames - action names: operations to authorize
 * @param {string} apiMethod - the main API call
 * @param {number} inflight - inflight bytes
 * @param {boolean} isStorageReserved - Flag to check if the current quota, minus
 * the incoming bytes, are under the limit.
 * @param {Logger} log - logger
 * @param {function} callback - callback function
 * @returns {undefined} - the outcome is reported through `callback`, which is
 * called with errors.QuotaExceeded when a non-deletion action exceeds a quota
 */
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
    // Nothing to enforce when quotas are globally disabled, or when the
    // storage was reserved and no bytes are inflight.
    if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
        return callback(null);
    }
    let type;
    let bucketQuotaExceeded = false;
    let accountQuotaExceeded = false;
    let quotaEvaluationDuration;
    const requestStartTime = process.hrtime.bigint();
    const bucketQuota = bucket.getQuota();
    const accountQuota = account?.quota || 0;
    const shouldSendInflights = config.isQuotaInflightEnabled();

    // `type` is only used as a monitoring label.
    if (bucketQuota && accountQuota) {
        type = 'bucket+account';
    } else if (bucketQuota) {
        type = 'bucket';
    } else {
        type = 'account';
    }

    if (actionWithDataDeletion[apiMethod]) {
        type = 'delete';
    }

    // No quota configured, or the quota service is off: allow the request,
    // but warn if a quota exists that cannot be enforced.
    if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
        if (bucketQuota > 0 || accountQuota > 0) {
            log.warn('quota is set for a bucket, but the quota service is disabled', {
                bucketName: bucket.getName(),
            });
            monitoring.requestWithQuotaMetricsUnavailable.inc();
        }
        return callback(null);
    }

    if (isStorageReserved) {
        // NOTE(review): reserved storage zeroes the inflight bytes —
        // presumably they were already accounted for upstream; confirm.
        // eslint-disable-next-line no-param-reassign
        inflight = 0;
    }

    return async.forEach(apiNames, (apiName, done) => {
        // Object copy operations first check the target object,
        // meaning the source object, containing the current bytes,
        // is checked second. This logic handles these APIs calls by
        // ensuring the bytes are positives (i.e., not an object
        // replacement).
        if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
            // eslint-disable-next-line no-param-reassign
            inflight = Math.abs(inflight);
        } else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
            // This action does not affect storage: skip it.
            return done();
        }
        // When inflights are disabled, the sum of the current utilization metrics
        // and the current bytes are compared with the quota. The current bytes
        // are not sent to the utilization service. When inflights are enabled,
        // the sum of the current utilization metrics only are compared with the
        // quota. They include the current inflight bytes sent in the request.
        let _inflights = shouldSendInflights ? inflight : undefined;
        const inflightForCheck = shouldSendInflights ? 0 : inflight;
        return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
            inflightForCheck, apiName, log,
            (err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
                if (err) {
                    return done(err);
                }

                bucketQuotaExceeded = _bucketQuotaExceeded;
                accountQuotaExceeded = _accountQuotaExceeded;

                // Inflights are inverted: in case of cleanup, we just re-issue
                // the same API call.
                if (_inflights) {
                    _inflights = -_inflights;
                }

                // The hook runs after the API completes; the inverted
                // `_inflights` is captured by this closure.
                request.finalizerHooks.push((errorFromAPI, _done) => {
                    const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
                    const quotaCleanUpStartTime = process.hrtime.bigint();
                    // Quotas are cleaned only in case of error in the API
                    async.waterfall([
                        cb => {
                            if (errorFromAPI) {
                                return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
                                    null, apiName, log, cb);
                            }
                            return cb();
                        },
                    ], () => {
                        monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
                            Number(process.hrtime.bigint() - quotaCleanUpStartTime));
                        return _done();
                    });
                });

                return done();
            });
    }, err => {
        quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
        if (err) {
            // Metrics could not be fetched: allow the request rather than
            // blocking traffic on a quota-service outage.
            log.warn('Error getting metrics from the quota service, allowing the request', {
                error: err.name,
                description: err.message,
            });
        }
        // Deletions are never blocked by quotas (they free space).
        if (!actionWithDataDeletion[apiMethod] &&
            (bucketQuotaExceeded || accountQuotaExceeded)) {
            return callback(errors.QuotaExceeded);
        }
        return callback();
    });
}
|
|
||||||
|
|
||||||
// Public quota helpers; _evaluateQuotas and monitorQuotaEvaluationDuration
// remain module-private.
module.exports = {
    processBytesToWrite,
    isMetricStale,
    validateQuotas,
};
|
|
|
@ -1,18 +1,18 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
const constants = require('../../../constants');
|
const constants = require('../../../constants');
|
||||||
const services = require('../../services');
|
const services = require('../../services');
|
||||||
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../../utapi/utilities');
|
const { pushMetric } = require('../../utapi/utilities');
|
||||||
const monitoring = require('../../utilities/monitoringHandler');
|
const monitoring = require('../../utilities/monitoringHandler');
|
||||||
const { getLocationConstraintErrorMessage, processCurrents,
|
const { processCurrents } = require('../apiUtils/object/lifecycle');
|
||||||
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
|
|
||||||
const { config } = require('../../Config');
|
|
||||||
|
|
||||||
function handleResult(listParams, requestMaxKeys, authInfo,
|
function handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
bucketName, list, isBucketVersioned, log, callback) {
|
bucketName, list, log, callback) {
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
listParams.maxKeys = requestMaxKeys;
|
listParams.maxKeys = requestMaxKeys;
|
||||||
const res = processCurrents(bucketName, listParams, isBucketVersioned, list);
|
// eslint-disable-next-line no-param-reassign
|
||||||
|
const res = processCurrents(bucketName, listParams, list);
|
||||||
|
|
||||||
pushMetric('listLifecycleCurrents', log, { authInfo, bucket: bucketName });
|
pushMetric('listLifecycleCurrents', log, { authInfo, bucket: bucketName });
|
||||||
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleCurrents');
|
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleCurrents');
|
||||||
|
@ -21,16 +21,15 @@ function handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* listLifecycleCurrents - Return list of current versions/masters in bucket
|
* listLifecycleCurrents - Return list of current versions/masters in bucket
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
|
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
|
||||||
* requester's info
|
* requester's info
|
||||||
* @param {array} locationConstraints - array of location contraint
|
* @param {object} request - http request object
|
||||||
* @param {object} request - http request object
|
* @param {function} log - Werelogs request logger
|
||||||
* @param {function} log - Werelogs request logger
|
* @param {function} callback - callback to respond to http request
|
||||||
* @param {function} callback - callback to respond to http request
|
* with either error code or xml response body
|
||||||
* with either error code or xml response body
|
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
function listLifecycleCurrents(authInfo, locationConstraints, request, log, callback) {
|
function listLifecycleCurrents(authInfo, request, log, callback) {
|
||||||
const params = request.query;
|
const params = request.query;
|
||||||
const bucketName = request.bucketName;
|
const bucketName = request.bucketName;
|
||||||
|
|
||||||
|
@ -39,28 +38,11 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call
|
||||||
Number.parseInt(params['max-keys'], 10) : 1000;
|
Number.parseInt(params['max-keys'], 10) : 1000;
|
||||||
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
|
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, 400, 'listLifecycleCurrents');
|
'GET', bucketName, 400, 'listBucket');
|
||||||
return callback(errors.InvalidArgument);
|
return callback(errors.InvalidArgument);
|
||||||
}
|
}
|
||||||
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
|
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
|
||||||
|
|
||||||
const minEntriesToBeScanned = 1;
|
|
||||||
const { isValid, maxScannedLifecycleListingEntries } =
|
|
||||||
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
|
|
||||||
if (!isValid) {
|
|
||||||
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
const excludedDataStoreName = params['excluded-data-store-name'];
|
|
||||||
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
|
|
||||||
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
|
|
||||||
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
|
|
||||||
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
|
|
||||||
|
|
||||||
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
|
|
||||||
}
|
|
||||||
|
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
|
@ -72,12 +54,10 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call
|
||||||
maxKeys: actualMaxKeys,
|
maxKeys: actualMaxKeys,
|
||||||
prefix: params.prefix,
|
prefix: params.prefix,
|
||||||
beforeDate: params['before-date'],
|
beforeDate: params['before-date'],
|
||||||
marker: params.marker,
|
marker: params['key-marker'],
|
||||||
excludedDataStoreName,
|
|
||||||
maxScannedLifecycleListingEntries,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
|
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
|
@ -85,29 +65,25 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call
|
||||||
return callback(err, null);
|
return callback(err, null);
|
||||||
}
|
}
|
||||||
|
|
||||||
const vcfg = bucket.getVersioningConfiguration();
|
|
||||||
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
|
|
||||||
|
|
||||||
if (!requestMaxKeys) {
|
if (!requestMaxKeys) {
|
||||||
const emptyList = {
|
const emptyList = {
|
||||||
Contents: [],
|
Contents: [],
|
||||||
IsTruncated: false,
|
IsTruncated: false,
|
||||||
};
|
};
|
||||||
return handleResult(listParams, requestMaxKeys, authInfo,
|
return handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
bucketName, emptyList, isBucketVersioned, log, callback);
|
bucketName, emptyList, log, callback);
|
||||||
}
|
}
|
||||||
|
|
||||||
return services.getLifecycleListing(bucketName, listParams, log,
|
return services.getLifecycleListing(bucketName, listParams, log,
|
||||||
(err, list) => {
|
(err, list) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
|
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, err.code, 'listLifecycleCurrents');
|
'GET', bucketName, err.code, 'listLifecycleCurrents');
|
||||||
return callback(err, null);
|
return callback(err, null);
|
||||||
}
|
}
|
||||||
|
|
||||||
return handleResult(listParams, requestMaxKeys, authInfo,
|
return handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
bucketName, list, isBucketVersioned, log, callback);
|
bucketName, list, log, callback);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,18 +1,17 @@
|
||||||
const { errors, versioning } = require('arsenal');
|
const { errors, versioning } = require('arsenal');
|
||||||
const constants = require('../../../constants');
|
const constants = require('../../../constants');
|
||||||
const services = require('../../services');
|
const services = require('../../services');
|
||||||
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../../utapi/utilities');
|
const { pushMetric } = require('../../utapi/utilities');
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
const monitoring = require('../../utilities/monitoringHandler');
|
const monitoring = require('../../utilities/monitoringHandler');
|
||||||
const { getLocationConstraintErrorMessage, processNonCurrents,
|
const { processNonCurrents } = require('../apiUtils/object/lifecycle');
|
||||||
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
|
|
||||||
const { config } = require('../../Config');
|
|
||||||
|
|
||||||
function handleResult(listParams, requestMaxKeys, authInfo,
|
function handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
bucketName, list, log, callback) {
|
bucketName, list, log, callback) {
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
listParams.maxKeys = requestMaxKeys;
|
listParams.maxKeys = requestMaxKeys;
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
const res = processNonCurrents(bucketName, listParams, list);
|
const res = processNonCurrents(bucketName, listParams, list);
|
||||||
|
|
||||||
pushMetric('listLifecycleNonCurrents', log, { authInfo, bucket: bucketName });
|
pushMetric('listLifecycleNonCurrents', log, { authInfo, bucket: bucketName });
|
||||||
|
@ -22,16 +21,15 @@ function handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* listLifecycleNonCurrents - Return list of non-current versions in bucket
|
* listLifecycleNonCurrents - Return list of non-current versions in bucket
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
|
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
|
||||||
* requester's info
|
* requester's info
|
||||||
* @param {array} locationConstraints - array of location contraint
|
* @param {object} request - http request object
|
||||||
* @param {object} request - http request object
|
* @param {function} log - Werelogs request logger
|
||||||
* @param {function} log - Werelogs request logger
|
* @param {function} callback - callback to respond to http request
|
||||||
* @param {function} callback - callback to respond to http request
|
* with either error code or xml response body
|
||||||
* with either error code or xml response body
|
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, callback) {
|
function listLifecycleNonCurrents(authInfo, request, log, callback) {
|
||||||
const params = request.query;
|
const params = request.query;
|
||||||
const bucketName = request.bucketName;
|
const bucketName = request.bucketName;
|
||||||
|
|
||||||
|
@ -40,30 +38,11 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c
|
||||||
Number.parseInt(params['max-keys'], 10) : 1000;
|
Number.parseInt(params['max-keys'], 10) : 1000;
|
||||||
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
|
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, 400, 'listLifecycleNonCurrents');
|
'GET', bucketName, 400, 'listBucket');
|
||||||
return callback(errors.InvalidArgument);
|
return callback(errors.InvalidArgument);
|
||||||
}
|
}
|
||||||
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
|
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
|
||||||
|
|
||||||
// 3 is required as a minimum because we must scan at least three entries to determine version eligibility.
|
|
||||||
// Two entries representing the master key and the following one representing the non-current version.
|
|
||||||
const minEntriesToBeScanned = 3;
|
|
||||||
const { isValid, maxScannedLifecycleListingEntries } =
|
|
||||||
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
|
|
||||||
if (!isValid) {
|
|
||||||
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
const excludedDataStoreName = params['excluded-data-store-name'];
|
|
||||||
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
|
|
||||||
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
|
|
||||||
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
|
|
||||||
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
|
|
||||||
|
|
||||||
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
|
|
||||||
}
|
|
||||||
|
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
|
@ -76,16 +55,14 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c
|
||||||
prefix: params.prefix,
|
prefix: params.prefix,
|
||||||
beforeDate: params['before-date'],
|
beforeDate: params['before-date'],
|
||||||
keyMarker: params['key-marker'],
|
keyMarker: params['key-marker'],
|
||||||
excludedDataStoreName,
|
|
||||||
maxScannedLifecycleListingEntries,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
listParams.versionIdMarker = params['version-id-marker'] ?
|
listParams.versionIdMarker = params['version-id-marker'] ?
|
||||||
versionIdUtils.decode(params['version-id-marker']) : undefined;
|
versionIdUtils.decode(params['version-id-marker']) : undefined;
|
||||||
|
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
|
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
|
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
|
||||||
return callback(err, null);
|
return callback(err, null);
|
||||||
|
|
|
@ -1,78 +1,66 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
const constants = require('../../../constants');
|
const constants = require('../../../constants');
|
||||||
const services = require('../../services');
|
const services = require('../../services');
|
||||||
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../../utapi/utilities');
|
const { pushMetric } = require('../../utapi/utilities');
|
||||||
const monitoring = require('../../utilities/monitoringHandler');
|
const monitoring = require('../../utilities/monitoringHandler');
|
||||||
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
|
const { processOrphans } = require('../apiUtils/object/lifecycle');
|
||||||
const { config } = require('../../Config');
|
|
||||||
|
|
||||||
function handleResult(listParams, requestMaxKeys, authInfo,
|
function handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
bucketName, list, log, callback) {
|
bucketName, list, log, callback) {
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
listParams.maxKeys = requestMaxKeys;
|
listParams.maxKeys = requestMaxKeys;
|
||||||
|
// eslint-disable-next-line no-param-reassign
|
||||||
const res = processOrphans(bucketName, listParams, list);
|
const res = processOrphans(bucketName, listParams, list);
|
||||||
|
|
||||||
pushMetric('listLifecycleOrphanDeleteMarkers', log, { authInfo, bucket: bucketName });
|
pushMetric('listLifecycleOrphans', log, { authInfo, bucket: bucketName });
|
||||||
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleOrphanDeleteMarkers');
|
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleOrphans');
|
||||||
return callback(null, res);
|
return callback(null, res);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* listLifecycleOrphanDeleteMarkers - Return list of expired object delete marker in bucket
|
* listLifecycleOrphans - Return list of expired object delete marker in bucket
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
|
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
|
||||||
* requester's info
|
* requester's info
|
||||||
* @param {array} locationConstraints - array of location contraint
|
* @param {object} request - http request object
|
||||||
* @param {object} request - http request object
|
* @param {function} log - Werelogs request logger
|
||||||
* @param {function} log - Werelogs request logger
|
* @param {function} callback - callback to respond to http request
|
||||||
* @param {function} callback - callback to respond to http request
|
* with either error code or xml response body
|
||||||
* with either error code or xml response body
|
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request, log, callback) {
|
function listLifecycleOrphans(authInfo, request, log, callback) {
|
||||||
const params = request.query;
|
const params = request.query;
|
||||||
const bucketName = request.bucketName;
|
const bucketName = request.bucketName;
|
||||||
|
|
||||||
log.debug('processing request', { method: 'listLifecycleOrphanDeleteMarkers' });
|
log.debug('processing request', { method: 'listLifecycleOrphans' });
|
||||||
const requestMaxKeys = params['max-keys'] ?
|
const requestMaxKeys = params['max-keys'] ?
|
||||||
Number.parseInt(params['max-keys'], 10) : 1000;
|
Number.parseInt(params['max-keys'], 10) : 1000;
|
||||||
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
|
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
|
'GET', bucketName, 400, 'listBucket');
|
||||||
return callback(errors.InvalidArgument);
|
return callback(errors.InvalidArgument);
|
||||||
}
|
}
|
||||||
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
|
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
|
||||||
|
|
||||||
// 3 is required as a minimum because we must scan at least three entries to determine version eligibility.
|
|
||||||
// Two entries representing the master key and the following one representing the non-current version.
|
|
||||||
const minEntriesToBeScanned = 3;
|
|
||||||
const { isValid, maxScannedLifecycleListingEntries } =
|
|
||||||
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
|
|
||||||
if (!isValid) {
|
|
||||||
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: 'listLifecycleOrphanDeleteMarkers',
|
requestType: 'listLifecycleOrphans',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const listParams = {
|
const listParams = {
|
||||||
listingType: 'DelimiterOrphanDeleteMarker',
|
listingType: 'DelimiterOrphan',
|
||||||
maxKeys: actualMaxKeys,
|
maxKeys: actualMaxKeys,
|
||||||
prefix: params.prefix,
|
prefix: params.prefix,
|
||||||
beforeDate: params['before-date'],
|
beforeDate: params['before-date'],
|
||||||
marker: params.marker,
|
keyMarker: params['key-marker'],
|
||||||
maxScannedLifecycleListingEntries,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
|
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
|
'GET', bucketName, err.code, 'listLifecycleOrphans');
|
||||||
return callback(err, null);
|
return callback(err, null);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -98,7 +86,7 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', { error: err });
|
log.debug('error processing request', { error: err });
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
|
'GET', bucketName, err.code, 'listLifecycleOrphans');
|
||||||
return callback(err, null);
|
return callback(err, null);
|
||||||
}
|
}
|
||||||
return handleResult(listParams, requestMaxKeys, authInfo,
|
return handleResult(listParams, requestMaxKeys, authInfo,
|
||||||
|
@ -108,5 +96,5 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request
|
||||||
}
|
}
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
listLifecycleOrphanDeleteMarkers,
|
listLifecycleOrphans,
|
||||||
};
|
};
|
|
@ -2,7 +2,7 @@ const { errors } = require('arsenal');
|
||||||
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
|
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
|
||||||
|
@ -34,7 +34,7 @@ function bucketDelete(authInfo, request, log, cb) {
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
return metadataValidateBucket(metadataValParams, log,
|
||||||
(err, bucketMD) => {
|
(err, bucketMD) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucketMD);
|
request.method, bucketMD);
|
||||||
|
@ -48,7 +48,7 @@ function bucketDelete(authInfo, request, log, cb) {
|
||||||
log.trace('passed checks',
|
log.trace('passed checks',
|
||||||
{ method: 'metadataValidateBucket' });
|
{ method: 'metadataValidateBucket' });
|
||||||
return deleteBucket(authInfo, bucketMD, bucketName,
|
return deleteBucket(authInfo, bucketMD, bucketName,
|
||||||
authInfo.getCanonicalID(), request, log, err => {
|
authInfo.getCanonicalID(), log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'DELETE', bucketName, err.code, 'deleteBucket');
|
'DELETE', bucketName, err.code, 'deleteBucket');
|
||||||
|
|
|
@ -38,8 +38,7 @@ function bucketDeleteCors(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
log.trace('found bucket in metadata');
|
log.trace('found bucket in metadata');
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
||||||
authInfo, log, request, request.actionImplicitDenies)) {
|
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketDeleteCors',
|
method: 'bucketDeleteCors',
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
const async = require('async');
|
const async = require('async');
|
||||||
|
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
||||||
|
@ -21,12 +21,12 @@ function bucketDeleteEncryption(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketDeleteEncryption',
|
requestType: 'bucketDeleteEncryption',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
|
next => metadataValidateBucket(metadataValParams, log, next),
|
||||||
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
||||||
(bucket, next) => {
|
(bucket, next) => {
|
||||||
const sseConfig = bucket.getServerSideEncryption();
|
const sseConfig = bucket.getServerSideEncryption();
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -18,10 +18,10 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketDeleteLifecycle',
|
requestType: 'bucketDeleteLifecycle',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -16,10 +16,10 @@ function bucketDeletePolicy(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketDeletePolicy',
|
requestType: 'bucketDeletePolicy',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,58 +0,0 @@
|
||||||
const { waterfall } = require('async');
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
|
||||||
const metadata = require('../metadata/wrapper');
|
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
|
||||||
|
|
||||||
const requestType = 'bucketDeleteQuota';
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Bucket Update Quota - Update bucket quota
|
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
|
||||||
* @param {object} request - http request object
|
|
||||||
* @param {object} log - Werelogs logger
|
|
||||||
* @param {function} callback - callback to server
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
function bucketDeleteQuota(authInfo, request, log, callback) {
|
|
||||||
log.debug('processing request', { method: 'bucketDeleteQuota' });
|
|
||||||
|
|
||||||
const { bucketName } = request;
|
|
||||||
const metadataValParams = {
|
|
||||||
authInfo,
|
|
||||||
bucketName,
|
|
||||||
requestType: request.apiMethods || requestType,
|
|
||||||
request,
|
|
||||||
};
|
|
||||||
return waterfall([
|
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
|
||||||
(err, bucket) => next(err, bucket)),
|
|
||||||
(bucket, next) => {
|
|
||||||
bucket.setQuota(0);
|
|
||||||
metadata.updateBucket(bucket.getName(), bucket, log, err =>
|
|
||||||
next(err, bucket));
|
|
||||||
},
|
|
||||||
], (err, bucket) => {
|
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
|
||||||
request.method, bucket);
|
|
||||||
if (err) {
|
|
||||||
log.debug('error processing request', {
|
|
||||||
error: err,
|
|
||||||
method: 'bucketDeleteQuota'
|
|
||||||
});
|
|
||||||
monitoring.promMetrics('DELETE', bucketName, err.code,
|
|
||||||
'bucketDeleteQuota');
|
|
||||||
return callback(err, err.code, corsHeaders);
|
|
||||||
}
|
|
||||||
monitoring.promMetrics(
|
|
||||||
'DELETE', bucketName, '204', 'bucketDeleteQuota');
|
|
||||||
pushMetric('bucketDeleteQuota', log, {
|
|
||||||
authInfo,
|
|
||||||
bucket: bucketName,
|
|
||||||
});
|
|
||||||
return callback(null, 204, corsHeaders);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = bucketDeleteQuota;
|
|
|
@ -1,5 +1,5 @@
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -18,10 +18,10 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketDeleteReplication',
|
requestType: 'bucketDeleteReplication',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
const { waterfall } = require('async');
|
const { waterfall } = require('async');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
|
@ -20,20 +20,16 @@ function bucketDeleteTagging(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketDeleteTagging',
|
requestType: 'bucketDeleteTagging',
|
||||||
request,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let bucket = null;
|
let bucket = null;
|
||||||
return waterfall([
|
return waterfall([
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucket(metadataValParams, log,
|
||||||
(err, b) => {
|
(err, b) => {
|
||||||
if (err) {
|
|
||||||
return next(err);
|
|
||||||
}
|
|
||||||
bucket = b;
|
bucket = b;
|
||||||
bucket.setTags([]);
|
bucket.setTags([]);
|
||||||
return next();
|
return next(err);
|
||||||
}),
|
}),
|
||||||
next => metadata.updateBucket(bucket.getName(), bucket, log, next),
|
next => metadata.updateBucket(bucket.getName(), bucket, log, next),
|
||||||
], err => {
|
], err => {
|
||||||
|
|
|
@ -30,8 +30,7 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
log.trace('found bucket in metadata');
|
log.trace('found bucket in metadata');
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
||||||
authInfo, log, request, request.actionImplicitDenies)) {
|
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketDeleteWebsite',
|
method: 'bucketDeleteWebsite',
|
||||||
|
|
|
@ -2,7 +2,7 @@ const querystring = require('querystring');
|
||||||
const { errors, versioning, s3middleware } = require('arsenal');
|
const { errors, versioning, s3middleware } = require('arsenal');
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const escapeForXml = s3middleware.escapeForXml;
|
const escapeForXml = s3middleware.escapeForXml;
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
@ -322,7 +322,7 @@ function bucketGet(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGet',
|
requestType: 'bucketGet',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const listParams = {
|
const listParams = {
|
||||||
|
@ -345,7 +345,7 @@ function bucketGet(authInfo, request, log, callback) {
|
||||||
listParams.marker = params.marker;
|
listParams.marker = params.marker;
|
||||||
}
|
}
|
||||||
|
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const aclUtils = require('../utilities/aclUtils');
|
const aclUtils = require('../utilities/aclUtils');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const vault = require('../auth/vault');
|
const vault = require('../auth/vault');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
@ -44,7 +44,7 @@ function bucketGetACL(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetACL',
|
requestType: 'bucketGetACL',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const grantInfo = {
|
const grantInfo = {
|
||||||
|
@ -55,7 +55,7 @@ function bucketGetACL(authInfo, request, log, callback) {
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -39,8 +39,7 @@ function bucketGetCors(authInfo, request, log, callback) {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
||||||
authInfo, log, request, request.actionImplicitDenies)) {
|
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketGetCors',
|
method: 'bucketGetCors',
|
||||||
|
|
|
@ -4,7 +4,7 @@ const async = require('async');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const escapeForXml = s3middleware.escapeForXml;
|
const escapeForXml = s3middleware.escapeForXml;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -22,12 +22,12 @@ function bucketGetEncryption(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetEncryption',
|
requestType: 'bucketGetEncryption',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
|
next => metadataValidateBucket(metadataValParams, log, next),
|
||||||
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
||||||
(bucket, next) => {
|
(bucket, next) => {
|
||||||
// If sseInfo is present but the `mandatory` flag is not set
|
// If sseInfo is present but the `mandatory` flag is not set
|
||||||
|
|
|
@ -2,7 +2,7 @@ const { errors } = require('arsenal');
|
||||||
const LifecycleConfiguration =
|
const LifecycleConfiguration =
|
||||||
require('arsenal').models.LifecycleConfiguration;
|
require('arsenal').models.LifecycleConfiguration;
|
||||||
|
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -21,10 +21,10 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetLifecycle',
|
requestType: 'bucketGetLifecycle',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -41,8 +41,7 @@ function bucketGetLocation(authInfo, request, log, callback) {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
|
|
||||||
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
||||||
authInfo, log, request, request.actionImplicitDenies)) {
|
|
||||||
log.debug('access denied for account on bucket', {
|
log.debug('access denied for account on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketGetLocation',
|
method: 'bucketGetLocation',
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { NotificationConfiguration } = require('arsenal').models;
|
const { NotificationConfiguration } = require('arsenal').models;
|
||||||
|
@ -37,11 +37,11 @@ function bucketGetNotification(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetNotification',
|
requestType: 'bucketGetNotification',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const ObjectLockConfiguration =
|
const ObjectLockConfiguration =
|
||||||
|
@ -33,10 +33,10 @@ function bucketGetObjectLock(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetObjectLock',
|
requestType: 'bucketGetObjectLock',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -17,11 +17,11 @@ function bucketGetPolicy(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetPolicy',
|
requestType: 'bucketGetPolicy',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,58 +0,0 @@
|
||||||
const { errors } = require('arsenal');
|
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* bucketGetQuota - Get the bucket quota
|
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
|
||||||
* @param {object} request - http request object
|
|
||||||
* @param {object} log - Werelogs logger
|
|
||||||
* @param {function} callback - callback to server
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
function bucketGetQuota(authInfo, request, log, callback) {
|
|
||||||
log.debug('processing request', { method: 'bucketGetQuota' });
|
|
||||||
const { bucketName, headers, method } = request;
|
|
||||||
const metadataValParams = {
|
|
||||||
authInfo,
|
|
||||||
bucketName,
|
|
||||||
requestType: request.apiMethods || 'bucketGetQuota',
|
|
||||||
request,
|
|
||||||
};
|
|
||||||
const xml = [];
|
|
||||||
|
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
|
||||||
if (err) {
|
|
||||||
log.debug('error processing request', {
|
|
||||||
error: err,
|
|
||||||
method: 'bucketGetQuota',
|
|
||||||
});
|
|
||||||
return callback(err, null, corsHeaders);
|
|
||||||
}
|
|
||||||
xml.push(
|
|
||||||
'<?xml version="1.0" encoding="UTF-8"?>',
|
|
||||||
'<GetBucketQuota>',
|
|
||||||
'<Name>', bucket.getName(), '</Name>',
|
|
||||||
);
|
|
||||||
const bucketQuota = bucket.getQuota();
|
|
||||||
if (!bucketQuota) {
|
|
||||||
log.debug('bucket has no quota', {
|
|
||||||
method: 'bucketGetQuota',
|
|
||||||
});
|
|
||||||
return callback(errors.NoSuchQuota, null,
|
|
||||||
corsHeaders);
|
|
||||||
}
|
|
||||||
xml.push('<Quota>', bucketQuota, '</Quota>',
|
|
||||||
'</GetBucketQuota>');
|
|
||||||
|
|
||||||
pushMetric('getBucketQuota', log, {
|
|
||||||
authInfo,
|
|
||||||
bucket: bucketName,
|
|
||||||
});
|
|
||||||
return callback(null, xml.join(''), corsHeaders);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = bucketGetQuota;
|
|
|
@ -1,6 +1,6 @@
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const { getReplicationConfigurationXML } =
|
const { getReplicationConfigurationXML } =
|
||||||
require('./apiUtils/bucket/getReplicationConfiguration');
|
require('./apiUtils/bucket/getReplicationConfiguration');
|
||||||
|
@ -21,10 +21,10 @@ function bucketGetReplication(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetReplication',
|
requestType: 'bucketGetReplication',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing request', {
|
log.debug('error processing request', {
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
@ -37,7 +37,7 @@ const escapeForXml = s3middleware.escapeForXml;
|
||||||
function tagsToXml(tags) {
|
function tagsToXml(tags) {
|
||||||
const xml = [];
|
const xml = [];
|
||||||
|
|
||||||
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Tagging> <TagSet>');
|
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Tagging><TagSet>');
|
||||||
|
|
||||||
tags.forEach(tag => {
|
tags.forEach(tag => {
|
||||||
xml.push('<Tag>');
|
xml.push('<Tag>');
|
||||||
|
@ -46,7 +46,7 @@ function tagsToXml(tags) {
|
||||||
xml.push('</Tag>');
|
xml.push('</Tag>');
|
||||||
});
|
});
|
||||||
|
|
||||||
xml.push('</TagSet> </Tagging>');
|
xml.push('</TagSet></Tagging>');
|
||||||
|
|
||||||
return xml.join('');
|
return xml.join('');
|
||||||
}
|
}
|
||||||
|
@ -67,7 +67,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetTagging',
|
requestType: 'bucketGetTagging',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
let bucket = null;
|
let bucket = null;
|
||||||
|
@ -75,7 +75,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
|
||||||
let tags = null;
|
let tags = null;
|
||||||
|
|
||||||
return waterfall([
|
return waterfall([
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucket(metadataValParams, log,
|
||||||
(err, b) => {
|
(err, b) => {
|
||||||
bucket = b;
|
bucket = b;
|
||||||
return next(err);
|
return next(err);
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -54,11 +54,11 @@ function bucketGetVersioning(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketGetVersioning',
|
requestType: 'bucketGetVersioning',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -39,8 +39,7 @@ function bucketGetWebsite(authInfo, request, log, callback) {
|
||||||
|
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
||||||
authInfo, log, request, request.actionImplicitDenies)) {
|
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketGetWebsite',
|
method: 'bucketGetWebsite',
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
|
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -19,10 +19,10 @@ function bucketHead(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketHead',
|
requestType: 'bucketHead',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -45,8 +45,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
|
||||||
} else if (parsedHost && restEndpoints[parsedHost]) {
|
} else if (parsedHost && restEndpoints[parsedHost]) {
|
||||||
locationConstraintChecked = restEndpoints[parsedHost];
|
locationConstraintChecked = restEndpoints[parsedHost];
|
||||||
} else {
|
} else {
|
||||||
locationConstraintChecked = Object.keys(locationConstrains)[0];
|
log.trace('no location constraint provided on bucket put;' +
|
||||||
log.trace('no location constraint provided on bucket put; setting '+locationConstraintChecked);
|
'setting us-east-1');
|
||||||
|
locationConstraintChecked = 'us-east-1';
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!locationConstraints[locationConstraintChecked]) {
|
if (!locationConstraints[locationConstraintChecked]) {
|
||||||
|
|
|
@ -6,7 +6,7 @@ const aclUtils = require('../utilities/aclUtils');
|
||||||
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const vault = require('../auth/vault');
|
const vault = require('../auth/vault');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -44,7 +44,7 @@ const monitoring = require('../utilities/monitoringHandler');
|
||||||
function bucketPutACL(authInfo, request, log, callback) {
|
function bucketPutACL(authInfo, request, log, callback) {
|
||||||
log.debug('processing request', { method: 'bucketPutACL' });
|
log.debug('processing request', { method: 'bucketPutACL' });
|
||||||
|
|
||||||
const { bucketName } = request;
|
const bucketName = request.bucketName;
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
const newCannedACL = request.headers['x-amz-acl'];
|
const newCannedACL = request.headers['x-amz-acl'];
|
||||||
const possibleCannedACL = [
|
const possibleCannedACL = [
|
||||||
|
@ -54,6 +54,19 @@ function bucketPutACL(authInfo, request, log, callback) {
|
||||||
'authenticated-read',
|
'authenticated-read',
|
||||||
'log-delivery-write',
|
'log-delivery-write',
|
||||||
];
|
];
|
||||||
|
if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) {
|
||||||
|
log.trace('invalid canned acl argument', {
|
||||||
|
acl: newCannedACL,
|
||||||
|
method: 'bucketPutACL',
|
||||||
|
});
|
||||||
|
monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
|
||||||
|
return callback(errors.InvalidArgument);
|
||||||
|
}
|
||||||
|
if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
|
||||||
|
log.trace('invalid acl header');
|
||||||
|
monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
|
||||||
|
return callback(errors.InvalidArgument);
|
||||||
|
}
|
||||||
const possibleGroups = [constants.allAuthedUsersId,
|
const possibleGroups = [constants.allAuthedUsersId,
|
||||||
constants.publicId,
|
constants.publicId,
|
||||||
constants.logId,
|
constants.logId,
|
||||||
|
@ -61,7 +74,7 @@ function bucketPutACL(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutACL',
|
requestType: 'bucketPutACL',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const possibleGrants = ['FULL_CONTROL', 'WRITE',
|
const possibleGrants = ['FULL_CONTROL', 'WRITE',
|
||||||
|
@ -92,7 +105,7 @@ function bucketPutACL(authInfo, request, log, callback) {
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
function waterfall1(next) {
|
function waterfall1(next) {
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
metadataValidateBucket(metadataValParams, log,
|
||||||
(err, bucket) => {
|
(err, bucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('request authorization failed', {
|
log.trace('request authorization failed', {
|
||||||
|
@ -101,18 +114,6 @@ function bucketPutACL(authInfo, request, log, callback) {
|
||||||
});
|
});
|
||||||
return next(err, bucket);
|
return next(err, bucket);
|
||||||
}
|
}
|
||||||
// if the API call is allowed, ensure that the parameters are valid
|
|
||||||
if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) {
|
|
||||||
log.trace('invalid canned acl argument', {
|
|
||||||
acl: newCannedACL,
|
|
||||||
method: 'bucketPutACL',
|
|
||||||
});
|
|
||||||
return next(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
|
|
||||||
log.trace('invalid acl header');
|
|
||||||
return next(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
return next(null, bucket);
|
return next(null, bucket);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
|
|
|
@ -23,7 +23,7 @@ const requestType = 'bucketPutCors';
|
||||||
*/
|
*/
|
||||||
function bucketPutCors(authInfo, request, log, callback) {
|
function bucketPutCors(authInfo, request, log, callback) {
|
||||||
log.debug('processing request', { method: 'bucketPutCors' });
|
log.debug('processing request', { method: 'bucketPutCors' });
|
||||||
const { bucketName } = request;
|
const bucketName = request.bucketName;
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
|
|
||||||
if (!request.post) {
|
if (!request.post) {
|
||||||
|
@ -70,8 +70,7 @@ function bucketPutCors(authInfo, request, log, callback) {
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function validateBucketAuthorization(bucket, rules, corsHeaders, next) {
|
function validateBucketAuthorization(bucket, rules, corsHeaders, next) {
|
||||||
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
||||||
authInfo, log, request, request.actionImplicitDenies)) {
|
|
||||||
log.debug('access denied for account on bucket', {
|
log.debug('access denied for account on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
});
|
});
|
||||||
|
|
|
@ -3,7 +3,7 @@ const async = require('async');
|
||||||
const { parseEncryptionXml } = require('./apiUtils/bucket/bucketEncryption');
|
const { parseEncryptionXml } = require('./apiUtils/bucket/bucketEncryption');
|
||||||
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const kms = require('../kms/wrapper');
|
const kms = require('../kms/wrapper');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
@ -18,17 +18,17 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
*/
|
*/
|
||||||
|
|
||||||
function bucketPutEncryption(authInfo, request, log, callback) {
|
function bucketPutEncryption(authInfo, request, log, callback) {
|
||||||
const { bucketName } = request;
|
const bucketName = request.bucketName;
|
||||||
|
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutEncryption',
|
requestType: 'bucketPutEncryption',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
|
next => metadataValidateBucket(metadataValParams, log, next),
|
||||||
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
|
||||||
(bucket, next) => {
|
(bucket, next) => {
|
||||||
log.trace('parsing encryption config', { method: 'bucketPutEncryption' });
|
log.trace('parsing encryption config', { method: 'bucketPutEncryption' });
|
||||||
|
|
|
@ -7,7 +7,7 @@ const config = require('../Config').config;
|
||||||
const parseXML = require('../utilities/parseXML');
|
const parseXML = require('../utilities/parseXML');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
|
||||||
|
@ -23,11 +23,11 @@ const monitoring = require('../utilities/monitoringHandler');
|
||||||
function bucketPutLifecycle(authInfo, request, log, callback) {
|
function bucketPutLifecycle(authInfo, request, log, callback) {
|
||||||
log.debug('processing request', { method: 'bucketPutLifecycle' });
|
log.debug('processing request', { method: 'bucketPutLifecycle' });
|
||||||
|
|
||||||
const { bucketName } = request;
|
const bucketName = request.bucketName;
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutLifecycle',
|
requestType: 'bucketPutLifecycle',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return waterfall([
|
return waterfall([
|
||||||
|
@ -45,7 +45,7 @@ function bucketPutLifecycle(authInfo, request, log, callback) {
|
||||||
return next(null, configObj);
|
return next(null, configObj);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
(lcConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
(lcConfig, next) => metadataValidateBucket(metadataValParams, log,
|
||||||
(err, bucket) => {
|
(err, bucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, bucket);
|
return next(err, bucket);
|
||||||
|
|
|
@ -4,7 +4,7 @@ const parseXML = require('../utilities/parseXML');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const getNotificationConfiguration = require('./apiUtils/bucket/getNotificationConfiguration');
|
const getNotificationConfiguration = require('./apiUtils/bucket/getNotificationConfiguration');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -19,11 +19,11 @@ const { pushMetric } = require('../utapi/utilities');
|
||||||
function bucketPutNotification(authInfo, request, log, callback) {
|
function bucketPutNotification(authInfo, request, log, callback) {
|
||||||
log.debug('processing request', { method: 'bucketPutNotification' });
|
log.debug('processing request', { method: 'bucketPutNotification' });
|
||||||
|
|
||||||
const { bucketName } = request;
|
const bucketName = request.bucketName;
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutNotification',
|
requestType: 'bucketPutNotification',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -34,7 +34,7 @@ function bucketPutNotification(authInfo, request, log, callback) {
|
||||||
const notifConfig = notificationConfig.error ? undefined : notificationConfig;
|
const notifConfig = notificationConfig.error ? undefined : notificationConfig;
|
||||||
process.nextTick(() => next(notificationConfig.error, notifConfig));
|
process.nextTick(() => next(notificationConfig.error, notifConfig));
|
||||||
},
|
},
|
||||||
(notifConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
(notifConfig, next) => metadataValidateBucket(metadataValParams, log,
|
||||||
(err, bucket) => next(err, bucket, notifConfig)),
|
(err, bucket) => next(err, bucket, notifConfig)),
|
||||||
(bucket, notifConfig, next) => {
|
(bucket, notifConfig, next) => {
|
||||||
bucket.setNotificationConfiguration(notifConfig);
|
bucket.setNotificationConfiguration(notifConfig);
|
||||||
|
|
|
@ -1,13 +1,13 @@
|
||||||
const { waterfall } = require('async');
|
const { waterfall } = require('async');
|
||||||
const arsenal = require('arsenal');
|
const arsenal = require('arsenal');
|
||||||
|
|
||||||
const { errors } = arsenal;
|
const errors = arsenal.errors;
|
||||||
const { models: { ObjectLockConfiguration } } = arsenal;
|
const ObjectLockConfiguration = arsenal.models.ObjectLockConfiguration;
|
||||||
|
|
||||||
const parseXML = require('../utilities/parseXML');
|
const parseXML = require('../utilities/parseXML');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -26,7 +26,7 @@ function bucketPutObjectLock(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutObjectLock',
|
requestType: 'bucketPutObjectLock',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return waterfall([
|
return waterfall([
|
||||||
|
@ -41,7 +41,7 @@ function bucketPutObjectLock(authInfo, request, log, callback) {
|
||||||
return next(configObj.error || null, configObj);
|
return next(configObj.error || null, configObj);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
(objectLockConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies,
|
(objectLockConfig, next) => metadataValidateBucket(metadataValParams,
|
||||||
log, (err, bucket) => {
|
log, (err, bucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, bucket);
|
return next(err, bucket);
|
||||||
|
|
|
@ -1,9 +1,10 @@
|
||||||
const async = require('async');
|
const async = require('async');
|
||||||
const { errors, models } = require('arsenal');
|
const { errors, models } = require('arsenal');
|
||||||
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { validatePolicyResource, validatePolicyConditions } =
|
const { validatePolicyResource } =
|
||||||
require('./apiUtils/authorization/permissionChecks');
|
require('./apiUtils/authorization/permissionChecks');
|
||||||
const { BucketPolicy } = models;
|
const { BucketPolicy } = models;
|
||||||
|
|
||||||
|
@ -16,8 +17,9 @@ const { BucketPolicy } = models;
|
||||||
function _checkNotImplementedPolicy(policyString) {
|
function _checkNotImplementedPolicy(policyString) {
|
||||||
// bucket names and key names cannot include "", so including those
|
// bucket names and key names cannot include "", so including those
|
||||||
// isolates not implemented keys
|
// isolates not implemented keys
|
||||||
return policyString.includes('"Service"')
|
return policyString.includes('"Condition"')
|
||||||
|| policyString.includes('"Federated"');
|
|| policyString.includes('"Service"')
|
||||||
|
|| policyString.includes('"Federated"');
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -35,7 +37,7 @@ function bucketPutPolicy(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutPolicy',
|
requestType: 'bucketPutPolicy',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -65,10 +67,10 @@ function bucketPutPolicy(authInfo, request, log, callback) {
|
||||||
return next(errors.MalformedPolicy.customizeDescription(
|
return next(errors.MalformedPolicy.customizeDescription(
|
||||||
'Policy has invalid resource'));
|
'Policy has invalid resource'));
|
||||||
}
|
}
|
||||||
return next(validatePolicyConditions(bucketPolicy), bucketPolicy);
|
return next(null, bucketPolicy);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
(bucketPolicy, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
(bucketPolicy, next) => metadataValidateBucket(metadataValParams, log,
|
||||||
(err, bucket) => {
|
(err, bucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, bucket);
|
return next(err, bucket);
|
||||||
|
|
|
@ -2,7 +2,7 @@ const { waterfall } = require('async');
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const { getReplicationConfiguration } =
|
const { getReplicationConfiguration } =
|
||||||
require('./apiUtils/bucket/getReplicationConfiguration');
|
require('./apiUtils/bucket/getReplicationConfiguration');
|
||||||
|
@ -30,7 +30,7 @@ function bucketPutReplication(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutReplication',
|
requestType: 'bucketPutReplication',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return waterfall([
|
return waterfall([
|
||||||
|
@ -39,7 +39,7 @@ function bucketPutReplication(authInfo, request, log, callback) {
|
||||||
// Check bucket user privileges and ensure versioning is 'Enabled'.
|
// Check bucket user privileges and ensure versioning is 'Enabled'.
|
||||||
(config, next) =>
|
(config, next) =>
|
||||||
// TODO: Validate that destination bucket exists and has versioning.
|
// TODO: Validate that destination bucket exists and has versioning.
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,7 +3,7 @@ const { s3middleware } = require('arsenal');
|
||||||
|
|
||||||
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
|
||||||
|
@ -38,12 +38,11 @@ function bucketPutTagging(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutTagging',
|
requestType: 'bucketPutTagging',
|
||||||
request,
|
|
||||||
};
|
};
|
||||||
let bucket = null;
|
let bucket = null;
|
||||||
return waterfall([
|
return waterfall([
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucket(metadataValParams, log,
|
||||||
(err, b) => {
|
(err, b) => {
|
||||||
bucket = b;
|
bucket = b;
|
||||||
return next(err);
|
return next(err);
|
||||||
|
|
|
@ -4,7 +4,7 @@ const { errors } = require('arsenal');
|
||||||
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const versioningNotImplBackends =
|
const versioningNotImplBackends =
|
||||||
require('../../constants').versioningNotImplBackends;
|
require('../../constants').versioningNotImplBackends;
|
||||||
|
@ -119,12 +119,12 @@ function bucketPutVersioning(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'bucketPutVersioning',
|
requestType: 'bucketPutVersioning',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
return waterfall([
|
return waterfall([
|
||||||
next => _parseXML(request, log, next),
|
next => _parseXML(request, log, next),
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucket(metadataValParams, log,
|
||||||
(err, bucket) => next(err, bucket)), // ignore extra null object,
|
(err, bucket) => next(err, bucket)), // ignore extra null object,
|
||||||
(bucket, next) => parseString(request.post, (err, result) => {
|
(bucket, next) => parseString(request.post, (err, result) => {
|
||||||
// just for linting; there should not be any parsing error here
|
// just for linting; there should not be any parsing error here
|
||||||
|
|
|
@ -22,7 +22,7 @@ const requestType = 'bucketPutWebsite';
|
||||||
*/
|
*/
|
||||||
function bucketPutWebsite(authInfo, request, log, callback) {
|
function bucketPutWebsite(authInfo, request, log, callback) {
|
||||||
log.debug('processing request', { method: 'bucketPutWebsite' });
|
log.debug('processing request', { method: 'bucketPutWebsite' });
|
||||||
const { bucketName } = request;
|
const bucketName = request.bucketName;
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
|
|
||||||
if (!request.post) {
|
if (!request.post) {
|
||||||
|
@ -49,8 +49,7 @@ function bucketPutWebsite(authInfo, request, log, callback) {
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function validateBucketAuthorization(bucket, config, next) {
|
function validateBucketAuthorization(bucket, config, next) {
|
||||||
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
|
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
|
||||||
authInfo, log, request, request.actionImplicitDenies)) {
|
|
||||||
log.debug('access denied for user on bucket', {
|
log.debug('access denied for user on bucket', {
|
||||||
requestType,
|
requestType,
|
||||||
method: 'bucketPutWebsite',
|
method: 'bucketPutWebsite',
|
||||||
|
|
|
@ -1,85 +0,0 @@
|
||||||
const { waterfall } = require('async');
|
|
||||||
const { errors } = require('arsenal');
|
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
|
||||||
const metadata = require('../metadata/wrapper');
|
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
|
||||||
const { parseString } = require('xml2js');
|
|
||||||
|
|
||||||
function validateBucketQuotaProperty(requestBody, next) {
|
|
||||||
const quota = requestBody.quota;
|
|
||||||
const quotaValue = parseInt(quota, 10);
|
|
||||||
if (Number.isNaN(quotaValue)) {
|
|
||||||
return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
|
|
||||||
}
|
|
||||||
if (quotaValue <= 0) {
|
|
||||||
return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
|
|
||||||
}
|
|
||||||
return next(null, quotaValue);
|
|
||||||
}
|
|
||||||
|
|
||||||
function parseRequestBody(requestBody, next) {
|
|
||||||
try {
|
|
||||||
const jsonData = JSON.parse(requestBody);
|
|
||||||
if (typeof jsonData !== 'object') {
|
|
||||||
throw new Error('Invalid JSON');
|
|
||||||
}
|
|
||||||
return next(null, jsonData);
|
|
||||||
} catch (jsonError) {
|
|
||||||
return parseString(requestBody, (xmlError, xmlData) => {
|
|
||||||
if (xmlError) {
|
|
||||||
return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
|
|
||||||
}
|
|
||||||
return next(null, xmlData);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function bucketUpdateQuota(authInfo, request, log, callback) {
|
|
||||||
log.debug('processing request', { method: 'bucketUpdateQuota' });
|
|
||||||
|
|
||||||
const { bucketName } = request;
|
|
||||||
const metadataValParams = {
|
|
||||||
authInfo,
|
|
||||||
bucketName,
|
|
||||||
requestType: request.apiMethods || 'bucketUpdateQuota',
|
|
||||||
request,
|
|
||||||
};
|
|
||||||
let bucket = null;
|
|
||||||
return waterfall([
|
|
||||||
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
|
||||||
(err, b) => {
|
|
||||||
bucket = b;
|
|
||||||
return next(err, bucket);
|
|
||||||
}),
|
|
||||||
(bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
|
|
||||||
(bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
|
|
||||||
next(err, bucket, quotaValue)),
|
|
||||||
(bucket, quotaValue, next) => {
|
|
||||||
bucket.setQuota(quotaValue);
|
|
||||||
return metadata.updateBucket(bucket.getName(), bucket, log, next);
|
|
||||||
},
|
|
||||||
], (err, bucket) => {
|
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
|
||||||
request.method, bucket);
|
|
||||||
if (err) {
|
|
||||||
log.debug('error processing request', {
|
|
||||||
error: err,
|
|
||||||
method: 'bucketUpdateQuota'
|
|
||||||
});
|
|
||||||
monitoring.promMetrics('PUT', bucketName, err.code,
|
|
||||||
'updateBucketQuota');
|
|
||||||
return callback(err, err.code, corsHeaders);
|
|
||||||
}
|
|
||||||
monitoring.promMetrics(
|
|
||||||
'PUT', bucketName, '200', 'updateBucketQuota');
|
|
||||||
pushMetric('updateBucketQuota', log, {
|
|
||||||
authInfo,
|
|
||||||
bucket: bucketName,
|
|
||||||
});
|
|
||||||
return callback(null, corsHeaders);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = bucketUpdateQuota;
|
|
|
@ -12,7 +12,7 @@ const constants = require('../../constants');
|
||||||
const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning }
|
const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const locationConstraintCheck
|
const locationConstraintCheck
|
||||||
= require('./apiUtils/object/locationConstraintCheck');
|
= require('./apiUtils/object/locationConstraintCheck');
|
||||||
const { skipMpuPartProcessing } = storage.data.external.backendUtils;
|
const { skipMpuPartProcessing } = storage.data.external.backendUtils;
|
||||||
|
@ -21,6 +21,8 @@ const { validateAndFilterMpuParts, generateMpuPartStorageInfo } =
|
||||||
const locationKeysHaveChanged
|
const locationKeysHaveChanged
|
||||||
= require('./apiUtils/object/locationKeysHaveChanged');
|
= require('./apiUtils/object/locationKeysHaveChanged');
|
||||||
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
||||||
|
|
||||||
|
const logger = require('../utilities/logger');
|
||||||
const { validatePutVersionId } = require('./apiUtils/object/coldStorage');
|
const { validatePutVersionId } = require('./apiUtils/object/coldStorage');
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
|
@ -80,7 +82,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
uploadId,
|
uploadId,
|
||||||
// Note: permissions for completing a multipart upload are the
|
// Note: permissions for completing a multipart upload are the
|
||||||
// same as putting a part.
|
// same as putting a part.
|
||||||
requestType: request.apiMethods || 'putPart or complete',
|
requestType: 'putPart or complete',
|
||||||
log,
|
log,
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
@ -131,11 +133,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
bucketName,
|
bucketName,
|
||||||
// Required permissions for this action
|
// Required permissions for this action
|
||||||
// at the destinationBucket level are same as objectPut
|
// at the destinationBucket level are same as objectPut
|
||||||
requestType: request.apiMethods || 'completeMultipartUpload',
|
requestType: 'objectPut',
|
||||||
versionId,
|
versionId,
|
||||||
request,
|
|
||||||
};
|
};
|
||||||
standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next);
|
metadataValidateBucketAndObj(metadataValParams, log, next);
|
||||||
},
|
},
|
||||||
function validateMultipart(destBucket, objMD, next) {
|
function validateMultipart(destBucket, objMD, next) {
|
||||||
if (objMD) {
|
if (objMD) {
|
||||||
|
@ -213,14 +214,9 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
const mdInfo = { storedParts, mpuOverviewKey, splitter };
|
const mdInfo = { storedParts, mpuOverviewKey, splitter };
|
||||||
const mpuInfo =
|
const mpuInfo =
|
||||||
{ objectKey, uploadId, jsonList, bucketName, destBucket };
|
{ objectKey, uploadId, jsonList, bucketName, destBucket };
|
||||||
const originalIdentityImpDenies = request.actionImplicitDenies;
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
delete request.actionImplicitDenies;
|
|
||||||
return data.completeMPU(request, mpuInfo, mdInfo, location,
|
return data.completeMPU(request, mpuInfo, mdInfo, location,
|
||||||
null, null, null, locationConstraintCheck, log,
|
null, null, null, locationConstraintCheck, log,
|
||||||
(err, completeObjData) => {
|
(err, completeObjData) => {
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
request.actionImplicitDenies = originalIdentityImpDenies;
|
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destBucket);
|
return next(err, destBucket);
|
||||||
}
|
}
|
||||||
|
@ -329,7 +325,6 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
replicationInfo: getReplicationInfo(objectKey, destBucket,
|
replicationInfo: getReplicationInfo(objectKey, destBucket,
|
||||||
false, calculatedSize, REPLICATION_ACTION),
|
false, calculatedSize, REPLICATION_ACTION),
|
||||||
originOp: 's3:ObjectCreated:CompleteMultipartUpload',
|
originOp: 's3:ObjectCreated:CompleteMultipartUpload',
|
||||||
overheadField: constants.overheadField,
|
|
||||||
log,
|
log,
|
||||||
};
|
};
|
||||||
// If key already exists
|
// If key already exists
|
||||||
|
@ -399,10 +394,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
metaStoreParams.versionId = options.versionId;
|
metaStoreParams.versionId = options.versionId;
|
||||||
metaStoreParams.versioning = options.versioning;
|
metaStoreParams.versioning = options.versioning;
|
||||||
metaStoreParams.isNull = options.isNull;
|
metaStoreParams.isNull = options.isNull;
|
||||||
metaStoreParams.deleteNullKey = options.deleteNullKey;
|
metaStoreParams.nullVersionId = options.nullVersionId;
|
||||||
if (options.extraMD) {
|
metaStoreParams.nullUploadId = options.nullUploadId;
|
||||||
Object.assign(metaStoreParams, options.extraMD);
|
|
||||||
}
|
|
||||||
/* eslint-enable no-param-reassign */
|
/* eslint-enable no-param-reassign */
|
||||||
|
|
||||||
// For external backends (where completeObjData is not
|
// For external backends (where completeObjData is not
|
||||||
|
@ -474,9 +467,12 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
const newDataStoreName =
|
const newDataStoreName =
|
||||||
Array.isArray(dataLocations) && dataLocations[0] ?
|
Array.isArray(dataLocations) && dataLocations[0] ?
|
||||||
dataLocations[0].dataStoreName : null;
|
dataLocations[0].dataStoreName : null;
|
||||||
|
const delLog =
|
||||||
|
logger.newRequestLoggerFromSerializedUids(log
|
||||||
|
.getSerializedUids());
|
||||||
return data.batchDelete(dataToDelete,
|
return data.batchDelete(dataToDelete,
|
||||||
request.method,
|
request.method,
|
||||||
newDataStoreName, log, err => {
|
newDataStoreName, delLog, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
|
@ -499,8 +495,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
function batchDeleteExtraParts(extraPartLocations, destinationBucket,
|
function batchDeleteExtraParts(extraPartLocations, destinationBucket,
|
||||||
aggregateETag, generatedVersionId, next) {
|
aggregateETag, generatedVersionId, next) {
|
||||||
if (extraPartLocations && extraPartLocations.length > 0) {
|
if (extraPartLocations && extraPartLocations.length > 0) {
|
||||||
|
const delLog = logger.newRequestLoggerFromSerializedUids(
|
||||||
|
log.getSerializedUids());
|
||||||
return data.batchDelete(extraPartLocations, request.method,
|
return data.batchDelete(extraPartLocations, request.method,
|
||||||
null, log, err => {
|
null, delLog, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,11 +6,10 @@ const convertToXml = s3middleware.convertToXml;
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { hasNonPrintables } = require('../utilities/stringChecks');
|
const { hasNonPrintables } = require('../utilities/stringChecks');
|
||||||
const { config } = require('../Config');
|
|
||||||
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const locationConstraintCheck
|
const locationConstraintCheck
|
||||||
= require('./apiUtils/object/locationConstraintCheck');
|
= require('./apiUtils/object/locationConstraintCheck');
|
||||||
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
|
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
|
||||||
|
@ -66,7 +65,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
||||||
const websiteRedirectHeader =
|
const websiteRedirectHeader =
|
||||||
request.headers['x-amz-website-redirect-location'];
|
request.headers['x-amz-website-redirect-location'];
|
||||||
if (request.headers['x-amz-storage-class'] &&
|
if (request.headers['x-amz-storage-class'] &&
|
||||||
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
|
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
|
||||||
log.trace('invalid storage-class header');
|
log.trace('invalid storage-class header');
|
||||||
monitoring.promMetrics('PUT', bucketName,
|
monitoring.promMetrics('PUT', bucketName,
|
||||||
errors.InvalidStorageClass.code, 'initiateMultipartUpload');
|
errors.InvalidStorageClass.code, 'initiateMultipartUpload');
|
||||||
|
@ -82,7 +81,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
||||||
if (metaHeaders instanceof Error) {
|
if (metaHeaders instanceof Error) {
|
||||||
log.debug('user metadata validation failed', {
|
log.debug('user metadata validation failed', {
|
||||||
error: metaHeaders,
|
error: metaHeaders,
|
||||||
method: 'initiateMultipartUpload',
|
method: 'createAndStoreObject',
|
||||||
});
|
});
|
||||||
return process.nextTick(() => callback(metaHeaders));
|
return process.nextTick(() => callback(metaHeaders));
|
||||||
}
|
}
|
||||||
|
@ -106,7 +105,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
// Required permissions for this action are same as objectPut
|
// Required permissions for this action are same as objectPut
|
||||||
requestType: request.apiMethods || 'initiateMultipartUpload',
|
requestType: 'objectPut',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const accountCanonicalID = authInfo.getCanonicalID();
|
const accountCanonicalID = authInfo.getCanonicalID();
|
||||||
|
@ -275,7 +274,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
|
|
||||||
async.waterfall([
|
async.waterfall([
|
||||||
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucketAndObj(metadataValParams, log,
|
||||||
(error, destinationBucket) => {
|
(error, destinationBucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket);
|
const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket);
|
||||||
if (error) {
|
if (error) {
|
||||||
|
|
|
@ -6,7 +6,7 @@ const convertToXml = s3middleware.convertToXml;
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
|
||||||
|
@ -96,8 +96,8 @@ function listMultipartUploads(authInfo, request, log, callback) {
|
||||||
// to list the multipart uploads so we have provided here that
|
// to list the multipart uploads so we have provided here that
|
||||||
// the authorization to list multipart uploads is the same
|
// the authorization to list multipart uploads is the same
|
||||||
// as listing objects in a bucket.
|
// as listing objects in a bucket.
|
||||||
requestType: request.apiMethods || 'bucketGet',
|
requestType: 'bucketGet',
|
||||||
preciseRequestType: request.apiMethods || 'listMultipartUploads',
|
preciseRequestType: 'listMultipartUploads',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -105,7 +105,7 @@ function listMultipartUploads(authInfo, request, log, callback) {
|
||||||
function waterfall1(next) {
|
function waterfall1(next) {
|
||||||
// Check final destination bucket for authorization rather
|
// Check final destination bucket for authorization rather
|
||||||
// than multipart upload bucket
|
// than multipart upload bucket
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
|
metadataValidateBucket(metadataValParams, log,
|
||||||
(err, bucket) => next(err, bucket));
|
(err, bucket) => next(err, bucket));
|
||||||
},
|
},
|
||||||
function getMPUBucket(bucket, next) {
|
function getMPUBucket(bucket, next) {
|
||||||
|
|
|
@ -8,7 +8,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const locationConstraintCheck =
|
const locationConstraintCheck =
|
||||||
require('./apiUtils/object/locationConstraintCheck');
|
require('./apiUtils/object/locationConstraintCheck');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const escapeForXml = s3middleware.escapeForXml;
|
const escapeForXml = s3middleware.escapeForXml;
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
@ -97,7 +97,7 @@ function listParts(authInfo, request, log, callback) {
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
uploadId,
|
uploadId,
|
||||||
preciseRequestType: request.apiMethods || 'listParts',
|
preciseRequestType: 'listParts',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
// For validating the request at the destinationBucket level
|
// For validating the request at the destinationBucket level
|
||||||
|
@ -114,7 +114,7 @@ function listParts(authInfo, request, log, callback) {
|
||||||
|
|
||||||
async.waterfall([
|
async.waterfall([
|
||||||
function checkDestBucketVal(next) {
|
function checkDestBucketVal(next) {
|
||||||
standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
|
metadataValidateBucketAndObj(metadataValParams, log,
|
||||||
(err, destinationBucket) => {
|
(err, destinationBucket) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destinationBucket, null);
|
return next(err, destinationBucket, null);
|
||||||
|
@ -152,13 +152,8 @@ function listParts(authInfo, request, log, callback) {
|
||||||
mpuOverviewObj,
|
mpuOverviewObj,
|
||||||
destBucket,
|
destBucket,
|
||||||
};
|
};
|
||||||
const originalIdentityImpDenies = request.actionImplicitDenies;
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
delete request.actionImplicitDenies;
|
|
||||||
return data.listParts(mpuInfo, request, locationConstraintCheck,
|
return data.listParts(mpuInfo, request, locationConstraintCheck,
|
||||||
log, (err, backendPartList) => {
|
log, (err, backendPartList) => {
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
request.actionImplicitDenies = originalIdentityImpDenies;
|
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destBucket);
|
return next(err, destBucket);
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
const { errors, versioning } = require('arsenal');
|
const { errors, versioning } = require('arsenal');
|
||||||
const constants = require('../../constants');
|
const constants = require('../../constants');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
|
const { metadataValidateBucket } = require('../metadata/metadataUtils');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
|
const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
|
||||||
|
@ -71,7 +71,7 @@ function metadataSearch(authInfo, request, log, callback) {
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
requestType: request.apiMethods || 'metadataSearch',
|
requestType: 'metadataSearch',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const listParams = {
|
const listParams = {
|
||||||
|
@ -103,7 +103,7 @@ function metadataSearch(authInfo, request, log, callback) {
|
||||||
listParams.marker = params.marker;
|
listParams.marker = params.marker;
|
||||||
}
|
}
|
||||||
|
|
||||||
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
|
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -11,27 +11,21 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const vault = require('../auth/vault');
|
const vault = require('../auth/vault');
|
||||||
const { isBucketAuthorized, evaluateBucketPolicyWithIAM } =
|
const { isBucketAuthorized } =
|
||||||
require('./apiUtils/authorization/permissionChecks');
|
require('./apiUtils/authorization/permissionChecks');
|
||||||
const { preprocessingVersioningDelete }
|
const { preprocessingVersioningDelete }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
|
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
const metadataUtils = require('../metadata/metadataUtils');
|
const { metadataGetObject } = require('../metadata/metadataUtils');
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
|
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
|
||||||
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
||||||
= require('./apiUtils/object/objectLockHelpers');
|
= require('./apiUtils/object/objectLockHelpers');
|
||||||
const requestUtils = policies.requestUtils;
|
const requestUtils = policies.requestUtils;
|
||||||
const { validObjectKeys } = require('../routes/routeVeeam');
|
|
||||||
const { deleteVeeamCapabilities } = require('../routes/veeam/delete');
|
|
||||||
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
|
|
||||||
const { overheadField } = require('../../constants');
|
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
const { data } = require('../data/wrapper');
|
|
||||||
const logger = require('../utilities/logger');
|
|
||||||
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Format of xml request:
|
Format of xml request:
|
||||||
|
@ -173,63 +167,6 @@ function _parseXml(xmlToParse, next) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* decodeObjectVersion - decode object version to be deleted
|
|
||||||
* @param {object} entry - entry from data model
|
|
||||||
* @param {function} next - callback to call with error or decoded version
|
|
||||||
* @return {undefined}
|
|
||||||
**/
|
|
||||||
function decodeObjectVersion(entry) {
|
|
||||||
let decodedVersionId;
|
|
||||||
if (entry.versionId) {
|
|
||||||
decodedVersionId = entry.versionId === 'null' ?
|
|
||||||
'null' : versionIdUtils.decode(entry.versionId);
|
|
||||||
}
|
|
||||||
if (decodedVersionId instanceof Error) {
|
|
||||||
return [errors.NoSuchVersion];
|
|
||||||
}
|
|
||||||
return [null, decodedVersionId];
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Initialization function for the MultiObjectDelete API that will, based on the
|
|
||||||
* current metadata backend, assess if metadata READ batching is supported. If
|
|
||||||
* yes, the initialization step will call the metadataGetObjects function from
|
|
||||||
* the MetadataWrapper.
|
|
||||||
* @param {string} bucketName - bucket name
|
|
||||||
* @param {string []} inPlay - list of object keys still in play
|
|
||||||
* @param {object} log - logger object
|
|
||||||
* @param {function} callback - callback to call with error or list of objects
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
function initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback) {
|
|
||||||
if (config.multiObjectDeleteEnableOptimizations === false) {
|
|
||||||
return callback(null, {});
|
|
||||||
}
|
|
||||||
// If the backend supports batching, we want to optimize the API latency by
|
|
||||||
// first getting all the objects metadata, stored in memory, for later use
|
|
||||||
// in the API. This approach does not change the API architecture, but
|
|
||||||
// transplants an additional piece of code that can greatly improve the API
|
|
||||||
// latency when the database supports batching.
|
|
||||||
const objectKeys = Object.values(inPlay).map(entry => {
|
|
||||||
const [err, versionId] = decodeObjectVersion(entry, bucketName);
|
|
||||||
if (err) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
return {
|
|
||||||
versionId,
|
|
||||||
inPlay: entry,
|
|
||||||
};
|
|
||||||
});
|
|
||||||
return metadataUtils.metadataGetObjects(bucketName, objectKeys, log, (err, cache) => {
|
|
||||||
// This optional step is read-only, so any error can be safely ignored
|
|
||||||
if (err) {
|
|
||||||
return callback(null, {});
|
|
||||||
}
|
|
||||||
return callback(null, cache);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* gets object metadata and deletes object
|
* gets object metadata and deletes object
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
||||||
|
@ -255,26 +192,42 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||||
let numOfObjectsRemoved = 0;
|
let numOfObjectsRemoved = 0;
|
||||||
const skipError = new Error('skip');
|
const skipError = new Error('skip');
|
||||||
const objectLockedError = new Error('object locked');
|
const objectLockedError = new Error('object locked');
|
||||||
let deleteFromStorage = [];
|
|
||||||
|
|
||||||
return async.waterfall([
|
// doing 5 requests at a time. note that the data wrapper
|
||||||
callback => initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback),
|
// will do 5 parallel requests to data backend to delete parts
|
||||||
(cache, callback) => async.forEachLimit(inPlay, config.multiObjectDeleteConcurrency, (entry, moveOn) => {
|
return async.forEachLimit(inPlay, 5, (entry, moveOn) => {
|
||||||
async.waterfall([
|
async.waterfall([
|
||||||
callback => callback(...decodeObjectVersion(entry, bucketName)),
|
callback => {
|
||||||
// for obj deletes, no need to check acl's at object level
|
let decodedVersionId;
|
||||||
// (authority is at the bucket level for obj deletes)
|
if (entry.versionId) {
|
||||||
(versionId, callback) => metadataUtils.metadataGetObject(bucketName, entry.key,
|
decodedVersionId = entry.versionId === 'null' ?
|
||||||
versionId, cache, log, (err, objMD) => callback(err, objMD, versionId)),
|
'null' : versionIdUtils.decode(entry.versionId);
|
||||||
(objMD, versionId, callback) => {
|
}
|
||||||
if (!objMD) {
|
if (decodedVersionId instanceof Error) {
|
||||||
|
monitoring.promMetrics('DELETE', bucketName, 404,
|
||||||
|
'multiObjectDelete');
|
||||||
|
return callback(errors.NoSuchVersion);
|
||||||
|
}
|
||||||
|
return callback(null, decodedVersionId);
|
||||||
|
},
|
||||||
|
// for obj deletes, no need to check acl's at object level
|
||||||
|
// (authority is at the bucket level for obj deletes)
|
||||||
|
(versionId, callback) => metadataGetObject(bucketName, entry.key,
|
||||||
|
versionId, log, (err, objMD) => {
|
||||||
|
// if general error from metadata return error
|
||||||
|
if (err && !err.is.NoSuchKey) {
|
||||||
|
monitoring.promMetrics('DELETE', bucketName, err.code,
|
||||||
|
'multiObjectDelete');
|
||||||
|
return callback(err);
|
||||||
|
}
|
||||||
|
if (err?.is.NoSuchKey) {
|
||||||
const verCfg = bucket.getVersioningConfiguration();
|
const verCfg = bucket.getVersioningConfiguration();
|
||||||
// To adhere to AWS behavior, create a delete marker
|
// To adhere to AWS behavior, create a delete marker
|
||||||
// if trying to delete an object that does not exist
|
// if trying to delete an object that does not exist
|
||||||
// when versioning has been configured
|
// when versioning has been configured
|
||||||
if (verCfg && !entry.versionId) {
|
if (verCfg && !entry.versionId) {
|
||||||
log.debug('trying to delete specific version ' +
|
log.debug('trying to delete specific version ' +
|
||||||
'that does not exist');
|
' that does not exist');
|
||||||
return callback(null, objMD, versionId);
|
return callback(null, objMD, versionId);
|
||||||
}
|
}
|
||||||
// otherwise if particular key does not exist, AWS
|
// otherwise if particular key does not exist, AWS
|
||||||
|
@ -290,165 +243,112 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||||
objMD.location[0].deleteVersion = true;
|
objMD.location[0].deleteVersion = true;
|
||||||
}
|
}
|
||||||
return callback(null, objMD, versionId);
|
return callback(null, objMD, versionId);
|
||||||
},
|
}),
|
||||||
(objMD, versionId, callback) => {
|
(objMD, versionId, callback) => {
|
||||||
// AWS only returns an object lock error if a version id
|
// AWS only returns an object lock error if a version id
|
||||||
// is specified, else continue to create a delete marker
|
// is specified, else continue to create a delete marker
|
||||||
if (!versionId || !bucket.isObjectLockEnabled()) {
|
if (!versionId || !bucket.isObjectLockEnabled()) {
|
||||||
return callback(null, null, objMD, versionId);
|
return callback(null, null, objMD, versionId);
|
||||||
}
|
}
|
||||||
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
|
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
|
||||||
if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) {
|
if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) {
|
||||||
return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
|
return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
|
||||||
if (error && error.is.AccessDenied) {
|
if (error && error.is.AccessDenied) {
|
||||||
log.debug('user does not have BypassGovernanceRetention and object is locked',
|
log.debug('user does not have BypassGovernanceRetention and object is locked', { error });
|
||||||
{ error });
|
return callback(objectLockedError);
|
||||||
return callback(objectLockedError);
|
}
|
||||||
}
|
if (error) {
|
||||||
if (error) {
|
return callback(error);
|
||||||
return callback(error);
|
}
|
||||||
}
|
return callback(null, hasGovernanceBypass, objMD, versionId);
|
||||||
return callback(null, hasGovernanceBypass, objMD, versionId);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
return callback(null, hasGovernanceBypass, objMD, versionId);
|
|
||||||
},
|
|
||||||
(hasGovernanceBypass, objMD, versionId, callback) => {
|
|
||||||
// AWS only returns an object lock error if a version id
|
|
||||||
// is specified, else continue to create a delete marker
|
|
||||||
if (!versionId || !bucket.isObjectLockEnabled()) {
|
|
||||||
return callback(null, objMD, versionId);
|
|
||||||
}
|
|
||||||
const objLockInfo = new ObjectLockInfo({
|
|
||||||
mode: objMD.retentionMode,
|
|
||||||
date: objMD.retentionDate,
|
|
||||||
legalHold: objMD.legalHold || false,
|
|
||||||
});
|
});
|
||||||
|
}
|
||||||
// If the object can not be deleted raise an error
|
return callback(null, hasGovernanceBypass, objMD, versionId);
|
||||||
if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
|
},
|
||||||
log.debug('trying to delete locked object');
|
(hasGovernanceBypass, objMD, versionId, callback) => {
|
||||||
return callback(objectLockedError);
|
// AWS only returns an object lock error if a version id
|
||||||
}
|
// is specified, else continue to create a delete marker
|
||||||
|
if (!versionId || !bucket.isObjectLockEnabled()) {
|
||||||
return callback(null, objMD, versionId);
|
return callback(null, objMD, versionId);
|
||||||
},
|
|
||||||
(objMD, versionId, callback) => validateQuotas(
|
|
||||||
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
|
|
||||||
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
|
|
||||||
(objMD, versionId, callback) => {
|
|
||||||
const options = preprocessingVersioningDelete(
|
|
||||||
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
|
|
||||||
const deleteInfo = {};
|
|
||||||
if (options && options.deleteData) {
|
|
||||||
options.overheadField = overheadField;
|
|
||||||
deleteInfo.deleted = true;
|
|
||||||
if (!_bucketRequiresOplogUpdate(bucket)) {
|
|
||||||
options.doesNotNeedOpogUpdate = true;
|
|
||||||
}
|
|
||||||
if (objMD.uploadId) {
|
|
||||||
// eslint-disable-next-line
|
|
||||||
options.replayId = objMD.uploadId;
|
|
||||||
}
|
|
||||||
return services.deleteObject(bucketName, objMD,
|
|
||||||
entry.key, options, config.multiObjectDeleteEnableOptimizations, log,
|
|
||||||
's3:ObjectRemoved:Delete', (err, toDelete) => {
|
|
||||||
if (err) {
|
|
||||||
return callback(err);
|
|
||||||
}
|
|
||||||
if (toDelete) {
|
|
||||||
deleteFromStorage = deleteFromStorage.concat(toDelete);
|
|
||||||
}
|
|
||||||
return callback(null, objMD, deleteInfo);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
deleteInfo.newDeleteMarker = true;
|
|
||||||
// This call will create a delete-marker
|
|
||||||
return createAndStoreObject(bucketName, bucket, entry.key,
|
|
||||||
objMD, authInfo, canonicalID, null, request,
|
|
||||||
deleteInfo.newDeleteMarker, null, overheadField, log,
|
|
||||||
's3:ObjectRemoved:DeleteMarkerCreated', (err, result) =>
|
|
||||||
callback(err, objMD, deleteInfo, result.versionId));
|
|
||||||
},
|
|
||||||
], (err, objMD, deleteInfo, versionId) => {
|
|
||||||
if (err === skipError) {
|
|
||||||
return moveOn();
|
|
||||||
} else if (err === objectLockedError) {
|
|
||||||
errorResults.push({ entry, error: errors.AccessDenied, objectLocked: true });
|
|
||||||
return moveOn();
|
|
||||||
} else if (err) {
|
|
||||||
log.error('error deleting object', { error: err, entry });
|
|
||||||
errorResults.push({ entry, error: err });
|
|
||||||
return moveOn();
|
|
||||||
}
|
}
|
||||||
if (deleteInfo.deleted && objMD['content-length']) {
|
const objLockInfo = new ObjectLockInfo({
|
||||||
numOfObjectsRemoved++;
|
mode: objMD.retentionMode,
|
||||||
totalContentLengthDeleted += objMD['content-length'];
|
date: objMD.retentionDate,
|
||||||
}
|
legalHold: objMD.legalHold || false,
|
||||||
let isDeleteMarker;
|
|
||||||
let deleteMarkerVersionId;
|
|
||||||
// - If trying to delete an object that does not exist (if a new
|
|
||||||
// delete marker was created)
|
|
||||||
// - Or if an object exists but no version was specified
|
|
||||||
// return DeleteMarkerVersionId equals the versionID of the marker
|
|
||||||
// you just generated and DeleteMarker tag equals true
|
|
||||||
if (deleteInfo.newDeleteMarker) {
|
|
||||||
isDeleteMarker = true;
|
|
||||||
deleteMarkerVersionId = versionIdUtils.encode(versionId);
|
|
||||||
// In this case we are putting a new object (i.e., the delete
|
|
||||||
// marker), so we decrement the numOfObjectsRemoved value.
|
|
||||||
numOfObjectsRemoved--;
|
|
||||||
// If trying to delete a delete marker, DeleteMarkerVersionId equals
|
|
||||||
// deleteMarker's versionID and DeleteMarker equals true
|
|
||||||
} else if (objMD && objMD.isDeleteMarker) {
|
|
||||||
isDeleteMarker = true;
|
|
||||||
deleteMarkerVersionId = entry.versionId;
|
|
||||||
}
|
|
||||||
successfullyDeleted.push({
|
|
||||||
entry, isDeleteMarker,
|
|
||||||
deleteMarkerVersionId,
|
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// If the object can not be deleted raise an error
|
||||||
|
if (!objLockInfo.canModifyObject(hasGovernanceBypass)) {
|
||||||
|
log.debug('trying to delete locked object');
|
||||||
|
return callback(objectLockedError);
|
||||||
|
}
|
||||||
|
|
||||||
|
return callback(null, objMD, versionId);
|
||||||
|
},
|
||||||
|
(objMD, versionId, callback) => {
|
||||||
|
const options = preprocessingVersioningDelete(bucketName, bucket, objMD, versionId);
|
||||||
|
const deleteInfo = {};
|
||||||
|
if (options && options.deleteData) {
|
||||||
|
deleteInfo.deleted = true;
|
||||||
|
if (objMD.uploadId) {
|
||||||
|
// eslint-disable-next-line
|
||||||
|
options.replayId = objMD.uploadId;
|
||||||
|
}
|
||||||
|
return services.deleteObject(bucketName, objMD,
|
||||||
|
entry.key, options, log, err =>
|
||||||
|
callback(err, objMD, deleteInfo));
|
||||||
|
}
|
||||||
|
deleteInfo.newDeleteMarker = true;
|
||||||
|
// This call will create a delete-marker
|
||||||
|
return createAndStoreObject(bucketName, bucket, entry.key,
|
||||||
|
objMD, authInfo, canonicalID, null, request,
|
||||||
|
deleteInfo.newDeleteMarker, null, log, (err, result) =>
|
||||||
|
callback(err, objMD, deleteInfo, result.versionId));
|
||||||
|
},
|
||||||
|
], (err, objMD, deleteInfo, versionId) => {
|
||||||
|
if (err === skipError) {
|
||||||
return moveOn();
|
return moveOn();
|
||||||
});
|
} else if (err === objectLockedError) {
|
||||||
},
|
errorResults.push({ entry, error: errors.AccessDenied, objectLocked: true });
|
||||||
// end of forEach func
|
return moveOn();
|
||||||
err => {
|
} else if (err) {
|
||||||
// Batch delete all objects
|
log.error('error deleting object', { error: err, entry });
|
||||||
const onDone = () => callback(err, quietSetting, errorResults, numOfObjectsRemoved,
|
errorResults.push({ entry, error: err });
|
||||||
successfullyDeleted, totalContentLengthDeleted, bucket);
|
return moveOn();
|
||||||
|
}
|
||||||
if (err && deleteFromStorage.length === 0) {
|
if (deleteInfo.deleted && objMD['content-length']) {
|
||||||
log.trace('no objects to delete from data backend');
|
numOfObjectsRemoved++;
|
||||||
return onDone();
|
totalContentLengthDeleted += objMD['content-length'];
|
||||||
}
|
}
|
||||||
// If error but we have objects in the list, delete them to ensure
|
let isDeleteMarker;
|
||||||
// consistent state.
|
let deleteMarkerVersionId;
|
||||||
log.trace('deleting objects from data backend');
|
// - If trying to delete an object that does not exist (if a new
|
||||||
|
// delete marker was created)
|
||||||
// Split the array into chunks
|
// - Or if an object exists but no version was specified
|
||||||
const chunks = [];
|
// return DeleteMarkerVersionId equals the versionID of the marker
|
||||||
while (deleteFromStorage.length > 0) {
|
// you just generated and DeleteMarker tag equals true
|
||||||
chunks.push(deleteFromStorage.splice(0, config.multiObjectDeleteConcurrency));
|
if (deleteInfo.newDeleteMarker) {
|
||||||
}
|
isDeleteMarker = true;
|
||||||
|
deleteMarkerVersionId = versionIdUtils.encode(versionId);
|
||||||
return async.each(chunks, (chunk, done) => data.batchDelete(chunk, null, null,
|
// In this case we are putting a new object (i.e., the delete
|
||||||
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids()), done),
|
// marker), so we decrement the numOfObjectsRemoved value.
|
||||||
err => {
|
numOfObjectsRemoved--;
|
||||||
if (err) {
|
// If trying to delete a delete marker, DeleteMarkerVersionId equals
|
||||||
log.error('error deleting objects from data backend', { error: err });
|
// deleteMarker's versionID and DeleteMarker equals true
|
||||||
return onDone(err);
|
} else if (objMD && objMD.isDeleteMarker) {
|
||||||
}
|
isDeleteMarker = true;
|
||||||
return onDone();
|
deleteMarkerVersionId = entry.versionId;
|
||||||
});
|
}
|
||||||
}),
|
successfullyDeleted.push({ entry, isDeleteMarker,
|
||||||
], (err, ...results) => {
|
deleteMarkerVersionId });
|
||||||
// if general error from metadata return error
|
return moveOn();
|
||||||
if (err) {
|
});
|
||||||
monitoring.promMetrics('DELETE', bucketName, err.code,
|
},
|
||||||
'multiObjectDelete');
|
// end of forEach func
|
||||||
return next(err);
|
err => {
|
||||||
}
|
log.trace('finished deleting objects', { numOfObjectsRemoved });
|
||||||
return next(null, ...results);
|
return next(err, quietSetting, errorResults, numOfObjectsRemoved,
|
||||||
|
successfullyDeleted, totalContentLengthDeleted, bucket);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -482,7 +382,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
return callback(errors.BadDigest);
|
return callback(errors.BadDigest);
|
||||||
}
|
}
|
||||||
|
|
||||||
const inPlayInternal = [];
|
|
||||||
const bucketName = request.bucketName;
|
const bucketName = request.bucketName;
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
|
|
||||||
|
@ -496,47 +395,15 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
return next(null, quietSetting, objects);
|
return next(null, quietSetting, objects);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function checkBucketMetadata(quietSetting, objects, next) {
|
function checkPolicies(quietSetting, objects, next) {
|
||||||
const errorResults = [];
|
|
||||||
return metadata.getBucket(bucketName, log, (err, bucketMD) => {
|
|
||||||
if (err) {
|
|
||||||
log.trace('error retrieving bucket metadata',
|
|
||||||
{ error: err });
|
|
||||||
return next(err);
|
|
||||||
}
|
|
||||||
// check whether bucket has transient or deleted flag
|
|
||||||
if (bucketShield(bucketMD, 'objectDelete')) {
|
|
||||||
return next(errors.NoSuchBucket);
|
|
||||||
}
|
|
||||||
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
|
|
||||||
// affects the objects.
|
|
||||||
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
|
|
||||||
log.trace("access denied due to bucket acl's");
|
|
||||||
// if access denied at the bucket level, no access for
|
|
||||||
// any of the objects so all results will be error results
|
|
||||||
objects.forEach(entry => {
|
|
||||||
errorResults.push({
|
|
||||||
entry,
|
|
||||||
error: errors.AccessDenied,
|
|
||||||
});
|
|
||||||
});
|
|
||||||
// by sending an empty array as the objects array
|
|
||||||
// async.forEachLimit below will not actually
|
|
||||||
// make any calls to metadata or data but will continue on
|
|
||||||
// to the next step to build xml
|
|
||||||
return next(null, quietSetting, errorResults, [], bucketMD);
|
|
||||||
}
|
|
||||||
return next(null, quietSetting, errorResults, objects, bucketMD);
|
|
||||||
});
|
|
||||||
},
|
|
||||||
function checkPolicies(quietSetting, errorResults, objects, bucketMD, next) {
|
|
||||||
// track keys that are still on track to be deleted
|
// track keys that are still on track to be deleted
|
||||||
const inPlay = [];
|
const inPlay = [];
|
||||||
|
const errorResults = [];
|
||||||
// if request from account, no need to check policies
|
// if request from account, no need to check policies
|
||||||
// all objects are inPlay so send array of object keys
|
// all objects are inPlay so send array of object keys
|
||||||
// as inPlay argument
|
// as inPlay argument
|
||||||
if (!isRequesterNonAccountUser(authInfo)) {
|
if (!isRequesterNonAccountUser(authInfo)) {
|
||||||
return next(null, quietSetting, errorResults, objects, bucketMD);
|
return next(null, quietSetting, errorResults, objects);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: once arsenal's extractParams is separated from doAuth
|
// TODO: once arsenal's extractParams is separated from doAuth
|
||||||
|
@ -580,7 +447,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
error: errors.AccessDenied });
|
error: errors.AccessDenied });
|
||||||
});
|
});
|
||||||
// send empty array for inPlay
|
// send empty array for inPlay
|
||||||
return next(null, quietSetting, errorResults, [], bucketMD);
|
return next(null, quietSetting, errorResults, []);
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('error checking policies', {
|
log.trace('error checking policies', {
|
||||||
|
@ -598,13 +465,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
});
|
});
|
||||||
return next(errors.InternalError);
|
return next(errors.InternalError);
|
||||||
}
|
}
|
||||||
// Convert authorization results into an easier to handle format
|
|
||||||
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
|
|
||||||
const apiMethod = authorizationResults[idx].action;
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
acc[apiMethod] = curr.isImplicit;
|
|
||||||
return acc;
|
|
||||||
}, {});
|
|
||||||
for (let i = 0; i < authorizationResults.length; i++) {
|
for (let i = 0; i < authorizationResults.length; i++) {
|
||||||
const result = authorizationResults[i];
|
const result = authorizationResults[i];
|
||||||
// result is { isAllowed: true,
|
// result is { isAllowed: true,
|
||||||
|
@ -620,31 +480,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
key: result.arn.slice(slashIndex + 1),
|
key: result.arn.slice(slashIndex + 1),
|
||||||
versionId: result.versionId,
|
versionId: result.versionId,
|
||||||
};
|
};
|
||||||
// Deny immediately if there is an explicit deny
|
if (result.isAllowed) {
|
||||||
if (!result.isImplicit && !result.isAllowed) {
|
inPlay.push(entry);
|
||||||
errorResults.push({
|
|
||||||
entry,
|
|
||||||
error: errors.AccessDenied,
|
|
||||||
});
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Evaluate against the bucket policies
|
|
||||||
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
|
|
||||||
bucketMD,
|
|
||||||
Object.keys(actionImplicitDenies),
|
|
||||||
canonicalID,
|
|
||||||
authInfo,
|
|
||||||
actionImplicitDenies,
|
|
||||||
log,
|
|
||||||
request);
|
|
||||||
|
|
||||||
if (areAllActionsAllowed) {
|
|
||||||
if (validObjectKeys.includes(entry.key)) {
|
|
||||||
inPlayInternal.push(entry.key);
|
|
||||||
} else {
|
|
||||||
inPlay.push(entry);
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
errorResults.push({
|
errorResults.push({
|
||||||
entry,
|
entry,
|
||||||
|
@ -652,13 +489,49 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return next(null, quietSetting, errorResults, inPlay, bucketMD);
|
return next(null, quietSetting, errorResults, inPlay);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) {
|
function checkBucketMetadata(quietSetting, errorResults, inPlay, next) {
|
||||||
return async.each(inPlayInternal,
|
// if no objects in play, no need to check ACLs / get metadata,
|
||||||
(localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next),
|
// just move on if there is no Origin header
|
||||||
err => next(err, quietSetting, errorResults, inPlay, bucketMD));
|
if (inPlay.length === 0 && !request.headers.origin) {
|
||||||
|
return next(null, quietSetting, errorResults, inPlay,
|
||||||
|
undefined);
|
||||||
|
}
|
||||||
|
return metadata.getBucket(bucketName, log, (err, bucketMD) => {
|
||||||
|
if (err) {
|
||||||
|
log.trace('error retrieving bucket metadata',
|
||||||
|
{ error: err });
|
||||||
|
return next(err);
|
||||||
|
}
|
||||||
|
// check whether bucket has transient or deleted flag
|
||||||
|
if (bucketShield(bucketMD, 'objectDelete')) {
|
||||||
|
return next(errors.NoSuchBucket);
|
||||||
|
}
|
||||||
|
// if no objects in play, no need to check ACLs
|
||||||
|
if (inPlay.length === 0) {
|
||||||
|
return next(null, quietSetting, errorResults, inPlay,
|
||||||
|
bucketMD);
|
||||||
|
}
|
||||||
|
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
|
||||||
|
log.trace("access denied due to bucket acl's");
|
||||||
|
// if access denied at the bucket level, no access for
|
||||||
|
// any of the objects so all results will be error results
|
||||||
|
inPlay.forEach(entry => {
|
||||||
|
errorResults.push({
|
||||||
|
entry,
|
||||||
|
error: errors.AccessDenied,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
// by sending an empty array as the inPlay array
|
||||||
|
// async.forEachLimit below will not actually
|
||||||
|
// make any calls to metadata or data but will continue on
|
||||||
|
// to the next step to build xml
|
||||||
|
return next(null, quietSetting, errorResults, [], bucketMD);
|
||||||
|
}
|
||||||
|
return next(null, quietSetting, errorResults, inPlay, bucketMD);
|
||||||
|
});
|
||||||
},
|
},
|
||||||
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
|
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
|
||||||
bucket, next) {
|
bucket, next) {
|
||||||
|
@ -702,6 +575,4 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
module.exports = {
|
module.exports = {
|
||||||
getObjMetadataAndDelete,
|
getObjMetadataAndDelete,
|
||||||
multiObjectDelete,
|
multiObjectDelete,
|
||||||
decodeObjectVersion,
|
|
||||||
initializeMultiObjectDeleteWithBatchingSupport,
|
|
||||||
};
|
};
|
||||||
|
|
|
@ -12,10 +12,11 @@ const { checkQueryVersionId, versioningPreprocessing }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
|
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
|
||||||
const { data } = require('../data/wrapper');
|
const { data } = require('../data/wrapper');
|
||||||
|
const logger = require('../utilities/logger');
|
||||||
const services = require('../services');
|
const services = require('../services');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
|
const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
|
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
|
||||||
.validateWebsiteHeader;
|
.validateWebsiteHeader;
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
|
@ -23,7 +24,6 @@ const monitoring = require('../utilities/monitoringHandler');
|
||||||
const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD');
|
const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD');
|
||||||
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
|
||||||
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
||||||
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
|
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
const locationHeader = constants.objectLocationConstraintHeader;
|
const locationHeader = constants.objectLocationConstraintHeader;
|
||||||
|
@ -218,16 +218,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
bucketName: sourceBucket,
|
bucketName: sourceBucket,
|
||||||
objectKey: sourceObject,
|
objectKey: sourceObject,
|
||||||
versionId: sourceVersionId,
|
versionId: sourceVersionId,
|
||||||
getDeleteMarker: true,
|
|
||||||
requestType: 'objectGet',
|
requestType: 'objectGet',
|
||||||
/**
|
|
||||||
* Authorization will first check the target object, with an objectPut
|
|
||||||
* action. But in this context, the source object metadata is still
|
|
||||||
* unknown. In the context of quotas, to know the number of bytes that
|
|
||||||
* are being written, we explicitly enable the quota evaluation logic
|
|
||||||
* during the objectGet action instead.
|
|
||||||
*/
|
|
||||||
checkQuota: true,
|
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const valPutParams = {
|
const valPutParams = {
|
||||||
|
@ -235,7 +226,6 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
bucketName: destBucketName,
|
bucketName: destBucketName,
|
||||||
objectKey: destObjectKey,
|
objectKey: destObjectKey,
|
||||||
requestType: 'objectPut',
|
requestType: 'objectPut',
|
||||||
checkQuota: false,
|
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const dataStoreContext = {
|
const dataStoreContext = {
|
||||||
|
@ -249,7 +239,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
const responseHeaders = {};
|
const responseHeaders = {};
|
||||||
|
|
||||||
if (request.headers['x-amz-storage-class'] &&
|
if (request.headers['x-amz-storage-class'] &&
|
||||||
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
|
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
|
||||||
log.trace('invalid storage-class header');
|
log.trace('invalid storage-class header');
|
||||||
monitoring.promMetrics('PUT', destBucketName,
|
monitoring.promMetrics('PUT', destBucketName,
|
||||||
errors.InvalidStorageClass.code, 'copyObject');
|
errors.InvalidStorageClass.code, 'copyObject');
|
||||||
|
@ -269,7 +259,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
}
|
}
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
function checkDestAuth(next) {
|
function checkDestAuth(next) {
|
||||||
return standardMetadataValidateBucketAndObj(valPutParams, request.actionImplicitDenies, log,
|
return metadataValidateBucketAndObj(valPutParams, log,
|
||||||
(err, destBucketMD, destObjMD) => {
|
(err, destBucketMD, destObjMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error validating put part of request',
|
log.debug('error validating put part of request',
|
||||||
|
@ -287,10 +277,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
|
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
|
||||||
return standardMetadataValidateBucketAndObj({
|
return metadataValidateBucketAndObj(valGetParams, log,
|
||||||
...valGetParams,
|
|
||||||
destObjMD,
|
|
||||||
}, request.actionImplicitDenies, log,
|
|
||||||
(err, sourceBucketMD, sourceObjMD) => {
|
(err, sourceBucketMD, sourceObjMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error validating get part of request',
|
log.debug('error validating get part of request',
|
||||||
|
@ -303,11 +290,6 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
log.debug('no source object', { sourceObject });
|
log.debug('no source object', { sourceObject });
|
||||||
return next(err, null, destBucketMD);
|
return next(err, null, destBucketMD);
|
||||||
}
|
}
|
||||||
// check if object data is in a cold storage
|
|
||||||
const coldErr = verifyColdObjectAvailable(sourceObjMD);
|
|
||||||
if (coldErr) {
|
|
||||||
return next(coldErr, null);
|
|
||||||
}
|
|
||||||
if (sourceObjMD.isDeleteMarker) {
|
if (sourceObjMD.isDeleteMarker) {
|
||||||
log.debug('delete marker on source object',
|
log.debug('delete marker on source object',
|
||||||
{ sourceObject });
|
{ sourceObject });
|
||||||
|
@ -342,10 +324,6 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
dataStoreContext.metaHeaders =
|
dataStoreContext.metaHeaders =
|
||||||
storeMetadataParams.metaHeaders;
|
storeMetadataParams.metaHeaders;
|
||||||
}
|
}
|
||||||
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
storeMetadataParams.overheadField = constants.overheadField;
|
|
||||||
|
|
||||||
let dataLocator;
|
let dataLocator;
|
||||||
// If 0 byte object just set dataLocator to empty array
|
// If 0 byte object just set dataLocator to empty array
|
||||||
if (!sourceObjMD.location) {
|
if (!sourceObjMD.location) {
|
||||||
|
@ -461,15 +439,10 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
return next(null, storeMetadataParams, dataLocator, destObjMD,
|
return next(null, storeMetadataParams, dataLocator, destObjMD,
|
||||||
serverSideEncryption, destBucketMD);
|
serverSideEncryption, destBucketMD);
|
||||||
}
|
}
|
||||||
const originalIdentityImpDenies = request.actionImplicitDenies;
|
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
delete request.actionImplicitDenies;
|
|
||||||
return data.copyObject(request, sourceLocationConstraintName,
|
return data.copyObject(request, sourceLocationConstraintName,
|
||||||
storeMetadataParams, dataLocator, dataStoreContext,
|
storeMetadataParams, dataLocator, dataStoreContext,
|
||||||
backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
|
backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
|
||||||
(err, results) => {
|
(err, results) => {
|
||||||
// eslint-disable-next-line no-param-reassign
|
|
||||||
request.actionImplicitDenies = originalIdentityImpDenies;
|
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destBucketMD);
|
return next(err, destBucketMD);
|
||||||
}
|
}
|
||||||
|
@ -493,9 +466,10 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
storeMetadataParams.versioning = options.versioning;
|
storeMetadataParams.versioning = options.versioning;
|
||||||
// eslint-disable-next-line
|
// eslint-disable-next-line
|
||||||
storeMetadataParams.isNull = options.isNull;
|
storeMetadataParams.isNull = options.isNull;
|
||||||
if (options.extraMD) {
|
// eslint-disable-next-line
|
||||||
Object.assign(storeMetadataParams, options.extraMD);
|
storeMetadataParams.nullVersionId = options.nullVersionId;
|
||||||
}
|
// eslint-disable-next-line
|
||||||
|
storeMetadataParams.nullUploadId = options.nullUploadId;
|
||||||
const dataToDelete = options.dataToDelete;
|
const dataToDelete = options.dataToDelete;
|
||||||
return next(null, storeMetadataParams, destDataGetInfoArr,
|
return next(null, storeMetadataParams, destDataGetInfoArr,
|
||||||
destObjMD, serverSideEncryption, destBucketMD,
|
destObjMD, serverSideEncryption, destBucketMD,
|
||||||
|
@ -544,8 +518,10 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
// the same as the destination
|
// the same as the destination
|
||||||
if (!sourceIsDestination && dataToDelete) {
|
if (!sourceIsDestination && dataToDelete) {
|
||||||
const newDataStoreName = storeMetadataParams.dataStoreName;
|
const newDataStoreName = storeMetadataParams.dataStoreName;
|
||||||
|
const delLog = logger.newRequestLoggerFromSerializedUids(
|
||||||
|
log.getSerializedUids());
|
||||||
return data.batchDelete(dataToDelete, request.method,
|
return data.batchDelete(dataToDelete, request.method,
|
||||||
newDataStoreName, log, err => {
|
newDataStoreName, delLog, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
// if error, log the error and move on as it is not
|
// if error, log the error and move on as it is not
|
||||||
// relevant to the client as the client's
|
// relevant to the client as the client's
|
||||||
|
|
|
@ -8,30 +8,26 @@ const { pushMetric } = require('../utapi/utilities');
|
||||||
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
|
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
|
||||||
const { decodeVersionId, preprocessingVersioningDelete }
|
const { decodeVersionId, preprocessingVersioningDelete }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
|
||||||
= require('./apiUtils/object/objectLockHelpers');
|
= require('./apiUtils/object/objectLockHelpers');
|
||||||
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
|
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
|
||||||
const { config } = require('../Config');
|
|
||||||
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
|
|
||||||
|
|
||||||
const versionIdUtils = versioning.VersionID;
|
const versionIdUtils = versioning.VersionID;
|
||||||
const objectLockedError = new Error('object locked');
|
const objectLockedError = new Error('object locked');
|
||||||
const { overheadField } = require('../../constants');
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* objectDeleteInternal - DELETE an object from a bucket
|
* objectDelete - DELETE an object from a bucket
|
||||||
* @param {AuthInfo} authInfo - requester's infos
|
* @param {AuthInfo} authInfo - requester's infos
|
||||||
* @param {object} request - request object given by router,
|
* @param {object} request - request object given by router,
|
||||||
* includes normalized headers
|
* includes normalized headers
|
||||||
* @param {Logger} log - werelogs request instance
|
* @param {Logger} log - werelogs request instance
|
||||||
* @param {boolean} isExpiration - true if the call comes from LifecycleExpiration
|
|
||||||
* @param {function} cb - final cb to call with the result and response headers
|
* @param {function} cb - final cb to call with the result and response headers
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
function objectDelete(authInfo, request, log, cb) {
|
||||||
log.debug('processing request', { method: 'objectDeleteInternal' });
|
log.debug('processing request', { method: 'objectDelete' });
|
||||||
if (authInfo.isRequesterPublicUser()) {
|
if (authInfo.isRequesterPublicUser()) {
|
||||||
log.debug('operation not available for public user');
|
log.debug('operation not available for public user');
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
|
@ -56,14 +52,14 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
versionId: reqVersionId,
|
versionId: reqVersionId,
|
||||||
requestType: request.apiMethods || 'objectDelete',
|
requestType: 'objectDelete',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
const canonicalID = authInfo.getCanonicalID();
|
const canonicalID = authInfo.getCanonicalID();
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
function validateBucketAndObj(next) {
|
function validateBucketAndObj(next) {
|
||||||
return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log,
|
return metadataValidateBucketAndObj(valParams, log,
|
||||||
(err, bucketMD, objMD) => {
|
(err, bucketMD, objMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, bucketMD);
|
return next(err, bucketMD);
|
||||||
|
@ -155,10 +151,11 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
||||||
},
|
},
|
||||||
function deleteOperation(bucketMD, objectMD, next) {
|
function deleteOperation(bucketMD, objectMD, next) {
|
||||||
const delOptions = preprocessingVersioningDelete(
|
const delOptions = preprocessingVersioningDelete(
|
||||||
bucketName, bucketMD, objectMD, reqVersionId, config.nullVersionCompatMode);
|
bucketName, bucketMD, objectMD, reqVersionId);
|
||||||
const deleteInfo = {
|
const deleteInfo = {
|
||||||
removeDeleteMarker: false,
|
removeDeleteMarker: false,
|
||||||
newDeleteMarker: false,
|
newDeleteMarker: false,
|
||||||
|
isNull: delOptions.isNull,
|
||||||
};
|
};
|
||||||
if (delOptions && delOptions.deleteData && bucketMD.isNFS() &&
|
if (delOptions && delOptions.deleteData && bucketMD.isNFS() &&
|
||||||
bucketMD.getReplicationConfiguration()) {
|
bucketMD.getReplicationConfiguration()) {
|
||||||
|
@ -167,10 +164,7 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
||||||
// source does not have versioning.
|
// source does not have versioning.
|
||||||
return createAndStoreObject(bucketName, bucketMD, objectKey,
|
return createAndStoreObject(bucketName, bucketMD, objectKey,
|
||||||
objectMD, authInfo, canonicalID, null, request, true, null,
|
objectMD, authInfo, canonicalID, null, request, true, null,
|
||||||
log, isExpiration ?
|
log, err => {
|
||||||
's3:LifecycleExpiration:DeleteMarkerCreated' :
|
|
||||||
's3:ObjectRemoved:DeleteMarkerCreated',
|
|
||||||
err => {
|
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
|
@ -180,15 +174,12 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
||||||
deleteInfo.removeDeleteMarker = true;
|
deleteInfo.removeDeleteMarker = true;
|
||||||
}
|
}
|
||||||
return services.deleteObject(bucketName, objectMD,
|
return services.deleteObject(bucketName, objectMD,
|
||||||
objectKey, delOptions, false, log, isExpiration ?
|
objectKey, delOptions, log, (err, delResult) =>
|
||||||
's3:LifecycleExpiration:Delete' :
|
next(err, bucketMD, objectMD, delResult,
|
||||||
's3:ObjectRemoved:Delete',
|
deleteInfo));
|
||||||
(err, delResult) =>
|
|
||||||
next(err, bucketMD, objectMD, delResult, deleteInfo));
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
if (delOptions && delOptions.deleteData) {
|
if (delOptions && delOptions.deleteData) {
|
||||||
delOptions.overheadField = overheadField;
|
|
||||||
if (objectMD.isDeleteMarker) {
|
if (objectMD.isDeleteMarker) {
|
||||||
// record that we deleted a delete marker to set
|
// record that we deleted a delete marker to set
|
||||||
// response headers accordingly
|
// response headers accordingly
|
||||||
|
@ -200,25 +191,15 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
||||||
delOptions.replayId = objectMD.uploadId;
|
delOptions.replayId = objectMD.uploadId;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!_bucketRequiresOplogUpdate(bucketMD)) {
|
|
||||||
delOptions.doesNotNeedOpogUpdate = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
return services.deleteObject(bucketName, objectMD, objectKey,
|
return services.deleteObject(bucketName, objectMD, objectKey,
|
||||||
delOptions, false, log, isExpiration ?
|
delOptions, log, (err, delResult) => next(err, bucketMD,
|
||||||
's3:LifecycleExpiration:Delete' :
|
objectMD, delResult, deleteInfo));
|
||||||
's3:ObjectRemoved:Delete',
|
|
||||||
(err, delResult) => next(err, bucketMD,
|
|
||||||
objectMD, delResult, deleteInfo));
|
|
||||||
}
|
}
|
||||||
// putting a new delete marker
|
// putting a new delete marker
|
||||||
deleteInfo.newDeleteMarker = true;
|
deleteInfo.newDeleteMarker = true;
|
||||||
return createAndStoreObject(bucketName, bucketMD,
|
return createAndStoreObject(bucketName, bucketMD,
|
||||||
objectKey, objectMD, authInfo, canonicalID, null, request,
|
objectKey, objectMD, authInfo, canonicalID, null, request,
|
||||||
deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ?
|
deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) => {
|
||||||
's3:LifecycleExpiration:DeleteMarkerCreated' :
|
|
||||||
's3:ObjectRemoved:DeleteMarkerCreated',
|
|
||||||
(err, newDelMarkerRes) => {
|
|
||||||
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
|
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
|
@ -307,21 +288,4 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
module.exports = objectDelete;
|
||||||
* This function is used to delete an object from a bucket. The bucket must
|
|
||||||
* already exist and the user must have permission to delete the object.
|
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
|
||||||
* @param {object} request - http request object
|
|
||||||
* @param {werelogs.Logger} log - Logger object
|
|
||||||
* @param {function} cb - callback to server
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
function objectDelete(authInfo, request, log, cb) {
|
|
||||||
log.debug('processing request', { method: 'objectDelete' });
|
|
||||||
return objectDeleteInternal(authInfo, request, log, false, cb);
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
objectDelete,
|
|
||||||
objectDeleteInternal,
|
|
||||||
};
|
|
||||||
|
|
|
@ -1,17 +1,16 @@
|
||||||
const async = require('async');
|
const async = require('async');
|
||||||
const { errors } = require('arsenal');
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions }
|
const { decodeVersionId, getVersionIdResHeader }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
|
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const metadata = require('../metadata/wrapper');
|
const metadata = require('../metadata/wrapper');
|
||||||
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
|
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
|
||||||
const { data } = require('../data/wrapper');
|
const { data } = require('../data/wrapper');
|
||||||
const { config } = require('../Config');
|
|
||||||
const REPLICATION_ACTION = 'DELETE_TAGGING';
|
const REPLICATION_ACTION = 'DELETE_TAGGING';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -42,14 +41,13 @@ function objectDeleteTagging(authInfo, request, log, callback) {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
|
requestType: 'objectDeleteTagging',
|
||||||
versionId: reqVersionId,
|
versionId: reqVersionId,
|
||||||
getDeleteMarker: true,
|
|
||||||
requestType: request.apiMethods || 'objectDeleteTagging',
|
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucketAndObj(metadataValParams, log,
|
||||||
(err, bucket, objectMD) => {
|
(err, bucket, objectMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('request authorization failed',
|
log.trace('request authorization failed',
|
||||||
|
@ -66,8 +64,6 @@ function objectDeleteTagging(authInfo, request, log, callback) {
|
||||||
if (objectMD.isDeleteMarker) {
|
if (objectMD.isDeleteMarker) {
|
||||||
log.trace('version is a delete marker',
|
log.trace('version is a delete marker',
|
||||||
{ method: 'objectDeleteTagging' });
|
{ method: 'objectDeleteTagging' });
|
||||||
// FIXME we should return a `x-amz-delete-marker: true` header,
|
|
||||||
// see S3C-7592
|
|
||||||
return next(errors.MethodNotAllowed, bucket);
|
return next(errors.MethodNotAllowed, bucket);
|
||||||
}
|
}
|
||||||
return next(null, bucket, objectMD);
|
return next(null, bucket, objectMD);
|
||||||
|
@ -75,7 +71,8 @@ function objectDeleteTagging(authInfo, request, log, callback) {
|
||||||
(bucket, objectMD, next) => {
|
(bucket, objectMD, next) => {
|
||||||
// eslint-disable-next-line no-param-reassign
|
// eslint-disable-next-line no-param-reassign
|
||||||
objectMD.tags = {};
|
objectMD.tags = {};
|
||||||
const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode);
|
const params = objectMD.versionId ? { versionId:
|
||||||
|
objectMD.versionId } : {};
|
||||||
const replicationInfo = getReplicationInfo(objectKey, bucket, true,
|
const replicationInfo = getReplicationInfo(objectKey, bucket, true,
|
||||||
0, REPLICATION_ACTION, objectMD);
|
0, REPLICATION_ACTION, objectMD);
|
||||||
if (replicationInfo) {
|
if (replicationInfo) {
|
||||||
|
@ -91,7 +88,7 @@ function objectDeleteTagging(authInfo, request, log, callback) {
|
||||||
},
|
},
|
||||||
(bucket, objectMD, next) =>
|
(bucket, objectMD, next) =>
|
||||||
// if external backends handles tagging
|
// if external backends handles tagging
|
||||||
data.objectTagging('Delete', objectKey, bucket.getName(), objectMD,
|
data.objectTagging('Delete', objectKey, bucket, objectMD,
|
||||||
log, err => next(err, bucket, objectMD)),
|
log, err => next(err, bucket, objectMD)),
|
||||||
], (err, bucket, objectMD) => {
|
], (err, bucket, objectMD) => {
|
||||||
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
|
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
|
|
|
@ -15,13 +15,12 @@ const getReplicationBackendDataLocator =
|
||||||
require('./apiUtils/object/getReplicationBackendDataLocator');
|
require('./apiUtils/object/getReplicationBackendDataLocator');
|
||||||
const checkReadLocation = require('./apiUtils/object/checkReadLocation');
|
const checkReadLocation = require('./apiUtils/object/checkReadLocation');
|
||||||
|
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
const { locationConstraints } = config;
|
const { locationConstraints } = config;
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
|
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
|
||||||
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
|
||||||
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
|
|
||||||
|
|
||||||
const validateHeaders = s3middleware.validateConditionalHeaders;
|
const validateHeaders = s3middleware.validateConditionalHeaders;
|
||||||
|
|
||||||
|
@ -65,12 +64,11 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
versionId,
|
versionId,
|
||||||
getDeleteMarker: true,
|
requestType: 'objectGet',
|
||||||
requestType: request.apiMethods || 'objectGet',
|
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log,
|
return metadataValidateBucketAndObj(mdValParams, log,
|
||||||
(err, bucket, objMD) => {
|
(err, bucket, objMD) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, bucket);
|
request.method, bucket);
|
||||||
|
@ -90,12 +88,16 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
|
||||||
return callback(err, null, corsHeaders);
|
return callback(err, null, corsHeaders);
|
||||||
}
|
}
|
||||||
const verCfg = bucket.getVersioningConfiguration();
|
const verCfg = bucket.getVersioningConfiguration();
|
||||||
// check if object data is in a cold storage
|
if (objMD.archive &&
|
||||||
const coldErr = verifyColdObjectAvailable(objMD);
|
// Object is in cold backend
|
||||||
if (coldErr) {
|
(!objMD.archive.restoreRequestedAt ||
|
||||||
|
// Object is being restored
|
||||||
|
(objMD.archive.restoreRequestedAt &&
|
||||||
|
!objMD.archive.restoreCompletedAt))) {
|
||||||
|
const error = errors.InvalidObjectState;
|
||||||
monitoring.promMetrics(
|
monitoring.promMetrics(
|
||||||
'GET', bucketName, coldErr.code, 'getObject');
|
'GET', bucketName, error.code, 'getObject');
|
||||||
return callback(coldErr, null, corsHeaders);
|
return callback(error, null, corsHeaders);
|
||||||
}
|
}
|
||||||
if (objMD.isDeleteMarker) {
|
if (objMD.isDeleteMarker) {
|
||||||
const responseMetaHeaders = Object.assign({},
|
const responseMetaHeaders = Object.assign({},
|
||||||
|
|
|
@ -7,7 +7,7 @@ const { pushMetric } = require('../utapi/utilities');
|
||||||
const { decodeVersionId, getVersionIdResHeader }
|
const { decodeVersionId, getVersionIdResHeader }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
const vault = require('../auth/vault');
|
const vault = require('../auth/vault');
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const monitoring = require('../utilities/monitoringHandler');
|
const monitoring = require('../utilities/monitoringHandler');
|
||||||
|
|
||||||
// Sample XML response:
|
// Sample XML response:
|
||||||
|
@ -54,14 +54,12 @@ function objectGetACL(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
const versionId = decodedVidResult;
|
const versionId = decodedVidResult;
|
||||||
|
|
||||||
// FIXME pass 'getDeleteMarker: true' option to set
|
|
||||||
// 'x-amz-delete-marker' header (see S3C-7592)
|
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
versionId,
|
versionId,
|
||||||
requestType: request.apiMethods || 'objectGetACL',
|
requestType: 'objectGetACL',
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
const grantInfo = {
|
const grantInfo = {
|
||||||
|
@ -74,7 +72,7 @@ function objectGetACL(authInfo, request, log, callback) {
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
function validateBucketAndObj(next) {
|
function validateBucketAndObj(next) {
|
||||||
return standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
|
return metadataValidateBucketAndObj(metadataValParams, log,
|
||||||
(err, bucket, objectMD) => {
|
(err, bucket, objectMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('request authorization failed',
|
log.trace('request authorization failed',
|
||||||
|
@ -92,14 +90,10 @@ function objectGetACL(authInfo, request, log, callback) {
|
||||||
if (versionId) {
|
if (versionId) {
|
||||||
log.trace('requested version is delete marker',
|
log.trace('requested version is delete marker',
|
||||||
{ method: 'objectGetACL' });
|
{ method: 'objectGetACL' });
|
||||||
// FIXME we should return a `x-amz-delete-marker: true` header,
|
|
||||||
// see S3C-7592
|
|
||||||
return next(errors.MethodNotAllowed);
|
return next(errors.MethodNotAllowed);
|
||||||
}
|
}
|
||||||
log.trace('most recent version is delete marker',
|
log.trace('most recent version is delete marker',
|
||||||
{ method: 'objectGetACL' });
|
{ method: 'objectGetACL' });
|
||||||
// FIXME we should return a `x-amz-delete-marker: true` header,
|
|
||||||
// see S3C-7592
|
|
||||||
return next(errors.NoSuchKey);
|
return next(errors.NoSuchKey);
|
||||||
}
|
}
|
||||||
return next(null, bucket, objectMD);
|
return next(null, bucket, objectMD);
|
||||||
|
|
|
@ -4,7 +4,7 @@ const { errors, s3middleware } = require('arsenal');
|
||||||
const { decodeVersionId, getVersionIdResHeader }
|
const { decodeVersionId, getVersionIdResHeader }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
|
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
||||||
|
@ -33,19 +33,17 @@ function objectGetLegalHold(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
const versionId = decodedVidResult;
|
const versionId = decodedVidResult;
|
||||||
|
|
||||||
// FIXME pass 'getDeleteMarker: true' option to set
|
|
||||||
// 'x-amz-delete-marker' header (see S3C-7592)
|
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
|
requestType: 'objectGetLegalHold',
|
||||||
versionId,
|
versionId,
|
||||||
requestType: request.apiMethods || 'objectGetLegalHold',
|
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucketAndObj(metadataValParams, log,
|
||||||
(err, bucket, objectMD) => {
|
(err, bucket, objectMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('request authorization failed',
|
log.trace('request authorization failed',
|
||||||
|
@ -63,14 +61,10 @@ function objectGetLegalHold(authInfo, request, log, callback) {
|
||||||
if (versionId) {
|
if (versionId) {
|
||||||
log.trace('requested version is delete marker',
|
log.trace('requested version is delete marker',
|
||||||
{ method: 'objectGetLegalHold' });
|
{ method: 'objectGetLegalHold' });
|
||||||
// FIXME we should return a `x-amz-delete-marker: true` header,
|
|
||||||
// see S3C-7592
|
|
||||||
return next(errors.MethodNotAllowed);
|
return next(errors.MethodNotAllowed);
|
||||||
}
|
}
|
||||||
log.trace('most recent version is delete marker',
|
log.trace('most recent version is delete marker',
|
||||||
{ method: 'objectGetLegalHold' });
|
{ method: 'objectGetLegalHold' });
|
||||||
// FIXME we should return a `x-amz-delete-marker: true` header,
|
|
||||||
// see S3C-7592
|
|
||||||
return next(errors.NoSuchKey);
|
return next(errors.NoSuchKey);
|
||||||
}
|
}
|
||||||
if (!bucket.isObjectLockEnabled()) {
|
if (!bucket.isObjectLockEnabled()) {
|
||||||
|
|
|
@ -4,7 +4,7 @@ const { errors, s3middleware } = require('arsenal');
|
||||||
const { decodeVersionId, getVersionIdResHeader }
|
const { decodeVersionId, getVersionIdResHeader }
|
||||||
= require('./apiUtils/object/versioning');
|
= require('./apiUtils/object/versioning');
|
||||||
|
|
||||||
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
|
||||||
const { pushMetric } = require('../utapi/utilities');
|
const { pushMetric } = require('../utapi/utilities');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
||||||
|
@ -33,19 +33,17 @@ function objectGetRetention(authInfo, request, log, callback) {
|
||||||
}
|
}
|
||||||
const reqVersionId = decodedVidResult;
|
const reqVersionId = decodedVidResult;
|
||||||
|
|
||||||
// FIXME pass 'getDeleteMarker: true' option to set
|
|
||||||
// 'x-amz-delete-marker' header (see S3C-7592)
|
|
||||||
const metadataValParams = {
|
const metadataValParams = {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucketName,
|
bucketName,
|
||||||
objectKey,
|
objectKey,
|
||||||
|
requestType: 'objectGetRetention',
|
||||||
versionId: reqVersionId,
|
versionId: reqVersionId,
|
||||||
requestType: request.apiMethods || 'objectGetRetention',
|
|
||||||
request,
|
request,
|
||||||
};
|
};
|
||||||
|
|
||||||
return async.waterfall([
|
return async.waterfall([
|
||||||
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
|
next => metadataValidateBucketAndObj(metadataValParams, log,
|
||||||
(err, bucket, objectMD) => {
|
(err, bucket, objectMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.trace('request authorization failed',
|
log.trace('request authorization failed',
|
||||||
|
@ -63,14 +61,10 @@ function objectGetRetention(authInfo, request, log, callback) {
|
||||||
if (reqVersionId) {
|
if (reqVersionId) {
|
||||||
log.trace('requested version is delete marker',
|
log.trace('requested version is delete marker',
|
||||||
{ method: 'objectGetRetention' });
|
{ method: 'objectGetRetention' });
|
||||||
// FIXME we should return a `x-amz-delete-marker: true` header,
|
|
||||||
// see S3C-7592
|
|
||||||
return next(errors.MethodNotAllowed);
|
return next(errors.MethodNotAllowed);
|
||||||
}
|
}
|
||||||
log.trace('most recent version is delete marker',
|
log.trace('most recent version is delete marker',
|
||||||
{ method: 'objectGetRetention' });
|
{ method: 'objectGetRetention' });
|
||||||
// FIXME we should return a `x-amz-delete-marker: true` header,
|
|
||||||
// see S3C-7592
|
|
||||||
return next(errors.NoSuchKey);
|
return next(errors.NoSuchKey);
|
||||||
}
|
}
|
||||||
if (!bucket.isObjectLockEnabled()) {
|
if (!bucket.isObjectLockEnabled()) {
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue