Compare commits


6 Commits

Author SHA1 Message Date
Francois Ferrand 0a1bbb95ec
Merge branch 'w/7.70/improvement/PTFE-1339' into w/8.6/improvement/PTFE-1339 2024-04-09 22:54:18 +02:00
Francois Ferrand 45262dfc30
Merge branch 'improvement/PTFE-1339' into w/7.70/improvement/PTFE-1339 2024-04-09 22:28:11 +02:00
Francois Ferrand 60c4469237
Use official docker build steps
The docker-build step from `scality/workflows/` fails to log in to
ghcr, as it picks up the old registry creds.

Issue: PTFE-1339
2024-04-09 22:06:23 +02:00
Francois Ferrand 483d19fce1
Build pykmip image
Issue: PTFE-1339
2024-04-09 22:06:21 +02:00
Francois Ferrand bfbf1c0fa0
Upgrade actions
- artifacts@v4
- cache@v4
- checkout@v4
- codeql@v3
- dependency-review@v4
- login@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5

Issue: PTFE-1339
2024-04-09 22:05:56 +02:00
Francois Ferrand b5be74e6cc
Migrate to ghcr
Issue: PTFE-1339
2024-04-09 22:05:54 +02:00
148 changed files with 8937 additions and 8991 deletions
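
For context, the "official docker build steps" referenced in commit 60c4469237 are the standard Docker GitHub Actions (registry login, buildx setup, build-and-push) that appear throughout the workflow diff below. A minimal sketch of such a job, with illustrative job and tag names, might look like this:

```yaml
jobs:
  build:
    runs-on: ubuntu-20.04
    permissions:
      contents: read
      packages: write   # required to push to ghcr.io
    steps:
      - uses: actions/checkout@v4
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          push: true
          tags: ghcr.io/${{ github.repository }}:${{ github.sha }}
          cache-from: type=gha,scope=cloudserver
          cache-to: type=gha,mode=max,scope=cloudserver
```

The actual job definitions are in the workflow files shown further down in this diff.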

View File

@ -1,8 +1,5 @@
{ {
"extends": "scality", "extends": "scality",
"plugins": [
"mocha"
],
"rules": { "rules": {
"import/extensions": "off", "import/extensions": "off",
"lines-around-directive": "off", "lines-around-directive": "off",
@ -45,8 +42,7 @@
"no-restricted-properties": "off", "no-restricted-properties": "off",
"new-parens": "off", "new-parens": "off",
"no-multi-spaces": "off", "no-multi-spaces": "off",
"quote-props": "off", "quote-props": "off"
"mocha/no-exclusive-tests": "error",
}, },
"parserOptions": { "parserOptions": {
"ecmaVersion": 2020 "ecmaVersion": 2020

View File

@ -23,7 +23,7 @@ runs:
- name: install dependencies - name: install dependencies
shell: bash shell: bash
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1 run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
- uses: actions/cache@v3 - uses: actions/cache@v2
with: with:
path: ~/.cache/pip path: ~/.cache/pip
key: ${{ runner.os }}-pip key: ${{ runner.os }}-pip
@ -35,9 +35,3 @@ runs:
run: | run: |
sudo apt-get install -y libdigest-hmac-perl sudo apt-get install -y libdigest-hmac-perl
pip install 's3cmd==2.3.0' pip install 's3cmd==2.3.0'
- name: fix sproxyd.conf permissions
shell: bash
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
- name: ensure fuse kernel module is loaded (for sproxyd)
shell: bash
run: sudo modprobe fuse

View File

@ -40,11 +40,6 @@ services:
- DEFAULT_BUCKET_KEY_FORMAT - DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS - METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE - ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file: env_file:
- creds.env - creds.env
depends_on: depends_on:
@ -78,15 +73,8 @@ services:
mongo: mongo:
network_mode: "host" network_mode: "host"
profiles: ['mongo', 'ceph'] profiles: ['mongo', 'ceph']
image: ${MONGODB_IMAGE} image: scality/ci-mongo:3.6.8
ceph: ceph:
network_mode: "host" network_mode: "host"
profiles: ['ceph'] profiles: ['ceph']
image: ghcr.io/scality/cloudserver/ci-ceph image: ghcr.io/scality/cloudserver/ci-ceph
sproxyd:
network_mode: "host"
profiles: ['sproxyd']
image: sproxyd-standalone
build: ./sproxyd
user: 0:0
privileged: yes

View File

@ -1,28 +0,0 @@
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \
CONF_DIR=/conf \
DATA_DIR=/data
# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
mkdir /logs; \
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
chown -R ${USER} ${CONF_DIR} && \
chown -R ${USER} ${DATA_DIR}
# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js
EXPOSE 27017/tcp
EXPOSE 27018
# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]

View File

@ -1,4 +0,0 @@
rs.initiate({
_id: "rs0",
members: [{ _id: 0, host: "127.0.0.1:27018" }]
});

View File

@ -1,10 +0,0 @@
#!/bin/bash
set -exo pipefail
init_RS() {
sleep 5
mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &
mongod --bind_ip_all --config=/conf/mongod.conf

View File

@ -1,15 +0,0 @@
storage:
journal:
enabled: true
engine: wiredTiger
dbPath: "/data/db"
processManagement:
fork: false
net:
port: 27018
bindIp: 0.0.0.0
replication:
replSetName: "rs0"
enableMajorityReadConcern: true
security:
authorization: disabled

View File

@ -1,3 +0,0 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@ -1,26 +0,0 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

View File

@ -1,88 +0,0 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}

View File

@ -1,12 +0,0 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
},
}

View File

@ -1,43 +0,0 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root

View File

@ -20,16 +20,13 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v2
- name: Render and test ${{ matrix.tests.name }} - name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.3 uses: scality/action-prom-render-test@1.0.1
with: with:
alert_file_path: monitoring/alerts.yaml alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }} test_file_path: ${{ matrix.tests.file }}
alert_inputs: | alert_inputs: >-
namespace=zenko namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -14,7 +14,7 @@ env:
jobs: jobs:
build-federation-image: build-federation-image:
runs-on: ubuntu-20.04 runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4

View File

@ -2,8 +2,6 @@
name: tests name: tests
on: on:
workflow_dispatch:
push: push:
branches-ignore: branches-ignore:
- 'development/**' - 'development/**'
@ -67,8 +65,7 @@ env:
ENABLE_LOCAL_CACHE: "true" ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1" REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1" REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs: jobs:
linting-coverage: linting-coverage:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -128,9 +125,6 @@ jobs:
build: build:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
permissions:
contents: read
packages: write
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
@ -150,9 +144,6 @@ jobs:
provenance: false provenance: false
tags: | tags: |
ghcr.io/${{ github.repository }}:${{ github.sha }} ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image - name: Build and push pykmip image
@ -162,26 +153,14 @@ jobs:
context: .github/pykmip context: .github/pykmip
tags: | tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }} ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend: multiple-backend:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build needs: build
env: env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple S3DATA: multiple
@ -189,22 +168,15 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker compose --profile sproxyd up -d run: docker compose up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run multiple backend test - name: Run multiple backend test
run: |- run: |-
set -o pipefail; set -o pipefail;
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env: env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
@ -228,7 +200,6 @@ jobs:
S3KMS: file S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0 DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
@ -267,7 +238,6 @@ jobs:
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v1 DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1 METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
@ -308,7 +278,6 @@ jobs:
S3BACKEND: file S3BACKEND: file
S3VAULT: mem S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes" MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }} JOB_NAME: ${{ matrix.job-name }}
steps: steps:
@ -347,7 +316,6 @@ jobs:
S3BACKEND: mem S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
@ -372,50 +340,6 @@ jobs:
source: /tmp/artifacts source: /tmp/artifacts
if: always() if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
kmip-ft-tests: kmip-ft-tests:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build needs: build
@ -425,7 +349,6 @@ jobs:
MPU_TESTING: "yes" MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }} PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
@ -464,7 +387,6 @@ jobs:
CI_CEPH: 'true' CI_CEPH: 'true'
MPU_TESTING: "yes" MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:

View File

@ -1,4 +1,4 @@
ARG NODE_VERSION=16.20-bullseye-slim ARG NODE_VERSION=16.17.1-bullseye-slim
FROM node:${NODE_VERSION} as builder FROM node:${NODE_VERSION} as builder
@ -23,7 +23,6 @@ RUN apt-get update \
ENV PYTHON=python3 ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/ COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1 RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
################################################################################ ################################################################################
@ -43,7 +42,6 @@ EXPOSE 8002
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
jq \ jq \
tini \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app WORKDIR /usr/src/app
@ -55,6 +53,6 @@ COPY --from=builder /usr/src/app/node_modules ./node_modules/
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"] VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"] ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ] CMD [ "yarn", "start" ]

README.md
View File

@ -1,7 +1,10 @@
# Zenko CloudServer with Vitastor Backend # Zenko CloudServer
![Zenko CloudServer logo](res/scality-cloudserver-logo.png) ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
## Overview ## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -11,71 +14,137 @@ Scality's Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud. backend data storage both on-premise or public in the cloud.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor) CloudServer is useful for Developers, either to run as part of a
backend support. continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage enabled
application on the go.
## Quick Start with Vitastor ## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
Vitastor Backend is in experimental status, however you can already try to ## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
it works too 😊.
Installation instructions: ## Docker
### Install Vitastor [Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md). ## Contributing
### Install Zenko with Vitastor Backend In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor` ## Installation
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
### Install and Configure MongoDB ### Dependencies
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/). Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
### Setup Zenko ### Clone source code
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data` ```shell
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data` git clone https://github.com/scality/S3.git
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
in this file.
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer.
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
``` ```
``` ### Install js dependencies
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \ Go to the ./S3 folder,
geesefs --endpoint http://localhost:8000 testbucket mountdir
```shell
yarn install --frozen-lockfile
``` ```
# Author & License If you get an error regarding installation of the diskUsage module,
please install g++.
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0) If you get an error regarding level-down bindings, try clearing your yarn cache:
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way) ```shell
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
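As an illustration, a PUT that pins an object to a specific backend via that header could look like this with the AWS CLI, assuming a local CloudServer on port 8000, the default test credentials, an existing bucket named testbucket, and a location named myLocationConstraint defined in locationConfig.json:
```shell
export AWS_ACCESS_KEY_ID=accessKey1
export AWS_SECRET_ACCESS_KEY=verySecretKey1
# --metadata keys are sent as x-amz-meta-<key>, so this sets
# x-amz-meta-scal-location-constraint on the PUT request
aws --endpoint-url http://localhost:8000 s3api put-object \
    --bucket testbucket \
    --key myobject \
    --body ./myobject \
    --metadata scal-location-constraint=myLocationConstraint
```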
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it with Vault user management
Note: Vault is proprietary and must be accessed separately.
```shell
export S3VAULT=vault
yarn start
```
This starts a Zenko CloudServer using Vault for user management.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae

bin/metrics_server.js Executable file
View File

@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

bin/secure_channel_proxy.js Executable file
View File

@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -4,7 +4,6 @@
"metricsPort": 8002, "metricsPort": 8002,
"metricsListenOn": [], "metricsListenOn": [],
"replicationGroupId": "RG001", "replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": { "restEndpoints": {
"localhost": "us-east-1", "localhost": "us-east-1",
"127.0.0.1": "us-east-1", "127.0.0.1": "us-east-1",
@ -102,14 +101,6 @@
"readPreference": "primary", "readPreference": "primary",
"database": "metadata" "database": "metadata"
}, },
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": { "externalBackends": {
"aws_s3": { "aws_s3": {
"httpAgent": { "httpAgent": {

View File

@ -1,71 +0,0 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@ -116,7 +116,7 @@ const constants = {
], ],
// user metadata header to set object locationConstraint // user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-storage-class', objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified', lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'], legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties // declare here all existing service accounts and their properties
@ -130,7 +130,7 @@ const constants = {
}, },
}, },
/* eslint-disable camelcase */ /* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true }, externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true },
// some of the available data backends (if called directly rather // some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided // than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods. // as a string as first parameter of the get/delete methods.
@ -205,6 +205,9 @@ const constants = {
], ],
allowedUtapiEventFilterStates: ['allow', 'deny'], allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'], allowedRestoreObjectRequestTierValues: ['Standard'],
validStorageClasses: [
'STANDARD',
],
lifecycleListing: { lifecycleListing: {
CURRENT_TYPE: 'current', CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent', NON_CURRENT_TYPE: 'noncurrent',
@ -240,9 +243,6 @@ const constants = {
'objectPutPart', 'objectPutPart',
'completeMultipartUpload', 'completeMultipartUpload',
], ],
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
}; };
module.exports = constants; module.exports = constants;

View File

@ -2,12 +2,11 @@
## Docker Image Generation ## Docker Image Generation
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages). Docker images are hosted on [registry.scality.com](registry.scality.com).
CloudServer has a few images there: CloudServer has two namespaces there:
* Cloudserver container image: ghcr.io/scality/cloudserver * Production Namespace: registry.scality.com/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard * Dev Namespace: registry.scality.com/cloudserver-dev
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
With every CI build, the CI will push images, tagging the With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash. content with the developer branch's short SHA-1 commit hash.
@ -19,8 +18,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images ## How to Pull Docker Images
```sh ```sh
docker pull ghcr.io/scality/cloudserver:<commit hash> docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag> docker pull registry.scality.com/cloudserver/cloudserver:<tag>
``` ```
## Release Process ## Release Process

View File

@ -14,10 +14,8 @@ RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \ git config --global --add safe.directory . && \
git lfs install && \ git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \ GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \ yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all && \ yarn cache clean --all
yarn global remove typescript
# run symlinking separately to avoid yarn installation errors # run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed! # we might have to check if the symlinking is really needed!

View File

@ -1,10 +1,10 @@
'use strict'; // eslint-disable-line strict 'use strict'; // eslint-disable-line strict
require('werelogs').stderrUtils.catchAndTimestampStderr( /**
undefined, * Catch uncaught exceptions and add timestamp to aid debugging
// Do not exit as workers have their own listener that will exit */
// But primary don't have another listener process.on('uncaughtException', err => {
require('cluster').isPrimary ? 1 : null, process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
); });
require('./lib/server.js')(); require('./lib/server.js')();

View File

@ -8,17 +8,15 @@ const crypto = require('crypto');
const { v4: uuidv4 } = require('uuid'); const { v4: uuidv4 } = require('uuid');
const cronParser = require('cron-parser'); const cronParser = require('cron-parser');
const joi = require('@hapi/joi'); const joi = require('@hapi/joi');
const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
const { isValidBucketName } = s3routes.routesUtils; const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig; const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
const { buildAuthDataAccount } = require('./auth/in_memory/builder'); const { buildAuthDataAccount } = require('./auth/in_memory/builder');
const validExternalBackends = require('../constants').externalBackends; const validExternalBackends = require('../constants').externalBackends;
const { azureAccountNameRegex, base64Regex, const { azureAccountNameRegex, base64Regex,
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates, allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
} = require('../constants'); } = require('../constants');
const { utapiVersion } = require('utapi'); const { utapiVersion } = require('utapi');
const { scaleMsPerDay } = s3middleware.objectUtils;
const constants = require('../constants'); const constants = require('../constants');
// config paths // config paths
@ -107,47 +105,6 @@ function parseSproxydConfig(configSproxyd) {
return joi.attempt(configSproxyd, joiSchema, 'bad config'); return joi.attempt(configSproxyd, joiSchema, 'bad config');
} }
function parseRedisConfig(redisConfig) {
const joiSchema = joi.object({
password: joi.string().allow(''),
host: joi.string(),
port: joi.number(),
retry: joi.object({
connectBackoff: joi.object({
min: joi.number().required(),
max: joi.number().required(),
jitter: joi.number().required(),
factor: joi.number().required(),
deadline: joi.number().required(),
}),
}),
// sentinel config
sentinels: joi.alternatives().try(
joi.string()
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
.custom(hosts => hosts.split(',').map(item => {
const [host, port] = item.split(':');
return { host, port: Number.parseInt(port, 10) };
})),
joi.array().items(
joi.object({
host: joi.string().required(),
port: joi.number().required(),
})
).min(1),
),
name: joi.string(),
sentinelPassword: joi.string().allow(''),
})
.and('host', 'port')
.and('sentinels', 'name')
.xor('host', 'sentinels')
.without('sentinels', ['host', 'port'])
.without('host', ['sentinels', 'sentinelPassword']);
return joi.attempt(redisConfig, joiSchema, 'bad config');
}
function restEndpointsAssert(restEndpoints, locationConstraints) { function restEndpointsAssert(restEndpoints, locationConstraints) {
assert(typeof restEndpoints === 'object', assert(typeof restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints'); 'bad config: restEndpoints must be an object of endpoints');
@ -280,60 +237,6 @@ function hdClientLocationConstraintAssert(configHd) {
return hdclientFields; return hdclientFields;
} }
function azureArchiveLocationConstraintAssert(locationObj) {
const checkedFields = [
'azureContainerName',
'azureStorageEndpoint',
];
if (Object.keys(locationObj.details).length === 0 ||
!checkedFields.every(field => field in locationObj.details)) {
return;
}
const {
azureContainerName,
azureStorageEndpoint,
} = locationObj.details;
const stringFields = [
azureContainerName,
azureStorageEndpoint,
];
stringFields.forEach(field => {
assert(typeof field === 'string',
`bad config: ${field} must be a string`);
});
let hasAuthMethod = false;
if (locationObj.details.sasToken !== undefined) {
assert(typeof locationObj.details.sasToken === 'string',
`bad config: ${locationObj.details.sasToken} must be a string`);
hasAuthMethod = true;
}
if (locationObj.details.azureStorageAccountName !== undefined &&
locationObj.details.azureStorageAccessKey !== undefined) {
assert(typeof locationObj.details.azureStorageAccountName === 'string',
`bad config: ${locationObj.details.azureStorageAccountName} must be a string`);
assert(typeof locationObj.details.azureStorageAccessKey === 'string',
`bad config: ${locationObj.details.azureStorageAccessKey} must be a string`);
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
hasAuthMethod = true;
}
if (locationObj.details.tenantId !== undefined &&
locationObj.details.clientId !== undefined &&
locationObj.details.clientKey !== undefined) {
assert(typeof locationObj.details.tenantId === 'string',
`bad config: ${locationObj.details.tenantId} must be a string`);
assert(typeof locationObj.details.clientId === 'string',
`bad config: ${locationObj.details.clientId} must be a string`);
assert(typeof locationObj.details.clientKey === 'string',
`bad config: ${locationObj.details.clientKey} must be a string`);
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
hasAuthMethod = true;
}
assert(hasAuthMethod, 'Missing authentication method');
}
function dmfLocationConstraintAssert(locationObj) { function dmfLocationConstraintAssert(locationObj) {
const checkedFields = [ const checkedFields = [
'endpoint', 'endpoint',
@ -377,7 +280,7 @@ function dmfLocationConstraintAssert(locationObj) {
function locationConstraintAssert(locationConstraints) { function locationConstraintAssert(locationConstraints) {
const supportedBackends = const supportedBackends =
['mem', 'file', 'scality', ['mem', 'file', 'scality',
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends)); 'mongodb', 'dmf'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object', assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object'); 'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => { Object.keys(locationConstraints).forEach(l => {
@ -488,9 +391,6 @@ function locationConstraintAssert(locationConstraints) {
if (locationConstraints[l].type === 'dmf') { if (locationConstraints[l].type === 'dmf') {
dmfLocationConstraintAssert(locationConstraints[l]); dmfLocationConstraintAssert(locationConstraints[l]);
} }
if (locationConstraints[l].type === 'azure_archive') {
azureArchiveLocationConstraintAssert(locationConstraints[l]);
}
if (locationConstraints[l].type === 'pfs') { if (locationConstraints[l].type === 'pfs') {
assert(typeof details.pfsDaemonEndpoint === 'object', assert(typeof details.pfsDaemonEndpoint === 'object',
'bad config: pfsDaemonEndpoint is mandatory and must be an object'); 'bad config: pfsDaemonEndpoint is mandatory and must be an object');
@ -502,23 +402,27 @@ function locationConstraintAssert(locationConstraints) {
locationConstraints[l].details.connector.hdclient); locationConstraints[l].details.connector.hdclient);
} }
}); });
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
} }
function parseUtapiReindex(config) { function parseUtapiReindex(config) {
const { const {
enabled, enabled,
schedule, schedule,
redis, sentinel,
bucketd, bucketd,
onlyCountLatestWhenObjectLocked, onlyCountLatestWhenObjectLocked,
} = config; } = config;
assert(typeof enabled === 'boolean', assert(typeof enabled === 'boolean',
'bad config: utapi.reindex.enabled must be a boolean'); 'bad config: utapi.reindex.enabled must be a boolean');
assert(typeof sentinel === 'object',
const parsedRedis = parseRedisConfig(redis); 'bad config: utapi.reindex.sentinel must be an object');
assert(Array.isArray(parsedRedis.sentinels), assert(typeof sentinel.port === 'number',
'bad config: utapi reindex redis config requires a list of sentinels'); 'bad config: utapi.reindex.sentinel.port must be a number');
assert(typeof sentinel.name === 'string',
'bad config: utapi.reindex.sentinel.name must be a string');
assert(typeof bucketd === 'object', assert(typeof bucketd === 'object',
'bad config: utapi.reindex.bucketd must be an object'); 'bad config: utapi.reindex.bucketd must be an object');
assert(typeof bucketd.port === 'number', assert(typeof bucketd.port === 'number',
@ -536,13 +440,6 @@ function parseUtapiReindex(config) {
'bad config: utapi.reindex.schedule must be a valid ' + 'bad config: utapi.reindex.schedule must be a valid ' +
`cron schedule. ${e.message}.`); `cron schedule. ${e.message}.`);
} }
return {
enabled,
schedule,
redis: parsedRedis,
bucketd,
onlyCountLatestWhenObjectLocked,
};
} }
function requestsConfigAssert(requestsConfig) { function requestsConfigAssert(requestsConfig) {
@ -630,6 +527,7 @@ class Config extends EventEmitter {
// Read config automatically // Read config automatically
this._getLocationConfig(); this._getLocationConfig();
this._getConfig(); this._getConfig();
this._configureBackends();
} }
_getLocationConfig() { _getLocationConfig() {
@ -841,11 +739,11 @@ class Config extends EventEmitter {
this.websiteEndpoints = config.websiteEndpoints; this.websiteEndpoints = config.websiteEndpoints;
} }
this.workers = false; this.clusters = false;
if (config.workers !== undefined) { if (config.clusters !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0, assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: workers must be a positive integer'); 'bad config: clusters must be a positive integer');
this.workers = config.workers; this.clusters = config.clusters;
} }
if (config.usEastBehavior !== undefined) { if (config.usEastBehavior !== undefined) {
@ -1083,7 +981,8 @@ class Config extends EventEmitter {
assert(typeof config.localCache.port === 'number', assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number'); 'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) { if (config.localCache.password !== undefined) {
assert(typeof config.localCache.password === 'string', assert(
this._verifyRedisPassword(config.localCache.password),
'config: vad password for localCache. password must' + 'config: vad password for localCache. password must' +
' be a string'); ' be a string');
} }
@ -1109,46 +1008,56 @@ class Config extends EventEmitter {
} }
if (config.redis) { if (config.redis) {
this.redis = parseRedisConfig(config.redis); if (config.redis.sentinels) {
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');
if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.redis.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.redis.sentinels)) {
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
} }
if (config.scuba) {
this.scuba = {}; if (config.redis.sentinelPassword !== undefined) {
if (config.scuba.host) { assert(
assert(typeof config.scuba.host === 'string', this._verifyRedisPassword(config.redis.sentinelPassword));
'bad config: scuba host must be a string'); this.redis.sentinelPassword = config.redis.sentinelPassword;
this.scuba.host = config.scuba.host;
} }
if (config.scuba.port) { } else {
assert(Number.isInteger(config.scuba.port) // check for standalone configuration
&& config.scuba.port > 0, this.redis = {};
'bad config: scuba port must be a positive integer'); assert(typeof config.redis.host === 'string',
this.scuba.port = config.scuba.port; 'bad config: redis.host must be a string');
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
}
if (config.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.redis.password),
'bad config: invalid password for redis. password must ' +
'be a string');
this.redis.password = config.redis.password;
} }
} }
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
assert(typeof process.env.SCUBA_HOST === 'string',
'bad config: scuba host must be a string');
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
&& Number(process.env.SCUBA_PORT) > 0,
'bad config: scuba port must be a positive integer');
this.scuba = {
host: process.env.SCUBA_HOST,
port: Number(process.env.SCUBA_PORT),
};
}
if (this.scuba) {
this.quotaEnabled = true;
}
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
config.quota?.maxStatenessMS ||
24 * 60 * 60 * 1000;
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
config.quota?.enableInflights || false;
this.quota = {
maxStaleness,
enableInflights,
};
if (config.utapi) { if (config.utapi) {
this.utapi = { component: 's3' }; this.utapi = { component: 's3' };
if (config.utapi.host) { if (config.utapi.host) {
@ -1177,8 +1086,50 @@ class Config extends EventEmitter {
assert(config.redis, 'missing required property of utapi ' + assert(config.redis, 'missing required property of utapi ' +
'configuration: redis'); 'configuration: redis');
if (config.utapi.redis) { if (config.utapi.redis) {
this.utapi.redis = parseRedisConfig(config.utapi.redis); if (config.utapi.redis.sentinels) {
if (this.utapi.redis.retry === undefined) { this.utapi.redis = { sentinels: [], name: null };
assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port });
});
} else {
// check for standalone configuration
this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port;
}
if (config.utapi.redis.retry !== undefined) {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}
this.utapi.redis.retry = config.utapi.redis.retry;
} else {
this.utapi.redis.retry = { this.utapi.redis.retry = {
connectBackoff: { connectBackoff: {
min: 10, min: 10,
@ -1189,6 +1140,22 @@ class Config extends EventEmitter {
}, },
}; };
} }
if (config.utapi.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.password = config.utapi.redis.password;
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(
config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
} }
if (config.utapi.metrics) { if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics; this.utapi.metrics = config.utapi.metrics;
@ -1258,7 +1225,8 @@ class Config extends EventEmitter {
} }
if (config.utapi && config.utapi.reindex) { if (config.utapi && config.utapi.reindex) {
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex); parseUtapiReindex(config.utapi.reindex);
this.utapi.reindex = config.utapi.reindex;
} }
} }
@ -1303,8 +1271,6 @@ class Config extends EventEmitter {
} }
} }
this.authdata = config.authdata || 'authdata.json';
this.kms = {}; this.kms = {};
if (config.kms) { if (config.kms) {
assert(typeof config.kms.userName === 'string'); assert(typeof config.kms.userName === 'string');
@ -1524,6 +1490,25 @@ class Config extends EventEmitter {
this.outboundProxy.certs = certObj.certs; this.outboundProxy.certs = certObj.certs;
} }
this.managementAgent = {};
this.managementAgent.port = 8010;
this.managementAgent.host = 'localhost';
if (config.managementAgent !== undefined) {
if (config.managementAgent.port !== undefined) {
assert(Number.isInteger(config.managementAgent.port)
&& config.managementAgent.port > 0,
'bad config: managementAgent port must be a positive ' +
'integer');
this.managementAgent.port = config.managementAgent.port;
}
if (config.managementAgent.host !== undefined) {
assert.strictEqual(typeof config.managementAgent.host, 'string',
'bad config: management agent host must ' +
'be a string');
this.managementAgent.host = config.managementAgent.host;
}
}
// Ephemeral token to protect the reporting endpoint: // Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file, // try inherited from parent first, then hardcoded in conf file,
// then create a fresh one as last resort. // then create a fresh one as last resort.
@ -1589,7 +1574,6 @@ class Config extends EventEmitter {
// Version of the configuration we're running under // Version of the configuration we're running under
this.overlayVersion = config.overlayVersion || 0; this.overlayVersion = config.overlayVersion || 0;
this._setTimeOptions();
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency; this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10); const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) { if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
@ -1613,77 +1597,37 @@ class Config extends EventEmitter {
'bad config: maxScannedLifecycleListingEntries must be greater than 2'); 'bad config: maxScannedLifecycleListingEntries must be greater than 2');
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries; this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
} }
this._configureBackends(config);
}
_setTimeOptions() {
// NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
// TIME_PROGRESSION_FACTOR which decreases the weight attributed to a day in order to among other things
// expedite the lifecycle of objects.
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
// decreases the weight attributed to a day in order to expedite the lifecycle of objects.
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
'"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
// The scaledMsPerDay value is initially set to the number of milliseconds per day
// (24 * 60 * 60 * 1000) as the default value.
// However, during testing, if the timeProgressionFactor is defined and greater than 1,
// the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
// earlier in time.
const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
this.timeOptions = {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
};
}
getTimeOptions() {
return this.timeOptions;
} }
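For readers following this hunk: `scaleMsPerDay` is imported elsewhere in Config.js and is not shown in the diff. A minimal sketch of its assumed behaviour, shrinking the millisecond length of a lifecycle "day" by the progression factor:

// Sketch of the assumed scaleMsPerDay helper (not the actual imported code):
// divide the real day length by the factor, never dropping below 1 ms.
const msPerDay = 24 * 60 * 60 * 1000;

function scaleMsPerDay(timeProgressionFactor = 1) {
    return Math.max(Math.round(msPerDay / (timeProgressionFactor || 1)), 1);
}

// With TIME_PROGRESSION_FACTOR=24, a lifecycle "day" lasts one hour:
// scaleMsPerDay(24) === 3600000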
_getAuthData() { _getAuthData() {
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' })); return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
} }
_configureBackends(config) { _configureBackends() {
const backends = config.backends || {};
/** /**
* Configure the backends for Authentication, Data and Metadata. * Configure the backends for Authentication, Data and Metadata.
*/ */
let auth = backends.auth || 'mem'; let auth = 'mem';
let data = backends.data || 'multiple'; let data = 'multiple';
let metadata = backends.metadata || 'file'; let metadata = 'file';
let kms = backends.kms || 'file'; let kms = 'file';
let quota = backends.quota || 'none';
if (process.env.S3BACKEND) { if (process.env.S3BACKEND) {
const validBackends = ['mem', 'file', 'scality', 'cdmi']; const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1, assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' + 'bad environment variable: S3BACKEND environment variable ' +
'should be one of mem/file/scality/cdmi' 'should be one of mem/file/scality/cdmi'
); );
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem'; auth = process.env.S3BACKEND;
data = process.env.S3BACKEND; data = process.env.S3BACKEND;
metadata = process.env.S3BACKEND; metadata = process.env.S3BACKEND;
kms = process.env.S3BACKEND; kms = process.env.S3BACKEND;
} }
if (process.env.S3VAULT) { if (process.env.S3VAULT) {
auth = process.env.S3VAULT; auth = process.env.S3VAULT;
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
} }
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') { if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
// Auth only checks for 'mem' since mem === file // Auth only checks for 'mem' since mem === file
auth = 'mem';
let authData; let authData;
if (process.env.SCALITY_ACCESS_KEY_ID && if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) { process.env.SCALITY_SECRET_ACCESS_KEY) {
@ -1712,10 +1656,10 @@ class Config extends EventEmitter {
'should be one of mem/file/scality/multiple' 'should be one of mem/file/scality/multiple'
); );
data = process.env.S3DATA; data = process.env.S3DATA;
}
if (data === 'scality' || data === 'multiple') { if (data === 'scality' || data === 'multiple') {
data = 'multiple'; data = 'multiple';
} }
}
assert(this.locationConstraints !== undefined && assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined, this.restEndpoints !== undefined,
'bad config: locationConstraints and restEndpoints must be set' 'bad config: locationConstraints and restEndpoints must be set'
@ -1727,18 +1671,18 @@ class Config extends EventEmitter {
if (process.env.S3KMS) { if (process.env.S3KMS) {
kms = process.env.S3KMS; kms = process.env.S3KMS;
} }
if (process.env.S3QUOTA) {
quota = process.env.S3QUOTA;
}
this.backends = { this.backends = {
auth, auth,
data, data,
metadata, metadata,
kms, kms,
quota,
}; };
} }
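To summarise the selection rules above (environment variables override the config file, and file/mem/cdmi auth all collapse to 'mem'), here is a standalone sketch; it is hypothetical glue code, not the Config class itself:

// Hypothetical standalone version of the backend-selection rules above.
function resolveBackends(config = {}, env = process.env) {
    const backends = config.backends || {};
    let auth = backends.auth || 'mem';
    let data = backends.data || 'multiple';
    if (env.S3BACKEND) {
        auth = env.S3BACKEND === 'scality' ? 'scality' : 'mem';
        data = env.S3BACKEND;
    }
    if (env.S3VAULT) {
        auth = env.S3VAULT;
    }
    if (['file', 'mem', 'cdmi'].includes(auth)) {
        auth = 'mem'; // auth only checks for 'mem' since mem === file
    }
    if (data === 'scality' || data === 'multiple') {
        data = 'multiple';
    }
    return { auth, data };
}

// resolveBackends({}, { S3BACKEND: 'file' }) -> { auth: 'mem', data: 'file' }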
_verifyRedisPassword(password) {
return typeof password === 'string';
}
setAuthDataAccounts(accounts) { setAuthDataAccounts(accounts) {
this.authData.accounts = accounts; this.authData.accounts = accounts;
this.emit('authdata-update'); this.emit('authdata-update');
@ -1861,19 +1805,10 @@ class Config extends EventEmitter {
.update(instanceId) .update(instanceId)
.digest('hex'); .digest('hex');
} }
isQuotaEnabled() {
return !!this.quotaEnabled;
}
isQuotaInflightEnabled() {
return this.quota.enableInflights;
}
} }
module.exports = { module.exports = {
parseSproxydConfig, parseSproxydConfig,
parseRedisConfig,
locationConstraintAssert, locationConstraintAssert,
ConfigObject: Config, ConfigObject: Config,
config: new Config(), config: new Config(),
@ -1881,5 +1816,4 @@ module.exports = {
bucketNotifAssert, bucketNotifAssert,
azureGetStorageAccountName, azureGetStorageAccountName,
azureGetLocationCredentials, azureGetLocationCredentials,
azureArchiveLocationConstraintAssert,
}; };
@ -7,7 +7,6 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite'); const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle'); const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy'); const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet'); const { bucketGet } = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL'); const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors'); const bucketGetCors = require('./bucketGetCors');
@ -18,7 +17,6 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification'); const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock'); const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy'); const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption'); const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead'); const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut'); const { bucketPut } = require('./bucketPut');
@ -35,7 +33,6 @@ const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption'); const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy'); const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock'); const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication'); const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication'); const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight'); const corsPreflight = require('./corsPreflight');
@ -47,7 +44,7 @@ const metadataSearch = require('./metadataSearch');
const { multiObjectDelete } = require('./multiObjectDelete'); const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete'); const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy'); const objectCopy = require('./objectCopy');
const { objectDelete } = require('./objectDelete'); const objectDelete = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging'); const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet'); const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL'); const objectGetACL = require('./objectGetACL');
@ -85,10 +82,6 @@ const api = {
// Attach the apiMethod method to the request, so it can used by monitoring in the server // Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod; request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod]; const actionLog = monitoringMap[apiMethod];
if (!actionLog && if (!actionLog &&
@ -197,17 +190,14 @@ const api = {
return async.waterfall([ return async.waterfall([
next => auth.server.doAuth( next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) { if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err }); log.trace('authentication error', { error: err });
return next(arsenalError); return next(err);
} }
return next(null, userInfo, authorizationResults, streamingV4Params, infos); return next(null, userInfo, authorizationResults, streamingV4Params);
}, 's3', requestContexts), }, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, infos, next) => { (userInfo, authorizationResults, streamingV4Params, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() }; const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) { if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName(); authNames.userName = userInfo.getIAMdisplayName();
@ -217,7 +207,7 @@ const api = {
} }
log.addDefaultFields(authNames); log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params, infos); return next(null, userInfo, authorizationResults, streamingV4Params);
} }
// issue 100 Continue to the client // issue 100 Continue to the client
writeContinue(request, response); writeContinue(request, response);
@ -248,12 +238,12 @@ const api = {
} }
// Convert array of post buffers into one string // Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString(); request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params, infos); return next(null, userInfo, authorizationResults, streamingV4Params);
}); });
return undefined; return undefined;
}, },
// Tag condition keys require information from CloudServer for evaluation // Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth( (userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
authorizationResults, authorizationResults,
request, request,
requestContexts, requestContexts,
@ -264,14 +254,13 @@ const api = {
log.trace('tag authentication error', { error: err }); log.trace('tag authentication error', { error: err });
return next(err); return next(err);
} }
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); return next(null, userInfo, authResultsWithTags, streamingV4Params);
}, },
), ),
], (err, userInfo, authorizationResults, streamingV4Params, infos) => { ], (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) { if (err) {
return callback(err); return callback(err);
} }
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) { if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults); const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) { if (checkedResults instanceof Error) {
@ -288,23 +277,19 @@ const api = {
return acc; return acc;
}, {}); }, {});
} }
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response; request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params, return this[apiMethod](userInfo, request, streamingV4Params,
log, methodCallback, authorizationResults); log, callback, authorizationResults);
} }
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket, return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, methodCallback); sourceObject, sourceVersionId, log, callback);
} }
if (apiMethod === 'objectGet') { if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback); return this[apiMethod](userInfo, request, returnTagCount, log, callback);
} }
return this[apiMethod](userInfo, request, log, methodCallback); return this[apiMethod](userInfo, request, log, callback);
}); });
}, },
bucketDelete, bucketDelete,
@ -331,14 +316,11 @@ const api = {
bucketPutReplication, bucketPutReplication,
bucketGetReplication, bucketGetReplication,
bucketDeleteReplication, bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle, bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle, bucketGetLifecycle,
bucketDeleteLifecycle, bucketDeleteLifecycle,
bucketPutPolicy, bucketPutPolicy,
bucketGetPolicy, bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy, bucketDeletePolicy,
bucketPutObjectLock, bucketPutObjectLock,
bucketPutNotification, bucketPutNotification,
@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3'); apiMethod, 's3');
} }
if (apiMethod === 'bucketPut') { if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
return null; return null;
} }
@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = []; const requestContexts = [];
if (apiMethod === 'multiObjectDelete') { if (apiMethodAfterVersionCheck === 'objectCopy'
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') { || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' : const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet'; 'objectGet';
@ -2,13 +2,11 @@
* Code based on Yutaka Oishi (Fujifilm) contributions * Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020 * Date: 11 Sep 2020
*/ */
const { ObjectMDArchive } = require('arsenal').models; const ObjectMDArchive = require('arsenal').models.ObjectMDArchive;
const errors = require('arsenal').errors; const errors = require('arsenal').errors;
const { config } = require('../../../Config'); const { config } = require('../../../Config');
const { locationConstraints } = config; const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/** /**
* Get response header "x-amz-restore" * Get response header "x-amz-restore"
* Be called by objectHead.js * Be called by objectHead.js
@ -34,6 +32,7 @@ function getAmzRestoreResHeader(objMD) {
return undefined; return undefined;
} }
/** /**
* Check if restore can be done. * Check if restore can be done.
* *
@ -42,23 +41,6 @@ function getAmzRestoreResHeader(objMD) {
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled * @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/ */
function _validateStartRestore(objectMD, log) { function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold; const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) { if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage, // return InvalidObjectState error if the object is not in cold storage,
@ -70,7 +52,18 @@ function _validateStartRestore(objectMD, log) {
}); });
return errors.InvalidObjectState; return errors.InvalidObjectState;
} }
if (objectMD.archive?.restoreRequestedAt) { if (objectMD.archive?.restoreCompletedAt
&& new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt && !objectMD.archive?.restoreCompletedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored // return RestoreAlreadyInProgress error if the object is currently being restored
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists // check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
log.debug('The object is currently being restored.', log.debug('The object is currently being restored.',
@ -127,36 +120,22 @@ function validatePutVersionId(objMD, versionId, log) {
} }
/** /**
* Check if the object is already restored, and update the expiration date accordingly: * Check if the object is already restored
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
* *
* @param {ObjectMD} objectMD - object metadata * @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger * @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored * @return {boolean} - true if the object is already restored
*/ */
function _updateObjectExpirationDate(objectMD, log) { function isObjectAlreadyRestored(objectMD, log) {
// Check if restoreCompletedAt field exists // check if restoreCompletedAt field exists
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is // and archive.restoreWillExpireAt > current time
// checked earlier in the process, so checking again here would create weird states const isObjectAlreadyRestored = objectMD.archive?.restoreCompletedAt
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt; && new Date(objectMD.archive?.restoreWillExpireAt) >= new Date(Date.now());
log.debug('The restore status of the object.', { log.debug('The restore status of the object.',
{
isObjectAlreadyRestored, isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored' method: 'isObjectAlreadyRestored'
}); });
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored; return isObjectAlreadyRestored;
} }
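The re-issued restore above extends the expiry relative to the new request. A small sketch of the date arithmetic, assuming scaledMsPerDay is the regular 86,400,000 ms:

// Sketch: recompute a restored object's expiry from the re-issued request.
const scaledMsPerDay = 24 * 60 * 60 * 1000; // assumed default (no progression factor)

function computeRestoreExpiry(restoreRequestedAt, restoreRequestedDays) {
    const expiryDate = new Date(restoreRequestedAt);
    expiryDate.setTime(expiryDate.getTime() + (restoreRequestedDays * scaledMsPerDay));
    return expiryDate;
}

// A 2-day restore requested on 2024-04-09 expires on 2024-04-11:
// computeRestoreExpiry('2024-04-09T00:00:00Z', 2).toISOString()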
@ -216,32 +195,12 @@ function startRestore(objectMD, restoreParam, log, cb) {
if (updateResultError) { if (updateResultError) {
return cb(updateResultError); return cb(updateResultError);
} }
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log); return cb(null, isObjectAlreadyRestored(objectMD, log));
return cb(null, isObjectAlreadyRestored);
} }
/**
* checks if object data is available or if it's in cold storage
* @param {ObjectMD} objMD Object metadata
* @returns {ArsenalError|null} error if object data is not available
*/
function verifyColdObjectAvailable(objMD) {
// return error when object is cold
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
const err = errors.InvalidObjectState
.customizeDescription('The operation is not valid for the object\'s storage class');
return err;
}
return null;
}
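The three archive states the new helper distinguishes can be illustrated with hypothetical metadata (condition copied from verifyColdObjectAvailable above):

// Illustration only; hypothetical metadata objects.
const isColdOrRestoring = objMD => !!(objMD.archive &&
    (!objMD.archive.restoreRequestedAt ||
        (objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt)));

console.log(isColdOrRestoring({ archive: {} }));                                   // true: still in the cold backend
console.log(isColdOrRestoring({ archive: { restoreRequestedAt: '2024-04-09' } })); // true: restore in progress
console.log(isColdOrRestoring({ archive: { restoreRequestedAt: '2024-04-09',
    restoreCompletedAt: '2024-04-10' } }));                                         // false: data is readable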
module.exports = { module.exports = {
startRestore, startRestore,
getAmzRestoreResHeader, getAmzRestoreResHeader,
validatePutVersionId, validatePutVersionId,
verifyColdObjectAvailable,
}; };
@ -52,7 +52,6 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* credentialScope (to be used for streaming v4 auth if applicable) * credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead * @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance * @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function * @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) - * @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version * result.contentMD5 - content md5 of new object or version
@ -60,7 +59,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/ */
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params, canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
overheadField, log, originOp, callback) { overheadField, log, callback) {
const putVersionId = request.headers['x-scal-s3-version-id']; const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === ''; const isPutVersion = putVersionId || putVersionId === '';
@ -143,7 +142,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
removeAWSChunked(request.headers['content-encoding']); removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires; metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging']; metadataStoreParams.tagging = request.headers['x-amz-tagging'];
metadataStoreParams.originOp = originOp; metadataStoreParams.originOp = 's3:ObjectCreated:Put';
const defaultObjectLockConfiguration const defaultObjectLockConfiguration
= bucketMD.getObjectLockConfiguration(); = bucketMD.getObjectLockConfiguration();
if (defaultObjectLockConfiguration) { if (defaultObjectLockConfiguration) {
@ -158,7 +157,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
request.headers[constants.objectLocationConstraintHeader] = request.headers[constants.objectLocationConstraintHeader] =
objMD[constants.objectLocationConstraintHeader]; objMD[constants.objectLocationConstraintHeader];
metadataStoreParams.originOp = originOp; metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated';
} }
const backendInfoObj = const backendInfoObj =
@ -4,25 +4,23 @@ const {
LifecycleDateTime, LifecycleDateTime,
LifecycleUtils, LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers; } = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
const { // moves lifecycle transition deadlines 1 day earlier, mostly for testing
expireOneDayEarlier, const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
transitionOneDayEarlier, // moves lifecycle expiration deadlines 1 day earlier, mostly for testing
timeProgressionFactor, const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({ const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier, transitionOneDayEarlier,
expireOneDayEarlier, expireOneDayEarlier,
timeProgressionFactor,
}); });
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor); const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
function calculateDate(objDate, expDays, datetime) { function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay)); return new Date(datetime.getTimestamp(objDate) + expDays * oneDay);
} }
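The date arithmetic in calculateDate reduces to plain millisecond addition; a sketch independent of Arsenal's LifecycleDateTime (which mainly resolves the object's timestamp):

// Plain sketch of the expiration-date arithmetic above.
const msInOneDay = 24 * 60 * 60 * 1000;

function addExpirationDays(lastModified, expDays, msPerDay = msInOneDay) {
    return new Date(new Date(lastModified).getTime() + (expDays * msPerDay));
}

// A 30-day expiration on an object last modified 2024-04-09:
// addExpirationDays('2024-04-09T00:00:00Z', 30) -> 2024-05-09T00:00:00.000Z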
function formatExpirationHeader(date, id) { function formatExpirationHeader(date, id) {
@ -5,7 +5,6 @@ const { config } = require('../../../Config');
const vault = require('../../../auth/vault'); const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks'); const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/** /**
* Calculates retain until date for the locked object version * Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period * @param {object} retention - includes days or years retention period
@ -21,9 +20,8 @@ function calculateRetainUntilDate(retention) {
const date = moment(); const date = moment();
// Calculate the number of days to retain the lock on the object // Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365; const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate const retainUntilDate
= date.add(retainUntilDaysInMs, 'ms'); = date.add(retainUntilDays, 'days');
return retainUntilDate.toISOString(); return retainUntilDate.toISOString();
} }
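Either side of this hunk computes the same retain-until date; a moment-free sketch of the calculation (one "day" assumed to be 86,400,000 ms):

// Sketch of calculateRetainUntilDate without moment.
function retainUntil(days, years, msPerDay = 24 * 60 * 60 * 1000) {
    const retainUntilDays = days || (years * 365);
    return new Date(Date.now() + (retainUntilDays * msPerDay)).toISOString();
}

// retainUntil(undefined, 1) keeps the lock for 365 days from now.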
/** /**
@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning'); const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore; const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/** /**
* Check if tier is supported * Check if tier is supported
@ -59,14 +59,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
objectKey, objectKey,
versionId: decodedVidResult, versionId: decodedVidResult,
requestType: request.apiMethods || 'restoreObject', requestType: request.apiMethods || 'restoreObject',
/**
* Restoring an object might not cause any impact on
* the storage, if the object is already restored: in
* this case, the duration is extended. We disable the
* quota evaluation and trigger it manually.
*/
checkQuota: false,
request,
}; };
return async.waterfall([ return async.waterfall([
@ -124,16 +116,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
return next(err, bucketMD, objectMD); return next(err, bucketMD, objectMD);
}); });
}, },
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) { function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {}; const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params, metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
@ -4,7 +4,7 @@ const async = require('async');
const metadata = require('../../../metadata/wrapper'); const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config'); const { config } = require('../../../Config');
const { scaledMsPerDay } = config.getTimeOptions(); const oneDay = 24 * 60 * 60 * 1000;
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata // Use Arsenal function to generate a version ID used internally by metadata
@ -460,47 +460,6 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
return options; return options;
} }
/**
* Keep metadatas when the object is restored from cold storage
* but remove the specific ones we don't want to keep
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {undefined}
*/
function restoreMetadata(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
// We need to keep user metadata and tags
Object.keys(objMD).forEach(key => {
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
metadataStoreParams.metaHeaders[key] = objMD[key];
}
});
if (objMD['x-amz-website-redirect-location']) {
if (!metadataStoreParams.headers) {
metadataStoreParams.headers = {};
}
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
}
if (objMD.replicationInfo) {
metadataStoreParams.replicationInfo = objMD.replicationInfo;
}
if (objMD.legalHold) {
metadataStoreParams.legalHold = objMD.legalHold;
}
if (objMD.acl) {
metadataStoreParams.acl = objMD.acl;
}
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.taggingCopy = objMD.tags;
}
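A worked example of what the removed restoreMetadata helper copies (hypothetical object metadata; assumes the function above is in scope):

const objMD = {
    'x-amz-meta-color': 'blue',                        // kept as user metadata
    'x-amz-meta-scal-s3-restore-attempt': '2',         // deliberately skipped
    'x-amz-website-redirect-location': '/index.html',  // copied into headers
    'creation-time': '2024-04-09T00:00:00Z',
    'last-modified': '2024-04-09T00:00:00Z',
    tags: { project: 'ptfe' },
};
const metadataStoreParams = { metaHeaders: {} };
restoreMetadata(objMD, metadataStoreParams);
// metadataStoreParams.metaHeaders -> { 'x-amz-meta-color': 'blue' }
// metadataStoreParams.headers     -> { 'x-amz-website-redirect-location': '/index.html' }
// metadataStoreParams.taggingCopy -> { project: 'ptfe' }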
/** overwritingVersioning - return versioning information for S3 to handle /** overwritingVersioning - return versioning information for S3 to handle
* storing version metadata with a specific version id. * storing version metadata with a specific version id.
* @param {object} objMD - obj metadata * @param {object} objMD - obj metadata
@ -512,8 +471,10 @@ function restoreMetadata(objMD, metadataStoreParams) {
* version id of the null version * version id of the null version
*/ */
function overwritingVersioning(objMD, metadataStoreParams) { function overwritingVersioning(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.updateMicroVersionId = true; metadataStoreParams.updateMicroVersionId = true;
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
// set correct originOp // set correct originOp
metadataStoreParams.originOp = 's3:ObjectRestore:Completed'; metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
@ -526,7 +487,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
restoreRequestedAt: objMD.archive?.restoreRequestedAt, restoreRequestedAt: objMD.archive?.restoreRequestedAt,
restoreRequestedDays: objMD.archive?.restoreRequestedDays, restoreRequestedDays: objMD.archive?.restoreRequestedDays,
restoreCompletedAt: new Date(now), restoreCompletedAt: new Date(now),
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)), restoreWillExpireAt: new Date(now + (days * oneDay)),
}; };
/* eslint-enable no-param-reassign */ /* eslint-enable no-param-reassign */
@ -542,8 +503,6 @@ function overwritingVersioning(objMD, metadataStoreParams) {
}; };
} }
restoreMetadata(objMD, metadataStoreParams);
return options; return options;
} }

View File

@ -1,314 +0,0 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
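A few worked cases for the byte accounting above, using a hypothetical unversioned bucket stub and assuming processBytesToWrite is in scope:

const unversioned = { isVersioningEnabled: () => false };

// PUT replacing a 100-byte object with 40 bytes counts a -60 byte diff:
processBytesToWrite('objectPut', unversioned, undefined, 40, { 'content-length': '100' });      // -> -60

// DELETE of a 100-byte object counts -100:
processBytesToWrite('objectDelete', unversioned, undefined, 0, { 'content-length': '100' });    // -> -100

// Restore counts the archived object's full size again:
processBytesToWrite('objectRestore', unversioned, undefined, 0, { 'content-length': '100' });   // -> 100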
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
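For instance, assuming QuotaService.maxStaleness is well under 24 hours, a day-old metric is treated as stale and the request is allowed:

const dayOldMetric = {
    date: new Date(Date.now() - (24 * 60 * 60 * 1000)).toISOString(),
    bytesTotal: 1024,
};
// isMetricStale(dayOldMetric, 'bucket', 'my-bucket', 'objectPut', 1024, log) === true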
/**
* Evaluates quotas for a bucket and an account and update inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
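A usage sketch for the helper above; `bucket` (a BucketInfo) and `log` are placeholders and the byte figures are hypothetical:

// Evaluate a 10 MiB PUT against a 1 GiB bucket quota and a 5 GiB account quota.
_evaluateQuotas(
    1024 * 1024 * 1024,          // bucketQuota (bytes)
    5 * 1024 * 1024 * 1024,      // accountQuota (bytes)
    bucket,                      // BucketInfo instance
    { account: 'account-id' },   // account descriptor returned by authorization
    10 * 1024 * 1024,            // inflight bytes sent to the quota service
    10 * 1024 * 1024,            // inflight bytes added locally for the comparison
    'objectPut',
    log,
    (err, bucketQuotaExceeded, accountQuotaExceeded) => {
        // either flag set => the API returns QuotaExceeded (unless the action deletes data)
    });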
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
/**
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
* the incoming bytes, are under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {boolean} - true if the quota is valid, false otherwise
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
// is checked second. This logic handles these APIs calls by
// ensuring the bytes are positives (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes are compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// the sum of the current utilization metrics only are compared with the
// quota. They include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
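How an object PUT handler is expected to wire these helpers together (hypothetical glue; the require path and variable names are assumptions):

const { validateQuotas, processBytesToWrite } = require('./quotaUtils');

function checkPutQuota(request, bucket, objMD, contentLength, log, next) {
    // Diff against any object being replaced, then evaluate bucket and account quotas.
    const bytes = processBytesToWrite('objectPut', bucket, undefined, contentLength, objMD);
    return validateQuotas(request, bucket, request.accountQuotas,
        ['objectPut'], 'objectPut', bytes, false, log, next);
}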
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};
@ -1,58 +0,0 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
* Bucket Delete Quota - Delete bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketDeleteQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketDeleteQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
request,
};
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket)),
(bucket, next) => {
bucket.setQuota(0);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketDeleteQuota'
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}
module.exports = bucketDeleteQuota;
@ -1,58 +0,0 @@
const { errors } = require('arsenal');
const { pushMetric } = require('../utapi/utilities');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
* bucketGetQuota - Get the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketGetQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGetQuota' });
const { bucketName, headers, method } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketGetQuota',
request,
};
const xml = [];
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketGetQuota',
});
return callback(err, null, corsHeaders);
}
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<GetBucketQuota>',
'<Name>', bucket.getName(), '</Name>',
);
const bucketQuota = bucket.getQuota();
if (!bucketQuota) {
log.debug('bucket has no quota', {
method: 'bucketGetQuota',
});
return callback(errors.NoSuchQuota, null,
corsHeaders);
}
xml.push('<Quota>', bucketQuota, '</Quota>',
'</GetBucketQuota>');
pushMetric('getBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, xml.join(''), corsHeaders);
});
}
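For reference, the success body assembled above looks like this (quota value hypothetical):

<?xml version="1.0" encoding="UTF-8"?><GetBucketQuota><Name>my-bucket</Name><Quota>10737418240</Quota></GetBucketQuota>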
module.exports = bucketGetQuota;
@ -45,8 +45,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
} else if (parsedHost && restEndpoints[parsedHost]) { } else if (parsedHost && restEndpoints[parsedHost]) {
locationConstraintChecked = restEndpoints[parsedHost]; locationConstraintChecked = restEndpoints[parsedHost];
} else { } else {
locationConstraintChecked = Object.keys(locationConstraints)[0]; log.trace('no location constraint provided on bucket put;' +
log.trace('no location constraint provided on bucket put; setting ' + locationConstraintChecked); 'setting us-east-1');
locationConstraintChecked = 'us-east-1';
} }
if (!locationConstraints[locationConstraintChecked]) { if (!locationConstraints[locationConstraintChecked]) {
@ -1,85 +0,0 @@
const { waterfall } = require('async');
const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const { parseString } = require('xml2js');
function validateBucketQuotaProperty(requestBody, next) {
const quota = requestBody.quota;
const quotaValue = parseInt(quota, 10);
if (Number.isNaN(quotaValue)) {
return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
}
if (quotaValue <= 0) {
return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
}
return next(null, quotaValue);
}
function parseRequestBody(requestBody, next) {
try {
const jsonData = JSON.parse(requestBody);
if (typeof jsonData !== 'object') {
throw new Error('Invalid JSON');
}
return next(null, jsonData);
} catch (jsonError) {
return parseString(requestBody, (xmlError, xmlData) => {
if (xmlError) {
return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
}
return next(null, xmlData);
});
}
}
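The removed parser accepts either JSON or XML; both hypothetical bodies below resolve to the same quota value:

const jsonBody = '{"quota": 10737418240}';     // parsed with JSON.parse
const xmlBody = '<quota>10737418240</quota>';  // parsed with xml2js parseString
// Either body passes validateBucketQuotaProperty with a quota of 10737418240 bytes.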
function bucketUpdateQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketUpdateQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketUpdateQuota',
request,
};
let bucket = null;
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
bucket = b;
return next(err, bucket);
}),
(bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
(bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
next(err, bucket, quotaValue)),
(bucket, quotaValue, next) => {
bucket.setQuota(quotaValue);
return metadata.updateBucket(bucket.getName(), bucket, log, next);
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketUpdateQuota'
});
monitoring.promMetrics('PUT', bucketName, err.code,
'updateBucketQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'PUT', bucketName, '200', 'updateBucketQuota');
pushMetric('updateBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, corsHeaders);
});
}
module.exports = bucketUpdateQuota;
@ -6,7 +6,6 @@ const convertToXml = s3middleware.convertToXml;
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { hasNonPrintables } = require('../utilities/stringChecks'); const { hasNonPrintables } = require('../utilities/stringChecks');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation'); const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants'); const constants = require('../../constants');
const services = require('../services'); const services = require('../services');
@ -66,7 +65,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
const websiteRedirectHeader = const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location']; request.headers['x-amz-website-redirect-location'];
if (request.headers['x-amz-storage-class'] && if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) { !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header'); log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', bucketName, monitoring.promMetrics('PUT', bucketName,
errors.InvalidStorageClass.code, 'initiateMultipartUpload'); errors.InvalidStorageClass.code, 'initiateMultipartUpload');
@ -23,15 +23,13 @@ const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissi
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo } const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
= require('./apiUtils/object/objectLockHelpers'); = require('./apiUtils/object/objectLockHelpers');
const requestUtils = policies.requestUtils; const requestUtils = policies.requestUtils;
const { validObjectKeys } = require('../routes/routeVeeam'); const { data } = require('../data/wrapper');
const { deleteVeeamCapabilities } = require('../routes/veeam/delete'); const logger = require('../utilities/logger');
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject'); const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
const { overheadField } = require('../../constants'); const { overheadField } = require('../../constants');
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
/* /*
Format of xml request: Format of xml request:
@ -333,9 +331,6 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
return callback(null, objMD, versionId); return callback(null, objMD, versionId);
}, },
(objMD, versionId, callback) => validateQuotas(
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
(objMD, versionId, callback) => { (objMD, versionId, callback) => {
const options = preprocessingVersioningDelete( const options = preprocessingVersioningDelete(
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode); bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
@ -351,8 +346,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
options.replayId = objMD.uploadId; options.replayId = objMD.uploadId;
} }
return services.deleteObject(bucketName, objMD, return services.deleteObject(bucketName, objMD,
entry.key, options, config.multiObjectDeleteEnableOptimizations, log, entry.key, options, config.multiObjectDeleteEnableOptimizations, log, (err, toDelete) => {
's3:ObjectRemoved:Delete', (err, toDelete) => {
if (err) { if (err) {
return callback(err); return callback(err);
} }
@ -366,8 +360,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
// This call will create a delete-marker // This call will create a delete-marker
return createAndStoreObject(bucketName, bucket, entry.key, return createAndStoreObject(bucketName, bucket, entry.key,
objMD, authInfo, canonicalID, null, request, objMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, overheadField, log, deleteInfo.newDeleteMarker, null, overheadField, log, (err, result) =>
's3:ObjectRemoved:DeleteMarkerCreated', (err, result) =>
callback(err, objMD, deleteInfo, result.versionId)); callback(err, objMD, deleteInfo, result.versionId));
}, },
], (err, objMD, deleteInfo, versionId) => { ], (err, objMD, deleteInfo, versionId) => {
@ -482,7 +475,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
return callback(errors.BadDigest); return callback(errors.BadDigest);
} }
const inPlayInternal = [];
const bucketName = request.bucketName; const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID(); const canonicalID = authInfo.getCanonicalID();
@ -508,9 +500,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
if (bucketShield(bucketMD, 'objectDelete')) { if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket); return next(errors.NoSuchBucket);
} }
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request,
// affects the objects. request.actionImplicitDenies)) {
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
log.trace("access denied due to bucket acl's"); log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for // if access denied at the bucket level, no access for
// any of the objects so all results will be error results // any of the objects so all results will be error results
@ -640,11 +631,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
request); request);
if (areAllActionsAllowed) { if (areAllActionsAllowed) {
if (validObjectKeys.includes(entry.key)) {
inPlayInternal.push(entry.key);
} else {
inPlay.push(entry); inPlay.push(entry);
}
} else { } else {
errorResults.push({ errorResults.push({
entry, entry,
@ -655,11 +642,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
return next(null, quietSetting, errorResults, inPlay, bucketMD); return next(null, quietSetting, errorResults, inPlay, bucketMD);
}); });
}, },
function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) {
return async.each(inPlayInternal,
(localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next),
err => next(err, quietSetting, errorResults, inPlay, bucketMD));
},
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay, function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
bucket, next) { bucket, next) {
return getObjMetadataAndDelete(authInfo, canonicalID, request, return getObjMetadataAndDelete(authInfo, canonicalID, request,
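Note on the quota hunk above: one side of getObjMetadataAndDelete threads a validateQuotas step into the per-object waterfall and passes the negated object size (-objMD?.['content-length'] || 0) as the inflight byte count, so a pending delete is accounted as bytes about to be freed rather than written. The snippet below is only a sketch of that sign convention, not the actual validateQuotas implementation; quotaUsedBytes and limitBytes are made-up names.

// Illustrative only: a delete contributes negative inflight bytes, so it
// can never push usage over the limit and is effectively always allowed.
function wouldExceedQuota(quotaUsedBytes, limitBytes, inflightBytes) {
    return quotaUsedBytes + inflightBytes > limitBytes;
}
const GiB = 1024 ** 3;
console.log(wouldExceedQuota(5 * GiB, 5 * GiB, 1024));         // true: a write at the limit is rejected
console.log(wouldExceedQuota(5 * GiB, 5 * GiB, -(1024 ** 2))); // false: a delete frees 1 MiB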


@ -23,7 +23,6 @@ const monitoring = require('../utilities/monitoringHandler');
const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD'); const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption'); const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
const locationHeader = constants.objectLocationConstraintHeader; const locationHeader = constants.objectLocationConstraintHeader;
@ -220,14 +219,6 @@ function objectCopy(authInfo, request, sourceBucket,
versionId: sourceVersionId, versionId: sourceVersionId,
getDeleteMarker: true, getDeleteMarker: true,
requestType: 'objectGet', requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request, request,
}; };
const valPutParams = { const valPutParams = {
@ -235,7 +226,6 @@ function objectCopy(authInfo, request, sourceBucket,
bucketName: destBucketName, bucketName: destBucketName,
objectKey: destObjectKey, objectKey: destObjectKey,
requestType: 'objectPut', requestType: 'objectPut',
checkQuota: false,
request, request,
}; };
const dataStoreContext = { const dataStoreContext = {
@ -249,7 +239,7 @@ function objectCopy(authInfo, request, sourceBucket,
const responseHeaders = {}; const responseHeaders = {};
if (request.headers['x-amz-storage-class'] && if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) { !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header'); log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', destBucketName, monitoring.promMetrics('PUT', destBucketName,
errors.InvalidStorageClass.code, 'copyObject'); errors.InvalidStorageClass.code, 'copyObject');
@ -287,10 +277,7 @@ function objectCopy(authInfo, request, sourceBucket,
}); });
}, },
function checkSourceAuthorization(destBucketMD, destObjMD, next) { function checkSourceAuthorization(destBucketMD, destObjMD, next) {
return standardMetadataValidateBucketAndObj({ return standardMetadataValidateBucketAndObj(valGetParams, request.actionImplicitDenies, log,
...valGetParams,
destObjMD,
}, request.actionImplicitDenies, log,
(err, sourceBucketMD, sourceObjMD) => { (err, sourceBucketMD, sourceObjMD) => {
if (err) { if (err) {
log.debug('error validating get part of request', log.debug('error validating get part of request',
@ -303,11 +290,6 @@ function objectCopy(authInfo, request, sourceBucket,
log.debug('no source object', { sourceObject }); log.debug('no source object', { sourceObject });
return next(err, null, destBucketMD); return next(err, null, destBucketMD);
} }
// check if object data is in a cold storage
const coldErr = verifyColdObjectAvailable(sourceObjMD);
if (coldErr) {
return next(coldErr, null);
}
if (sourceObjMD.isDeleteMarker) { if (sourceObjMD.isDeleteMarker) {
log.debug('delete marker on source object', log.debug('delete marker on source object',
{ sourceObject }); { sourceObject });
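The block comment added to valGetParams in this file (and repeated in objectPutCopyPart below) explains why checkQuota is enabled on the source objectGet validation rather than on the destination objectPut one: the number of bytes the copy will write is only known once the source metadata has been read. A minimal sketch of that dependency, reusing the optional-chaining expression from the copy-part hunk further down; everything else is illustrative.

// Illustrative only: the bytes to charge against the destination quota are
// the source object's content-length, unknown until the source metadata
// lookup has completed.
function copyInflightBytes(sourceObjMD) {
    return sourceObjMD?.['content-length'] || 0;
}
console.log(copyInflightBytes({ 'content-length': 10 * 1024 * 1024 })); // 10485760 bytes to write
console.log(copyInflightBytes(null));                                   // 0 while the source is still unknown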


@ -21,17 +21,16 @@ const objectLockedError = new Error('object locked');
const { overheadField } = require('../../constants'); const { overheadField } = require('../../constants');
/** /**
* objectDeleteInternal - DELETE an object from a bucket * objectDelete - DELETE an object from a bucket
* @param {AuthInfo} authInfo - requester's infos * @param {AuthInfo} authInfo - requester's infos
* @param {object} request - request object given by router, * @param {object} request - request object given by router,
* includes normalized headers * includes normalized headers
* @param {Logger} log - werelogs request instance * @param {Logger} log - werelogs request instance
* @param {boolean} isExpiration - true if the call comes from LifecycleExpiration
* @param {function} cb - final cb to call with the result and response headers * @param {function} cb - final cb to call with the result and response headers
* @return {undefined} * @return {undefined}
*/ */
function objectDeleteInternal(authInfo, request, log, isExpiration, cb) { function objectDelete(authInfo, request, log, cb) {
log.debug('processing request', { method: 'objectDeleteInternal' }); log.debug('processing request', { method: 'objectDelete' });
if (authInfo.isRequesterPublicUser()) { if (authInfo.isRequesterPublicUser()) {
log.debug('operation not available for public user'); log.debug('operation not available for public user');
monitoring.promMetrics( monitoring.promMetrics(
@ -167,10 +166,7 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
// source does not have versioning. // source does not have versioning.
return createAndStoreObject(bucketName, bucketMD, objectKey, return createAndStoreObject(bucketName, bucketMD, objectKey,
objectMD, authInfo, canonicalID, null, request, true, null, objectMD, authInfo, canonicalID, null, request, true, null,
log, isExpiration ? log, err => {
's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated',
err => {
if (err) { if (err) {
return next(err); return next(err);
} }
@ -180,11 +176,9 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
deleteInfo.removeDeleteMarker = true; deleteInfo.removeDeleteMarker = true;
} }
return services.deleteObject(bucketName, objectMD, return services.deleteObject(bucketName, objectMD,
objectKey, delOptions, false, log, isExpiration ? objectKey, delOptions, log, (err, delResult) =>
's3:LifecycleExpiration:Delete' : next(err, bucketMD, objectMD, delResult,
's3:ObjectRemoved:Delete', deleteInfo));
(err, delResult) =>
next(err, bucketMD, objectMD, delResult, deleteInfo));
}); });
} }
if (delOptions && delOptions.deleteData) { if (delOptions && delOptions.deleteData) {
@ -205,20 +199,14 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
} }
return services.deleteObject(bucketName, objectMD, objectKey, return services.deleteObject(bucketName, objectMD, objectKey,
delOptions, false, log, isExpiration ? delOptions, false, log, (err, delResult) => next(err, bucketMD,
's3:LifecycleExpiration:Delete' :
's3:ObjectRemoved:Delete',
(err, delResult) => next(err, bucketMD,
objectMD, delResult, deleteInfo)); objectMD, delResult, deleteInfo));
} }
// putting a new delete marker // putting a new delete marker
deleteInfo.newDeleteMarker = true; deleteInfo.newDeleteMarker = true;
return createAndStoreObject(bucketName, bucketMD, return createAndStoreObject(bucketName, bucketMD,
objectKey, objectMD, authInfo, canonicalID, null, request, objectKey, objectMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ? deleteInfo.newDeleteMarker, null, overheadField, log, (err, newDelMarkerRes) => {
's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated',
(err, newDelMarkerRes) => {
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo); next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
}); });
}, },
@ -307,21 +295,4 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
}); });
} }
/** module.exports = objectDelete;
* This function is used to delete an object from a bucket. The bucket must
* already exist and the user must have permission to delete the object.
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {werelogs.Logger} log - Logger object
* @param {function} cb - callback to server
* @return {undefined}
*/
function objectDelete(authInfo, request, log, cb) {
log.debug('processing request', { method: 'objectDelete' });
return objectDeleteInternal(authInfo, request, log, false, cb);
}
module.exports = {
objectDelete,
objectDeleteInternal,
};
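One side of this file renames objectDelete to objectDeleteInternal and threads an isExpiration flag through it; that flag selects the S3 event name passed to services.deleteObject and createAndStoreObject. The helper below is hypothetical, but it is built from the exact event strings in the hunks and summarizes the mapping.

// Illustrative only: lifecycle-driven deletions emit LifecycleExpiration
// events, user-driven deletions emit ObjectRemoved events.
function deleteEventName(isExpiration, createsDeleteMarker) {
    if (createsDeleteMarker) {
        return isExpiration
            ? 's3:LifecycleExpiration:DeleteMarkerCreated'
            : 's3:ObjectRemoved:DeleteMarkerCreated';
    }
    return isExpiration
        ? 's3:LifecycleExpiration:Delete'
        : 's3:ObjectRemoved:Delete';
}
console.log(deleteEventName(true, false));  // 's3:LifecycleExpiration:Delete'
console.log(deleteEventName(false, true));  // 's3:ObjectRemoved:DeleteMarkerCreated'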


@ -91,7 +91,7 @@ function objectDeleteTagging(authInfo, request, log, callback) {
}, },
(bucket, objectMD, next) => (bucket, objectMD, next) =>
// if external backends handles tagging // if external backends handles tagging
data.objectTagging('Delete', objectKey, bucket.getName(), objectMD, data.objectTagging('Delete', objectKey, bucket, objectMD,
log, err => next(err, bucket, objectMD)), log, err => next(err, bucket, objectMD)),
], (err, bucket, objectMD) => { ], (err, bucket, objectMD) => {
const additionalResHeaders = collectCorsHeaders(request.headers.origin, const additionalResHeaders = collectCorsHeaders(request.headers.origin,


@ -21,7 +21,6 @@ const { locationConstraints } = config;
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo'); const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders'); const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const validateHeaders = s3middleware.validateConditionalHeaders; const validateHeaders = s3middleware.validateConditionalHeaders;
@ -90,12 +89,16 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
return callback(err, null, corsHeaders); return callback(err, null, corsHeaders);
} }
const verCfg = bucket.getVersioningConfiguration(); const verCfg = bucket.getVersioningConfiguration();
// check if object data is in a cold storage if (objMD.archive &&
const coldErr = verifyColdObjectAvailable(objMD); // Object is in cold backend
if (coldErr) { (!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt &&
!objMD.archive.restoreCompletedAt))) {
const error = errors.InvalidObjectState;
monitoring.promMetrics( monitoring.promMetrics(
'GET', bucketName, coldErr.code, 'getObject'); 'GET', bucketName, error.code, 'getObject');
return callback(coldErr, null, corsHeaders); return callback(error, null, corsHeaders);
} }
if (objMD.isDeleteMarker) { if (objMD.isDeleteMarker) {
const responseMetaHeaders = Object.assign({}, const responseMetaHeaders = Object.assign({},
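One side of the objectGet hunk spells out the cold-storage condition that the other side wraps in verifyColdObjectAvailable: an object is unreadable while it still lives in the cold backend or while a restore is in flight. A standalone sketch of that predicate, using the same archive fields shown in the hunk:

// Illustrative only, mirrors the condition shown above.
function isUnavailableInColdStorage(objMD) {
    return Boolean(objMD.archive &&
        (!objMD.archive.restoreRequestedAt ||
         (objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt)));
}
console.log(isUnavailableInColdStorage({ archive: {} }));                                   // true: archived, no restore requested
console.log(isUnavailableInColdStorage({ archive: { restoreRequestedAt: '2024-04-01' } })); // true: restore still running
console.log(isUnavailableInColdStorage({ archive: { restoreRequestedAt: '2024-04-01',
    restoreCompletedAt: '2024-04-02' } }));                                                 // false: restore finished
console.log(isUnavailableInColdStorage({}));                                                // false: never archived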


@ -3,7 +3,6 @@ const { errors, versioning } = require('arsenal');
const constants = require('../../constants'); const constants = require('../../constants');
const aclUtils = require('../utilities/aclUtils'); const aclUtils = require('../utilities/aclUtils');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation'); const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption'); const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -72,7 +71,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
query, query,
} = request; } = request;
if (headers['x-amz-storage-class'] && if (headers['x-amz-storage-class'] &&
!config.locationConstraints[headers['x-amz-storage-class']]) { !constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header'); log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', request.bucketName, monitoring.promMetrics('PUT', request.bucketName,
errors.InvalidStorageClass.code, 'putObject'); errors.InvalidStorageClass.code, 'putObject');
@ -99,7 +98,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
'The encryption method specified is not supported'); 'The encryption method specified is not supported');
const requestType = request.apiMethods || 'objectPut'; const requestType = request.apiMethods || 'objectPut';
const valParams = { authInfo, bucketName, objectKey, versionId, const valParams = { authInfo, bucketName, objectKey, versionId,
requestType, request, withVersionId: isPutVersion }; requestType, request };
const canonicalID = authInfo.getCanonicalID(); const canonicalID = authInfo.getCanonicalID();
if (hasNonPrintables(objectKey)) { if (hasNonPrintables(objectKey)) {
@ -175,7 +174,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
writeContinue(request, request._response); writeContinue(request, request._response);
return createAndStoreObject(bucketName, return createAndStoreObject(bucketName,
bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle, bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
request, false, streamingV4Params, overheadField, log, 's3:ObjectCreated:Put', next); request, false, streamingV4Params, overheadField, log, next);
}, },
], (err, storingResult) => { ], (err, storingResult) => {
if (err) { if (err) {
@ -243,14 +242,6 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
monitoring.promMetrics('PUT', bucketName, '200', monitoring.promMetrics('PUT', bucketName, '200',
'putObject', newByteLength, oldByteLength, isVersionedObj, 'putObject', newByteLength, oldByteLength, isVersionedObj,
null, ingestSize); null, ingestSize);
if (isPutVersion) {
const durationMs = Date.now() - new Date(objMD.archive.restoreRequestedAt);
monitoring.lifecycleDuration.observe(
{ type: 'restore', location: objMD.dataStoreName },
durationMs / 1000);
}
return callback(null, responseHeaders); return callback(null, responseHeaders);
}); });
}); });
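The storage-class guard in this hunk (and the matching one in objectCopy above) differs between the two sides only in where the accepted values come from: one consults the configured locationConstraints map, the other a static constants.validStorageClasses list. A minimal sketch of the map-based variant, with a made-up configuration:

// Illustrative only: x-amz-storage-class is accepted when it names a
// configured location constraint; an absent header is always fine.
const config = { locationConstraints: { 'us-east-1': {}, 'cold-storage': {} } };
function isAcceptedStorageClass(headers) {
    const storageClass = headers['x-amz-storage-class'];
    return !storageClass || Boolean(config.locationConstraints[storageClass]);
}
console.log(isAcceptedStorageClass({ 'x-amz-storage-class': 'cold-storage' })); // true
console.log(isAcceptedStorageClass({ 'x-amz-storage-class': 'GLACIER' }));      // false unless declared in config
console.log(isAcceptedStorageClass({}));                                        // true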


@ -13,8 +13,6 @@ const services = require('../services');
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator'); const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils'); const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const versionIdUtils = versioning.VersionID; const versionIdUtils = versioning.VersionID;
@ -46,14 +44,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
versionId: reqVersionId, versionId: reqVersionId,
getDeleteMarker: true, getDeleteMarker: true,
requestType: 'objectGet', requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request, request,
}; };
@ -77,7 +67,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
bucketName: destBucketName, bucketName: destBucketName,
objectKey: destObjectKey, objectKey: destObjectKey,
requestType: 'objectPutPart', requestType: 'objectPutPart',
checkQuota: false,
request, request,
}; };
@ -98,7 +87,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
objectKey: destObjectKey, objectKey: destObjectKey,
partNumber: paddedPartNumber, partNumber: paddedPartNumber,
uploadId, uploadId,
enableQuota: true,
}; };
return async.waterfall([ return async.waterfall([
@ -145,11 +133,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
sourceLocationConstraintName = sourceLocationConstraintName =
sourceObjMD.location[0].dataStoreName; sourceObjMD.location[0].dataStoreName;
} }
// check if object data is in a cold storage
const coldErr = verifyColdObjectAvailable(sourceObjMD);
if (coldErr) {
return next(coldErr, null);
}
if (sourceObjMD.isDeleteMarker) { if (sourceObjMD.isDeleteMarker) {
log.debug('delete marker on source object', log.debug('delete marker on source object',
{ sourceObject }); { sourceObject });
@ -192,16 +175,9 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
} }
return next(null, copyLocator.dataLocator, destBucketMD, return next(null, copyLocator.dataLocator, destBucketMD,
copyLocator.copyObjectSize, sourceVerId, copyLocator.copyObjectSize, sourceVerId,
sourceLocationConstraintName, sourceObjMD); sourceLocationConstraintName);
}); });
}, },
function _validateQuotas(dataLocator, destBucketMD,
copyObjectSize, sourceVerId,
sourceLocationConstraintName, sourceObjMD, next) {
return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType,
request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err =>
next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName));
},
// get MPU shadow bucket to get splitter based on MD version // get MPU shadow bucket to get splitter based on MD version
function getMpuShadowBucket(dataLocator, destBucketMD, function getMpuShadowBucket(dataLocator, destBucketMD,
copyObjectSize, sourceVerId, copyObjectSize, sourceVerId,


@ -21,7 +21,6 @@ const { BackendInfo } = models;
const writeContinue = require('../utilities/writeContinue'); const writeContinue = require('../utilities/writeContinue');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption'); const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders'); const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const skipError = new Error('skip'); const skipError = new Error('skip');
@ -61,9 +60,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
log.debug('processing request', { method: 'objectPutPart' }); log.debug('processing request', { method: 'objectPutPart' });
const size = request.parsedContentLength; const size = request.parsedContentLength;
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) { if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
log.debug('put part size too large', { size }); log.debug('put part size too large', { size });
monitoring.promMetrics('PUT', request.bucketName, 400, monitoring.promMetrics('PUT', request.bucketName, 400,
@ -107,9 +103,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`; const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
const { objectKey } = request; const { objectKey } = request;
const originalIdentityAuthzResults = request.actionImplicitDenies; const originalIdentityAuthzResults = request.actionImplicitDenies;
// For validating the request at the destinationBucket level the
// `requestType` is the general 'objectPut'.
const requestType = request.apiMethods || 'objectPutPart';
return async.waterfall([ return async.waterfall([
// Get the destination bucket. // Get the destination bucket.
@ -129,6 +122,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}), }),
// Check the bucket authorization. // Check the bucket authorization.
(destinationBucket, next) => { (destinationBucket, next) => {
// For validating the request at the destinationBucket level the
// `requestType` is the general 'objectPut'.
const requestType = request.apiMethods || 'objectPutPart';
if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo, if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo,
log, request, request.actionImplicitDenies)) { log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { requestType }); log.debug('access denied for user on bucket', { requestType });
@ -136,8 +132,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
} }
return next(null, destinationBucket); return next(null, destinationBucket);
}, },
(destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas,
requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)),
// Get bucket server-side encryption, if it exists. // Get bucket server-side encryption, if it exists.
(destinationBucket, next) => getObjectSSEConfiguration( (destinationBucket, next) => getObjectSSEConfiguration(
request.headers, destinationBucket, log, request.headers, destinationBucket, log,
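One side of the objectPutPart hunk derives an isPutVersion flag from the x-scal-s3-version-id header before calling validateQuotas; note that the empty string is accepted as a value, hence the explicit === '' comparison. A small sketch of that detection (the header name comes from the diff, everything else is illustrative):

// Illustrative only.
function isPutVersionRequest(headers) {
    const putVersionId = headers['x-scal-s3-version-id'];
    return Boolean(putVersionId) || putVersionId === '';
}
console.log(isPutVersionRequest({ 'x-scal-s3-version-id': 'abcd0123' })); // true
console.log(isPutVersionRequest({ 'x-scal-s3-version-id': '' }));         // true: empty string is a valid value
console.log(isPutVersionRequest({}));                                     // false: header absent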


@ -96,7 +96,7 @@ function objectPutTagging(authInfo, request, log, callback) {
}, },
(bucket, objectMD, next) => (bucket, objectMD, next) =>
// if external backend handles tagging // if external backend handles tagging
data.objectTagging('Put', objectKey, bucket.getName(), objectMD, data.objectTagging('Put', objectKey, bucket, objectMD,
log, err => next(err, bucket, objectMD)), log, err => next(err, bucket, objectMD)),
], (err, bucket, objectMD) => { ], (err, bucket, objectMD) => {
const additionalResHeaders = collectCorsHeaders(request.headers.origin, const additionalResHeaders = collectCorsHeaders(request.headers.origin,


@ -1,3 +1,4 @@
const vaultclient = require('vaultclient');
const { auth } = require('arsenal'); const { auth } = require('arsenal');
const { config } = require('../Config'); const { config } = require('../Config');
@ -20,7 +21,6 @@ function getVaultClient(config) {
port, port,
https: true, https: true,
}); });
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port, true, key, cert, ca); vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
} else { } else {
logger.info('vaultclient configuration', { logger.info('vaultclient configuration', {
@ -28,7 +28,6 @@ function getVaultClient(config) {
port, port,
https: false, https: false,
}); });
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port); vaultClient = new vaultclient.Client(host, port);
} }
@ -50,6 +49,10 @@ function getMemBackend(config) {
} }
switch (config.backends.auth) { switch (config.backends.auth) {
case 'mem':
implName = 'vaultMem';
client = getMemBackend(config);
break;
case 'multiple': case 'multiple':
implName = 'vaultChain'; implName = 'vaultChain';
client = new ChainBackend('s3', [ client = new ChainBackend('s3', [
@ -57,14 +60,9 @@ case 'multiple':
getVaultClient(config), getVaultClient(config),
]); ]);
break; break;
case 'vault': default: // vault
implName = 'vault'; implName = 'vault';
client = getVaultClient(config); client = getVaultClient(config);
break;
default: // mem
implName = 'vaultMem';
client = getMemBackend(config);
break;
} }
module.exports = new Vault(client, implName); module.exports = new Vault(client, implName);


@ -8,6 +8,20 @@ const inMemory = require('./in_memory/backend').backend;
const file = require('./file/backend'); const file = require('./file/backend');
const KMIPClient = require('arsenal').network.kmipClient; const KMIPClient = require('arsenal').network.kmipClient;
const Common = require('./common'); const Common = require('./common');
let scalityKMS;
let scalityKMSImpl;
try {
// eslint-disable-next-line import/no-unresolved
const ScalityKMS = require('scality-kms');
scalityKMS = new ScalityKMS(config.kms);
scalityKMSImpl = 'scalityKms';
} catch (error) {
logger.warn('scality kms unavailable. ' +
'Using file kms backend unless mem specified.',
{ error });
scalityKMS = file;
scalityKMSImpl = 'fileKms';
}
let client; let client;
let implName; let implName;
@ -19,9 +33,8 @@ if (config.backends.kms === 'mem') {
client = file; client = file;
implName = 'fileKms'; implName = 'fileKms';
} else if (config.backends.kms === 'scality') { } else if (config.backends.kms === 'scality') {
const ScalityKMS = require('scality-kms'); client = scalityKMS;
client = new ScalityKMS(config.kms); implName = scalityKMSImpl;
implName = 'scalityKms';
} else if (config.backends.kms === 'kmip') { } else if (config.backends.kms === 'kmip') {
const kmipConfig = { kmip: config.kmip }; const kmipConfig = { kmip: config.kmip };
if (!kmipConfig.kmip) { if (!kmipConfig.kmip) {


@ -0,0 +1,131 @@
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const MessageType = {
/** Message that contains a configuration overlay */
CONFIG_OVERLAY_MESSAGE: 1,
/** Message that requests a metrics report */
METRICS_REQUEST_MESSAGE: 2,
/** Message that contains a metrics report */
METRICS_REPORT_MESSAGE: 3,
/** Close the virtual TCP socket associated to the channel */
CHANNEL_CLOSE_MESSAGE: 4,
/** Write data to the virtual TCP socket associated to the channel */
CHANNEL_PAYLOAD_MESSAGE: 5,
};
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const TargetType = {
/** Let the dispatcher choose the most appropriate message */
TARGET_ANY: 0,
};
const headerSize = 3;
class ChannelMessageV0 {
/**
* @param {Buffer} buffer Message bytes
*/
constructor(buffer) {
this.messageType = buffer.readUInt8(0);
this.channelNumber = buffer.readUInt8(1);
this.target = buffer.readUInt8(2);
this.payload = buffer.slice(headerSize);
}
/**
* @returns {number} Message type
*/
getType() {
return this.messageType;
}
/**
* @returns {number} Channel number if applicable
*/
getChannelNumber() {
return this.channelNumber;
}
/**
* @returns {number} Target service, or 0 to choose automatically
*/
getTarget() {
return this.target;
}
/**
* @returns {Buffer} Message payload if applicable
*/
getPayload() {
return this.payload;
}
/**
* Creates a wire representation of a channel close message
*
* @param {number} channelId Channel number
*
* @returns {Buffer} wire representation
*/
static encodeChannelCloseMessage(channelId) {
const buf = Buffer.alloc(headerSize);
buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
return buf;
}
/**
* Creates a wire representation of a channel data message
*
* @param {number} channelId Channel number
* @param {Buffer} data Payload
*
* @returns {Buffer} wire representation
*/
static encodeChannelDataMessage(channelId, data) {
const buf = Buffer.alloc(data.length + headerSize);
buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
data.copy(buf, headerSize);
return buf;
}
/**
* Creates a wire representation of a metrics message
*
* @param {object} body Metrics report
*
* @returns {Buffer} wire representation
*/
static encodeMetricsReportMessage(body) {
const report = JSON.stringify(body);
const buf = Buffer.alloc(report.length + headerSize);
buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
buf.writeUInt8(0, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
buf.write(report, headerSize);
return buf;
}
/**
* Protocol name used for subprotocol negotiation
*/
static get protocolName() {
return 'zenko-secure-channel-v0';
}
}
module.exports = {
ChannelMessageV0,
MessageType,
TargetType,
};
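Since ChannelMessageV0 defines the secure-channel wire format (a three-byte header carrying the message type, the channel number and the target, followed by the raw payload), a short encode/decode round trip may help. It only uses the class and enums shown above; the require path is assumed for the sketch.

const { ChannelMessageV0, MessageType, TargetType } = require('./ChannelMessageV0');

// Encode a payload frame for channel 7, then parse it back.
const frame = ChannelMessageV0.encodeChannelDataMessage(7, Buffer.from('hello'));
const decoded = new ChannelMessageV0(frame);
console.log(decoded.getType() === MessageType.CHANNEL_PAYLOAD_MESSAGE); // true
console.log(decoded.getChannelNumber());                                // 7
console.log(decoded.getTarget() === TargetType.TARGET_ANY);             // true
console.log(decoded.getPayload().toString());                           // 'hello'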


@ -0,0 +1,94 @@
const WebSocket = require('ws');
const arsenal = require('arsenal');
const logger = require('../utilities/logger');
const _config = require('../Config').config;
const { patchConfiguration } = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const managementAgentMessageType = {
/** Message that contains the loaded overlay */
NEW_OVERLAY: 1,
};
const CONNECTION_RETRY_TIMEOUT_MS = 5000;
function initManagementClient() {
const { host, port } = _config.managementAgent;
const ws = new WebSocket(`ws://${host}:${port}/watch`);
ws.on('open', () => {
logger.info('connected with management agent');
});
ws.on('close', (code, reason) => {
logger.info('disconnected from management agent', { reason });
setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
});
ws.on('error', error => {
logger.error('error on connection with management agent', { error });
});
ws.on('message', data => {
const method = 'initManagementclient::onMessage';
const log = logger.newRequestLogger();
let msg;
if (!data) {
log.error('message without data', { method });
return;
}
try {
msg = JSON.parse(data);
} catch (err) {
log.error('data is an invalid json', { method, err, data });
return;
}
if (msg.payload === undefined) {
log.error('message without payload', { method });
return;
}
if (typeof msg.messageType !== 'number') {
log.error('messageType is not an integer', {
type: typeof msg.messageType,
method,
});
return;
}
switch (msg.messageType) {
case managementAgentMessageType.NEW_OVERLAY:
patchConfiguration(msg.payload, log, err => {
if (err) {
log.error('failed to patch overlay', {
error: reshapeExceptionError(err),
method,
});
}
});
return;
default:
log.error('new overlay message with unmanaged message type', {
method,
type: msg.messageType,
});
return;
}
});
}
function isManagementAgentUsed() {
return process.env.MANAGEMENT_USE_AGENT === '1';
}
module.exports = {
managementAgentMessageType,
initManagementClient,
isManagementAgentUsed,
};
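The websocket handler above accepts a single message type from the local management agent: a JSON frame whose numeric messageType is NEW_OVERLAY (1) and whose payload is the overlay to apply through patchConfiguration. A frame shaped like the sketch below passes all of the handler's checks; the overlay content itself is illustrative.

// Illustrative frame, as the management agent would send it on /watch.
const data = JSON.stringify({
    messageType: 1, // managementAgentMessageType.NEW_OVERLAY
    payload: { version: 3, browserAccess: { enabled: true } },
});
const msg = JSON.parse(data);
console.log(msg.payload !== undefined);           // true, payload check passes
console.log(typeof msg.messageType === 'number'); // true, type check passes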


@ -0,0 +1,240 @@
const arsenal = require('arsenal');
const { buildAuthDataAccount } = require('../auth/in_memory/builder');
const _config = require('../Config').config;
const metadata = require('../metadata/wrapper');
const { getStoredCredentials } = require('./credentials');
const latestOverlayVersionKey = 'configuration/overlay-version';
const managementDatabaseName = 'PENSIEVE';
const replicatorEndpoint = 'zenko-cloudserver-replicator';
const { decryptSecret } = arsenal.pensieve.credentialUtils;
const { patchLocations } = arsenal.patches.locationConstraints;
const { reshapeExceptionError } = arsenal.errorUtils;
const { replicationBackends } = require('arsenal').constants;
function overlayHasVersion(overlay) {
return overlay && overlay.version !== undefined;
}
function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
return (overlayHasVersion(remoteOverlay) &&
(!overlayHasVersion(cachedOverlay) ||
remoteOverlay.version > cachedOverlay.version));
}
/**
* Updates the live {Config} object with the new overlay configuration.
*
* No-op if this version was already applied to the live {Config}.
*
* @param {object} newConf Overlay configuration to apply
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, newConf)
*
* @returns {undefined}
*/
function patchConfiguration(newConf, log, cb) {
if (newConf.version === undefined) {
log.debug('no remote configuration created yet');
return process.nextTick(cb, null, newConf);
}
if (_config.overlayVersion !== undefined &&
newConf.version <= _config.overlayVersion) {
log.debug('configuration version already applied',
{ configurationVersion: newConf.version });
return process.nextTick(cb, null, newConf);
}
return getStoredCredentials(log, (err, creds) => {
if (err) {
return cb(err);
}
const accounts = [];
if (newConf.users) {
newConf.users.forEach(u => {
if (u.secretKey && u.secretKey.length > 0) {
const secretKey = decryptSecret(creds, u.secretKey);
// accountType will be service-replication or service-clueso
let serviceName;
if (u.accountType && u.accountType.startsWith('service-')) {
serviceName = u.accountType.split('-')[1];
}
const newAccount = buildAuthDataAccount(
u.accessKey, secretKey, u.canonicalId, serviceName,
u.userName);
accounts.push(newAccount.accounts[0]);
}
});
}
const restEndpoints = Object.assign({}, _config.restEndpoints);
if (newConf.endpoints) {
newConf.endpoints.forEach(e => {
restEndpoints[e.hostname] = e.locationName;
});
}
if (!restEndpoints[replicatorEndpoint]) {
restEndpoints[replicatorEndpoint] = 'us-east-1';
}
const locations = patchLocations(newConf.locations, creds, log);
if (Object.keys(locations).length !== 0) {
try {
_config.setLocationConstraints(locations);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply configuration version location ' +
'constraints', { error: exceptionError,
method: 'getStoredCredentials' });
return cb(exceptionError);
}
try {
const locationsWithReplicationBackend = Object.keys(locations)
// NOTE: In Orbit, we don't need to have Scality location in our
// replication endpoint config, since we do not replicate to // replication endpoint config, since we do not replicate to
// any Scality Instance yet.
.filter(key => replicationBackends
[locations[key].type])
.reduce((obj, key) => {
/* eslint no-param-reassign:0 */
obj[key] = locations[key];
return obj;
}, {});
_config.setReplicationEndpoints(
locationsWithReplicationBackend);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply replication endpoints',
{ error: exceptionError, method: 'getStoredCredentials' });
return cb(exceptionError);
}
}
_config.setAuthDataAccounts(accounts);
_config.setRestEndpoints(restEndpoints);
_config.setPublicInstanceId(newConf.instanceId);
if (newConf.browserAccess) {
if (Boolean(_config.browserAccessEnabled) !==
Boolean(newConf.browserAccess.enabled)) {
_config.browserAccessEnabled =
Boolean(newConf.browserAccess.enabled);
_config.emit('browser-access-enabled-change');
}
}
_config.overlayVersion = newConf.version;
log.info('applied configuration version',
{ configurationVersion: _config.overlayVersion });
return cb(null, newConf);
});
}
/**
* Writes configuration version to the management database
*
* @param {object} cachedOverlay Latest stored configuration version
* for freshness comparison purposes
* @param {object} remoteOverlay New configuration version
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, remoteOverlay)
*
* @returns {undefined}
*/
function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
const objName = `configuration/overlay/${remoteOverlay.version}`;
metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
{}, log, error => {
if (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not save configuration',
{ error: exceptionError,
method: 'saveConfigurationVersion',
configurationVersion: remoteOverlay.version });
cb(exceptionError);
return;
}
metadata.putObjectMD(managementDatabaseName,
latestOverlayVersionKey, remoteOverlay.version, {}, log,
error => {
if (error) {
log.error('could not save configuration version', {
configurationVersion: remoteOverlay.version,
});
}
cb(error, remoteOverlay);
});
});
} else {
log.debug('no remote configuration to cache yet');
process.nextTick(cb, null, remoteOverlay);
}
}
/**
* Loads the latest cached configuration overlay from the management
* database, without contacting the Orbit API.
*
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} callback Function called with (error, cachedOverlay)
*
* @returns {undefined}
*/
function loadCachedOverlay(log, callback) {
return metadata.getObjectMD(managementDatabaseName,
latestOverlayVersionKey, {}, log, (err, version) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return metadata.getObjectMD(managementDatabaseName,
`configuration/overlay/${version}`, {}, log, (err, conf) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return callback(null, conf);
});
});
}
function applyAndSaveOverlay(overlay, log) {
patchConfiguration(overlay, log, err => {
if (err) {
log.error('could not apply pushed overlay', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
saveConfigurationVersion(null, overlay, log, err => {
if (err) {
log.error('could not cache overlay version', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
log.info('overlay push processed');
});
});
}
module.exports = {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
saveConfigurationVersion,
remoteOverlayIsNewer,
applyAndSaveOverlay,
};
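For reference, the shape of an overlay that patchConfiguration above knows how to apply, reconstructed from the fields it reads (version, users, endpoints, locations, browserAccess, instanceId). Every value below is a placeholder, not real configuration.

const overlay = {
    version: 7,
    instanceId: 'instance-uuid-placeholder',
    users: [{
        accessKey: 'AKIAEXAMPLE',
        secretKey: 'encrypted-with-the-instance-key-pair',
        canonicalId: 'canonical-id-placeholder',
        accountType: 'service-replication', // split('-')[1] yields the service name
        userName: 'replication-svc',
    }],
    endpoints: [{ hostname: 's3.example.com', locationName: 'us-east-1' }],
    locations: { 'us-east-1': { type: 'file' } },
    browserAccess: { enabled: true },
};
// patchConfiguration(overlay, log, cb) applies it once, then skips any
// overlay whose version is <= the already applied _config.overlayVersion.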


@ -0,0 +1,145 @@
const arsenal = require('arsenal');
const forge = require('node-forge');
const request = require('../utilities/request');
const metadata = require('../metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
const { reshapeExceptionError } = arsenal.errorUtils;
/**
* Retrieves Orbit API token from the management database.
*
* The token is used to authenticate stat posting and
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function getStoredCredentials(log, callback) {
metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
log, callback);
}
function issueCredentials(managementEndpoint, instanceId, log, callback) {
log.info('registering with API to get token');
const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
const postData = {
publicKey,
};
request.post(`${managementEndpoint}/${instanceId}/register`,
{ body: postData, json: true }, (error, response, body) => {
if (error) {
return callback(error);
}
if (response.statusCode !== 201) {
log.error('could not register instance', {
statusCode: response.statusCode,
});
return callback(arsenal.errors.InternalError);
}
/* eslint-disable no-param-reassign */
body.privateKey = privateKey;
/* eslint-enable no-param-reassign */
return callback(null, body);
});
}
function confirmInstanceCredentials(
managementEndpoint, instanceId, creds, log, callback) {
const postData = {
serial: creds.serial || 0,
publicKey: creds.publicKey,
};
const opts = {
headers: {
'x-instance-authentication-token': creds.token,
},
body: postData,
};
request.post(`${managementEndpoint}/${instanceId}/confirm`,
opts, (error, response) => {
if (error) {
return callback(error);
}
if (response.statusCode === 200) {
return callback(null, instanceId, creds.token);
}
return callback(arsenal.errors.InternalError);
});
}
/**
* Initializes credentials and PKI in the management database.
*
* In case the management database is new and empty, the instance
* is registered as new against the Orbit API with newly-generated
* RSA key pair.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function initManagementCredentials(
managementEndpoint, instanceId, log, callback) {
getStoredCredentials(log, (error, value) => {
if (error) {
if (error.is.NoSuchKey) {
return issueCredentials(managementEndpoint, instanceId, log,
(error, value) => {
if (error) {
log.error('could not issue token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials' });
return callback(error);
}
log.debug('saving token');
return metadata.putObjectMD(managementDatabaseName,
tokenConfigurationKey, value, {}, log, error => {
if (error) {
log.error('could not save token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials',
});
return callback(error);
}
log.info('saved token locally, ' +
'confirming instance');
return confirmInstanceCredentials(
managementEndpoint, instanceId, value, log,
callback);
});
});
}
log.debug('could not get token', { error });
return callback(error);
}
log.info('returning existing token');
if (Date.now() - value.issueDate > tokenRotationDelay) {
log.warn('management API token is too old, should re-issue');
}
return callback(null, instanceId, value.token);
});
}
module.exports = {
getStoredCredentials,
initManagementCredentials,
};
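The credentials object this module keeps under auth/zenko/remote-management-token never appears in one piece in the code above; the sketch below reconstructs it from the fields that are read and written (token, issueDate, serial, publicKey, plus the locally generated privateKey). All values are placeholders.

const storedCredentials = {
    token: 'instance-authentication-token-placeholder', // sent as x-instance-authentication-token
    issueDate: Date.now(), // compared against tokenRotationDelay (7 days) to warn about stale tokens
    serial: 0,
    publicKey: '-----BEGIN PUBLIC KEY----- (placeholder) -----END PUBLIC KEY-----',
    privateKey: '-----BEGIN RSA PRIVATE KEY----- (placeholder) -----END RSA PRIVATE KEY-----',
};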

lib/management/index.js (new file, 138 lines)

@ -0,0 +1,138 @@
const arsenal = require('arsenal');
const async = require('async');
const metadata = require('../metadata/wrapper');
const logger = require('../utilities/logger');
const {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
} = require('./configuration');
const { initManagementCredentials } = require('./credentials');
const { startWSManagementClient } = require('./push');
const { startPollingManagementClient } = require('./poll');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const initRemoteManagementRetryDelay = 10000;
const managementEndpointRoot =
process.env.MANAGEMENT_ENDPOINT ||
'https://api.zenko.io';
const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
const pushEndpointRoot =
process.env.PUSH_ENDPOINT ||
'https://push.api.zenko.io';
const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;
function initManagementDatabase(log, callback) {
// XXX choose proper owner names
const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
'owner display name', new Date().toJSON());
metadata.createBucket(managementDatabaseName, md, log, error => {
if (error) {
if (error.is.BucketAlreadyExists) {
log.info('created management database');
return callback();
}
log.error('could not initialize management database',
{ error: reshapeExceptionError(error),
method: 'initManagementDatabase' });
return callback(error);
}
log.info('initialized management database');
return callback();
});
}
function startManagementListeners(instanceId, token) {
const mode = process.env.MANAGEMENT_MODE || 'push';
if (mode === 'push') {
const url = `${pushEndpoint}/${instanceId}/ws`;
startWSManagementClient(url, token);
} else {
startPollingManagementClient(managementEndpoint, instanceId, token);
}
}
/**
* Initializes Orbit-based management by:
* - creating the management database in metadata
* - generating a key pair for credentials encryption
* - generating an instance-unique ID
* - getting an authentication token for the API
* - loading and applying the latest cached overlay configuration
* - starting a configuration update and metrics push background task
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function to call once the overlay is loaded
* (overlay)
*
* @returns {undefined}
*/
function initManagement(log, callback) {
if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
|| process.env.S3BACKEND === 'mem') {
log.info('remote management disabled');
return;
}
/* Temporary check before to fully move to the process management agent. */
if (isManagementAgentUsed() ^ typeof callback === 'function') {
let msg = 'misuse of initManagement function: ';
msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
msg += `, callback type: ${typeof callback}`;
throw new Error(msg);
}
async.waterfall([
// eslint-disable-next-line arrow-body-style
cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
cb => initManagementDatabase(log, cb),
cb => metadata.getUUID(log, cb),
(instanceId, cb) => initManagementCredentials(
managementEndpoint, instanceId, log, cb),
(instanceId, token, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, {});
return;
}
loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
token, overlay));
},
(instanceId, token, overlay, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, overlay);
return;
}
patchConfiguration(overlay, log,
err => cb(err, instanceId, token, overlay));
},
], (error, instanceId, token, overlay) => {
if (error) {
log.error('could not initialize remote management, retrying later',
{ error: reshapeExceptionError(error),
method: 'initManagement' });
setTimeout(initManagement,
initRemoteManagementRetryDelay,
logger.newRequestLogger());
} else {
log.info(`this deployment's Instance ID is ${instanceId}`);
log.end('management init done');
startManagementListeners(instanceId, token);
if (callback) {
callback(overlay);
}
}
});
}
module.exports = {
initManagement,
initManagementDatabase,
};
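A minimal usage sketch of initManagement, with the require paths assumed. Per the XOR guard above, the callback is passed if and only if the management agent is in use (MANAGEMENT_USE_AGENT=1); the function also returns early when REMOTE_MANAGEMENT_DISABLE is set to anything but 0 or when S3BACKEND is mem.

const logger = require('../utilities/logger');        // path assumed for the sketch
const { initManagement } = require('../management');  // path assumed for the sketch

if (process.env.MANAGEMENT_USE_AGENT === '1') {
    initManagement(logger.newRequestLogger(), overlay => {
        // overlay: latest cached configuration, already applied to the live Config
    });
} else {
    initManagement(logger.newRequestLogger());
}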

lib/management/poll.js (new file, 157 lines)

@ -0,0 +1,157 @@
const arsenal = require('arsenal');
const async = require('async');
const request = require('../utilities/request');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const {
loadCachedOverlay,
patchConfiguration,
saveConfigurationVersion,
} = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const pushReportDelay = 30000;
const pullConfigurationOverlayDelay = 60000;
function loadRemoteOverlay(
managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
log.debug('loading remote overlay');
const opts = {
headers: {
'x-instance-authentication-token': remoteToken,
'x-scal-request-id': log.getSerializedUids(),
},
json: true,
};
request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
(error, response, body) => {
if (error) {
return cb(error);
}
if (response.statusCode === 200) {
return cb(null, cachedOverlay, body);
}
if (response.statusCode === 404) {
return cb(null, cachedOverlay, {});
}
return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
});
}
// TODO save only after successful patch
function applyConfigurationOverlay(
managementEndpoint, instanceId, remoteToken, log) {
async.waterfall([
wcb => loadCachedOverlay(log, wcb),
(cachedOverlay, wcb) => patchConfiguration(cachedOverlay,
log, wcb),
(cachedOverlay, wcb) =>
loadRemoteOverlay(managementEndpoint, instanceId, remoteToken,
cachedOverlay, log, wcb),
(cachedOverlay, remoteOverlay, wcb) =>
saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
(remoteOverlay, wcb) => patchConfiguration(remoteOverlay,
log, wcb),
], error => {
if (error) {
log.error('could not apply managed configuration',
{ error: reshapeExceptionError(error),
method: 'applyConfigurationOverlay' });
}
setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
});
}
function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
const toURL = `${managementEndpoint}/${instanceId}/stats`;
const toOptions = {
json: true,
headers: {
'content-type': 'application/json',
'x-instance-authentication-token': remoteToken,
},
body: report,
};
const toCallback = (err, response, body) => {
if (err) {
logger.info('could not post stats', { error: err });
}
if (response && response.statusCode !== 201) {
logger.info('could not post stats', {
body,
statusCode: response.statusCode,
});
}
if (next) {
next(null, instanceId, remoteToken);
}
};
return request.post(toURL, toOptions, toCallback);
}
function getStats(next) {
const fromURL = `http://localhost:${_config.port}/_/report`;
const fromOptions = {
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
},
};
return request.get(fromURL, fromOptions, next);
}
function pushStats(managementEndpoint, instanceId, remoteToken, next) {
if (process.env.PUSH_STATS === 'false') {
return;
}
getStats((err, res, report) => {
if (err) {
logger.info('could not retrieve stats', { error: err });
return;
}
logger.debug('report', { report });
postStats(
managementEndpoint,
instanceId,
remoteToken,
report,
next
);
return;
});
setTimeout(pushStats, pushReportDelay,
managementEndpoint, instanceId, remoteToken);
}
/**
* Starts background task that updates configuration and pushes stats.
*
* Periodically polls for configuration updates, and pushes stats at
* a fixed interval.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {string} remoteToken API authentication token
*
* @returns {undefined}
*/
function startPollingManagementClient(
managementEndpoint, instanceId, remoteToken) {
metadata.notifyBucketChange(() => {
pushStats(managementEndpoint, instanceId, remoteToken);
});
pushStats(managementEndpoint, instanceId, remoteToken);
applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
}
module.exports = {
startPollingManagementClient,
};

lib/management/push.js (new file, 301 lines)

@ -0,0 +1,301 @@
const arsenal = require('arsenal');
const HttpsProxyAgent = require('https-proxy-agent');
const net = require('net');
const request = require('../utilities/request');
const { URL } = require('url');
const WebSocket = require('ws');
const assert = require('assert');
const http = require('http');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const { applyAndSaveOverlay } = require('./configuration');
const {
ChannelMessageV0,
MessageType,
} = require('./ChannelMessageV0');
const {
CONFIG_OVERLAY_MESSAGE,
METRICS_REQUEST_MESSAGE,
CHANNEL_CLOSE_MESSAGE,
CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;
const PING_INTERVAL_MS = 10000;
const subprotocols = [ChannelMessageV0.protocolName];
const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
|| 'localhost';
const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
|| _config.port;
let overlayMessageListener = null;
let connected = false;
// No wildcard nor cidr/mask match for now
function createWSAgent(pushEndpoint, env, log) {
const url = new URL(pushEndpoint);
const noProxy = (env.NO_PROXY || env.no_proxy
|| '').split(',');
if (noProxy.includes(url.hostname)) {
log.info('push server ws has proxy exclusion', { noProxy });
return null;
}
if (url.protocol === 'https:' || url.protocol === 'wss:') {
const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
if (httpsProxy) {
log.info('push server ws using https proxy', { httpsProxy });
return new HttpsProxyAgent(httpsProxy);
}
} else if (url.protocol === 'http:' || url.protocol === 'ws:') {
const httpProxy = (env.HTTP_PROXY || env.http_proxy);
if (httpProxy) {
log.info('push server ws using http proxy', { httpProxy });
return new HttpsProxyAgent(httpProxy);
}
}
const allProxy = (env.ALL_PROXY || env.all_proxy);
if (allProxy) {
log.info('push server ws using wildcard proxy', { allProxy });
return new HttpsProxyAgent(allProxy);
}
log.info('push server ws not using proxy');
return null;
}
/**
* Starts background task that updates configuration and pushes stats.
*
* Receives pushed Websocket messages on configuration updates, and
* sends stat messages in response to API solicitations.
*
* @param {string} url API endpoint
* @param {string} token API authentication token
* @param {function} cb end-of-connection callback
*
* @returns {undefined}
*/
function startWSManagementClient(url, token, cb) {
logger.info('connecting to push server', { url });
function _logError(error, errorMessage, method) {
if (error) {
logger.error(`management client error: ${errorMessage}`,
{ error: reshapeExceptionError(error), method });
}
}
const socketsByChannelId = [];
const headers = {
'x-instance-authentication-token': token,
};
const agent = createWSAgent(url, process.env, logger);
const ws = new WebSocket(url, subprotocols, { headers, agent });
let pingTimeout = null;
function sendPing() {
if (ws.readyState === ws.OPEN) {
ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
}
pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
}
function initiatePing() {
clearTimeout(pingTimeout);
setTimeout(sendPing, PING_INTERVAL_MS);
}
function pushStats(options) {
if (process.env.PUSH_STATS === 'false') {
return;
}
const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
const fromOptions = {
json: true,
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
'x-scal-report-skip-cache': Boolean(options && options.noCache),
},
};
request.get(fromURL, fromOptions, (err, response, body) => {
if (err) {
_logError(err, 'failed to get metrics report', 'pushStats');
return;
}
ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
err => _logError(err, 'failed to send metrics report message',
'pushStats'));
});
}
function closeChannel(channelId) {
const socket = socketsByChannelId[channelId];
if (socket) {
socket.destroy();
delete socketsByChannelId[channelId];
}
}
function receiveChannelData(channelId, payload) {
let socket = socketsByChannelId[channelId];
if (!socket) {
socket = net.createConnection(cloudServerPort, cloudServerHost);
socket.on('data', data => {
ws.send(ChannelMessageV0.
encodeChannelDataMessage(channelId, data), err =>
_logError(err, 'failed to send channel data message',
'receiveChannelData'));
});
socket.on('connect', () => {
});
socket.on('drain', () => {
});
socket.on('error', error => {
logger.error('failed to connect to S3', {
code: error.code,
host: error.address,
port: error.port,
});
});
socket.on('end', () => {
socket.destroy();
socketsByChannelId[channelId] = null;
ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
err => _logError(err,
'failed to send channel close message',
'receiveChannelData'));
});
socketsByChannelId[channelId] = socket;
}
socket.write(payload);
}
function browserAccessChangeHandler() {
if (!_config.browserAccessEnabled) {
socketsByChannelId.forEach(s => s.close());
}
}
ws.on('open', () => {
connected = true;
logger.info('connected to push server');
metadata.notifyBucketChange(() => {
pushStats({ noCache: true });
});
_config.on('browser-access-enabled-change', browserAccessChangeHandler);
initiatePing();
});
const cbOnce = cb ? arsenal.jsutil.once(cb) : null;
ws.on('close', () => {
logger.info('disconnected from push server, reconnecting in 10s');
metadata.notifyBucketChange(null);
_config.removeListener('browser-access-enabled-change',
browserAccessChangeHandler);
setTimeout(startWSManagementClient, 10000, url, token);
connected = false;
if (cbOnce) {
process.nextTick(cbOnce);
}
});
ws.on('error', err => {
connected = false;
logger.error('error from push server connection', {
error: err,
errorMessage: err.message,
});
if (cbOnce) {
process.nextTick(cbOnce, err);
}
});
ws.on('ping', () => {
ws.pong(err => _logError(err, 'failed to send a pong'));
});
ws.on('pong', () => {
initiatePing();
});
ws.on('message', data => {
const log = logger.newRequestLogger();
const message = new ChannelMessageV0(data);
switch (message.getType()) {
case CONFIG_OVERLAY_MESSAGE:
if (!isManagementAgentUsed()) {
applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
} else {
if (overlayMessageListener) {
overlayMessageListener(message.getPayload().toString());
}
}
break;
case METRICS_REQUEST_MESSAGE:
pushStats();
break;
case CHANNEL_CLOSE_MESSAGE:
closeChannel(message.getChannelNumber());
break;
case CHANNEL_PAYLOAD_MESSAGE:
// browserAccessEnabled defaults to true unless explicitly false
if (_config.browserAccessEnabled !== false) {
receiveChannelData(
message.getChannelNumber(), message.getPayload());
}
break;
default:
logger.error('unknown message type from push server',
{ messageType: message.getType() });
}
});
}
function addOverlayMessageListener(callback) {
assert(typeof callback === 'function');
overlayMessageListener = callback;
}
function startPushConnectionHealthCheckServer(cb) {
const server = http.createServer((req, res) => {
if (req.url !== '/_/healthcheck') {
res.writeHead(404);
res.write('Not Found');
} else if (connected) {
res.writeHead(200);
res.write('Connected');
} else {
res.writeHead(503);
res.write('Not Connected');
}
res.end();
});
server.listen(_config.port, cb);
}
module.exports = {
createWSAgent,
startWSManagementClient,
startPushConnectionHealthCheckServer,
addOverlayMessageListener,
};
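A minimal sketch, assuming a push endpoint URL and an instance authentication token are already known, of how these exports might be wired together at process startup; pushEndpoint and instanceToken are placeholders, not values defined in this module:
const {
    startWSManagementClient,
    startPushConnectionHealthCheckServer,
} = require('./lib/management/push');
// Expose /_/healthcheck (200 while the push connection is up), then open
// the websocket towards the push server; after a disconnection the client
// schedules its own reconnection 10 seconds later.
startPushConnectionHealthCheckServer(() => {
    startWSManagementClient(pushEndpoint, instanceToken, err => {
        // err is set when the first connection attempt fails
    });
});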

View File

@ -6,9 +6,6 @@ const BucketInfo = require('arsenal').models.BucketInfo;
const { isBucketAuthorized, isObjAuthorized } = const { isBucketAuthorized, isObjAuthorized } =
require('../api/apiUtils/authorization/permissionChecks'); require('../api/apiUtils/authorization/permissionChecks');
const bucketShield = require('../api/apiUtils/bucket/bucketShield'); const bucketShield = require('../api/apiUtils/bucket/bucketShield');
const { onlyOwnerAllowed } = require('../../constants');
const { actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal/build/lib/policyEvaluator/RequestContext');
const { processBytesToWrite, validateQuotas } = require('../api/apiUtils/quotas/quotaUtils');
/** getNullVersionFromMaster - retrieves the null version /** getNullVersionFromMaster - retrieves the null version
* metadata via retrieving the master key * metadata via retrieving the master key
@ -155,6 +152,9 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
}); });
return errors.NoSuchBucket; return errors.NoSuchBucket;
} }
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
const onlyOwnerAllowed = ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'];
const canonicalID = authInfo.getCanonicalID(); const canonicalID = authInfo.getCanonicalID();
if (!Array.isArray(requestType)) { if (!Array.isArray(requestType)) {
requestType = [requestType]; requestType = [requestType];
@ -184,7 +184,7 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
* @return {undefined} - and call callback with params err, bucket md * @return {undefined} - and call callback with params err, bucket md
*/ */
function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) { function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) {
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request, withVersionId } = params; const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request } = params;
let requestType = params.requestType; let requestType = params.requestType;
if (!Array.isArray(requestType)) { if (!Array.isArray(requestType)) {
requestType = [requestType]; requestType = [requestType];
@ -238,21 +238,6 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
} }
return next(null, bucket, objMD); return next(null, bucket, objMD);
}, },
(bucket, objMD, next) => {
const needQuotaCheck = requestType => requestType.some(type => actionNeedQuotaCheck[type] ||
actionWithDataDeletion[type]);
const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota;
// withVersionId covers cases where an object is being restored with a specific version ID.
// In this case, the storage space was already accounted for when the RestoreObject API call
// was made, so we don't need to add any inflight, but quota must be evaluated.
if (!checkQuota) {
return next(null, bucket, objMD);
}
const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId,
request?.parsedContentLength || 0, objMD, params.destObjMD);
return validateQuotas(request, bucket, request.accountQuotas, requestType, request.apiMethod,
contentLength, withVersionId, log, err => next(err, bucket, objMD));
},
], (err, bucket, objMD) => { ], (err, bucket, objMD) => {
if (err) { if (err) {
// still return bucket for cors headers // still return bucket for cors headers
@ -294,7 +279,6 @@ module.exports = {
validateBucket, validateBucket,
metadataGetObject, metadataGetObject,
metadataGetObjects, metadataGetObjects,
processBytesToWrite,
standardMetadataValidateBucketAndObj, standardMetadataValidateBucketAndObj,
standardMetadataValidateBucket, standardMetadataValidateBucket,
}; };

View File

@ -2,9 +2,9 @@ const MetadataWrapper = require('arsenal').storage.metadata.MetadataWrapper;
const { config } = require('../Config'); const { config } = require('../Config');
const logger = require('../utilities/logger'); const logger = require('../utilities/logger');
const constants = require('../../constants'); const constants = require('../../constants');
const bucketclient = require('bucketclient');
const clientName = config.backends.metadata; const clientName = config.backends.metadata;
let bucketclient;
let params; let params;
if (clientName === 'mem') { if (clientName === 'mem') {
params = {}; params = {};
@ -21,7 +21,6 @@ if (clientName === 'mem') {
noDbOpen: null, noDbOpen: null,
}; };
} else if (clientName === 'scality') { } else if (clientName === 'scality') {
bucketclient = require('bucketclient');
params = { params = {
bucketdBootstrap: config.bucketd.bootstrap, bucketdBootstrap: config.bucketd.bootstrap,
bucketdLog: config.bucketd.log, bucketdLog: config.bucketd.log,

View File

@ -1,17 +0,0 @@
const { config } = require('../Config');
const { ScubaClientImpl } = require('./scuba/wrapper');
let instance = null;
switch (config.backends.quota) {
case 'scuba':
instance = new ScubaClientImpl(config);
break;
default:
instance = {
enabled: false,
};
break;
}
module.exports = instance;

View File

@ -1,80 +0,0 @@
const util = require('util');
const { default: ScubaClient } = require('scubaclient');
const { externalBackendHealthCheckInterval } = require('../../../constants');
const monitoring = require('../../utilities/monitoringHandler');
class ScubaClientImpl extends ScubaClient {
constructor(config) {
super(config.scuba);
this.enabled = false;
this.maxStaleness = config.quota.maxStaleness;
this._healthCheckTimer = null;
this._log = null;
this._getLatestMetricsCallback = util.callbackify(this.getLatestMetrics);
if (config.scuba) {
this.enabled = true;
} else {
this.enabled = false;
}
}
setup(log) {
this._log = log;
if (this.enabled) {
this.periodicHealthCheck();
}
}
_healthCheck() {
return this.healthCheck().then(data => {
if (data?.date) {
const date = new Date(data.date);
if (Date.now() - date.getTime() > this.maxStaleness) {
throw new Error('Data is stale, disabling quotas');
}
}
if (!this.enabled) {
this._log.info('Scuba health check passed, enabling quotas');
}
monitoring.utilizationServiceAvailable.set(1);
this.enabled = true;
}).catch(err => {
if (this.enabled) {
this._log.warn('Scuba health check failed, disabling quotas', {
err: err.name,
description: err.message,
});
}
monitoring.utilizationServiceAvailable.set(0);
this.enabled = false;
});
}
periodicHealthCheck() {
if (this._healthCheckTimer) {
clearInterval(this._healthCheckTimer);
}
this._healthCheck();
this._healthCheckTimer = setInterval(async () => {
this._healthCheck();
}, Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY)
|| externalBackendHealthCheckInterval);
}
getUtilizationMetrics(metricsClass, resourceName, options, body, callback) {
const requestStartTime = process.hrtime.bigint();
return this._getLatestMetricsCallback(metricsClass, resourceName, options, body, (err, data) => {
const responseTimeInNs = Number(process.hrtime.bigint() - requestStartTime);
monitoring.utilizationMetricsRetrievalDuration.labels({
code: err ? (err.statusCode || 500) : 200,
class: metricsClass,
}).observe(responseTimeInNs / 1e9);
return callback(err, data);
});
}
}
module.exports = {
ScubaClientImpl,
};

View File

@ -37,7 +37,6 @@ const kms = require('../kms/wrapper');
const { listLifecycleCurrents } = require('../api/backbeat/listLifecycleCurrents'); const { listLifecycleCurrents } = require('../api/backbeat/listLifecycleCurrents');
const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCurrents'); const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCurrents');
const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers'); const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers');
const { objectDeleteInternal } = require('../api/objectDelete');
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing; const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing;
const lifecycleTypeCalls = { const lifecycleTypeCalls = {
@ -710,19 +709,6 @@ function putObject(request, response, log, callback) {
}); });
} }
function deleteObjectFromExpiration(request, response, userInfo, log, callback) {
return objectDeleteInternal(userInfo, request, log, true, err => {
if (err) {
log.error('error deleting object from expiration', {
error: err,
method: 'deleteObjectFromExpiration',
});
return callback(err);
}
return _respond(response, {}, log, callback);
});
}
function deleteObject(request, response, log, callback) { function deleteObject(request, response, log, callback) {
const err = _checkMultipleBackendRequest(request, log); const err = _checkMultipleBackendRequest(request, log);
if (err) { if (err) {
@ -1288,7 +1274,6 @@ const backbeatRoutes = {
}, },
}, },
DELETE: { DELETE: {
expiration: deleteObjectFromExpiration,
multiplebackenddata: { multiplebackenddata: {
deleteobject: deleteObject, deleteobject: deleteObject,
deleteobjecttagging: deleteObjectTagging, deleteobjecttagging: deleteObjectTagging,

View File

@ -1,225 +0,0 @@
const url = require('url');
const async = require('async');
const vault = require('../auth/vault');
const putVeeamFile = require('./veeam/put');
const getVeeamFile = require('./veeam/get');
const headVeeamFile = require('./veeam/head');
const listVeeamFiles = require('./veeam/list');
const { deleteVeeamFile } = require('./veeam/delete');
const { auth, s3routes, errors } = require('arsenal');
const { _decodeURI, validPath } = require('./veeam/utils');
const { routesUtils } = require('arsenal/build/lib/s3routes');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const prepareRequestContexts = require('../api/apiUtils/authorization/prepareRequestContexts');
const { responseXMLBody } = s3routes.routesUtils;
auth.setHandler(vault);
const validObjectKeys = [
`${validPath}system.xml`,
`${validPath}capacity.xml`,
];
const apiToAction = {
PUT: 'PutObject',
GET: 'GetObject',
HEAD: 'HeadObject',
DELETE: 'DeleteObject',
LIST: 'ListObjects',
};
const routeMap = {
GET: getVeeamFile,
PUT: putVeeamFile,
HEAD: headVeeamFile,
DELETE: deleteVeeamFile,
LIST: listVeeamFiles,
};
/**
* Validator for the Veeam12 custom routes. Ensures that bucket name and
* object name are correct, and that the bucket exists in the DB.
* @param {string} bucketName - name of the bucket
* @param {string} objectKey - key of the object
* @param {array | null} requestQueryParams - request query parameters
* @param {string} method - HTTP verb
* @param {object} log - request logger
* @returns {Error | undefined} error or undefined
*/
function checkBucketAndKey(bucketName, objectKey, requestQueryParams, method, log) {
// The bucket name is mandatory unless the request is a GET with no
// object key; reject the request when it is missing in any other case.
if (!bucketName && !(method === 'GET' && !objectKey)) {
log.debug('empty bucket name', { method: 'checkBucketAndKey' });
return errors.MethodNotAllowed;
}
if (typeof bucketName !== 'string' || routesUtils.isValidBucketName(bucketName, []) === false) {
log.debug('invalid bucket name', { bucketName });
if (method === 'DELETE') {
return errors.NoSuchBucket;
}
return errors.InvalidBucketName;
}
if (method !== 'LIST') {
// Reject any unsupported request, but allow downloads and deletes from UI
// Download relies on GETs calls with auth in query parameters, that can be
// checked if 'X-Amz-Credential' is included.
// Deletion requires that the tags of the object are returned.
if (requestQueryParams && Object.keys(requestQueryParams).length > 0
&& !(method === 'GET' && (requestQueryParams['X-Amz-Credential'] || ('tagging' in requestQueryParams)))) {
return errors.InvalidRequest
.customizeDescription('The Veeam SOSAPI folder does not support this action.');
}
if (typeof objectKey !== 'string' || !validObjectKeys.includes(objectKey)) {
log.debug('invalid object name', { objectKey });
return errors.InvalidArgument;
}
}
return undefined;
}
/**
* Query the authorization service for the request, and extract the bucket
* and, if applicable, object metadata according to the request method.
*
* @param {object} request - incoming request
* @param {object} response - response object
* @param {string} api - HTTP verb
* @param {object} log - logger instance
* @param {function} callback - called with (request, response, bucketMd, log) once validation succeeds
* @returns {undefined}
*/
function authorizationMiddleware(request, response, api, log, callback) {
if (!api) {
return responseXMLBody(errors.AccessDenied, null, response, log);
}
const requestContexts = prepareRequestContexts(api, request);
return async.waterfall([
next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) {
log.debug('authentication error', {
error: err,
method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey,
});
}
/* eslint-disable no-param-reassign */
request.authorizationResults = authorizationResults;
request.streamingV4Params = streamingV4Params;
/* eslint-enable no-param-reassign */
return next(err, userInfo);
}, 's3', requestContexts),
(userInfo, next) => {
// Ensure only supported HTTP verbs and actions are called,
// otherwise deny access
const requestType = apiToAction[api];
if (!requestType) {
return next(errors.AccessDenied);
}
const mdValParams = {
bucketName: request.bucketName,
authInfo: userInfo,
requestType,
request,
};
return next(null, mdValParams);
},
(mdValParams, next) => standardMetadataValidateBucket(mdValParams, request.actionImplicitDenies, log, next),
], (err, bucketMd) => {
if (err || !bucketMd) {
return responseXMLBody(err, null, response, log);
}
return callback(request, response, bucketMd, log);
});
}
function _normalizeVeeamRequest(req) {
/* eslint-disable no-param-reassign */
// Rewriting the URL is needed for the V4 signature check: the initial
// request targets https://s3.subdomain/bucketName/objectKey, but the
// custom ingresses and/or nginx configuration for the UI will redirect this
// call to .../_/veeam/bucketName/objectKey. We need to revert the custom
// path only used for routing before computing the V4 signature.
req.url = req.url.replace('/_/veeam', '');
// Assign multiple common (extracted) parameters to the request object
const parsedUrl = url.parse(req.url, true);
req.path = _decodeURI(parsedUrl.pathname);
const pathArr = req.path.split('/');
req.query = parsedUrl.query;
req.bucketName = pathArr[1];
req.objectKey = pathArr.slice(2).join('/');
const contentLength = req.headers['x-amz-decoded-content-length'] ?
req.headers['x-amz-decoded-content-length'] :
req.headers['content-length'];
req.parsedContentLength =
Number.parseInt(contentLength?.toString() ?? '', 10);
/* eslint-enable no-param-reassign */
}
/**
* Ensure only supported methods are supported, otherwise, return an error
* @param {string} reqMethod - the HTTP verb of the request
* @param {string} reqQuery - request query
* @param {object} reqHeaders - request headers
* @returns {object} - method or error
*/
function checkUnsupportedRoutes(reqMethod, reqQuery, reqHeaders) {
const method = routeMap[reqMethod];
if (!method || (!reqQuery && !reqHeaders)) {
return { error: errors.MethodNotAllowed };
}
return { method };
}
/**
* Router for the Veeam custom files
* @param {string} clientIP - client IP address
* @param {object} request - request object
* @param {object} response - response object
* @param {object} log - request logger
* @returns {undefined}
*/
function routeVeeam(clientIP, request, response, log) {
// Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = 'routeVeeam';
_normalizeVeeamRequest(request);
log.info('routing request', {
method: 'routeVeeam',
url: request.url,
clientIP,
resourceType: request.resourceType,
subResource: request.subResource,
});
// Rewrite action to LIST for list-objects
const requestMethod = request.method === 'GET' && !request.objectKey ? 'LIST' : request.method;
const { error, method } = checkUnsupportedRoutes(requestMethod, request.query, request.headers);
if (error) {
log.error('error validating route or uri params', { error });
return responseXMLBody(error, '', response, log);
}
const bucketOrKeyError = checkBucketAndKey(
request.bucketName, request.objectKey, request.query, requestMethod, log);
if (bucketOrKeyError) {
log.error('error with bucket or key value',
{ error: bucketOrKeyError });
return routesUtils.responseXMLBody(bucketOrKeyError, null, response, log);
}
return authorizationMiddleware(request, response, requestMethod, log, method);
}
module.exports = {
routeVeeam,
checkUnsupportedRoutes,
_normalizeVeeamRequest,
authorizationMiddleware,
checkBucketAndKey,
validObjectKeys,
};

View File

@ -1,72 +0,0 @@
const { s3routes, errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const { isSystemXML } = require('./utils');
const { responseXMLBody, responseNoBody } = s3routes.routesUtils;
/**
* Deletes system.xml or capacity.xml files for a given bucket.
*
* @param {string} bucketName - bucket name
* @param {string} objectKey - object key to delete
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @param {function} callback - callback
* @returns {undefined} -
*/
function deleteVeeamCapabilities(bucketName, objectKey, bucketMd, log, callback) {
const capabilityFieldName = isSystemXML(objectKey) ? 'SystemInfo' : 'CapacityInfo';
// Ensure file exists in metadata before deletion
if (!bucketMd._capabilities?.VeeamSOSApi
|| !bucketMd._capabilities?.VeeamSOSApi[capabilityFieldName]) {
return callback(errors.NoSuchKey);
}
// eslint-disable-next-line no-param-reassign
delete bucketMd._capabilities.VeeamSOSApi[capabilityFieldName];
// Delete the whole veeam capacity if nothing is left
if (Object.keys(bucketMd._capabilities.VeeamSOSApi).length === 0) {
// eslint-disable-next-line no-param-reassign
delete bucketMd._capabilities.VeeamSOSApi;
// Delete all capacities if no capacity is left
if (Object.keys(bucketMd._capabilities).length === 0) {
// eslint-disable-next-line no-param-reassign
delete bucketMd._capabilities;
}
}
// Update the bucket metadata
return metadata.deleteBucketCapabilities(bucketName, bucketMd, 'VeeamSOSApi', capabilityFieldName, log, err => {
if (err) {
return callback(err);
}
return callback();
});
}
/**
* Deletes system.xml or capacity.xml files for a given bucket. Handles the
* request context for custom routes.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function deleteVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
return deleteVeeamCapabilities(request.bucketName, request.objectKey, bucketMd, log, err => {
if (err) {
return responseXMLBody(err, null, response, log);
}
return responseNoBody(null, null, response, 204, log);
});
}
module.exports = {
deleteVeeamFile,
deleteVeeamCapabilities,
};

View File

@ -1,46 +0,0 @@
const xml2js = require('xml2js');
const { errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const { respondWithData, buildHeadXML, getFileToBuild } = require('./utils');
const { responseXMLBody } = require('arsenal/build/lib/s3routes/routesUtils');
/**
* Returns system.xml or capacity.xml files for a given bucket.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function getVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
if ('tagging' in request.query) {
return respondWithData(request, response, log, bucketMd,
buildHeadXML('<Tagging><TagSet></TagSet></Tagging>'));
}
return metadata.getBucket(request.bucketName, log, (err, data) => {
if (err) {
return responseXMLBody(errors.InternalError, null, response, log);
}
const fileToBuild = getFileToBuild(request, data._capabilities?.VeeamSOSApi);
if (fileToBuild.error) {
return responseXMLBody(fileToBuild.error, null, response, log);
}
let modified = new Date().toISOString();
// Extract the last modified date, but do not include it when computing
// the file's ETag (md5)
modified = fileToBuild.value.LastModified;
delete fileToBuild.value.LastModified;
const builder = new xml2js.Builder({
headless: true,
});
return respondWithData(request, response, log, data,
buildHeadXML(builder.buildObject(fileToBuild.value)), modified);
});
}
module.exports = getVeeamFile;

View File

@ -1,43 +0,0 @@
const xml2js = require('xml2js');
const { errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const { getResponseHeader, buildHeadXML, getFileToBuild } = require('./utils');
const { responseXMLBody, responseContentHeaders } = require('arsenal/build/lib/s3routes/routesUtils');
/**
* Returns system.xml or capacity.xml files metadata for a given bucket.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function headVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
return metadata.getBucket(request.bucketName, log, (err, data) => {
if (err) {
return responseXMLBody(errors.InternalError, null, response, log);
}
const fileToBuild = getFileToBuild(request, data._capabilities?.VeeamSOSApi);
if (fileToBuild.error) {
return responseXMLBody(fileToBuild.error, null, response, log);
}
let modified = new Date().toISOString();
// Extract the last modified date, but do not include it when computing
// the file's ETag (md5)
modified = fileToBuild.value.LastModified;
delete fileToBuild.value.LastModified;
// Recompute file content to generate appropriate content-md5 header
const builder = new xml2js.Builder({
headless: true,
});
const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(fileToBuild)));
return responseContentHeaders(null, {}, getResponseHeader(request, data,
dataBuffer, modified, log), response, log);
});
}
module.exports = headVeeamFile;

View File

@ -1,132 +0,0 @@
const url = require('url');
const xml2js = require('xml2js');
const { errors } = require('arsenal');
const querystring = require('querystring');
const metadata = require('../../metadata/wrapper');
const { responseXMLBody } = require('arsenal/build/lib/s3routes/routesUtils');
const { respondWithData, getResponseHeader, buildHeadXML, validPath } = require('./utils');
const { processVersions, processMasterVersions } = require('../../api/bucketGet');
/**
* Utility function to build a standard response for the LIST route.
* It adds the supported path by default as a static and default file.
*
* @param {object} request - request object
* @param {object} arrayOfFiles - array of files headers
* @param {boolean} [versioned] - set to true if versioned listing is enabled
* @returns {string} - the formatted XML content to send
*/
function buildXMLResponse(request, arrayOfFiles, versioned = false) {
const parsedUrl = url.parse(request.url);
const parsedQs = querystring.parse(parsedUrl.query);
const listParams = {
prefix: validPath,
maxKeys: parsedQs['max-keys'] || 1000,
delimiter: '/',
};
const list = {
IsTruncated: false,
Versions: [],
Contents: [],
CommonPrefixes: [],
};
const entries = arrayOfFiles.map(file => ({
key: file.name,
value: {
IsDeleteMarker: false,
IsNull: true,
LastModified: file['Last-Modified'],
// Generated ETag already contains quotes, removing them here
ETag: file.ETag.substring(1, file.ETag.length - 1),
Size: file['Content-Length'],
Owner: {
ID: 0,
DisplayName: 'Veeam SOSAPI',
},
StorageClass: 'VIRTUAL',
}
}));
entries.push({
key: validPath,
value: {
IsDeleteMarker: false,
IsNull: true,
LastModified: new Date().toISOString(),
ETag: 'd41d8cd98f00b204e9800998ecf8427e',
Size: 0,
Owner: {
ID: 0,
DisplayName: 'Veeam SOSAPI',
},
StorageClass: 'VIRTUAL',
}
});
// Add the folder as the base file
if (versioned) {
list.Versions = entries;
} else {
list.Contents = entries;
}
const processingXMLFunction = versioned ? processVersions : processMasterVersions;
return processingXMLFunction(request.bucketName, listParams, list);
}
/**
* List system.xml and/or capacity.xml files for a given bucket.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function listVeeamFiles(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
// Only accept list-type query parameter
if (!('list-type' in request.query) && !('versions' in request.query)) {
return responseXMLBody(errors.InvalidRequest
.customizeDescription('The Veeam folder does not support this action.'), null, response, log);
}
return metadata.getBucket(request.bucketName, log, (err, data) => {
if (err) {
return responseXMLBody(errors.InternalError, null, response, log);
}
const filesToBuild = [];
const fieldsToGenerate = [];
if (data._capabilities?.VeeamSOSApi?.SystemInfo) {
fieldsToGenerate.push({
...data._capabilities?.VeeamSOSApi?.SystemInfo,
name: `${validPath}system.xml`,
});
}
if (data._capabilities?.VeeamSOSApi?.CapacityInfo) {
fieldsToGenerate.push({
...data._capabilities?.VeeamSOSApi?.CapacityInfo,
name: `${validPath}capacity.xml`,
});
}
fieldsToGenerate.forEach(file => {
const lastModified = file.LastModified;
// eslint-disable-next-line no-param-reassign
delete file.LastModified;
const builder = new xml2js.Builder({
headless: true,
});
const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(file)));
filesToBuild.push({
...getResponseHeader(request, data,
dataBuffer, lastModified, log),
name: file.name,
});
});
// When `versions` is present, listing should return a versioned list
return respondWithData(request, response, log, data,
buildXMLResponse(request, filesToBuild, 'versions' in request.query));
});
}
module.exports = listVeeamFiles;

View File

@ -1,80 +0,0 @@
const async = require('async');
const { parseString } = require('xml2js');
const { receiveData, isSystemXML, getFileToBuild } = require('./utils');
const { s3routes, errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const parseSystemSchema = require('./schemas/system');
const parseCapacitySchema = require('./schemas/capacity');
const writeContinue = require('../../utilities/writeContinue');
const { responseNoBody, responseXMLBody } = s3routes.routesUtils;
/**
* Puts a veeam capacity or system file in the bucket metadata.
* Logic ensures consistency of the data and metadata.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function putVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return errors.NoSuchBucket;
}
return async.waterfall([
next => {
// Extract the data from the request, keep it in memory
writeContinue(request, response);
return receiveData(request, log, next);
},
(value, next) => parseString(value, { explicitArray: false }, (err, parsed) => {
// Convert the received XML to a JS object
if (err) {
return next(errors.MalformedXML);
}
return next(null, parsed);
}),
(parsedXML, next) => {
const capabilities = bucketMd._capabilities || {
VeeamSOSApi: {},
};
// Validate the JS object schema with joi and prepare the object for
// further logic
const validateFn = isSystemXML(request.objectKey) ? parseSystemSchema : parseCapacitySchema;
let validatedData = null;
try {
validatedData = validateFn(parsedXML);
} catch (err) {
log.error('xml file did not pass validation', { err });
return next(errors.MalformedXML);
}
const file = getFileToBuild(request, validatedData, true);
if (file.error) {
return next(file.error);
}
capabilities.VeeamSOSApi = {
...(capabilities.VeeamSOSApi || {}),
...file.value,
};
// Write data to bucketMD with the same (validated) format
// eslint-disable-next-line no-param-reassign
bucketMd = {
...bucketMd,
_capabilities: capabilities,
};
// Update bucket metadata
return metadata.updateBucketCapabilities(
request.bucketName, bucketMd, 'VeeamSOSApi', file.fieldName, file.value[file.fieldName], log, next);
}
], err => {
if (err) {
return responseXMLBody(err, null, response, log);
}
return responseNoBody(null, null, response, 200, log);
});
}
module.exports = putVeeamFile;

View File

@ -1,38 +0,0 @@
const joi = require('joi');
const { errors } = require('arsenal');
/**
* Validates and parse the provided JSON object from the
* provided XML file. XML scheme example:
*
* <?xml version="1.0" encoding="utf-8" ?>
* <CapacityInfo>
* <Capacity>1099511627776</Capacity>
* <Available>0</Available>
* <Used>0</Used>
* </CapacityInfo>
*
* @param {string} parsedXML - the parsed XML from xml2js
* @returns {object | Error} the valid system.xml JS object or an error if
* validation fails
*/
function validateCapacitySchema(parsedXML) {
const schema = joi.object({
CapacityInfo: joi.object({
Capacity: joi.number().min(-1).integer().required(),
Available: joi.number().min(-1).integer().required(),
Used: joi.number().min(-1).integer().required(),
}).required(),
});
const validatedData = schema.validate(parsedXML, {
// Allow any unknown keys for future compatibility
allowUnknown: true,
convert: true,
});
if (validatedData.error) {
throw new Error(errors.MalformedXML);
}
return validatedData.value;
}
module.exports = validateCapacitySchema;
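A minimal sketch, for illustration only, of exercising this validator on the capacity.xml sample above with the same xml2js options used by the Veeam PUT route; the './capacity' require path is an assumption about where the snippet would live:
const { parseString } = require('xml2js');
const validateCapacitySchema = require('./capacity');
const sampleXml = '<?xml version="1.0" encoding="utf-8" ?>' +
    '<CapacityInfo><Capacity>1099511627776</Capacity>' +
    '<Available>0</Available><Used>0</Used></CapacityInfo>';
parseString(sampleXml, { explicitArray: false }, (err, parsed) => {
    if (err) {
        throw err;
    }
    // convert: true lets joi turn the string values into numbers;
    // allowUnknown keeps any extra fields for forward compatibility
    const validated = validateCapacitySchema(parsed);
    console.log(validated.CapacityInfo.Capacity); // 1099511627776
});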

View File

@ -1,95 +0,0 @@
const joi = require('joi');
const { errors } = require('arsenal');
// Allow supporting any version of the protocol
const systemSchemasPerVersion = {
'unsupported': joi.object({}),
'"1.0"': joi.object({
SystemInfo: joi.object({
ProtocolVersion: joi.string().required(),
ModelName: joi.string().required(),
ProtocolCapabilities: joi.object({
CapacityInfo: joi.boolean().required(),
UploadSessions: joi.boolean().required(),
IAMSTS: joi.boolean().default(false),
}).required(),
APIEndpoints: joi.object({
IAMEndpoint: joi.string().required(),
STSEndpoint: joi.string().required()
}),
SystemRecommendations: joi.object({
S3ConcurrentTaskLimit: joi.number().min(0).default(64),
S3MultiObjectDeleteLimit: joi.number().min(1).default(1000),
StorageCurrentTasksLimit: joi.number().min(0).default(0),
KbBlockSize: joi.number()
.valid(256, 512, 1024, 2048, 4096, 8192)
.default(1024),
}),
}).required()
}),
};
/**
* Validates and parse the provided JSON object from the
* provided XML file. XML scheme example:
*
* <?xml version="1.0" encoding="utf-8" ?>
* <SystemInfo>
* <ProtocolVersion>"1.0"</ProtocolVersion>
* <ModelName>"ACME corp - Custom S3 server - v1.2"</ModelName>
* <ProtocolCapabilities>
* <CapacityInfo>true</CapacityInfo>
* <UploadSessions>true</UploadSessions>
* <IAMSTS>true</IAMSTS>
* </ProtocolCapabilities>
* <APIEndpoints>
* <IAMEndpoint>https://storage.acme.local/iam/endpoint</IAMEndpoint>
* <STSEndpoint>https://storage.acme.local/sts/endpoint</STSEndpoint>
* </APIEndpoints>
* <SystemRecommendations>
* <S3ConcurrentTaskLimit>64</S3ConcurrentTaskLimit>
* <S3MultiObjectDeleteLimit>1000</S3MultiObjectDeleteLimit>
* <StorageCurrentTasksLimit>0</StorageCurrentTasksLimit>
* <KbBlockSize>1024</KbBlockSize>
* </SystemRecommendations>
* </SystemInfo>
*
* @param {string} parsedXML - the parsed XML from xml2js
* @returns {object | Error} the valid system.xml JS object or an error if
* validation fails
*/
function validateSystemSchema(parsedXML) {
const protocolVersion = parsedXML?.SystemInfo?.ProtocolVersion;
let schema = systemSchemasPerVersion.unsupported;
if (!protocolVersion) {
throw new Error(errors.MalformedXML
.customizeDescription('ProtocolVersion must be set for the system.xml file'));
}
if (protocolVersion && protocolVersion in systemSchemasPerVersion) {
schema = systemSchemasPerVersion[parsedXML?.SystemInfo?.ProtocolVersion];
}
const validatedData = schema.validate(parsedXML, {
// Allow any unknown keys for future compatibility
allowUnknown: true,
convert: true,
});
if (validatedData.error) {
throw validatedData.error;
} else {
switch (protocolVersion) {
case '"1.0"':
// Ensure conditional fields are set
// IAMSTS === true implies that SystemInfo.APIEndpoints is defined
if (validatedData.value.SystemInfo.ProtocolCapabilities.IAMSTS
&& !validatedData.value.SystemInfo.APIEndpoints) {
throw new Error(errors.MalformedXML);
}
break;
default:
break;
}
}
return validatedData.value;
}
module.exports = validateSystemSchema;

View File

@ -1,211 +0,0 @@
const { errors, jsutil } = require('arsenal');
const { Readable } = require('stream');
const collectResponseHeaders = require('../../utilities/collectResponseHeaders');
const collectCorsHeaders = require('../../utilities/collectCorsHeaders');
const crypto = require('crypto');
const { prepareStream } = require('arsenal/build/lib/s3middleware/prepareStream');
/**
* Decodes a URI and returns the result.
* Performs the same decoding as the S3 server.
* @param {string} uri - uri to decode
* @returns {string} -
*/
function _decodeURI(uri) {
return decodeURIComponent(uri.replace(/\+/g, ' '));
}
/**
* Generic function to get data from a client request.
*
* @param {object} request - incoming request
* @param {object} log - logger object
* @param {function} callback -
* @returns {undefined}
*/
function receiveData(request, log, callback) {
// Get the key content from the request body
const { parsedContentLength } = request;
const ContentLengthThreshold = 1024 * 1024; // 1MB
// Prevent memory overloads by limiting the size of the
// received data.
if (parsedContentLength > ContentLengthThreshold) {
return callback(errors.InvalidInput
.customizeDescription(`maximum allowed content-length is ${ContentLengthThreshold} bytes`));
}
const value = Buffer.alloc(parsedContentLength);
const cbOnce = jsutil.once(callback);
const dataStream = prepareStream(request, request.streamingV4Params, log, cbOnce);
let cursor = 0;
let exceeded = false;
dataStream.on('data', data => {
if (cursor + data.length > parsedContentLength) {
exceeded = true;
}
if (!exceeded) {
data.copy(value, cursor);
}
cursor += data.length;
});
dataStream.on('end', () => {
if (exceeded) {
log.error('data stream exceed announced size',
{ parsedContentLength, overflow: cursor });
return callback(errors.InternalError);
} else {
return callback(null, value.toString());
}
});
return undefined;
}
/**
* Builds a valid XML file for SOSAPI
*
* @param {string} xmlContent - valid xml content
* @returns {string} a valid and formatted XML file
*/
function buildHeadXML(xmlContent) {
return `<?xml version="1.0" encoding="UTF-8" ?>\n${xmlContent}\n`;
}
/**
* Get response headers for the object
* @param {object} request - incoming request
* @param {BucketInfo} bucket - bucket
* @param {string} dataBuffer - data to send as a buffer
* @param {date} [lastModified] - last modified date of the value
* @param {object} log - logging object
* @returns {object} - response headers
*/
function getResponseHeader(request, bucket, dataBuffer, lastModified, log) {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
const responseMetaHeaders = collectResponseHeaders({
'last-modified': lastModified || new Date().toISOString(),
'content-md5': crypto
.createHash('md5')
.update(dataBuffer)
.digest('hex'),
'content-length': dataBuffer.byteLength,
'content-type': 'text/xml',
}, corsHeaders, null, false);
responseMetaHeaders.versionId = 'null';
responseMetaHeaders['x-amz-id-2'] = log.getSerializedUids();
responseMetaHeaders['x-amz-request-id'] = log.getSerializedUids();
return responseMetaHeaders;
}
/**
* Generic function to respond to user with data using streams
*
* @param {object} request - incoming request
* @param {object} response - response object
* @param {object} log - logging object
* @param {BucketInfo} bucket - bucket info
* @param {string} data - data to send
* @param {date} [lastModified] - last modified date of the value
* @returns {undefined} -
*/
function respondWithData(request, response, log, bucket, data, lastModified) {
const dataBuffer = Buffer.from(data);
const responseMetaHeaders = getResponseHeader(request, bucket, dataBuffer, lastModified, log);
response.on('finish', () => {
let contentLength = 0;
if (responseMetaHeaders && responseMetaHeaders['Content-Length']) {
contentLength = responseMetaHeaders['Content-Length'];
}
log.end().addDefaultFields({ contentLength });
log.end().info('responded with streamed content', {
httpCode: response.statusCode,
});
});
if (responseMetaHeaders && typeof responseMetaHeaders === 'object') {
Object.keys(responseMetaHeaders).forEach(key => {
if (responseMetaHeaders[key] !== undefined) {
try {
response.setHeader(key, responseMetaHeaders[key]);
} catch (e) {
log.debug('header can not be added ' +
'to the response', {
header: responseMetaHeaders[key],
error: e.stack, method: 'routeVeeam/respondWithData'
});
}
}
});
}
response.writeHead(200);
const stream = Readable.from(dataBuffer);
stream.pipe(response);
stream.on('unpipe', () => {
response.end();
});
stream.on('error', () => {
response.end();
});
}
const validPath = '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/';
/**
* Helper to determine if the current requested file is system.xml
*
* @param {string} objectKey - object key
* @returns {boolean} - true if the object key ends with `/system.xml`
*/
function isSystemXML(objectKey) {
return objectKey.endsWith('/system.xml');
}
/**
* Helper to extract the file from the bucket metadata
*
* @param {object} request - incoming request
* @param {object} data - the bucket metadata or input data
* @param {boolean} inlineLastModified - true to keep LastModified inside the returned value,
* false to return it as a separate standalone field
* @returns {error | object} - error if file does not exist, or
* the associated metadata
*/
function getFileToBuild(request, data, inlineLastModified = false) {
const _isSystemXML = isSystemXML(request.objectKey);
const fileToBuild = _isSystemXML ? data?.SystemInfo
: data?.CapacityInfo;
if (!fileToBuild) {
return { error: errors.NoSuchKey };
}
const modified = fileToBuild.LastModified || (new Date()).toISOString();
const fieldName = _isSystemXML ? 'SystemInfo' : 'CapacityInfo';
if (inlineLastModified) {
fileToBuild.LastModified = modified;
return {
value: {
[fieldName]: fileToBuild,
},
fieldName,
};
} else {
delete fileToBuild.LastModified;
return {
value: {
[fieldName]: fileToBuild,
LastModified: modified,
},
fieldName,
};
}
}
module.exports = {
_decodeURI,
receiveData,
respondWithData,
getResponseHeader,
buildHeadXML,
validPath,
isSystemXML,
getFileToBuild,
};

View File

@ -18,9 +18,13 @@ const locationStorageCheck =
require('./api/apiUtils/object/locationStorageCheck'); require('./api/apiUtils/object/locationStorageCheck');
const vault = require('./auth/vault'); const vault = require('./auth/vault');
const metadata = require('./metadata/wrapper'); const metadata = require('./metadata/wrapper');
const { initManagement } = require('./management');
const {
initManagementClient,
isManagementAgentUsed,
} = require('./management/agentClient');
const HttpAgent = require('agentkeepalive'); const HttpAgent = require('agentkeepalive');
const QuotaService = require('./quotas/quotas');
const routes = arsenal.s3routes.routes; const routes = arsenal.s3routes.routes;
const { parseLC, MultipleBackendGateway } = arsenal.storage.data; const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
const websiteEndpoints = _config.websiteEndpoints; const websiteEndpoints = _config.websiteEndpoints;
@ -51,6 +55,7 @@ const STATS_INTERVAL = 5; // 5 seconds
const STATS_EXPIRY = 30; // 30 seconds const STATS_EXPIRY = 30; // 30 seconds
const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL, const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
STATS_EXPIRY); STATS_EXPIRY);
const enableRemoteManagement = true;
class S3Server { class S3Server {
/** /**
@ -316,9 +321,16 @@ class S3Server {
this._startServer(this.routeAdminRequest, _config.metricsPort); this._startServer(this.routeAdminRequest, _config.metricsPort);
} }
// Start quota service health checks // TODO this should wait for metadata healthcheck to be ok
if (QuotaService.enabled) { // TODO only do this in cluster master
QuotaService?.setup(log); if (enableRemoteManagement) {
if (!isManagementAgentUsed()) {
setTimeout(() => {
initManagement(logger.newRequestLogger());
}, 5000);
} else {
initManagementClient();
}
} }
this.started = true; this.started = true;
@ -327,7 +339,8 @@ class S3Server {
} }
function main() { function main() {
let workers = _config.workers || 1; // TODO: change config to use workers prop. name for clarity
let workers = _config.clusters || 1;
if (process.env.S3BACKEND === 'mem') { if (process.env.S3BACKEND === 'mem') {
workers = 1; workers = 1;
} }

View File

@ -109,7 +109,7 @@ const services = {
tagging, taggingCopy, replicationInfo, defaultRetention, tagging, taggingCopy, replicationInfo, defaultRetention,
dataStoreName, creationTime, retentionMode, retentionDate, dataStoreName, creationTime, retentionMode, retentionDate,
legalHold, originOp, updateMicroVersionId, archive, oldReplayId, legalHold, originOp, updateMicroVersionId, archive, oldReplayId,
deleteNullKey, amzStorageClass, overheadField } = params; deleteNullKey, overheadField } = params;
log.trace('storing object in metadata'); log.trace('storing object in metadata');
assert.strictEqual(typeof bucketName, 'string'); assert.strictEqual(typeof bucketName, 'string');
const md = new ObjectMD(); const md = new ObjectMD();
@ -186,7 +186,6 @@ const services = {
} }
// update restore // update restore
if (archive) { if (archive) {
md.setAmzStorageClass(amzStorageClass);
md.setArchive(new ObjectMDArchive( md.setArchive(new ObjectMDArchive(
archive.archiveInfo, archive.archiveInfo,
archive.restoreRequestedAt, archive.restoreRequestedAt,
@ -263,11 +262,6 @@ const services = {
if (legalHold) { if (legalHold) {
md.setLegalHold(legalHold); md.setLegalHold(legalHold);
} }
if (params.acl) {
// In case of a restore we dont pass ACL in the headers
// but we take them from the old metadata
md.setAcl(params.acl);
}
log.trace('object metadata', { omVal: md.getValue() }); log.trace('object metadata', { omVal: md.getValue() });
// If this is not the completion of a multipart upload or // If this is not the completion of a multipart upload or
@ -333,11 +327,10 @@ const services = {
* @param {boolean} deferLocationDeletion - true if the object should not * @param {boolean} deferLocationDeletion - true if the object should not
* be removed from the storage, but be returned instead. * be removed from the storage, but be returned instead.
* @param {Log} log - logger instance * @param {Log} log - logger instance
* @param {string} originOp - origin operation
* @param {function} cb - callback from async.waterfall in objectGet * @param {function} cb - callback from async.waterfall in objectGet
* @return {undefined} * @return {undefined}
*/ */
deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, originOp, cb) { deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, cb) {
log.trace('deleting object from bucket'); log.trace('deleting object from bucket');
assert.strictEqual(typeof bucketName, 'string'); assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof objectMD, 'object'); assert.strictEqual(typeof objectMD, 'object');
@ -369,7 +362,7 @@ const services = {
} }
return cb(null, res); return cb(null, res);
}); });
}, originOp); });
} }
const objGetInfo = objectMD.location; const objGetInfo = objectMD.location;

View File

@ -1,12 +1,16 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const _config = require('../Config').config; const _config = require('../Config').config;
const { utapiVersion, UtapiServer: utapiServer } = require('utapi'); const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
const vault = require('../auth/vault');
// start utapi server // start utapi server
if (utapiVersion === 1 && _config.utapi) { if (utapiVersion === 1 && _config.utapi) {
const fullConfig = Object.assign({}, _config.utapi, const fullConfig = Object.assign({}, _config.utapi,
{ redis: _config.redis, vaultclient: vault }); { redis: _config.redis });
if (_config.vaultd) {
Object.assign(fullConfig, { vaultd: _config.vaultd });
}
if (_config.https) {
Object.assign(fullConfig, { https: _config.https });
}
// copy healthcheck IPs // copy healthcheck IPs
if (_config.healthChecks) { if (_config.healthChecks) {
Object.assign(fullConfig, { healthChecks: _config.healthChecks }); Object.assign(fullConfig, { healthChecks: _config.healthChecks });

View File

@ -1,4 +1,3 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReindex = require('utapi').UtapiReindex; const UtapiReindex = require('utapi').UtapiReindex;
const { config } = require('../Config'); const { config } = require('../Config');

View File

@ -1,4 +1,3 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReplay = require('utapi').UtapiReplay; const UtapiReplay = require('utapi').UtapiReplay;
const _config = require('../Config').config; const _config = require('../Config').config;

View File

@ -3,14 +3,12 @@ const routeMetadata = require('../routes/routeMetadata');
const routeWorkflowEngineOperator = const routeWorkflowEngineOperator =
require('../routes/routeWorkflowEngineOperator'); require('../routes/routeWorkflowEngineOperator');
const { reportHandler } = require('./reportHandler'); const { reportHandler } = require('./reportHandler');
const routeVeeam = require('../routes/routeVeeam').routeVeeam;
const internalHandlers = { const internalHandlers = {
backbeat: routeBackbeat, backbeat: routeBackbeat,
report: reportHandler, report: reportHandler,
metadata: routeMetadata, metadata: routeMetadata,
'workflow-engine-operator': routeWorkflowEngineOperator, 'workflow-engine-operator': routeWorkflowEngineOperator,
veeam: routeVeeam,
}; };
module.exports = { module.exports = {

View File

@ -1,11 +1,7 @@
const { configure, Werelogs } = require('werelogs'); const { Werelogs } = require('werelogs');
const _config = require('../Config.js').config; const _config = require('../Config.js').config;
configure({
level: _config.log.logLevel,
dump: _config.log.dumpLevel,
});
const werelogs = new Werelogs({ const werelogs = new Werelogs({
level: _config.log.logLevel, level: _config.log.logLevel,
dump: _config.log.dumpLevel, dump: _config.log.dumpLevel,

View File

@ -1,6 +1,5 @@
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const client = require('prom-client'); const client = require('prom-client');
const { config } = require('../Config');
const collectDefaultMetrics = client.collectDefaultMetrics; const collectDefaultMetrics = client.collectDefaultMetrics;
const numberOfBuckets = new client.Gauge({ const numberOfBuckets = new client.Gauge({
@ -65,61 +64,6 @@ const httpResponseSizeBytes = new client.Summary({
help: 'Cloudserver HTTP response sizes in bytes', help: 'Cloudserver HTTP response sizes in bytes',
}); });
let quotaEvaluationDuration;
let utilizationMetricsRetrievalDuration;
let utilizationServiceAvailable;
let bucketsWithQuota;
let accountsWithQuota;
let requestWithQuotaMetricsUnavailable;
if (config.isQuotaEnabled) {
quotaEvaluationDuration = new client.Histogram({
name: 's3_cloudserver_quota_evaluation_duration_seconds',
help: 'Duration of the quota evaluation operation',
labelNames: ['action', 'code', 'type'],
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1],
});
utilizationMetricsRetrievalDuration = new client.Histogram({
name: 's3_cloudserver_quota_metrics_retrieval_duration_seconds',
help: 'Duration of the utilization metrics retrieval operation',
labelNames: ['code', 'class'],
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5],
});
utilizationServiceAvailable = new client.Gauge({
name: 's3_cloudserver_quota_utilization_service_available',
help: 'Availability of the utilization service',
});
bucketsWithQuota = new client.Gauge({
name: 's3_cloudserver_quota_buckets_count',
help: 'Total number of buckets quota',
});
accountsWithQuota = new client.Gauge({
name: 's3_cloudserver_quota_accounts_count',
help: 'Total number of account quota',
});
requestWithQuotaMetricsUnavailable = new client.Counter({
name: 's3_cloudserver_quota_unavailable_count',
help: 'Total number of requests with quota metrics unavailable',
});
}
// Lifecycle duration metric, to track the completion of restore.
// This metric is used to track the time it takes to complete the lifecycle operation (restore).
// NOTE : this metric is the same as the one defined in Backbeat, and must keep the same name,
// labels and buckets.
const lifecycleDuration = new client.Histogram({
name: 's3_lifecycle_duration_seconds',
help: 'Duration of the lifecycle operation, calculated from the theoretical date to the end ' +
'of the operation',
labelNames: ['type', 'location'],
buckets: [0.2, 1, 5, 30, 120, 600, 3600, 4 * 3600, 8 * 3600, 16 * 3600, 24 * 3600],
});
function promMetrics(method, bucketName, code, action, function promMetrics(method, bucketName, code, action,
newByteLength, oldByteLength, isVersionedObj, newByteLength, oldByteLength, isVersionedObj,
numOfObjectsRemoved, ingestSize) { numOfObjectsRemoved, ingestSize) {
@ -187,10 +131,6 @@ function crrCacheToProm(crrResults) {
numberOfBuckets.set(crrResults.getObjectCount.buckets || 0); numberOfBuckets.set(crrResults.getObjectCount.buckets || 0);
numberOfObjects.set(crrResults.getObjectCount.objects || 0); numberOfObjects.set(crrResults.getObjectCount.objects || 0);
} }
if (config.isQuotaEnabled) {
bucketsWithQuota.set(crrResults?.getObjectCount?.bucketWithQuotaCount || 0);
accountsWithQuota.set(crrResults?.getVaultReport?.accountWithQuotaCount || 0);
}
if (crrResults.getDataDiskUsage) { if (crrResults.getDataDiskUsage) {
dataDiskAvailable.set(crrResults.getDataDiskUsage.available || 0); dataDiskAvailable.set(crrResults.getDataDiskUsage.available || 0);
dataDiskFree.set(crrResults.getDataDiskUsage.free || 0); dataDiskFree.set(crrResults.getDataDiskUsage.free || 0);
@ -267,10 +207,4 @@ module.exports = {
httpRequestDurationSeconds, httpRequestDurationSeconds,
httpRequestsTotal, httpRequestsTotal,
httpActiveRequests, httpActiveRequests,
lifecycleDuration,
quotaEvaluationDuration,
utilizationMetricsRetrievalDuration,
utilizationServiceAvailable,
bucketsWithQuota,
requestWithQuotaMetricsUnavailable,
}; };

View File

@ -10,7 +10,6 @@ const config = require('../Config').config;
const { data } = require('../data/wrapper'); const { data } = require('../data/wrapper');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const monitoring = require('../utilities/monitoringHandler'); const monitoring = require('../utilities/monitoringHandler');
const vault = require('../auth/vault');
const REPORT_MODEL_VERSION = 1; const REPORT_MODEL_VERSION = 1;
const ASYNCLIMIT = 5; const ASYNCLIMIT = 5;
@ -462,7 +461,6 @@ function reportHandler(clientIP, req, res, log) {
getCRRMetrics: cb => getCRRMetrics(log, cb), getCRRMetrics: cb => getCRRMetrics(log, cb),
getReplicationStates: cb => getReplicationStates(log, cb), getReplicationStates: cb => getReplicationStates(log, cb),
getIngestionInfo: cb => getIngestionInfo(log, cb), getIngestionInfo: cb => getIngestionInfo(log, cb),
getVaultReport: cb => vault.report(log, cb),
}, },
(err, results) => { (err, results) => {
if (err) { if (err) {
@ -490,7 +488,6 @@ function reportHandler(clientIP, req, res, log) {
capabilities: getCapabilities(), capabilities: getCapabilities(),
ingestStats: results.getIngestionInfo.metrics, ingestStats: results.getIngestionInfo.metrics,
ingestStatus: results.getIngestionInfo.status, ingestStatus: results.getIngestionInfo.status,
vaultReport: results.getVaultReport,
}; };
monitoring.crrCacheToProm(results); monitoring.crrCacheToProm(results);
res.writeHead(200, { 'Content-Type': 'application/json' }); res.writeHead(200, { 'Content-Type': 'application/json' });

View File

@ -101,12 +101,5 @@
"legacyAwsBehavior": false, "legacyAwsBehavior": false,
"isCold": true, "isCold": true,
"details": {} "details": {}
},
"location-azure-archive-v1": {
"type": "azure_archive",
"objectId": "location-azure-archive-v1",
"legacyAwsBehavior": false,
"isCold": true,
"details": {}
} }
} }

View File

@ -1,12 +0,0 @@
{
"STANDARD": {
"type": "vitastor",
"objectId": "std",
"legacyAwsBehavior": true,
"details": {
"config_path": "/etc/vitastor/vitastor.conf",
"pool_id": 3,
"metadata_image": "s3-volume-meta"
}
}
}

179
managementAgent.js Normal file
View File

@ -0,0 +1,179 @@
const Uuid = require('uuid');
const WebSocket = require('ws');
const logger = require('./lib/utilities/logger');
const { initManagement } = require('./lib/management');
const _config = require('./lib/Config').config;
const { managementAgentMessageType } = require('./lib/management/agentClient');
const { addOverlayMessageListener } = require('./lib/management/push');
const { saveConfigurationVersion } = require('./lib/management/configuration');
// TODO: auth?
// TODO: werelogs with a specific name.
const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000;
class ManagementAgentServer {
constructor() {
this.port = _config.managementAgent.port || 8010;
this.wss = null;
this.loadedOverlay = null;
this.stop = this.stop.bind(this);
process.on('SIGINT', this.stop);
process.on('SIGHUP', this.stop);
process.on('SIGQUIT', this.stop);
process.on('SIGTERM', this.stop);
process.on('SIGPIPE', () => {});
}
start(_cb) {
const cb = _cb || function noop() {};
/* Define REPORT_TOKEN env variable needed by the management
* module. */
process.env.REPORT_TOKEN = process.env.REPORT_TOKEN
|| _config.reportToken
|| Uuid.v4();
initManagement(logger.newRequestLogger(), overlay => {
let error = null;
if (overlay) {
this.loadedOverlay = overlay;
this.startServer();
} else {
error = new Error('failed to init management');
}
return cb(error);
});
}
stop() {
if (!this.wss) {
process.exit(0);
return;
}
this.wss.close(() => {
logger.info('server shutdown');
process.exit(0);
});
}
startServer() {
this.wss = new WebSocket.Server({
port: this.port,
clientTracking: true,
path: '/watch',
});
this.wss.on('connection', this.onConnection.bind(this));
this.wss.on('listening', this.onListening.bind(this));
this.wss.on('error', this.onError.bind(this));
setInterval(this.checkBrokenConnections.bind(this),
CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS);
addOverlayMessageListener(this.onNewOverlay.bind(this));
}
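// Each new client gets the currently loaded overlay right away as a
// NEW_OVERLAY message; subsequent overlay updates are broadcast from
// onNewOverlay() below.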
onConnection(socket, request) {
function hearthbeat() {
this.isAlive = true;
}
logger.info('client connected to watch route', {
ip: request.connection.remoteAddress,
});
/* eslint-disable no-param-reassign */
socket.isAlive = true;
socket.on('pong', hearthbeat.bind(socket));
if (socket.readyState !== socket.OPEN) {
logger.error('client socket not in ready state', {
state: socket.readyState,
client: socket._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
socket.send(JSON.stringify(msg), error => {
if (error) {
logger.error('failed to send remoteOverlay to client', {
error,
client: socket._socket._peername,
});
}
});
}
onListening() {
logger.info('websocket server listening',
{ port: this.port });
}
onError(error) {
logger.error('websocket server error', { error });
}
_sendNewOverlayToClient(client) {
if (client.readyState !== client.OPEN) {
logger.error('client socket not in ready state', {
state: client.readyState,
client: client._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
client.send(JSON.stringify(msg), error => {
if (error) {
logger.error(
'failed to send remoteOverlay to management agent client', {
error, client: client._socket._peername,
});
}
});
}
onNewOverlay(remoteOverlay) {
const remoteOverlayObj = JSON.parse(remoteOverlay);
saveConfigurationVersion(
this.loadedOverlay, remoteOverlayObj, logger, err => {
if (err) {
logger.error('failed to save remote overlay', { err });
return;
}
this.loadedOverlay = remoteOverlayObj;
this.wss.clients.forEach(
this._sendNewOverlayToClient.bind(this)
);
});
}
checkBrokenConnections() {
this.wss.clients.forEach(client => {
if (!client.isAlive) {
logger.info('close broken connection', {
client: client._socket._peername,
});
client.terminate();
return;
}
client.isAlive = false;
client.ping();
});
}
}
const server = new ManagementAgentServer();
server.start();
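
For reference, a minimal sketch of a client consuming this watch endpoint — not part of the diff; it assumes the default port 8010 and reuses the managementAgentMessageType constant imported in the file above:

const WebSocket = require('ws');
const { managementAgentMessageType } = require('./lib/management/agentClient');

// Connect to the management agent's watch route (default port 8010, path /watch).
const socket = new WebSocket('ws://localhost:8010/watch');

socket.on('message', data => {
    const msg = JSON.parse(data);
    // The server pushes the currently loaded overlay on connection and on updates.
    if (msg.messageType === managementAgentMessageType.NEW_OVERLAY) {
        console.log('received overlay', msg.payload);
    }
});

// The server pings clients every 15s and terminates unresponsive ones; the ws
// library answers pings automatically, so no extra keep-alive handling is needed.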

View File

@ -192,163 +192,3 @@ tests:
summary: Very high delete latency summary: Very high delete latency
exp_labels: exp_labels:
severity: critical severity: critical
# QuotaMetricsNotAvailable (case with bucket quota)
##################################################################################################
- name: Quota metrics not available (bucket quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with account quota)
##################################################################################################
- name: Quota metrics not available (account quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with both quota quota)
##################################################################################################
- name: Quota metrics not available (account quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case without quota)
##################################################################################################
- name: Utilization service Latency
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaUnavailable
##################################################################################################
- name: Quota evaluation disabled
interval: 1m
input_series:
- series: s3_cloudserver_quota_unavailable_count{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 0+0x6 1+1x20 0+0x6
alert_rule_test:
- alertname: QuotaUnavailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaUnavailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet.
summary: High number of quota requests with metrics unavailable
exp_labels:
severity: critical
- alertname: QuotaUnavailable
eval_time: 30m
exp_alerts: []

View File

@ -6,9 +6,6 @@ x-inputs:
- name: service - name: service
type: constant type: constant
value: artesca-data-connector-s3api-metrics value: artesca-data-connector-s3api-metrics
- name: reportJob
type: constant
value: artesca-data-ops-report-handler
- name: replicas - name: replicas
type: constant type: constant
- name: systemErrorsWarningThreshold - name: systemErrorsWarningThreshold
@ -29,9 +26,6 @@ x-inputs:
- name: deleteLatencyCriticalThreshold - name: deleteLatencyCriticalThreshold
type: config type: config
value: 1.000 value: 1.000
- name: quotaUnavailabilityThreshold
type: config
value: 0.500
groups: groups:
- name: CloudServer - name: CloudServer
@ -138,45 +132,3 @@ groups:
annotations: annotations:
description: "Latency of delete object operations is more than 1s" description: "Latency of delete object operations is more than 1s"
summary: "Very high delete latency" summary: "Very high delete latency"
# As a platform admin I want to be alerted (warning) when the utilization metrics service is enabled
# but not available for at least half of the S3 services during the last minute
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
labels:
severity: warning
annotations:
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
summary: "Utilization metrics service not available"
# As a platform admin I want to be alerted (critical) when the utilization metrics service is enabled
# but not available during the last 10 minutes
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
for: 10m
labels:
severity: critical
annotations:
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
summary: "Utilization metrics service not available"
# As a platform admin I want to be alerted (critical) when quotas were not honored due to metrics
# being unavailable
- alert: QuotaUnavailable
expr: |
sum(increase(s3_cloudserver_quota_unavailable_count{namespace="${namespace}",service="${service}"}[2m]))
> 0
for: 5m
labels:
severity: critical
annotations:
description: "Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet."
summary: "High number of quota requests with metrics unavailable"

View File

@ -1625,7 +1625,7 @@
"targets": [ "targets": [
{ {
"datasource": null, "datasource": null,
"expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)", "expr": "sum(rate(http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)",
"format": "time_series", "format": "time_series",
"hide": false, "hide": false,
"instant": false, "instant": false,
@ -1697,7 +1697,7 @@
"targets": [ "targets": [
{ {
"datasource": null, "datasource": null,
"expr": "sum(round(increase(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)", "expr": "sum(round(increase(http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)",
"format": "time_series", "format": "time_series",
"hide": false, "hide": false,
"instant": false, "instant": false,
@ -1931,7 +1931,7 @@
"targets": [ "targets": [
{ {
"datasource": null, "datasource": null,
"expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", "expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"format": "heatmap", "format": "heatmap",
"hide": false, "hide": false,
"instant": false, "instant": false,
@ -1960,7 +1960,7 @@
}, },
"yAxis": { "yAxis": {
"decimals": null, "decimals": null,
"format": "s", "format": "dtdurations",
"label": null, "label": null,
"logBase": 1, "logBase": 1,
"max": null, "max": null,
@ -2182,7 +2182,7 @@
"targets": [ "targets": [
{ {
"datasource": null, "datasource": null,
"expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", "expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"format": "time_series", "format": "time_series",
"hide": false, "hide": false,
"instant": false, "instant": false,
@ -2196,7 +2196,7 @@
}, },
{ {
"datasource": null, "datasource": null,
"expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))", "expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"format": "time_series", "format": "time_series",
"hide": false, "hide": false,
"instant": false, "instant": false,
@ -2665,865 +2665,6 @@
"transformations": [], "transformations": [],
"transparent": false, "transparent": false,
"type": "piechart" "type": "piechart"
},
{
"collapsed": false,
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": []
}
}
},
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 65
},
"hideTimeOverride": false,
"id": 34,
"links": [],
"maxDataPoints": 100,
"panels": [],
"targets": [],
"title": "Quotas",
"transformations": [],
"transparent": false,
"type": "row"
},
{
"datasource": "${DS_PROMETHEUS}",
"description": "Number of S3 buckets with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"decimals": null,
"mappings": [],
"noValue": "-",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#808080",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "blue",
"index": 1,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 66
},
"hideTimeOverride": false,
"id": 35,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"targets": [
{
"datasource": null,
"expr": "max(s3_cloudserver_quota_buckets_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Buckets with quota",
"transformations": [],
"transparent": false,
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"description": "Number of accounts with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"decimals": null,
"mappings": [],
"noValue": "-",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#808080",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "blue",
"index": 1,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 70
},
"hideTimeOverride": false,
"id": 36,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"targets": [
{
"datasource": null,
"expr": "max(s3_cloudserver_quota_accounts_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Accounts with quota",
"transformations": [],
"transparent": false,
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 66
},
"hideTimeOverride": false,
"id": 37,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "hidden",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_unavailable_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Operations with unavailable metrics",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 66
},
"hideTimeOverride": false,
"id": 38,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval])) by(action)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{action}}",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluaton rate per S3 action",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "stepAfter",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "orange",
"index": 1,
"line": true,
"op": "gt",
"value": 90.0,
"yaxis": "left"
},
{
"color": "red",
"index": 2,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 74
},
"hideTimeOverride": false,
"id": 39,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "hidden",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace=\"${namespace}\",job=\"${job}\"}[$__rate_interval])) * 100",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota service uptime",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 74
},
"hideTimeOverride": false,
"id": 40,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=~\"2..\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "Success",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=\"429\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "Quota Exceeded",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluation status code over time",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": 180000,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 74
},
"hideTimeOverride": false,
"id": 41,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ type }} (success)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ type }} (exceeded)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Average quota evaluation latencies",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"cards": {
"cardPadding": null,
"cardRound": null
},
"color": {
"cardColor": "#b4ff00",
"colorScale": "sqrt",
"colorScheme": "interpolateOranges",
"exponent": 0.5,
"max": null,
"min": null,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": []
}
}
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 82
},
"heatmap": {},
"hideTimeOverride": false,
"hideZeroBuckets": false,
"highlightCards": true,
"id": 42,
"legend": {
"show": false
},
"links": [],
"maxDataPoints": 25,
"reverseYBuckets": false,
"targets": [
{
"datasource": null,
"expr": "sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "heatmap",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ le }}",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluation duration",
"tooltip": {
"show": true,
"showHistogram": true
},
"transformations": [],
"transparent": false,
"type": "heatmap",
"xAxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yAxis": {
"decimals": null,
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": 180000,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 18,
"x": 6,
"y": 82
},
"hideTimeOverride": false,
"id": 43,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ class }} (success)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ class }} (error)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Average utilization metrics retrieval latencies",
"transformations": [],
"transparent": false,
"type": "timeseries"
} }
], ],
"refresh": "30s", "refresh": "30s",
@ -3625,5 +2766,5 @@
"timezone": "", "timezone": "",
"title": "S3 service", "title": "S3 service",
"uid": null, "uid": null,
"version": 110 "version": 31
} }
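
The quota panels and alerts above all read Prometheus metrics exposed by CloudServer (s3_cloudserver_quota_evaluation_duration_seconds, s3_cloudserver_quota_unavailable_count, s3_cloudserver_quota_utilization_service_available). As a rough, hypothetical sketch only — the real instrumentation is not part of this diff, and the help strings, buckets, and sample values below are assumptions — registering such metrics with prom-client (already a dependency of this project) could look like:

const promClient = require('prom-client');

// Histogram behind the "Quota evaluation duration" heatmap and the average
// latency panels; label names match the dashboard queries (action, code, type).
const quotaEvaluationDuration = new promClient.Histogram({
    name: 's3_cloudserver_quota_evaluation_duration_seconds',
    help: 'Duration of quota evaluation per S3 action', // assumed help text
    labelNames: ['action', 'code', 'type'],
});

// Counter behind the "Operations with unavailable metrics" panel and the
// QuotaUnavailable alert.
const quotaUnavailableCount = new promClient.Counter({
    name: 's3_cloudserver_quota_unavailable_count',
    help: 'Quota evaluations performed while utilization metrics were unavailable',
});

// Gauge behind "Quota service uptime" and the QuotaMetricsNotAvailable alerts.
const quotaServiceAvailable = new promClient.Gauge({
    name: 's3_cloudserver_quota_utilization_service_available',
    help: '1 when the utilization metrics service is reachable, 0 otherwise',
});

// Example observations (hypothetical values):
quotaEvaluationDuration.observe({ action: 'putObject', code: 200, type: 'bucket' }, 0.012);
quotaUnavailableCount.inc();
quotaServiceAvailable.set(1);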

View File

@ -331,7 +331,7 @@ requestsByAction = TimeSeries(
unit=UNITS.OPS_PER_SEC, unit=UNITS.OPS_PER_SEC,
targets=[ targets=[
Target( Target(
expr='sum(rate(s3_cloudserver_http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval])) by(action)', # noqa: E501 expr='sum(rate(http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval])) by(action)', # noqa: E501
legendFormat="{{action}}", legendFormat="{{action}}",
) )
] ]
@ -345,7 +345,7 @@ requestsByMethod = PieChart(
unit=UNITS.SHORT, unit=UNITS.SHORT,
targets=[ targets=[
Target( Target(
expr='sum(round(increase(s3_cloudserver_http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval]))) by(method)', # noqa: E501 expr='sum(round(increase(http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval]))) by(method)', # noqa: E501
legendFormat="{{method}}", legendFormat="{{method}}",
), ),
], ],
@ -366,28 +366,6 @@ def average_latency_target(title, action="", by=""):
) )
def average_quota_latency_target(code="", by=""):
# type: (str, str) -> Target
extra = ', code=' + code if code else ""
by = " by (" + by + ")" if by else ""
return "\n".join([
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
" /",
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501,
])
def average_quota_retrieval_latency(code="", by=""):
# type: (str, str) -> Target
extra = ', code=' + code if code else ""
by = " by (" + by + ")" if by else ""
return "\n".join([
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
" /",
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501,
])
averageLatencies = TimeSeries( averageLatencies = TimeSeries(
title="Average latencies", title="Average latencies",
dataSource="${DS_PROMETHEUS}", dataSource="${DS_PROMETHEUS}",
@ -428,10 +406,10 @@ requestTime = Heatmap(
dataFormat="tsbuckets", dataFormat="tsbuckets",
maxDataPoints=25, maxDataPoints=25,
tooltip=Tooltip(show=True, showHistogram=True), tooltip=Tooltip(show=True, showHistogram=True),
yAxis=YAxis(format=UNITS.SECONDS), yAxis=YAxis(format=UNITS.DURATION_SECONDS),
color=HeatmapColor(mode="opacity"), color=HeatmapColor(mode="opacity"),
targets=[Target( targets=[Target(
expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501 expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
format="heatmap", format="heatmap",
legendFormat="{{ le }}", legendFormat="{{ le }}",
)], )],
@ -455,11 +433,11 @@ bandWidth = TimeSeries(
unit="binBps", unit="binBps",
targets=[ targets=[
Target( Target(
expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501 expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
legendFormat="Out" legendFormat="Out"
), ),
Target( Target(
expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501 expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
legendFormat="In" legendFormat="In"
) )
], ],
@ -547,174 +525,6 @@ top10Error5xxByBucket = top10_errors_by_bucket(
title="5xx : Top10 by Bucket", code='~"5.."' title="5xx : Top10 by Bucket", code='~"5.."'
) )
quotaHealth = TimeSeries(
title="Quota service uptime",
legendDisplayMode="hidden",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="stepAfter",
fillOpacity=30,
unit=UNITS.PERCENT_FORMAT,
targets=[Target(
expr='avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",job="${job}"}[$__rate_interval])) * 100', # noqa: E501
)],
thresholds=[
Threshold("green", 0, 95.0),
Threshold("orange", 1, 90.0),
Threshold("red", 2, 0.0),
],
)
quotaStatusCode = TimeSeries(
title="Quota evaluation status code over time",
dataSource="${DS_PROMETHEUS}",
fillOpacity=30,
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
targets=[Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code=~"2..", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="Success",
), Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code="429", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="Quota Exceeded",
)],
)
quotaByAction = TimeSeries(
title="Quota evaluaton rate per S3 action",
dataSource="${DS_PROMETHEUS}",
legendDisplayMode="table",
legendPlacement="right",
legendValues=["min", "mean", "max"],
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
targets=[
Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"}[$__rate_interval])) by(action)', # noqa: E501
legendFormat="{{action}}",
)
]
)
averageQuotaDuration = Heatmap(
title="Quota evaluation duration",
dataSource="${DS_PROMETHEUS}",
dataFormat="tsbuckets",
maxDataPoints=25,
tooltip=Tooltip(show=True, showHistogram=True),
yAxis=YAxis(format=UNITS.SECONDS),
color=HeatmapColor(mode="opacity"),
targets=[Target(
expr='sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
format="heatmap",
legendFormat="{{ le }}",
)],
)
operationsWithUnavailableMetrics = TimeSeries(
title="Operations with unavailable metrics",
dataSource="${DS_PROMETHEUS}",
fillOpacity=30,
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
legendDisplayMode="hidden",
targets=[Target(
expr='sum(rate(s3_cloudserver_quota_unavailable_count{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
)],
)
averageQuotaLatencies = TimeSeries(
title="Average quota evaluation latencies",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="smooth",
spanNulls=3*60*1000,
legendDisplayMode="table",
legendPlacement="right",
legendValues=["min", "mean", "max"],
unit=UNITS.SECONDS,
targets=[
Target(
expr=average_quota_latency_target(code='~"2.."', by='type'),
legendFormat='{{ type }} (success)',
),
Target(
expr=average_quota_latency_target(code='"429"', by='type'),
legendFormat='{{ type }} (exceeded)',
),
],
)
averageMetricsRetrievalLatencies = TimeSeries(
title="Average utilization metrics retrieval latencies",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="smooth",
spanNulls=3*60*1000,
unit=UNITS.SECONDS,
targets=[
Target(
expr=average_quota_retrieval_latency(code='~"2.."', by='class'),
legendFormat='{{ class }} (success)',
),
Target(
expr=average_quota_retrieval_latency(
code='~"4..|5.."',
by='class'
),
legendFormat='{{ class }} (error)',
),
],
)
bucketQuotaCounter = Stat(
title="Buckets with quota",
description=(
"Number of S3 buckets with quota enabled in the cluster.\n"
"This value is computed asynchronously, and update "
"may be delayed up to 1h."
),
dataSource="${DS_PROMETHEUS}",
colorMode="value",
format=UNITS.SHORT,
noValue="-",
reduceCalc="lastNotNull",
targets=[Target(
expr='max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
)],
thresholds=[
Threshold("#808080", 0, 0.0),
Threshold("blue", 1, 0.0),
],
)
accountQuotaCounter = Stat(
title="Accounts with quota",
description=(
"Number of accounts with quota enabled in the cluster.\n"
"This value is computed asynchronously, and update "
"may be delayed up to 1h."
),
dataSource="${DS_PROMETHEUS}",
colorMode="value",
format=UNITS.SHORT,
noValue="-",
reduceCalc="lastNotNull",
targets=[Target(
expr='max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
)],
thresholds=[
Threshold("#808080", 0, 0.0),
Threshold("blue", 1, 0.0),
],
)
dashboard = ( dashboard = (
Dashboard( Dashboard(
title="S3 service", title="S3 service",
@ -820,24 +630,6 @@ dashboard = (
top10Error500ByBucket, top10Error500ByBucket,
top10Error5xxByBucket top10Error5xxByBucket
], height=8), ], height=8),
RowPanel(title="Quotas"),
layout.row([
layout.column([
layout.resize([bucketQuotaCounter], width=6, height=4),
layout.resize([accountQuotaCounter], width=6, height=4),
], height=8),
layout.resize([operationsWithUnavailableMetrics], width=6),
quotaByAction,
], height=8),
layout.row([
layout.resize([quotaHealth], width=6),
layout.resize([quotaStatusCode], width=6),
averageQuotaLatencies,
], height=8),
layout.row([
layout.resize([averageQuotaDuration], width=6),
averageMetricsRetrievalLatencies,
], height=8),
]), ]),
) )
.auto_panel_ids() .auto_panel_ids()

View File

@ -45,8 +45,8 @@ then
exit 1 exit 1
fi fi
REGISTRY=${REGISTRY:-"ghcr.io/scality"} REGISTRY=${REGISTRY:-"registry.scality.com"}
PROJECT=${PROJECT:-"cloudserver"} PROJECT=${PROJECT:-"cloudserver-dev"}
set -x set -x
${ORAS} push "${REGISTRY}/${PROJECT}/${NAME_TAG}" "${INPUT_FILE}:${MIME_TYPE}" ${ORAS} push "${REGISTRY}/${PROJECT}/${NAME_TAG}" "${INPUT_FILE}:${MIME_TYPE}"

View File

@ -1,6 +1,6 @@
{ {
"name": "@zenko/cloudserver", "name": "@zenko/cloudserver",
"version": "8.8.27", "version": "8.6.26",
"description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol", "description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
"main": "index.js", "main": "index.js",
"engines": { "engines": {
@ -21,61 +21,53 @@
"dependencies": { "dependencies": {
"@azure/storage-blob": "^12.12.0", "@azure/storage-blob": "^12.12.0",
"@hapi/joi": "^17.1.0", "@hapi/joi": "^17.1.0",
"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1", "arsenal": "git+https://github.com/scality/arsenal#8.1.127",
"async": "^2.5.0", "async": "~2.5.0",
"aws-sdk": "^2.905.0", "aws-sdk": "2.905.0",
"bucketclient": "scality/bucketclient#8.1.9",
"bufferutil": "^4.0.6", "bufferutil": "^4.0.6",
"commander": "^2.9.0", "commander": "^2.9.0",
"cron-parser": "^2.11.0", "cron-parser": "^2.11.0",
"diskusage": "^1.1.3", "diskusage": "1.1.3",
"google-auto-auth": "^0.9.1", "google-auto-auth": "^0.9.1",
"http-proxy": "^1.17.0", "http-proxy": "^1.17.0",
"http-proxy-agent": "^4.0.1", "http-proxy-agent": "^4.0.1",
"https-proxy-agent": "^2.2.0", "https-proxy-agent": "^2.2.0",
"level-mem": "^5.0.1", "level-mem": "^5.0.1",
"moment": "^2.26.0", "moment": "^2.26.0",
"mongodb": "^5.2.0", "mongodb": "^2.2.31",
"node-fetch": "^2.6.0", "node-fetch": "^2.6.0",
"node-forge": "^0.7.1", "node-forge": "^0.7.1",
"npm-run-all": "^4.1.5", "npm-run-all": "~4.1.5",
"prom-client": "14.2.0", "prom-client": "14.2.0",
"request": "^2.81.0", "request": "^2.81.0",
"scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git", "sql-where-parser": "~2.2.1",
"sql-where-parser": "^2.2.1", "utapi": "github:scality/utapi#8.1.13",
"utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
"utf-8-validate": "^5.0.8", "utf-8-validate": "^5.0.8",
"utf8": "^2.1.1", "utf8": "~2.1.1",
"uuid": "^8.3.2", "uuid": "^8.3.2",
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1", "vaultclient": "scality/vaultclient#8.3.11",
"werelogs": "scality/werelogs#8.1.4",
"ws": "^5.1.0", "ws": "^5.1.0",
"xml2js": "^0.4.16" "xml2js": "~0.4.16"
},
"overrides": {
"ltgt": "^2.2.0"
}, },
"devDependencies": { "devDependencies": {
"@babel/core": "^7.25.2",
"@babel/preset-env": "^7.25.3",
"babel-loader": "^9.1.3",
"bluebird": "^3.3.1", "bluebird": "^3.3.1",
"eslint": "^8.14.0", "eslint": "^8.14.0",
"eslint-config-airbnb-base": "^15.0.0", "eslint-config-airbnb-base": "^13.1.0",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git", "eslint-config-scality": "scality/Guidelines#8.2.0",
"eslint-plugin-import": "^2.14.0", "eslint-plugin-import": "^2.14.0",
"eslint-plugin-mocha": "^10.1.0",
"express": "^4.17.1", "express": "^4.17.1",
"ioredis": "^4.9.5", "ioredis": "4.9.5",
"istanbul": "^1.0.0-alpha.2", "istanbul": "1.0.0-alpha.2",
"istanbul-api": "^1.0.0-alpha.13", "istanbul-api": "1.0.0-alpha.13",
"lolex": "^1.4.0", "lolex": "^1.4.0",
"mocha": ">=3.1.2", "mocha": "^2.3.4",
"mocha-junit-reporter": "^1.23.1", "mocha-junit-reporter": "^1.23.1",
"mocha-multi-reporters": "^1.1.7", "mocha-multi-reporters": "^1.1.7",
"node-mocks-http": "^1.5.2", "node-mocks-http": "1.5.2",
"sinon": "^13.0.1", "sinon": "^13.0.1",
"tv4": "^1.2.7", "tv4": "^1.2.7"
"webpack": "^5.93.0",
"webpack-cli": "^5.1.4"
}, },
"scripts": { "scripts": {
"cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server", "cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",
@ -116,11 +108,10 @@
"utapi_replay": "node lib/utapi/utapiReplay.js", "utapi_replay": "node lib/utapi/utapiReplay.js",
"utapi_reindex": "node lib/utapi/utapiReindex.js", "utapi_reindex": "node lib/utapi/utapiReindex.js",
"management_agent": "node managementAgent.js", "management_agent": "node managementAgent.js",
"test": "CI=true S3BACKEND=mem S3QUOTA=scuba mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit", "test": "CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api", "test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api",
"test_legacy_location": "CI=true S3QUOTA=scuba S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit", "test_legacy_location": "CI=true S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi", "test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi",
"test_quota": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/quota",
"multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend", "multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend",
"unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit", "unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit" "unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit"

View File

@ -1,39 +0,0 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;
const bucket = 'deletequotatestbucket';
const nonExistantBucket = 'deletequotatestnonexistantbucket';
describe('Test delete bucket quota', () => {
let s3;
before(() => {
const config = getConfig('default', { signatureVersion: 'v4' });
s3 = new S3(config);
AWS.config.update(config);
});
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
it('should delete the bucket quota', async () => {
try {
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
assert.ok(true);
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
it('should return no such bucket error', async () => {
try {
await sendRequest('DELETE', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`);
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
}
});
});

View File

@ -1,77 +0,0 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;
const bucket = 'getquotatestbucket';
const quota = { quota: 1000 };
describe('Test get bucket quota', () => {
let s3;
before(() => {
const config = getConfig('default', { signatureVersion: 'v4' });
s3 = new S3(config);
AWS.config.update(config);
});
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
it('should return the quota', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
const data = await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
assert.strictEqual(data.GetBucketQuota.Name[0], bucket);
assert.strictEqual(data.GetBucketQuota.Quota[0], '1000');
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
it('should return no such bucket error', async () => {
try {
await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
}
});
it('should return no such bucket quota', async () => {
try {
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
try {
await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
assert.fail('Expected NoSuchQuota error');
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
}
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
it('should return no such bucket error', async () => {
try {
await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
}
});
it('should return no such bucket quota', async () => {
try {
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
try {
await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
assert.fail('Expected NoSuchQuota error');
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
}
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
});

View File

@ -475,58 +475,4 @@ describe('Listing corner cases tests', () => {
} }
}); });
}); });
it('should not list DeleteMarkers for version suspended buckets', done => {
const obj = { name: 'testDeleteMarker.txt', value: 'foo' };
const bucketName = `bucket-test-delete-markers-not-listed${Date.now()}`;
let objectCount = 0;
return async.waterfall([
next => s3.createBucket({ Bucket: bucketName }, err => next(err)),
next => {
const params = {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Suspended',
},
};
return s3.putBucketVersioning(params, err =>
next(err));
},
next => s3.putObject({
Bucket: bucketName,
Key: obj.name,
Body: obj.value,
}, err =>
next(err)),
next => s3.listObjectsV2({ Bucket: bucketName },
(err, res) => {
if (err) {
return next(err);
}
objectCount = res.Contents.length;
assert.strictEqual(res.Contents.some(c => c.Key === obj.name), true);
return next();
}),
next => s3.deleteObject({
Bucket: bucketName,
Key: obj.name,
}, function test(err) {
const headers = this.httpResponse.headers;
assert.strictEqual(
headers['x-amz-delete-marker'], 'true');
return next(err);
}),
next => s3.listObjectsV2({ Bucket: bucketName },
(err, res) => {
if (err) {
return next(err);
}
assert.strictEqual(res.Contents.length, objectCount - 1);
assert.strictEqual(res.Contents.some(c => c.Key === obj.name), false);
return next();
}),
next => s3.deleteObject({ Bucket: bucketName, Key: obj.name, VersionId: 'null' }, err => next(err)),
next => s3.deleteBucket({ Bucket: bucketName }, err => next(err))
], err => done(err));
});
}); });

View File

@ -1,70 +0,0 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;
const bucket = 'updatequotatestbucket';
const nonExistantBucket = 'updatequotatestnonexistantbucket';
const quota = { quota: 2000 };
const negativeQuota = { quota: -1000 };
const wrongQuotaFormat = '1000';
const largeQuota = { quota: 1000000000000 };
describe('Test update bucket quota', () => {
let s3;
before(() => {
const config = getConfig('default', { signatureVersion: 'v4' });
s3 = new S3(config);
AWS.config.update(config);
});
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
it('should update the quota', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
assert.ok(true);
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
it('should return no such bucket error', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`, JSON.stringify(quota));
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
}
});
it('should return error when quota is negative', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(negativeQuota));
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
assert.strictEqual(err.Error.Message[0], 'Quota value must be a positive number');
}
});
it('should return error when quota is not in correct format', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, wrongQuotaFormat);
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
assert.strictEqual(err.Error.Message[0], 'Request body must be a JSON object');
}
});
it('should handle large quota values', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(largeQuota));
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
});
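
The three quota test suites above all go through a sendRequest helper from tests/functional/aws-node-sdk/test/quota/tooling, which is not shown in this diff. Judging from its usage here, it takes (method, host, path[, body]) and resolves with the parsed XML response; a hypothetical call that sets a bucket quota and reads it back would then be:

const { sendRequest } = require('../quota/tooling'); // helper assumed from the tests above

async function setAndReadQuota(bucket) {
    // PUT the quota as a JSON body, then GET it back; the ?quota=true route and
    // the response shape match the assertions in the tests above.
    await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify({ quota: 5000 }));
    const data = await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
    return Number(data.GetBucketQuota.Quota[0]);
}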

View File

@ -33,7 +33,7 @@ describe('aws-node-sdk v2auth query tests', function testSuite() {
let s3; let s3;
before(() => { before(() => {
const config = getConfig('default', { signatureVersion: 'v2' }); const config = getConfig('default');
s3 = new S3(config); s3 = new S3(config);
}); });

View File

@ -45,7 +45,7 @@ const itSkipCeph = isCEPH ? it.skip : it.skip;
const describeSkipIfCeph = isCEPH ? describe.skip : describe.skip; // always skip const describeSkipIfCeph = isCEPH ? describe.skip : describe.skip; // always skip
if (config.backends.data === 'multiple') { if (config.backends.data === 'multiple') {
describeSkipIfNotMultiple = describe; describeSkipIfNotMultiple = describe.skip;
describeSkipIfNotMultipleOrCeph = isCEPH ? describe.skip : describe.skip; // always skip describeSkipIfNotMultipleOrCeph = isCEPH ? describe.skip : describe.skip; // always skip
const awsConfig = getRealAwsConfig(awsLocation); const awsConfig = getRealAwsConfig(awsLocation);
awsS3 = new AWS.S3(awsConfig); awsS3 = new AWS.S3(awsConfig);

View File

@ -7,7 +7,6 @@ const withV4 = require('../support/withV4');
const BucketUtility = require('../../lib/utility/bucket-util'); const BucketUtility = require('../../lib/utility/bucket-util');
const { createEncryptedBucketPromise } = const { createEncryptedBucketPromise } =
require('../../lib/utility/createEncryptedBucket'); require('../../lib/utility/createEncryptedBucket');
const { fakeMetadataTransition, fakeMetadataArchive } = require('../utils/init');
const sourceBucketName = 'supersourcebucket81033016532'; const sourceBucketName = 'supersourcebucket81033016532';
const sourceObjName = 'supersourceobject'; const sourceObjName = 'supersourceobject';
@ -711,72 +710,6 @@ describe('Object Part Copy', () => {
}); });
}); });
it('should not copy a part of a cold object', done => {
const archive = {
archiveInfo: {
archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322',
archiveVersion: 5577006791947779
},
};
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => {
assert.ifError(err);
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}, err => {
assert.strictEqual(err.code, 'InvalidObjectState');
assert.strictEqual(err.statusCode, 403);
done();
});
});
});
it('should copy a part of an object when it\'s transitioning to cold', done => {
fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => {
assert.ifError(err);
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}, (err, res) => {
checkNoError(err);
assert.strictEqual(res.ETag, etag);
assert(res.LastModified);
done();
});
});
});
it('should copy a part of a restored object', done => {
const archiveCompleted = {
archiveInfo: {},
restoreRequestedAt: new Date(0),
restoreRequestedDays: 5,
restoreCompletedAt: new Date(10),
restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)),
};
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err => {
assert.ifError(err);
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}, (err, res) => {
checkNoError(err);
assert.strictEqual(res.ETag, etag);
assert(res.LastModified);
done();
});
});
});
describe('copying parts by another account', () => { describe('copying parts by another account', () => {
const otherAccountBucket = 'otheraccountbucket42342342342'; const otherAccountBucket = 'otheraccountbucket42342342342';
const otherAccountKey = 'key'; const otherAccountKey = 'key';

View File

@ -6,13 +6,12 @@ const BucketUtility = require('../../lib/utility/bucket-util');
const metadata = require('../../../../../lib/metadata/wrapper'); const metadata = require('../../../../../lib/metadata/wrapper');
const { DummyRequestLogger } = require('../../../../unit/helpers'); const { DummyRequestLogger } = require('../../../../unit/helpers');
const checkError = require('../../lib/utility/checkError'); const checkError = require('../../lib/utility/checkError');
const { getMetadata, fakeMetadataArchive } = require('../utils/init'); const { getMetadata, fakeMetadataRestore } = require('../utils/init');
const log = new DummyRequestLogger(); const log = new DummyRequestLogger();
const bucketName = 'bucket1putversion33'; const bucketName = 'bucket1putversion33';
const objectName = 'object1putversion'; const objectName = 'object1putversion';
const bucketNameMD = 'restore-metadata-copy-bucket-mpu';
const mdListingParams = { listingType: 'DelimiterVersions', maxKeys: 1000 }; const mdListingParams = { listingType: 'DelimiterVersions', maxKeys: 1000 };
const archive = { const archive = {
archiveInfo: {}, archiveInfo: {},
@ -105,19 +104,22 @@ describe('MPU with x-scal-s3-version-id header', () => {
beforeEach(done => { beforeEach(done => {
bucketUtil = new BucketUtility('default', sigCfg); bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3; s3 = bucketUtil.s3;
async.series([ return metadata.setup(() =>
next => metadata.setup(next), s3.createBucket({ Bucket: bucketName }, err => {
next => s3.createBucket({ Bucket: bucketName }, next), if (err) {
next => s3.createBucket({ Bucket: bucketNameMD, ObjectLockEnabledForBucket: true, }, next), assert.strictEqual(err, null, 'Creating bucket: Expected success, ' +
], done); `got error ${JSON.stringify(err)}`);
}
done();
}));
}); });
afterEach(() => { afterEach(() => {
process.stdout.write('Emptying bucket'); process.stdout.write('Emptying bucket');
return bucketUtil.emptyMany([bucketName, bucketNameMD]) return bucketUtil.empty(bucketName)
.then(() => { .then(() => {
process.stdout.write('Deleting bucket'); process.stdout.write('Deleting bucket');
return bucketUtil.deleteMany([bucketName, bucketNameMD]); return bucketUtil.deleteOne(bucketName);
}) })
.catch(err => { .catch(err => {
process.stdout.write('Error in afterEach'); process.stdout.write('Error in afterEach');
@ -133,7 +135,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
async.series([ async.series([
next => putMPU(s3, bucketName, objectName, next), next => putMPU(s3, bucketName, objectName, next),
next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), next => fakeMetadataRestore(bucketName, objectName, undefined, archive, next),
next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
objMDBefore = objMD; objMDBefore = objMD;
return next(err); return next(err);
@ -174,7 +176,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
async.series([ async.series([
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), next => fakeMetadataRestore(bucketName, objectName, undefined, archive, next),
next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
objMDBefore = objMD; objMDBefore = objMD;
return next(err); return next(err);
@ -227,7 +229,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
vId = res.VersionId; vId = res.VersionId;
return next(err); return next(err);
}), }),
next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), next => fakeMetadataRestore(bucketName, objectName, vId, archive, next),
next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
versionsBefore = res.Versions; versionsBefore = res.Versions;
return next(err); return next(err);
@ -279,7 +281,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
vId = res.VersionId; vId = res.VersionId;
return next(err); return next(err);
}), }),
next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), next => fakeMetadataRestore(bucketName, objectName, vId, archive, next),
next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
versionsBefore = res.Versions; versionsBefore = res.Versions;
return next(err); return next(err);
@ -386,7 +388,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => s3.putBucketVersioning(vParams, next), next => s3.putBucketVersioning(vParams, next),
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => fakeMetadataArchive(bucketName, objectName, 'null', archive, next), next => fakeMetadataRestore(bucketName, objectName, 'null', archive, next),
next => getMetadata(bucketName, objectName, 'null', (err, objMD) => { next => getMetadata(bucketName, objectName, 'null', (err, objMD) => {
objMDBefore = objMD; objMDBefore = objMD;
return next(err); return next(err);
@ -439,7 +441,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
vId = res.VersionId; vId = res.VersionId;
return next(err); return next(err);
}), }),
next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), next => fakeMetadataRestore(bucketName, objectName, vId, archive, next),
next => getMetadata(bucketName, objectName, vId, (err, objMD) => { next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
objMDBefore = objMD; objMDBefore = objMD;
return next(err); return next(err);
@ -495,7 +497,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => s3.putBucketVersioning(sParams, next), next => s3.putBucketVersioning(sParams, next),
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), next => fakeMetadataRestore(bucketName, objectName, undefined, archive, next),
next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
objMDBefore = objMD; objMDBefore = objMD;
return next(err); return next(err);
@ -549,15 +551,15 @@ describe('MPU with x-scal-s3-version-id header', () => {
return next(err); return next(err);
}), }),
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
objMDBefore = objMD;
return next(err);
}),
next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
versionsBefore = res.Versions; versionsBefore = res.Versions;
return next(err); return next(err);
}), }),
next => fakeMetadataRestore(bucketName, objectName, vId, archive, next),
next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
objMDBefore = objMD;
return next(err);
}),
next => putMPUVersion(s3, bucketName, objectName, vId, next), next => putMPUVersion(s3, bucketName, objectName, vId, next),
next => getMetadata(bucketName, objectName, vId, (err, objMD) => { next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
objMDAfter = objMD; objMDAfter = objMD;
@ -602,7 +604,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
vId = res.VersionId; vId = res.VersionId;
return next(err); return next(err);
}), }),
next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), next => fakeMetadataRestore(bucketName, objectName, vId, archive, next),
next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
versionsBefore = res.Versions; versionsBefore = res.Versions;
return next(err); return next(err);
@ -661,7 +663,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
vId = res.VersionId; vId = res.VersionId;
return next(err); return next(err);
}), }),
next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), next => fakeMetadataRestore(bucketName, objectName, vId, archive, next),
next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
versionsBefore = res.Versions; versionsBefore = res.Versions;
return next(err); return next(err);
@ -709,11 +711,11 @@ describe('MPU with x-scal-s3-version-id header', () => {
async.series([ async.series([
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next),
next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
versionsBefore = res.Versions; versionsBefore = res.Versions;
return next(err); return next(err);
}), }),
next => fakeMetadataRestore(bucketName, objectName, undefined, archive, next),
next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
objMDBefore = objMD; objMDBefore = objMD;
return next(err); return next(err);
@ -797,7 +799,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
async.series([ async.series([
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => fakeMetadataArchive(bucketName, objectName, undefined, archiveCompleted, next), next => fakeMetadataRestore(bucketName, objectName, undefined, archiveCompleted, next),
next => putMPUVersion(s3, bucketName, objectName, '', err => { next => putMPUVersion(s3, bucketName, objectName, '', err => {
checkError(err, 'InvalidObjectState', 403); checkError(err, 'InvalidObjectState', 403);
return next(); return next();
@ -808,33 +810,14 @@ describe('MPU with x-scal-s3-version-id header', () => {
}); });
}); });
[ it('should update restore metadata', done => {
'non versioned',
'versioned',
'suspended'
].forEach(versioning => {
it(`should update restore metadata while keeping storage class (${versioning})`, done => {
const params = { Bucket: bucketName, Key: objectName }; const params = { Bucket: bucketName, Key: objectName };
let objMDBefore; let objMDBefore;
let objMDAfter; let objMDAfter;
async.series([ async.series([
next => {
if (versioning === 'versioned') {
return s3.putBucketVersioning({
Bucket: bucketName,
VersioningConfiguration: { Status: 'Enabled' }
}, next);
} else if (versioning === 'suspended') {
return s3.putBucketVersioning({
Bucket: bucketName,
VersioningConfiguration: { Status: 'Suspended' }
}, next);
}
return next();
},
next => s3.putObject(params, next), next => s3.putObject(params, next),
next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), next => fakeMetadataRestore(bucketName, objectName, undefined, archive, next),
next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
objMDBefore = objMD; objMDBefore = objMD;
return next(err); return next(err);
@ -845,22 +828,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
objMDAfter = objMD; objMDAfter = objMD;
return next(err); return next(err);
}), }),
next => s3.listObjects({ Bucket: bucketName }, (err, res) => { next => metadata.listObject(bucketName, mdListingParams, log, next),
assert.ifError(err);
assert.strictEqual(res.Contents.length, 1);
assert.strictEqual(res.Contents[0].StorageClass, 'location-dmf-v1');
return next();
}),
next => s3.headObject(params, (err, res) => {
assert.ifError(err);
assert.strictEqual(res.StorageClass, 'location-dmf-v1');
return next();
}),
next => s3.getObject(params, (err, res) => {
assert.ifError(err);
assert.strictEqual(res.StorageClass, 'location-dmf-v1');
return next();
}),
], err => { ], err => {
assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
@ -868,8 +836,7 @@ describe('MPU with x-scal-s3-version-id header', () => {
assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1');
assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo);
assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt, assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt, objMDBefore.archive.restoreRequestedAt);
objMDBefore.archive.restoreRequestedAt);
assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays, assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays,
objMDBefore.archive.restoreRequestedDays); objMDBefore.archive.restoreRequestedDays);
assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false);
@ -881,120 +848,4 @@ describe('MPU with x-scal-s3-version-id header', () => {
}); });
}); });
}); });
it('should "copy" all but non data-related metadata (data encryption, data size...)', done => {
const params = {
Bucket: bucketNameMD,
Key: objectName
};
const putParams = {
...params,
Metadata: {
'custom-user-md': 'custom-md',
},
WebsiteRedirectLocation: 'http://custom-redirect'
};
const aclParams = {
...params,
// email of user Bart defined in authdata.json
GrantFullControl: 'emailaddress=sampleaccount1@sampling.com',
};
const tagParams = {
...params,
Tagging: {
TagSet: [{
Key: 'tag1',
Value: 'value1'
}, {
Key: 'tag2',
Value: 'value2'
}]
}
};
const legalHoldParams = {
...params,
LegalHold: {
Status: 'ON'
},
};
const acl = {
'Canned': '',
'FULL_CONTROL': [
// canonicalID of user Bart
'79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be',
],
'WRITE_ACP': [],
'READ': [],
'READ_ACP': [],
};
const tags = { tag1: 'value1', tag2: 'value2' };
const replicationInfo = {
'status': 'COMPLETED',
'backends': [
{
'site': 'azure-normal',
'status': 'COMPLETED',
'dataStoreVersionId': '',
},
],
'content': [
'DATA',
'METADATA',
],
'destination': 'arn:aws:s3:::versioned',
'storageClass': 'azure-normal',
'role': 'arn:aws:iam::root:role/s3-replication-role',
'storageType': 'azure',
'dataStoreVersionId': '',
'isNFS': null,
};
async.series([
next => s3.putObject(putParams, next),
next => s3.putObjectAcl(aclParams, next),
next => s3.putObjectTagging(tagParams, next),
next => s3.putObjectLegalHold(legalHoldParams, next),
next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => {
if (err) {
return next(err);
}
/* eslint-disable no-param-reassign */
objMD.dataStoreName = 'location-dmf-v1';
objMD.archive = archive;
objMD.replicationInfo = replicationInfo;
// data related
objMD['content-length'] = 99;
objMD['content-type'] = 'testtype';
objMD['content-md5'] = 'testmd5';
objMD['content-encoding'] = 'testencoding';
objMD['x-amz-server-side-encryption'] = 'aws:kms';
/* eslint-enable no-param-reassign */
return metadata.putObjectMD(bucketNameMD, objectName, objMD, undefined, log, next);
}),
next => putMPUVersion(s3, bucketNameMD, objectName, '', next),
next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => {
if (err) {
return next(err);
}
assert.deepStrictEqual(objMD.acl, acl);
assert.deepStrictEqual(objMD.tags, tags);
assert.deepStrictEqual(objMD.replicationInfo, replicationInfo);
assert.deepStrictEqual(objMD.legalHold, true);
assert.strictEqual(objMD['x-amz-meta-custom-user-md'], 'custom-md');
assert.strictEqual(objMD['x-amz-website-redirect-location'], 'http://custom-redirect');
// make sure data-related metadata are not the same before and after
assert.notStrictEqual(objMD['x-amz-server-side-encryption'], 'aws:kms');
assert.notStrictEqual(objMD['content-length'], 99);
assert.notStrictEqual(objMD['content-md5'], 'testmd5');
assert.notStrictEqual(objMD['content-encoding'], 'testencoding');
assert.notStrictEqual(objMD['content-type'], 'testtype');
return next();
}),
// removing legal hold to be able to clean the bucket after the test
next => {
legalHoldParams.LegalHold.Status = 'OFF';
return s3.putObjectLegalHold(legalHoldParams, next);
},
], done);
});
});
}); });
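
The tests in this file drive multipart uploads through a putMPUVersion helper that targets a specific version via the x-scal-s3-version-id header. The helper itself is not shown in this diff; as an assumption-labeled sketch, a custom header like this can be attached to an aws-sdk v2 request through its 'build' hook (the function name, bucket and version values below are placeholders, not the repository's actual helper):

// Sketch only: one way to send the x-scal-s3-version-id header when initiating
// an MPU with aws-sdk v2. Illustrates the header usage the tests rely on; not
// the repository's putMPUVersion implementation.
const AWS = require('aws-sdk');

const s3 = new AWS.S3();

function initiateMPUForVersion(bucket, key, versionId, callback) {
    const request = s3.createMultipartUpload({ Bucket: bucket, Key: key });
    request.on('build', () => {
        // An empty string targets the current (or null) version, mirroring the
        // putMPUVersion(s3, bucket, key, '', ...) calls in the tests above.
        request.httpRequest.headers['x-scal-s3-version-id'] = versionId;
    });
    request.send((err, data) => callback(err, data && data.UploadId));
}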


@ -2,7 +2,6 @@ const assert = require('assert');
const withV4 = require('../support/withV4'); const withV4 = require('../support/withV4');
const BucketUtility = require('../../lib/utility/bucket-util'); const BucketUtility = require('../../lib/utility/bucket-util');
const changeObjectLock = require('../../../../utilities/objectLock-util'); const changeObjectLock = require('../../../../utilities/objectLock-util');
const { fakeMetadataTransition, fakeMetadataArchive } = require('../utils/init');
const { taggingTests } = require('../../lib/utility/tagging'); const { taggingTests } = require('../../lib/utility/tagging');
const genMaxSizeMetaHeaders const genMaxSizeMetaHeaders
@ -144,7 +143,6 @@ describe('Object Copy', () => {
s3.getObject({ Bucket: destBucketName, s3.getObject({ Bucket: destBucketName,
Key: destObjName }, (err, res) => { Key: destObjName }, (err, res) => {
checkNoError(err); checkNoError(err);
assert.strictEqual(res.StorageClass, undefined);
assert.strictEqual(res.Body.toString(), assert.strictEqual(res.Body.toString(),
content); content);
assert.deepStrictEqual(res.Metadata, assert.deepStrictEqual(res.Metadata,
@ -1236,62 +1234,6 @@ describe('Object Copy', () => {
done(); done();
}); });
}); });
it('should not copy a cold object', done => {
const archive = {
archiveInfo: {
archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322',
archiveVersion: 5577006791947779
},
};
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => {
assert.ifError(err);
s3.copyObject({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
}, err => {
assert.strictEqual(err.code, 'InvalidObjectState');
assert.strictEqual(err.statusCode, 403);
done();
});
});
});
it('should copy an object when it\'s transitioning to cold', done => {
fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => {
assert.ifError(err);
s3.copyObject({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
}, (err, res) => {
successCopyCheck(err, res, originalMetadata,
destBucketName, destObjName, done);
});
});
});
it('should copy restored object and reset storage class', done => {
const archiveCompleted = {
archiveInfo: {},
restoreRequestedAt: new Date(0),
restoreRequestedDays: 5,
restoreCompletedAt: new Date(10),
restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)),
};
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err => {
assert.ifError(err);
s3.copyObject({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
}, (err, res) => {
successCopyCheck(err, res, originalMetadata,
destBucketName, destObjName, done);
});
});
});
}); });
}); });
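
The removed Object Copy tests mirror the part-copy ones: copyObject from a cold source is rejected with InvalidObjectState (403), while copying a restored or transitioning source succeeds and the destination lands back on the default storage class. A hedged sketch of that post-copy check against the plain aws-sdk v2 API (bucket and key names are placeholders):

// Sketch only: copy an object and confirm the destination was written to the
// default storage class, which S3 reports by omitting StorageClass entirely.
const AWS = require('aws-sdk');

const s3 = new AWS.S3();

function copyAndCheckStorageClass(srcBucket, srcKey, destBucket, destKey, done) {
    s3.copyObject({
        Bucket: destBucket,
        Key: destKey,
        CopySource: `${srcBucket}/${srcKey}`,
    }, err => {
        if (err) {
            // A cold, not-yet-restored source is expected to surface here as
            // InvalidObjectState with HTTP status 403.
            return done(err);
        }
        return s3.headObject({ Bucket: destBucket, Key: destKey }, (err, head) => {
            if (err) {
                return done(err);
            }
            // undefined means STANDARD, i.e. the copy did not inherit the
            // source's cold storage class.
            return done(null, head.StorageClass === undefined);
        });
    });
}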

Some files were not shown because too many files have changed in this diff.