Compare commits

..

2 Commits

Author SHA1 Message Date
Dasha Gurova 012a75f67f better wording 2019-11-11 12:15:14 +01:00
Dasha Gurova 40a65ee691 docs: add forum links 2019-11-11 12:04:41 +01:00
504 changed files with 12915 additions and 49040 deletions

View File

@@ -1,8 +1,7 @@
node_modules
localData/*
localMetadata/*
# Keep the .git/HEAD file in order to properly report version
.git/objects
.git
.github
.tox
coverage

View File

@@ -1,10 +1,6 @@
{
"extends": "scality",
"plugins": [
"mocha"
],
"rules": {
"import/extensions": "off",
"lines-around-directive": "off",
"no-underscore-dangle": "off",
"indent": "off",
@@ -45,10 +41,6 @@
"no-restricted-properties": "off",
"new-parens": "off",
"no-multi-spaces": "off",
"quote-props": "off",
"mocha/no-exclusive-tests": "error",
},
"parserOptions": {
"ecmaVersion": 2020
"quote-props": "off"
}
}

View File

@@ -1,43 +0,0 @@
---
name: "Setup CI environment"
description: "Setup Cloudserver CI environment"
runs:
using: composite
steps:
- name: Setup etc/hosts
shell: bash
run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
- name: Setup Credentials
shell: bash
run: bash .github/scripts/credentials.bash
- name: Setup job artifacts directory
shell: bash
run: |-
set -exu;
mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: 'yarn'
- name: install dependencies
shell: bash
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- uses: actions/setup-python@v4
with:
python-version: 3.9
- name: Setup python2 test environment
shell: bash
run: |
sudo apt-get install -y libdigest-hmac-perl
pip install 's3cmd==2.3.0'
- name: fix sproxyd.conf permissions
shell: bash
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
- name: ensure fuse kernel module is loaded (for sproxyd)
shell: bash
run: sudo modprobe fuse

View File

@@ -1,10 +0,0 @@
---
version: 2
updates:
- package-ecosystem: npm
directory: "/"
schedule:
interval: daily
time: "13:00"
open-pull-requests-limit: 10
target-branch: "development/7.4"

View File

@@ -1,36 +0,0 @@
azurebackend_AZURE_STORAGE_ACCESS_KEY
azurebackend_AZURE_STORAGE_ACCOUNT_NAME
azurebackend_AZURE_STORAGE_ENDPOINT
azurebackend2_AZURE_STORAGE_ACCESS_KEY
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
azurebackend2_AZURE_STORAGE_ENDPOINT
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
azurebackendmismatch_AZURE_STORAGE_ENDPOINT
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
azuretest_AZURE_BLOB_ENDPOINT
b2backend_B2_ACCOUNT_ID
b2backend_B2_STORAGE_ACCESS_KEY
GOOGLE_SERVICE_EMAIL
GOOGLE_SERVICE_KEY
AWS_S3_BACKEND_ACCESS_KEY
AWS_S3_BACKEND_SECRET_KEY
AWS_S3_BACKEND_ACCESS_KEY_2
AWS_S3_BACKEND_SECRET_KEY_2
AWS_GCP_BACKEND_ACCESS_KEY
AWS_GCP_BACKEND_SECRET_KEY
AWS_GCP_BACKEND_ACCESS_KEY_2
AWS_GCP_BACKEND_SECRET_KEY_2
b2backend_B2_STORAGE_ENDPOINT
gcpbackend2_GCP_SERVICE_EMAIL
gcpbackend2_GCP_SERVICE_KEY
gcpbackend2_GCP_SERVICE_KEYFILE
gcpbackend_GCP_SERVICE_EMAIL
gcpbackend_GCP_SERVICE_KEY
gcpbackendmismatch_GCP_SERVICE_EMAIL
gcpbackendmismatch_GCP_SERVICE_KEY
gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE

View File

@@ -1,92 +0,0 @@
services:
cloudserver:
image: ${CLOUDSERVER_IMAGE}
command: sh -c "yarn start > /artifacts/s3.log"
network_mode: "host"
volumes:
- /tmp/ssl:/ssl
- /tmp/ssl-kmip:/ssl-kmip
- ${HOME}/.aws/credentials:/root/.aws/credentials
- /tmp/artifacts/${JOB_NAME}:/artifacts
environment:
- CI=true
- ENABLE_LOCAL_CACHE=true
- REDIS_HOST=0.0.0.0
- REDIS_PORT=6379
- REPORT_TOKEN=report-token-1
- REMOTE_MANAGEMENT_DISABLE=1
- HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
- DATA_HOST=0.0.0.0
- METADATA_HOST=0.0.0.0
- S3BACKEND
- S3DATA
- S3METADATA
- MPU_TESTING
- S3VAULT
- S3_LOCATION_FILE
- ENABLE_UTAPI_V2
- BUCKET_DENY_FILTER
- S3KMS
- S3KMIP_PORT
- S3KMIP_HOSTS
- S3KMIP_COMPOUND_CREATE
- S3KMIP_BUCKET_ATTRIBUTE_NAME
- S3KMIP_PIPELINE_DEPTH
- S3KMIP_KEY
- S3KMIP_CERT
- S3KMIP_CA
- MONGODB_HOSTS=0.0.0.0:27018
- MONGODB_RS=rs0
- DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file:
- creds.env
depends_on:
- redis
extra_hosts:
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
- "pykmip.local:127.0.0.1"
redis:
image: redis:alpine
network_mode: "host"
squid:
network_mode: "host"
profiles: ['ci-proxy']
image: scality/ci-squid
command: >-
sh -c 'mkdir -p /ssl &&
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
-subj "/C=US/ST=Country/L=City/O=Organization/CN=scality-proxy" \
-keyout /ssl/myca.pem -out /ssl/myca.pem &&
cp /ssl/myca.pem /ssl/CA.pem &&
squid -f /etc/squid/squid.conf -N -z &&
squid -f /etc/squid/squid.conf -NYCd 1'
volumes:
- /tmp/ssl:/ssl
pykmip:
network_mode: "host"
profiles: ['pykmip']
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts
mongo:
network_mode: "host"
profiles: ['mongo', 'ceph']
image: ${MONGODB_IMAGE}
ceph:
network_mode: "host"
profiles: ['ceph']
image: ghcr.io/scality/cloudserver/ci-ceph
sproxyd:
network_mode: "host"
profiles: ['sproxyd']
image: sproxyd-standalone
build: ./sproxyd
user: 0:0
privileged: yes

View File

@@ -1,28 +0,0 @@
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \
CONF_DIR=/conf \
DATA_DIR=/data
# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
mkdir /logs; \
adduser --uid 1000 --disabled-password --gecos "" --quiet --shell /bin/bash scality
# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
chown -R ${USER} ${CONF_DIR} && \
chown -R ${USER} ${DATA_DIR}
# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js
EXPOSE 27017/tcp
EXPOSE 27018
# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]

View File

@@ -1,4 +0,0 @@
rs.initiate({
_id: "rs0",
members: [{ _id: 0, host: "127.0.0.1:27018" }]
});

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -exo pipefail
# Initialize the replica set in the background; the sleep gives mongod
# time to start listening on port 27018 before rs.initiate() runs.
init_RS() {
sleep 5
mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &
# Run mongod in the foreground as the container's main process.
mongod --bind_ip_all --config=/conf/mongod.conf

View File

@@ -1,15 +0,0 @@
storage:
journal:
enabled: true
engine: wiredTiger
dbPath: "/data/db"
processManagement:
fork: false
net:
port: 27018
bindIp: 0.0.0.0
replication:
replSetName: "rs0"
enableMajorityReadConcern: true
security:
authorization: disabled

View File

@@ -1,3 +0,0 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@@ -1,26 +0,0 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

View File

@@ -1,88 +0,0 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}

View File

@@ -1,12 +0,0 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
}
}

View File

@@ -1,43 +0,0 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root

View File

@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC6zCCAdOgAwIBAgIUPIpMY95b4HjKAk+FyydZApAEFskwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJDEQMA4GA1UECgwHU2NhbGl0eTEQ
MA4GA1UEAwwHUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AKqLFEsWtfRTxnoZrQe63tq+rQnVgninHMahRmXkzyjK/uNhoKnIh8bXdTC/eCZ6
FBROqBYNL0TJb0HDv1FzcZS1UCUldRqTlvr6wZb0pfrp40fvztsqQgAh1t/Blg5i
Zv5+ESSlNs5rWbFTxtq+FbMW/ERYTrVfnMkBiLg4Gq0HwID9a5jvJatzrrno2s1m
OfZCT3HaE3tMZ6vvYuoamvLNdvdH+9KeTmBCursfNejt0rSGjIqfi6DvFJSayydQ
is5DMSTbCLGdKQmA85VfEQmlQ8v0232WDSd6gVfp2tthDEDHnCbgWkEd1vsTyS85
ubdt5v4CWGOWV+mu3bf8xM0CAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAQEARTjc2zV/ol1/LsSzZy6l1R0uFBmR2KumH+Se1Yq2vKpY
Dv6xmrvmjOUr5RBO77nRhIgdcQA+LyAg8ii2Dfzc8r1RTD+j1bYOxESXctBOBcXM
Chy6FEBydR6m7S8qQyL+caJWO1WZWp2tapcm6sUG1oRVznWtK1/SHKIzOBwsmJ07
79KsCJ6wf9tzD05EDTI2QhAObE9/thy+zc8l8cmv9A6p3jKkx9rwXUttSUqTn0CW
w45bgKg6+DDcrhZ+MATbzuTfhuA4NFUTzK7KeX9sMuOV03Zs8SA3VhAOXmu063M3
0f9X7P/0RmGTTp7GGCqEINcZdbLh3k7CpFb2Ox998Q==
-----END CERTIFICATE-----

View File

@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC2zCCAcOgAwIBAgIUIlE8UAkqQ+6mbJDtrt9kkmi8aJYwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowKTEQMA4GA1UECgwHU2NhbGl0eTEV
MBMGA1UEAwwMcHlrbWlwLmxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAtxr7pq/lnzVeZz4z52Yc3DeaPqjNfRSyW5cPUlT7ABXFb7+tja7K2C7u
DYVK+Q+2yJCQwYJY47aKJB++ewam9t2V8Xy0Z8S+0I2ImCwuyeihaD/f6uJZRzms
ycdECH22BA6tCPlQLnlboRiZzI6rcIvXAbUMvLvFm3nyYIs9qidExRnfyMjISknM
V+83LT5QW4IcHgKYqzdz2ZmOnk+f4wmMmitcivTdIZCL8Z0cxr7BJlOh5JZ/V5uj
WUXeNa+ttW0RKKBlg9T+wj0JvwoJBPZTmsMAy3tI9tjLg3DwGYKsflbFeU2tebXI
gncGFZ/dFxj331GGtq3kz1PzAUYf2wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB1
8HgJ0fu6/pCrDxAm90eESFjmaTFyTN8q00zhq4Cb3zAT9KMWzAygkZ9n4ZFgELPo
7kBE2H6RcDdoBmjVYd8HnBloDdYzYbncKgt5YBvxRaMSF4/l65BM8wjatyXErqnH
QLLTRe5AuF0/F0KtPeDQ2JFVu8dZ35W3fyKGPRsEdVOSCTHROmqpGhZCpscyUP4W
Hb0dBTESQ9mQHw14OCaaahARd0X5WdcA/E+m0fpGqj1rQCXS+PrRcSLe1E1hqPlK
q/hXSXD5nybwipktELvJCbB7l4HmJr2pIpldeR5+ef68Cs8hqs6DRlsJX9sK2ng+
TFe5v6SCarqZ9kFvr6Yp
-----END CERTIFICATE-----

View File

@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC8zCCAdugAwIBAgIUBs6nVXQXhrFbClub3aSLg72/DiYwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJTEQMA4GA1UECgwHU2NhbGl0eTER
MA8GA1UEAwwISm9obiBEb2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQC6neSYoBoWh/i2mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4L
jLLRsPGt9KkJlUhHGWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jt
UDnQUwshj+hRFe0iKAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsH
PVju9yZBYzYhwAMyYFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFip
yR2Fh3WGSwWh45HgMT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+p
SMLm0T4iNxedQWBtDM7ts4EjAgMBAAGjGjAYMBYGA1UdJQEB/wQMMAoGCCsGAQUF
BwMCMA0GCSqGSIb3DQEBCwUAA4IBAQCMi9HEhZc5jHJMj18Wq00fZy4O9XtjCe0J
nntW9tzi3rTQcQWKA7i9uVdDoCg+gMFVxWMvV7luFEUc/VYV1v8hFfbIFygzFsZY
xwv4GQaIwbsgzD+oziia53w0FSuNL0uE0MeKvrt3yzHxCxylHyl+TQd/UdAtAo+k
RL1sI0mBZx5qo6d1J7ZMCxzAGaT7KjnJvziFr/UbfSNnwDsxsUwGaI1ZeAxJN8DI
zTrg3f3lrrmHcauEgKnuQwIqaMZR6veG6RkjtcYSlJYID1irkE6njs7+wivOAkzt
fBt/0PD76FmAI0VArgU/zDB8dGyYzrq39W749LuEfm1TPmlnUtDr
-----END CERTIFICATE-----

View File

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6neSYoBoWh/i2
mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4LjLLRsPGt9KkJlUhH
GWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jtUDnQUwshj+hRFe0i
KAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsHPVju9yZBYzYhwAMy
YFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFipyR2Fh3WGSwWh45Hg
MT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+pSMLm0T4iNxedQWBt
DM7ts4EjAgMBAAECggEANNXdUeUKXdSzcycPV/ea/c+0XFcy8e9B46lfQTpTqQOx
xD8GbWD1L/gdk6baJgT43+ukEWdSsJbmdtLXti29Ta8OF2VtIDhIbCVtvs3dq3zt
vrvugsiVDr8nkP306qOrKrNIVIFE+igmEmSaXsu/h/33ladxeeV9/s2DC7NOOjWN
Mu4KYr5BBbu3qAavdzbrcz7Sch+GzsYqK/pBounCTQu3o9E4TSUcmcsasWmtHN3u
e6G2UjObdzEW7J0wWvvtJ0wHQUVRueHfqwqKf0dymcZ3xOlx3ZPhKPz5n4F1UGUt
RQaNazqs5SzZpUgDuPw4k8h/aCHK21Yexw/l4+O9KQKBgQD1WZSRK54zFoExBQgt
OZSBNZW3Ibti5lSiF0M0g+66yNZSWfPuABEH0tu5CXopdPDXo4kW8NLGEqQStWTX
RGK0DE9buEL3eebOfjIdS2IZ3t3dX3lMypplVCj4HzAgITlweSH1LLTyAtaaOpwa
jksqfcn5Zw+XGkyc6GBBVaZetQKBgQDCt6Xf/g26+zjvHscjdzsfBhnYvTOrr6+F
xqFFxOEOocGr+mL7UTAs+a9m/6lOWhlagk+m+TIZNL8o3IN7KFTYxPYPxTiewgVE
rIm3JBmPxRiPn01P3HrtjaqfzsXF30j3ele7ix5OxieZq4vsW7ZXP3GZE34a08Ov
12sE1DlvdwKBgQDzpYQOLhyqazzcqzyVfMrnDYmiFVN7QXTmiudobWRUBUIhAcdl
oJdJB7K/rJOuO704x+RJ7dnCbZyWH6EGzZifaGIemXuXO21jvpqR0NyZCGOXhUp2
YfS1j8AntwEZxyS9du2sBjui4gKvomiHTquChOxgSmKHEcznPTTpbN8MyQKBgF5F
LVCZniolkLXsL7tS8VOez4qoZ0i6wP7CYLf3joJX+/z4N023S9yqcaorItvlMRsp
tciAIyoi6F2vDRTmPNXJ3dtav4PVKVnLMs1w89MwOCjoljSQ6Q7zpGTEZenbpWbz
W2BYBS9cLjXu4MpoyInLFINo9YeleLs8TvrCiKAXAoGBANsduqLnlUW/f5zDb5Fe
SB51+KhBjsVIeYmU+8xtur9Z7IxZXK28wpoEsm7LmX7Va5dERjI+tItBiJ5+Unu1
Xs2ljDg35ARKHs0dWBJGpbnZg4dbT6xpIL4YMPXm1Zu++PgRpxPIMn646xqd8GlH
bavm6Km/fXNG58xus+EeLpV5
-----END PRIVATE KEY-----

View File

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3Gvumr+WfNV5n
PjPnZhzcN5o+qM19FLJblw9SVPsAFcVvv62NrsrYLu4NhUr5D7bIkJDBgljjtook
H757Bqb23ZXxfLRnxL7QjYiYLC7J6KFoP9/q4llHOazJx0QIfbYEDq0I+VAueVuh
GJnMjqtwi9cBtQy8u8WbefJgiz2qJ0TFGd/IyMhKScxX7zctPlBbghweApirN3PZ
mY6eT5/jCYyaK1yK9N0hkIvxnRzGvsEmU6Hkln9Xm6NZRd41r621bREooGWD1P7C
PQm/CgkE9lOawwDLe0j22MuDcPAZgqx+VsV5Ta15tciCdwYVn90XGPffUYa2reTP
U/MBRh/bAgMBAAECggEABCvcMcbuDztzBB0Zp5re63Fk1SqZS9Et4wJE+hYvhaf5
UHtoY8LoohYnnC0+MQBXpKgOdCoZBk8BRKNofnr/UL5pjQ/POFH2GuAujXDsO/NN
wgc6fapcaE/7DLm6ZgsfG2aOMJclaXmgScI6trtFUpIM+t/6A06vyMP1bpeddwPW
Fqu7NvpDiEcTRUGd+z1JooYgUhGgC7peYUx5+9zqFrwoDBKxnUOnz3BkDsXBy3qm
65Vu0BSjuJzf6vVMpNGUHY6JXjopVNWku+JAX0wD+iikOd5sziNVdIj1fnZ+IHIf
7G5h5owHpvSGzJFQ18/g5VHtJdCm+4WQSnbSJRsCAQKBgQDu4IH8yspyeH44fhoS
PAp/OtILqSP+Da0zAp2LbhrOgyzyuSTdEAYyptqjqHS6QkB1Bu1H44FS0BYUxRXc
iu2e9AndiLVCGngsE7TpA/ZVLN1B0LEZEHjM6p4d6zZM6iveKVnPAOkTWTBAgzCt
b31nj4jL8PdlPKQil1AMrOlRAQKBgQDEOwshzIdr2Iy6B/n4CuBViEtwnbAd5f/c
atA9bcfF8kCahokJsI4eCCLgBwDZpYKD+v0AwOBlacF6t6TX+vdlJsi5EP7uxZ22
ILsuWqVm/0H77PACuckc5/qLZoGGC81l0DhnpoeMEb6r/TKOo5xAK1gxdlwNNrq+
nP1zdZnU2wKBgBAS92xFUR4m0YeHpMV5WNN658t1FEDyNqdqE6PgQtmGpi2nG73s
aB5cb/X3TfOCpce6MZlWy8sAyZuYL4Jprte1YDySCHBsS43bvZ64b4kHvdPB8UjY
fOh9GSq2Oy8tysnmSm7NhuGQbNjKeyoQiIXBeNkQW/VqATl6qR5RPFoBAoGACNqV
JQBCd/Y8W0Ry3eM3vgQ5SyqCQMcY5UwYez0Rz3efvJknY72InAhH8o2+VxOlsOjJ
M5iAR3MfHLdeg7Q6J2E5m0gOCJ34ALi3WV8TqXMI+iH1rlnNnjVFU7bbTz4HFXnw
oZSc9w/x53a0KkVtjmOmRg0OGDaI9ILG2MfMmhMCgYB8ZqJtX8qZ2TqKU3XdLZ4z
T2N7xMFuKohWP420r5jKm3Xw85IC+y1SUTB9XGcL79r2eJzmzmdKQ3A3sf3oyUH3
RdYWxtKcZ5PAE8hVRtn1ETZqUgxASGOUn/6w0npkYSOXPU5bc0W6RSLkjES0i+c3
fv3OMNI8qpmQhEjpHHQS1g==
-----END PRIVATE KEY-----

View File

@@ -1,35 +0,0 @@
name: Test alerts
on:
push:
branches-ignore:
- 'development/**'
- 'q/*/**'
jobs:
run-alert-tests:
runs-on: ubuntu-latest
strategy:
matrix:
tests:
- name: 1 minute interval tests
file: monitoring/alerts.test.yaml
- name: 10 seconds interval tests
file: monitoring/alerts.10s.test.yaml
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.3
with:
alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }}
alert_inputs: |
namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,25 +0,0 @@
---
name: codeQL
on:
push:
branches: [w/**, q/*]
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
workflow_dispatch:
jobs:
analyze:
name: Static analysis with CodeQL
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v3

View File

@@ -1,16 +0,0 @@
---
name: dependency review
on:
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4

View File

@@ -1,80 +0,0 @@
---
name: release
run-name: release ${{ inputs.tag }}
on:
workflow_dispatch:
inputs:
tag:
description: 'Tag to be released'
required: true
env:
PROJECT_NAME: ${{ github.event.repository.name }}
jobs:
build-federation-image:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
uses: docker/build-push-action@v5
with:
push: true
context: .
file: images/svc-base/Dockerfile
tags: |
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Push dashboards into the production namespace
run: |
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
dashboard.json:application/grafana-dashboard+json \
alerts.yaml:application/prometheus-alerts+yaml
working-directory: monitoring
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create Release
uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ github.token }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}
generate_release_notes: true
target_commitish: ${{ github.sha }}

View File

@@ -1,533 +0,0 @@
---
name: tests
on:
workflow_dispatch:
push:
branches-ignore:
- 'development/**'
- 'q/*/**'
env:
# Secrets
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurebackend_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
b2backend_B2_STORAGE_ACCESS_KEY: >-
${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
gcpbackendmismatch_GCP_SERVICE_KEY: >-
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
# Configs
ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
linting-coverage:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- name: Install python deps
run: pip install flake8
- name: Lint Javascript
run: yarn run --silent lint -- --max-warnings 0
- name: Lint Markdown
run: yarn run --silent lint_md
- name: Lint python
run: flake8 $(git ls-files "*.py")
- name: Lint Yaml
run: yamllint -c yamllint.yml $(git ls-files "*.yml")
- name: Unit Coverage
run: |
set -ex
mkdir -p $CIRCLE_TEST_REPORTS/unit
yarn test
yarn run test_legacy_location
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
CIRCLE_TEST_REPORTS: /tmp
CIRCLE_ARTIFACTS: /tmp
CI_REPORTS: /tmp
- name: Unit Coverage logs
run: find /tmp/unit -exec cat {} \;
- name: preparing junit files for upload
run: |
mkdir -p artifacts/junit
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always()
- name: Upload files to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: artifacts
if: always()
build:
runs-on: ubuntu-20.04
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push cloudserver image
uses: docker/build-push-action@v5
with:
push: true
context: .
provenance: false
tags: |
ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
uses: docker/build-push-action@v5
with:
push: true
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend:
runs-on: ubuntu-latest
needs: build
env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile sproxyd up -d
working-directory: .github/docker
- name: Run multiple backend test
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
mongo-v0-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
mongo-v1-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
file-ft-tests:
strategy:
matrix:
include:
- job-name: file-ft-tests
name: ${{ matrix.job-name }}
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: file
S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
shell: bash
run: |
set -exu
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
- name: Setup CI services
run: docker compose up -d
working-directory: .github/docker
- name: Run file ft tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
utapi-v2-tests:
runs-on: ubuntu-latest
needs: build
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose up -d
working-directory: .github/docker
- name: Run file utapi v2 tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
kmip-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: file
S3VAULT: mem
MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy KMIP certs
run: cp -r ./certs /tmp/ssl-kmip
working-directory: .github/pykmip
- name: Setup CI services
run: docker compose --profile pykmip up -d
working-directory: .github/docker
- name: Run file KMIP tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
ceph-backend-test:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
S3DATA: multiple
S3KMS: file
CI_CEPH: 'true'
MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1
with:
ruby-version: '2.5.9'
- name: Install Ruby dependencies
run: |
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
- name: Install Java dependencies
run: |
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
- name: Setup CI services
run: docker compose --profile ceph up -d
working-directory: .github/docker
env:
S3METADATA: mongodb
- name: Run Ceph multiple backend tests
run: |-
set -ex -o pipefail;
bash .github/ceph/wait_for_ceph.sh
bash wait_for_local_port.bash 27018 40
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
S3METADATA: mem
- name: Run Java tests
run: |-
set -ex -o pipefail;
mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
working-directory: tests/functional/jaws
- name: Run Ruby tests
run: |-
set -ex -o pipefail;
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
working-directory: tests/functional/fog
- name: Run Javascript AWS SDK tests
run: |-
set -ex -o pipefail;
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
S3BACKEND: file
S3VAULT: mem
S3METADATA: mongodb
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()

View File

@@ -1,60 +1,35 @@
ARG NODE_VERSION=16.20-bullseye-slim
FROM node:${NODE_VERSION} as builder
WORKDIR /usr/src/app
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
curl \
git \
gnupg2 \
jq \
python3 \
ssh \
wget \
libffi-dev \
zlib1g-dev \
&& apt-get clean \
&& mkdir -p /root/ssh \
&& ssh-keyscan -H github.com > /root/ssh/known_hosts
ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
################################################################################
FROM node:${NODE_VERSION}
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
&& rm -rf /var/lib/apt/lists/*
FROM node:10-slim
MAINTAINER Giorgio Regni <gr@scality.com>
ENV NO_PROXY localhost,127.0.0.1
ENV no_proxy localhost,127.0.0.1
EXPOSE 8000
EXPOSE 8002
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
tini \
&& rm -rf /var/lib/apt/lists/*
COPY ./package.json /usr/src/app/
COPY ./yarn.lock /usr/src/app/
WORKDIR /usr/src/app
# Keep the .git directory in order to properly report version
COPY . /usr/src/app
COPY --from=builder /usr/src/app/node_modules ./node_modules/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
RUN apt-get update \
&& apt-get install -y jq python git build-essential ssh --no-install-recommends yarn \
&& mkdir -p /root/ssh \
&& ssh-keyscan -H github.com > /root/ssh/known_hosts \
&& yarn cache clean \
&& yarn install --frozen-lockfile --production --ignore-optional \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-*
COPY . /usr/src/app
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]

View File

@@ -1,7 +1,6 @@
# S3 Healthcheck
Scality S3 exposes a healthcheck route `/live` on the port used
for the metrics (defaults to port 8002) which returns a
Scality S3 exposes a healthcheck route `/_/healthcheck` which returns a
response with HTTP code
- 200 OK
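A quick way to probe the route from the command line (a sketch assuming the default ports; use whichever variant matches the side of the diff you are running):

```shell
# Liveness probe; expect HTTP 200 OK
curl -i http://localhost:8002/live            # metrics-port variant
curl -i http://localhost:8000/_/healthcheck   # main-port variant
```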

README.md
View File

@@ -1,7 +1,10 @@
# Zenko CloudServer with Vitastor Backend
# Zenko CloudServer
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Twitter Follow][badgetwitter]](https://twitter.com/zenko)
## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@@ -11,71 +14,129 @@ Scality's Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
backend support.
CloudServer is useful for Developers, either to run as part of a
continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage enabled
applications on the go.
## Quick Start with Vitastor
**[Full CloudServer Documentation](http://s3-server.readthedocs.io/en/latest/)**
The Vitastor backend is experimental, but you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs);
it works too 😊.
For questions, concerns, and suggestions about CloudServer, please visit the **[Zenko forum](https://forum.zenko.io/)**
Installation instructions:
Learn more at **[www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)**
### Install Vitastor
## Contributing
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
### Install Zenko with Vitastor Backend
## Docker
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit=dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build the Vitastor node.js binding by running `npm install` in the `node-binding` subdirectory of the Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (the Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
[Zenko CloudServer image on DockerHub](https://hub.docker.com/r/zenko/cloudserver/)
[Running Cloudserver with Docker](https://s3-server.readthedocs.io/en/latest/DOCKER.html)
### Install and Configure MongoDB
## Installation
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
### Dependencies
### Setup Zenko
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x.
Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys;
adjust them if you want to. Scality seems to use a separate auth service, "Scality Vault", for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
in this file.
### Clone source code
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc.)
instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer.
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```
```shell
git clone https://github.com/scality/S3.git
```
```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```
### Install js dependencies
Go to the ./S3 folder,
```shell
yarn install --frozen-lockfile
```
# Author & License
If you get an error regarding installation of the diskUsage module,
please install g++.
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
If you get an error regarding level-down bindings, try clearing your yarn cache:
```shell
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
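For instance, a PUT that pins an object to a specific location could look like this with the AWS CLI (a sketch: the endpoint, bucket, key, and constraint name are placeholders, and `--metadata` adds the `x-amz-meta-` prefix automatically):

```shell
aws s3api put-object \
  --endpoint-url http://localhost:8000 \
  --bucket testbucket \
  --key myobject \
  --body ./myfile \
  --metadata scal-location-constraint=myLocationConstraint
```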
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae

View File

@@ -1,2 +1,2 @@
---
theme: jekyll-theme-modernist
theme: jekyll-theme-minimal

View File

@@ -13,26 +13,20 @@ function _performSearch(host,
port,
bucketName,
query,
listVersions,
accessKey,
secretKey,
sessionToken,
verbose, ssl) {
const escapedSearch = encodeURIComponent(query);
const options = {
host,
port,
method: 'GET',
path: `/${bucketName}/?search=${escapedSearch}${listVersions ? '&&versions' : ''}`,
path: `/${bucketName}/?search=${escapedSearch}`,
headers: {
'Content-Length': 0,
},
rejectUnauthorized: false,
versions: '',
};
if (sessionToken) {
options.headers['x-amz-security-token'] = sessionToken;
}
const transport = ssl ? https : http;
const request = transport.request(options, response => {
if (verbose) {
@@ -61,9 +55,9 @@ function _performSearch(host,
// generateV4Headers expects request object with path that does not
// include query
request.path = `/${bucketName}`;
const requestData = listVersions ? { search: query, versions: '' } : { search: query };
auth.client.generateV4Headers(request, requestData, accessKey, secretKey, 's3');
request.path = `/${bucketName}?search=${escapedSearch}${listVersions ? '&&versions' : ''}`;
auth.client.generateV4Headers(request, { search: query },
accessKey, secretKey, 's3');
request.path = `/${bucketName}?search=${escapedSearch}`;
if (verbose) {
logger.info('request headers', { headers: request._headers });
}
@@ -82,17 +76,15 @@ function searchBucket() {
.version('0.0.1')
.option('-a, --access-key <accessKey>', 'Access key id')
.option('-k, --secret-key <secretKey>', 'Secret access key')
.option('-t, --session-token <sessionToken>', 'Session token')
.option('-b, --bucket <bucket>', 'Name of the bucket')
.option('-q, --query <query>', 'Search query')
.option('-h, --host <host>', 'Host of the server')
.option('-p, --port <port>', 'Port of the server')
.option('-s', '--ssl', 'Enable ssl')
.option('-l, --list-versions', 'List all versions of the objects that meet the search query, ' +
'otherwise only list the latest version')
.option('-v, --verbose')
.parse(process.argv);
const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } =
const { host, port, accessKey, secretKey, bucket, query, verbose, ssl } =
commander;
if (!host || !port || !accessKey || !secretKey || !bucket || !query) {
@@ -101,7 +93,7 @@ function searchBucket() {
process.exit(1);
}
_performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose,
_performSearch(host, port, bucket, query, accessKey, secretKey, verbose,
ssl);
}
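For reference, a hypothetical invocation of the search tool above (the script path, credentials, and query syntax here are illustrative only, not taken from the diff):

```shell
node bin/search_bucket.js \
  -a accessKey1 -k verySecretKey1 \
  -b testbucket \
  -q 'userMd.`x-amz-meta-color`="blue"' \
  -h localhost -p 8000
```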

View File

@@ -1,10 +1,7 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
@@ -102,14 +99,6 @@
"readPreference": "primary",
"database": "metadata"
},
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
@@ -132,12 +121,5 @@
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@@ -1,71 +0,0 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@@ -86,9 +86,6 @@ const constants = {
// In testing, AWS seems to allow up to 88 more bytes, so we do the same.
maximumMetaHeadersSize: 2136,
// Maximum HTTP headers size allowed
maxHttpHeadersSize: 14122,
// hex digest of sha256 hash of empty string:
emptyStringHash: crypto.createHash('sha256')
.update('', 'binary').digest('hex'),
@@ -98,26 +95,33 @@
unsupportedQueries: [
'accelerate',
'analytics',
'encryption',
'inventory',
'legal-hold',
'logging',
'metrics',
'notification',
'object-lock',
'policyStatus',
'publicAccessBlock',
'requestPayment',
'restore',
'retention',
'torrent',
],
// Headers supported by AWS that we do not currently support.
unsupportedHeaders: [
'x-amz-server-side-encryption',
'x-amz-server-side-encryption-customer-algorithm',
'x-amz-server-side-encryption-aws-kms-key-id',
'x-amz-server-side-encryption-context',
'x-amz-server-side-encryption-customer-key',
'x-amz-server-side-encryption-customer-key-md5',
],
// user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-storage-class',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties
// (if any, otherwise an empty object)
@@ -130,7 +134,7 @@
},
},
/* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true },
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
@@ -176,8 +180,6 @@
'objectDeleteTagging',
'objectGetTagging',
'objectPutTagging',
'objectPutLegalHold',
'objectPutRetention',
],
// response header to be sent when there are invalid
// user metadata in the object's metadata
@@ -185,64 +187,8 @@
// Bucket specific queries supported by AWS that we do not currently support
// these queries may or may not be supported at object level
unsupportedBucketQueries: [
'tagging',
],
suppressedUtapiEventFields: [
'object',
'location',
'versionId',
],
allowedUtapiEventFilterFields: [
'operationId',
'location',
'account',
'user',
'bucket',
],
arrayOfAllowed: [
'objectPutTagging',
'objectPutLegalHold',
'objectPutRetention',
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'],
lifecycleListing: {
CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',
ORPHAN_DM_TYPE: 'orphan',
},
multiObjectDeleteConcurrency: 50,
maxScannedLifecycleListingEntries: 10000,
overheadField: [
'content-length',
'owner-id',
'versionId',
'isNull',
'isDeleteMarker',
],
unsupportedSignatureChecksums: new Set([
'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
]),
supportedSignatureChecksums: new Set([
'UNSIGNED-PAYLOAD',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
]),
ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
actionsToConsiderAsObjectPut: [
'initiateMultipartUpload',
'objectPutPart',
'completeMultipartUpload',
],
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
};
module.exports = constants;

View File

@@ -195,14 +195,6 @@ if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi
if [[ -n "$BUCKET_DENY_FILTER" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
fi
if [[ "$TESTING_MODE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
fi
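# Each recognized env var above appended one jq assignment; if any was
# set, apply the combined filter to config.json in place.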
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json


@@ -295,51 +295,3 @@ Should force path-style requests even though v3 advertises it does by default.
$client->createBucket(array(
'Bucket' => 'bucketphp',
));
Go
~~
`AWS Go SDK <https://github.com/aws/aws-sdk-go>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: go
package main
import (
"context"
"fmt"
"log"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
func main() {
os.Setenv("AWS_ACCESS_KEY_ID", "accessKey1")
os.Setenv("AWS_SECRET_ACCESS_KEY", "verySecretKey1")
endpoint := "http://localhost:8000"
timeout := time.Duration(10) * time.Second
sess := session.Must(session.NewSession())
// Create a context with a timeout that will abort the upload if it takes
// more than the passed in timeout.
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
svc := s3.New(sess, &aws.Config{
Region: aws.String(endpoints.UsEast1RegionID),
Endpoint: &endpoint,
})
out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
if err != nil {
log.Fatal(err)
} else {
fmt.Println(out)
}
}


@@ -4,6 +4,7 @@ Getting Started
.. figure:: ../res/scality-cloudserver-logo.png
:alt: Zenko CloudServer logo
If you have any questions or issues while running CloudServer, feel free to ask them on the **[Zenko forum](https://forum.zenko.io/)**!
Dependencies
------------
@@ -91,17 +92,6 @@ This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1. The secret key is verySecretKey1.
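As a quick sanity check, a client pointed at this endpoint can list buckets; the sketch below assumes the Node.js ``aws-sdk`` v2 package and is illustrative only, not part of the official docs:
.. code:: javascript
    const AWS = require('aws-sdk');
    // Default CloudServer credentials and endpoint; path-style
    // addressing is required for a local deployment.
    const s3 = new AWS.S3({
        endpoint: 'http://localhost:8000',
        accessKeyId: 'accessKey1',
        secretAccessKey: 'verySecretKey1',
        s3ForcePathStyle: true,
        signatureVersion: 'v4',
    });
    s3.listBuckets((err, data) => {
        if (err) {
            console.error(err);
        } else {
            console.log(data.Buckets);
        }
    });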
Run CloudServer with Vault User Management
------------------------------------------
.. code:: shell
export S3VAULT=vault
yarn start
Note: Vault is proprietary and must be accessed separately.
This starts a Zenko CloudServer using Vault for user management.
Run CloudServer for Continuous Integration Testing or in Production with Docker
-------------------------------------------------------------------------------


@@ -1,161 +0,0 @@
# Object Lock Feature Test Plan
## Feature Component Description
Implementing Object Lock will introduce six new APIs:
- putObjectLockConfiguration
- getObjectLockConfiguration
- putObjectRetention
- getObjectRetention
- putObjectLegalHold
- getObjectLegalHold
Along with these APIs, putBucket, putObject, deleteObject, and multiObjectDelete
will be affected. In Arsenal, both the BucketInfo and ObjectMD models will be
updated. Bucket policy and IAM policy permissions will be updated to include
the new API actions.
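As a rough illustration of the new API surface, a retention round-trip with the JavaScript `aws-sdk` (v2) could look like the sketch below; the endpoint and credentials are the local CloudServer defaults, and the bucket and key names are made up:

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

async function retentionRoundTrip() {
    // Object lock can only be enabled when the bucket is created;
    // this implicitly enables versioning as well.
    await s3.createBucket({
        Bucket: 'locked-bucket',
        ObjectLockEnabledForBucket: true,
    }).promise();
    await s3.putObject({ Bucket: 'locked-bucket', Key: 'doc', Body: 'data' }).promise();
    // Protect the object version for one day in GOVERNANCE mode.
    await s3.putObjectRetention({
        Bucket: 'locked-bucket',
        Key: 'doc',
        Retention: {
            Mode: 'GOVERNANCE',
            RetainUntilDate: new Date(Date.now() + 24 * 60 * 60 * 1000),
        },
    }).promise();
    // Read it back.
    const res = await s3.getObjectRetention({ Bucket: 'locked-bucket', Key: 'doc' }).promise();
    console.log(res.Retention);
}

retentionRoundTrip().catch(console.error);
```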
## Functional Tests
### putBucket tests
- passing option to enable object lock updates bucket metadata and enables
bucket versioning
### putBucketVersioning tests
- suspending versioning on bucket with object lock enabled returns error
### putObject tests
- putting retention configuration on object should be allowed
- putting invalid retention configuration returns error
### getObject tests
- getting object with retention information should include retention information
### copyObject tests
- copying object with retention information should include retention information
### initiateMultipartUpload tests
- mpu object initiated with retention information should include retention
information
### putObjectLockConfiguration tests
- putting configuration as non-bucket-owner user returns AccessDenied error
- disabling object lock on bucket created with object lock returns error
- enabling object lock on bucket created without object lock returns
InvalidBucketState error
- enabling object lock with token on bucket created without object lock succeeds
- putting valid object lock configuration when bucket does not have object
lock enabled returns error (InvalidRequest?)
- putting valid object lock configuration updates bucket metadata
- putting invalid object lock configuration returns error
- ObjectLockEnabled !== "Enabled"
- Rule object doesn't contain DefaultRetention key
- Mode !== "GOVERNANCE" or "COMPLIANCE"
- Days are not an integer
- Years are not an integer
### getObjectLockConfiguration tests
- getting configuration as non-bucket-owner user returns AccessDenied error
- getting configuration when none is set returns
ObjectLockConfigurationNotFoundError error
- getting configuration returns correct object lock configuration for bucket
### putObjectRetention
- putting retention as non-bucket-owner user returns AccessDenied error
- putting retention on object in bucket without object lock enabled returns
InvalidRequest error
- putting valid retention period updates object metadata
### getObjectRetention
- getting retention as non-bucket-owner user returns AccessDenied error
- getting retention when none is set returns NoSuchObjectLockConfiguration
error
- getting retention returns correct object retention period
### putObjectLegalHold
- putting legal hold as non-bucket-owner user returns AccessDenied error
- putting legal hold on object in bucket without object lock enabled returns
InvalidRequest error
- putting valid legal hold updates object metadata
### getObjectLegalHold
- getting legal hold as non-bucket-owner user returns AccessDenied error
- getting legal hold when none is set returns NoSuchObjectLockConfiguration
error
- getting legal hold returns correct object legal hold
## End to End Tests
### Scenarios
- Create bucket with object lock enabled. Put object. Put object lock
configuration. Put another object.
- Ensure object put before configuration does not have retention period set
- Ensure object put after configuration does have retention period set
- Create bucket without object lock. Put object. Enable object lock with token
and put object lock configuration. Put another object.
- Ensure object put before configuration does not have retention period set
- Ensure object put after configuration does have retention period set
- Create bucket with object lock enabled and put configuration with COMPLIANCE
mode. Put object.
- Ensure object cannot be deleted (returns AccessDenied error).
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled and put configuration with GOVERNANCE
mode. Put object.
- Ensure user without permission cannot delete object
- Ensure user without permission cannot overwrite object
- Ensure user with permission can delete object
- Ensure user with permission can overwrite object
- Ensure user with permission can lengthen retention period
- Ensure user with permission cannot shorten retention period
- Create bucket with object lock enabled and put configuration. Edit bucket
metadata so retention period is expired. Put object.
- Ensure object can be deleted.
- Ensure object can be overwritten.
- Create bucket with object lock enabled and put configuration. Edit bucket
metadata so retention period is expired. Put object. Put new retention
period on object.
- Ensure object cannot be deleted.
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled and put configuration. Put object.
Edit object metadata so retention period is past expiration.
- Ensure object can be deleted.
- Ensure object can be overwritten.
- Create bucket with object lock enabled and put configuration. Edit bucket
metadata so retention period is expired. Put object. Put legal hold
on object.
- Ensure object cannot be deleted.
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled and put configuration. Put object.
Check object retention. Change bucket object lock configuration.
- Ensure object retention period has not changed with bucket configuration.
- Create bucket with object lock enabled. Put object with legal hold.
- Ensure object cannot be deleted.
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled. Put object with legal hold. Remove
legal hold.
- Ensure object can be deleted.
- Ensure object can be overwritten.
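The final scenario above, sketched with the JavaScript `aws-sdk` (v2); endpoint, credentials, and names are illustrative, and the commented expectations are what the test should assert:

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

async function legalHoldScenario() {
    await s3.createBucket({
        Bucket: 'lh-bucket',
        ObjectLockEnabledForBucket: true,
    }).promise();
    // Object-lock buckets are versioned, so keep the created version id.
    const { VersionId } = await s3.putObject({
        Bucket: 'lh-bucket',
        Key: 'doc',
        Body: 'data',
        ObjectLockLegalHoldStatus: 'ON',
    }).promise();
    // Expected: deleting this version fails with AccessDenied while ON.
    // Release the hold, after which the same delete should succeed.
    await s3.putObjectLegalHold({
        Bucket: 'lh-bucket',
        Key: 'doc',
        LegalHold: { Status: 'OFF' },
    }).promise();
    await s3.deleteObject({ Bucket: 'lh-bucket', Key: 'doc', VersionId }).promise();
}

legalHoldScenario().catch(console.error);
```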


@@ -1,73 +0,0 @@
# Cloudserver Release Plan
## Docker Image Generation
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:
* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudserver-policies
With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
build chain and so on.
Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```
## Release Process
To release a production image:
* Create a PR to bump the package version
Update Cloudserver's `package.json` by bumping it to the relevant next
version in a new PR. For example, if the last released version was
`8.4.7`, the next version would be `8.4.8`.
```js
{
"name": "cloudserver",
"version": "8.4.8", <--- Here
[...]
}
```
* Review & merge the PR
* Create the release on GitHub
* Go to the Release tab (https://github.com/scality/cloudserver/releases);
* Click on the `Draft new release` button;
* In the `tag` field, type the name of the release (`8.4.8`), and confirm
to create the tag on publish;
* Click on the `Generate release notes` button to fill the fields;
* Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case);
* Click `Publish the release` to create the GitHub release and git tag
Notes:
* the Git tag will be created automatically.
* this should be done as soon as the PR is merged, so that the tag
is put on the "version bump" commit.
* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force)
* Branch Name: The one used for the tag earlier. In this example `development/8.4`
* Override Stage: 'release'
* Extra properties:
* name: `'tag'`, value: `[release version]`, in this example `'8.4.8'`
* Release the version on Jira
* Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
* Create a next version
* Name: `[next version]`, in this example `8.4.9`
* Click `...` and select `Release` on the recently released version (`8.4.8`)
* Fill in the field to move incomplete version to the next one

eve/get_product_version.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash
LOCAL_BRANCH=$(git branch | grep \* | cut -d ' ' -f2)
BRANCHES=(development q stabilization)
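# On a release-tracking branch, .git/HEAD ends in the version component
# (e.g. "ref: refs/heads/development/7.4" -> "7.4"), which is printed
# below; any other branch falls through to the 0.0.0 placeholder.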
for branch in ${BRANCHES[@]}; do
if echo "${LOCAL_BRANCH}\/" | grep -q ^${branch} ; then
cat .git/HEAD | sed 's/.*\///'
exit 0
fi
done
echo 0.0.0

eve/main.yml Normal file

@@ -0,0 +1,435 @@
---
version: 0.2
branches:
feature/*, documentation/*, improvement/*, bugfix/*, w/*, q/*, hotfix/*:
stage: pre-merge
development/*:
stage: post-merge
models:
- env: &global-env
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackend_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key_2)s
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name_2)s
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint_2)s
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azuretest_AZURE_BLOB_ENDPOINT: "%(secret:azure_storage_endpoint)s"
b2backend_B2_ACCOUNT_ID: "%(secret:b2backend_b2_account_id)s"
b2backend_B2_STORAGE_ACCESS_KEY: >-
%(secret:b2backend_b2_storage_access_key)s
GOOGLE_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
GOOGLE_SERVICE_KEY: "%(secret:gcp_service_key)s"
AWS_S3_BACKEND_ACCESS_KEY: "%(secret:aws_s3_backend_access_key)s"
AWS_S3_BACKEND_SECRET_KEY: "%(secret:aws_s3_backend_secret_key)s"
AWS_S3_BACKEND_ACCESS_KEY_2: "%(secret:aws_s3_backend_access_key_2)s"
AWS_S3_BACKEND_SECRET_KEY_2: "%(secret:aws_s3_backend_secret_key_2)s"
AWS_GCP_BACKEND_ACCESS_KEY: "%(secret:aws_gcp_backend_access_key)s"
AWS_GCP_BACKEND_SECRET_KEY: "%(secret:aws_gcp_backend_secret_key)s"
AWS_GCP_BACKEND_ACCESS_KEY_2: "%(secret:aws_gcp_backend_access_key_2)s"
AWS_GCP_BACKEND_SECRET_KEY_2: "%(secret:aws_gcp_backend_secret_key_2)s"
b2backend_B2_STORAGE_ENDPOINT: "%(secret:b2backend_b2_storage_endpoint)s"
gcpbackend2_GCP_SERVICE_EMAIL: "%(secret:gcp2_service_email)s"
gcpbackend2_GCP_SERVICE_KEY: "%(secret:gcp2_service_key)s"
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackend_GCP_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
gcpbackend_GCP_SERVICE_KEY: "%(secret:gcp_service_key)s"
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
%(secret:gcpbackendmismatch_gcp_service_email)s
gcpbackendmismatch_GCP_SERVICE_KEY: >-
%(secret:gcpbackendmismatch_gcp_service_key)s
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
- env: &mongo-vars
S3BACKEND: "mem"
MPU_TESTING: "yes"
S3METADATA: mongodb
- env: &multiple-backend-vars
S3BACKEND: "mem"
S3DATA: "multiple"
- env: &file-mem-mpu
S3BACKEND: "file"
S3VAULT: "mem"
MPU_TESTING: "yes"
- Git: &clone
name: Pull repo
repourl: '%(prop:git_reference)s'
shallow: true
retryFetch: true
haltOnFailure: true
- ShellCommand: &credentials
name: Setup Credentials
command: bash eve/workers/build/credentials.bash
haltOnFailure: true
env: *global-env
- ShellCommand: &yarn-install
name: install modules
command: yarn install
haltOnFailure: true
- Upload: &upload-artifacts
source: /artifacts
urls:
- "*"
- ShellCommand: &follow-s3-log
logfiles:
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &follow-s3-ceph-logs
logfiles:
ceph:
filename: /artifacts/ceph.log
follow: true
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &add-hostname
name: add hostname
command: |
echo "127.0.0.1 testrequestbucket.localhost" >> /etc/hosts
echo \
"127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" \
>> /etc/hosts
haltOnFailure: true
- ShellCommand: &setup-junit-upload
name: preparing junit files for upload
command: |
mkdir -p artifacts/junit
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
alwaysRun: true
- Upload: &upload-junits
source: artifacts
urls:
- "*"
alwaysRun: true
stages:
pre-merge:
worker:
type: local
steps:
- TriggerStages:
name: Launch all workers
stage_names:
- linting-coverage
- file-ft-tests
- multiple-backend-test
- mongo-ft-tests
- ceph-backend-tests
- kmip-ft-tests
waitForFinish: true
haltOnFailure: true
linting-coverage:
worker:
type: docker
path: eve/workers/build
volumes: &default_volumes
- '/home/eve/workspace'
steps:
- Git: *clone
- ShellCommand: *yarn-install
- ShellCommand: *add-hostname
- ShellCommand: *credentials
- ShellCommand:
name: Linting
command: |
set -ex
yarn run --silent lint -- --max-warnings 0
yarn run --silent lint_md
flake8 $(git ls-files "*.py")
yamllint $(git ls-files "*.yml")
- ShellCommand:
name: Unit Coverage
command: |
set -ex
unset HTTP_PROXY HTTPS_PROXY NO_PROXY
unset http_proxy https_proxy no_proxy
mkdir -p $CIRCLE_TEST_REPORTS/unit
yarn test
yarn run test_legacy_location
env: &shared-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
CIRCLE_TEST_REPORTS: /tmp
CIRCLE_ARTIFACTS: /tmp
CI_REPORTS: /tmp
- ShellCommand:
name: Unit Coverage logs
command: find /tmp/unit -exec cat {} \;
- ShellCommand: *setup-junit-upload
- Upload: *upload-junits
multiple-backend-test:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2560Mi"
s3Mem: "2560Mi"
env:
<<: *multiple-backend-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test
yarn run ft_awssdk_external_backends"
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- ShellCommand:
command: mvn test
workdir: build/tests/functional/jaws
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand:
command: rspec tests.rb
workdir: build/tests/functional/fog
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
ceph-backend-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
ceph: eve/workers/ceph
vars:
aggressorMem: "2500Mi"
s3Mem: "2560Mi"
redis: enabled
env:
<<: *multiple-backend-vars
<<: *global-env
CI_CEPH: "true"
MPU_TESTING: "yes"
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash eve/workers/ceph/wait_for_ceph.sh
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test"
env:
<<: *multiple-backend-vars
<<: *global-env
<<: *follow-s3-ceph-logs
- ShellCommand:
command: mvn test
workdir: build/tests/functional/jaws
<<: *follow-s3-ceph-logs
env:
<<: *multiple-backend-vars
- ShellCommand:
command: rspec tests.rb
workdir: build/tests/functional/fog
<<: *follow-s3-ceph-logs
env:
<<: *multiple-backend-vars
- ShellCommand:
command: |
yarn run ft_awssdk &&
yarn run ft_s3cmd
env:
<<: *file-mem-mpu
<<: *global-env
S3_LOCATION_FILE: "/kube_pod-prod-cloudserver-backend-0/\
build/tests/locationConfig/locationConfigCeph.json"
<<: *follow-s3-ceph-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
mongo-ft-tests:
worker: &s3-pod
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2Gi"
s3Mem: "1664Mi"
redis: enabled
env:
<<: *mongo-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
yarn run ft_test
<<: *follow-s3-log
env:
<<: *mongo-vars
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
file-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "3Gi"
s3Mem: "2560Mi"
redis: enabled
env:
<<: *file-mem-mpu
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
yarn run ft_test
<<: *follow-s3-log
env:
<<: *file-mem-mpu
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
kmip-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
pykmip: eve/workers/pykmip
vars:
aggressorMem: "2Gi"
s3Mem: "1664Mi"
redis: enabled
pykmip: enabled
env:
<<: *mongo-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip
logfiles:
pykmip:
filename: /artifacts/pykmip.log
follow: true
s3:
filename: /artifacts/s3.log
follow: true
env:
<<: *mongo-vars
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
post-merge:
worker:
type: local
steps:
- Git: *clone
- ShellCommand: &docker_login
name: Private Registry Login
command: >
docker login
-u '%(secret:private_registry_username)s'
-p '%(secret:private_registry_password)s'
'%(secret:private_registry_url)s'
- ShellCommand:
name: Dockerhub Login
command: >
docker login
-u '%(secret:dockerhub_ro_user)s'
-p '%(secret:dockerhub_ro_password)s'
- SetProperty: &docker_image_name
name: Set docker image name property
property: docker_image_name
value:
"%(secret:private_registry_url)s/zenko/cloudserver:\
%(prop:commit_short_revision)s"
- ShellCommand:
name: Build docker image
command: >-
docker build
--no-cache
-t %(prop:docker_image_name)s
.
- ShellCommand:
name: Tag images
command: |
docker tag %(prop:docker_image_name)s zenko/cloudserver:$TAG
docker tag %(prop:docker_image_name)s zenko/cloudserver:latest
env:
TAG: "latest-%(prop:product_version)s"
- ShellCommand:
name: Push image
command: |
docker push %(prop:docker_image_name)s
docker push zenko/cloudserver:latest-%(prop:product_version)s
docker push zenko/cloudserver:latest


@@ -0,0 +1,58 @@
FROM buildpack-deps:xenial-curl
#
# Install packages needed by the buildchain
#
ENV LANG C.UTF-8
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
&& apt-get update \
&& apt-get install -y yarn \
&& cat /tmp/*packages.list | xargs apt-get install -y \
&& git clone https://github.com/tj/n.git \
&& make -C ./n \
&& n 10 \
&& pip install pip==9.0.1 \
&& rm -rf ./n \
&& rm -rf /var/lib/apt/lists/* \
&& rm -f /tmp/packages.list
#
# Add user eve
#
RUN adduser -u 1042 --home /home/eve --disabled-password --gecos "" eve \
&& adduser eve sudo \
&& sed -ri 's/(%sudo.*)ALL$/\1NOPASSWD:ALL/' /etc/sudoers
#
# Install Dependencies
#
# Install RVM and gems
ENV RUBY_VERSION="2.4.1"
COPY ./gems.list /tmp/
RUN cat /tmp/gems.list | xargs gem install
#RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 \
# && curl -sSL https://get.rvm.io | bash -s stable --ruby=$RUBY_VERSION \
# && usermod -a -G rvm eve
#RUN /bin/bash -l -c "\
# source /usr/local/rvm/scripts/rvm \
# && cat /tmp/gems.list | xargs gem install \
# && rm /tmp/gems.list"
# Install Pip packages
COPY ./pip_packages.list /tmp/
RUN cat /tmp/pip_packages.list | xargs pip install \
&& rm -f /tmp/pip_packages.list \
&& mkdir /home/eve/.aws \
&& chown eve /home/eve/.aws
#
# Run buildbot-worker on startup
#
ARG BUILDBOT_VERSION
RUN pip install buildbot-worker==$BUILDBOT_VERSION
CMD ["/bin/bash", "-l", "-c", "buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS && buildbot-worker start --nodaemon"]


@@ -0,0 +1,13 @@
ca-certificates
git
gnupg
libffi-dev
libssl-dev
python-pip
python2.7
python2.7-dev
software-properties-common
sudo
tcl
wget
procps


@@ -2,9 +2,9 @@
set -x #echo on
set -e #exit at the first error
mkdir -p $HOME/.aws
mkdir -p ~/.aws
cat >>$HOME/.aws/credentials <<EOF
cat >>/root/.aws/credentials <<EOF
[default]
aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY


@@ -0,0 +1,4 @@
fog-aws:1.3.0
json
mime-types:3.1
rspec:3.5


@@ -0,0 +1,3 @@
flake8
s3cmd==1.6.1
yamllint


@@ -0,0 +1,11 @@
build-essential
curl
default-jdk
libdigest-hmac-perl
lsof
maven
netcat
redis-server
ruby-full
yarn
zlib1g-dev

eve/workers/pod.yaml Normal file

@@ -0,0 +1,233 @@
---
apiVersion: v1
kind: Pod
metadata:
name: "proxy-ci-test-pod"
spec:
restartPolicy: Never
terminationGracePeriodSeconds: 10
hostAliases:
- ip: "127.0.0.1"
hostnames:
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
- "testrequestbucket.localhost"
- "pykmip.local"
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
initContainers:
- name: kmip-certs-installer
image: {{ images.pykmip }}
command: [ 'sh', '-c', 'cp /ssl/* /ssl-kmip/']
volumeMounts:
- name: kmip-certs
readOnly: false
mountPath: /ssl-kmip
{%- endif %}
containers:
{% if vars.env.S3METADATA is defined and vars.env.S3METADATA == "mongodb" -%}
- name: mongo
image: scality/ci-mongo:3.6.8
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 500m
memory: 1Gi
{%- endif %}
- name: aggressor
image: {{ images.aggressor }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: "1"
memory: {{ vars.aggressorMem }}
limits:
cpu: "1"
memory: {{ vars.aggressorMem }}
volumeMounts:
- name: creds
readOnly: false
mountPath: /root/.aws
- name: artifacts
readOnly: true
mountPath: /artifacts
command:
- bash
- -lc
- |
buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS
buildbot-worker start --nodaemon
env:
- name: CI
value: "true"
- name: ENABLE_LOCAL_CACHE
value: "true"
- name: REPORT_TOKEN
value: "report-token-1"
- name: REMOTE_MANAGEMENT_DISABLE
value: "1"
{% for key, value in vars.env.items() %}
- name: {{ key }}
value: "{{ value }}"
{% endfor %}
- name: s3
image: {{ images.s3 }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: "2"
memory: {{ vars.s3Mem }}
limits:
cpu: "2"
memory: {{ vars.s3Mem }}
volumeMounts:
- name: creds
readOnly: false
mountPath: /root/.aws
- name: certs
readOnly: false
mountPath: /tmp
- name: artifacts
readOnly: false
mountPath: /artifacts
- name: kmip-certs
readOnly: false
mountPath: /ssl-kmip
command:
- bash
- -ec
- |
sleep 10 # wait for mongo
/usr/src/app/docker-entrypoint.sh yarn start | tee -a /artifacts/s3.log
env:
{% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is not defined -%}
- name: S3_LOCATION_FILE
value: "/usr/src/app/tests/locationConfig/locationConfigTests.json"
{%- endif %}
{% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
- name: S3_LOCATION_FILE
value: "/usr/src/app/tests/locationConfig/locationConfigCeph.json"
{%- endif %}
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
- name: S3KMS
value: kmip
- name: S3KMIP_PORT
value: "5696"
- name: S3KMIP_HOSTS
value: "pykmip.local"
- name: S3KMIP_COMPOUND_CREATE
value: "false"
- name: S3KMIP_BUCKET_ATTRIBUTE_NAME
value: ''
- name: S3KMIP_PIPELINE_DEPTH
value: "8"
- name: S3KMIP_KEY
value: /ssl-kmip/kmip-client-key.pem
- name: S3KMIP_CERT
value: /ssl-kmip/kmip-client-cert.pem
- name: S3KMIP_CA
value: /ssl-kmip/kmip-ca.pem
{%- endif %}
- name: CI
value: "true"
- name: ENABLE_LOCAL_CACHE
value: "true"
- name: MONGODB_HOSTS
value: "localhost:27018"
- name: MONGODB_RS
value: "rs0"
- name: REDIS_HOST
value: "localhost"
- name: REDIS_PORT
value: "6379"
- name: REPORT_TOKEN
value: "report-token-1"
- name: REMOTE_MANAGEMENT_DISABLE
value: "1"
- name: HEALTHCHECKS_ALLOWFROM
value: "0.0.0.0/0"
{% for key, value in vars.env.items() %}
- name: {{ key }}
value: "{{ value }}"
{% endfor %}
{% if vars.redis is defined and vars.redis == "enabled" -%}
- name: redis
image: redis:alpine
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 200m
memory: 128Mi
limits:
cpu: 200m
memory: 128Mi
{%- endif %}
{% if vars.env.CI_PROXY is defined and vars.env.CI_PROXY == "true" -%}
- name: squid
image: scality/ci-squid
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 250m
memory: 128Mi
volumeMounts:
- name: certs
readOnly: false
mountPath: /ssl
command:
- sh
- -exc
- |
mkdir -p /ssl
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
-subj "/C=US/ST=Country/L=City/O=Organization/CN=scality-proxy" \
-keyout /ssl/myca.pem -out /ssl/myca.pem
cp /ssl/myca.pem /ssl/CA.pem
squid -f /etc/squid/squid.conf -N -z
squid -f /etc/squid/squid.conf -NYCd 1
{%- endif %}
{% if vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
- name: ceph
image: {{ images.ceph }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 1536Mi
limits:
cpu: 500m
memory: 1536Mi
volumeMounts:
- name: artifacts
readOnly: false
mountPath: /artifacts
{%- endif %}
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
- name: pykmip
image: {{ images.pykmip }}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: artifacts
readOnly: false
mountPath: /artifacts
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 100m
memory: 128Mi
{%- endif %}
volumes:
- name: creds
emptyDir: {}
- name: certs
emptyDir: {}
- name: artifacts
emptyDir: {}
- name: kmip-certs
emptyDir: {}


@@ -1,4 +1,5 @@
FROM python:3.10-alpine
FROM python:3-alpine
RUN apk add --no-cache \
libressl && \
@@ -7,14 +8,8 @@ RUN apk add --no-cache \
libffi-dev \
libressl-dev \
sqlite-dev \
build-base \
curl
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
RUN pip3 install -U pip && \
pip3 install pykmip requests && \
build-base && \
pip install pykmip requests && \
apk del .build-deps && \
mkdir /pykmip


@@ -30,9 +30,7 @@ def create_rsa_private_key(key_size=2048, public_exponent=65537):
return private_key
def create_self_signed_certificate(subject_name,
private_key,
days_valid=36500):
def create_self_signed_certificate(subject_name, private_key, days_valid=365):
subject = x509.Name([
x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name)
@@ -61,7 +59,7 @@ def create_certificate(subject_name,
private_key,
signing_certificate,
signing_key,
days_valid=36500,
days_valid=365,
client_auth=False):
subject = x509.Name([
x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),


@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC6TCCAdGgAwIBAgIUO54wXmqIJGCKxQAH4jhGQXa6ZHIwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAeFw0xOTA0
MDIxODE2MjBaFw0yMDA0MDExODE2MjBaMCQxEDAOBgNVBAoMB1NjYWxpdHkxEDAO
BgNVBAMMB1Jvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDL
T3oapzN3ZEWh5cfe5PlOTgewZ55j7Xjz5ZEWNjPWYmBJfh0+5dntK+c1HvtEL/oa
6vnliPbb3kcl01eoOgmWX7ZgRwWsSb05otBSXJ040eJ8IFKw5Pp8OiWS3wXNusBs
HI/exrGdDTukqarhTBuscbBVtVd/IdQNQZRxB14ci1DjD+i3zBv/oRfrDUbXoBDJ
/ucyCICMthqWzFI509FU6DD1554xvDOoryhCOTHfQFcEWgSln8HaiELlJk9D7164
9qRse2R0s0STTrDgclbQpvt8gJfsWpTuRhjEFe0MKmWWhuYfA+o8eHNvCqQKdRwH
QLx9q1fCwi6Czz7aO8lTAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
hvcNAQELBQADggEBAITJfBlmABEwO/d7320ukoQyV/i2dhC/y2C/mKxuzPXPU8hR
FsBC6LA2oJbatSTSuG3YmHcf8/0mj3o4fCgz2+B7rJLH9WXd8lZdz8CRMwsyVmFY
aI3NvtMc0tV+4W1pxmxBs5IDITLAYIxuTm6kowH9jy85bAnGDYjGK9Hr84keWJIg
a2z7DGhL1HEd6tqJvhEFJFLL4VqDB9vEdvILnav3D3EkKU6lQ0Bvi2XO4t8rOclm
lEfYU7taNgCAYc9y/KcQ13jAokjZxmT8Bhep+Xq4BAHDakzqgD7USUnSUZ0inu8e
2bZItCXIJq/wD5ysOyeT490qVJ6F/8LKS1HYsUE=
-----END CERTIFICATE-----


@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC2TCCAcGgAwIBAgIUG83im4Ny72RE536mxXOtJxzjOgAwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAeFw0xOTA0
MDIxODE2MjBaFw0yMDA0MDExODE2MjBaMCkxEDAOBgNVBAoMB1NjYWxpdHkxFTAT
BgNVBAMMDHB5a21pcC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBALCSoV6WSaIS0FHmJk1+b1/DEBolk4hru4gkk7qqS+vSlHMog2pu37Dvd6Ll
2G2EUVxkrf7bJP1pz5qxLYoZeMDd2MVCzGpdVHrLx7yRB+A7UwSrVpMcQkpxaBrK
upBZuJZoiYgsqAdxxD/NLUAUSyTm3RQ/xJmLRSs3w8F8AfjoGbFZwsAgfnO4kxNR
tdeVf4a4yGp2tmF7QMtMQr2ov0ktGiFwvmosvhkwJjCgvq5IfL4kc4iLhW6vSKPh
e/51Mhq2ntqX+4WxXuhGV4O9rc8tMZ/8zQY8KuabETDgIeBgpE9u+uEqvjnOZmNx
/bZW7MW2tldBmiPFQ+HMFTNICMsCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAhcJt
1605IDKAlCisolU49574gjJv6OdMSOuMF2ZDibK0mIqta65+t9eoRSmwSzysTyWe
6dDt0BRwxzEwuGN8B4DHe5jV8+9NHq8wh/ImyfohX75xSvgnCW2aRuA0qZhi4qCa
pZjy+rZu7Xa10hAmQ8lllYmLrNyRld8dL0eyL3sYzxb5SWX/60kJ4Fo/OkDAOzBo
7P5PKNlCthHL0ND/jV1jqrr+822xWGzGeN4vvqMYMvR71J+dBTCkJj5lRW3MailR
R2zu+W7idIxbs4Gh2JZ4LwPrWG63KhA7Hc+4sMSdrw3Mcp5IMpwIjllBJlllNbKE
x8ARszTHSgP6WtzvaQ==
-----END CERTIFICATE-----


@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC8TCCAdmgAwIBAgIUaGl4Kplsv0uUwIgJKaZByGk6JMwwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAeFw0xOTA0
MDIxODE2MjBaFw0yMDA0MDExODE2MjBaMCUxEDAOBgNVBAoMB1NjYWxpdHkxETAP
BgNVBAMMCEpvaG4gRG9lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
xrms0paQYVeXg/Gra21DQNW9t6lm7/CzsYZRKcqWnALVhg1Ol6NzZYxhx4ezYlga
ztHnKwFUReOqNYWl2Cgivxwav9lpZp4W7LvHemfOcSmy+7fX8ttEs9df/1p9uMu+
AtnE0yiqWrDYQAu2gSQMWv+SjdnTOrOuz88//6CgxKzE6ccC4EKkJuxIr5Vqy+Up
OgF8wZSnx6JuW+o20XhodCLBac/diwvltODif8FHIhkzR948PfGKAofIaV0s2eri
Kfsc7bRDLnUjYLGeEk/PxjydpCy4e+U+fctTvrRxi0KVqnbxz3+B0nx2Cp0IW/DM
RP09Lfx7uQvieiKaqLJElQIDAQABoxowGDAWBgNVHSUBAf8EDDAKBggrBgEFBQcD
AjANBgkqhkiG9w0BAQsFAAOCAQEAk5Xh1EpxMsWhlorkrBadYtkTqsiC4UIBEJvT
hqU2eb3Fom39gSoKTFSJEO2mopMCq34TRG6klkxfMQRxzQxWAmAHAu0BLeWcJ0Rx
FCxZZ5CexZYAH2yJBQDvvTfFCXZ6VmpHfDa+7Z9DBNYm3WPuDROWgnwiTqrtVmu7
3HeBi4KG/DE6tC6QxI+A9Ofj3wcfv25et1NdBQnNMoMmjyWIGlEmhNShgtasNScV
mp+9LxZUFxeVDx3Qnw+wo/bwbyjYGh+osI+7RHpwXobmSQxxC8Vs+hZlKWGIh47b
DU2ONZAO4565Kppp48mTcgmq96IQFLoIK9XY3CkLsowS7IWczQ==
-----END CERTIFICATE-----


@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDGuazSlpBhV5eD
8atrbUNA1b23qWbv8LOxhlEpypacAtWGDU6Xo3NljGHHh7NiWBrO0ecrAVRF46o1
haXYKCK/HBq/2Wlmnhbsu8d6Z85xKbL7t9fy20Sz11//Wn24y74C2cTTKKpasNhA
C7aBJAxa/5KN2dM6s67Pzz//oKDErMTpxwLgQqQm7EivlWrL5Sk6AXzBlKfHom5b
6jbReGh0IsFpz92LC+W04OJ/wUciGTNH3jw98YoCh8hpXSzZ6uIp+xzttEMudSNg
sZ4ST8/GPJ2kLLh75T59y1O+tHGLQpWqdvHPf4HSfHYKnQhb8MxE/T0t/Hu5C+J6
IpqoskSVAgMBAAECggEAJJ4aBkPQHt+w/5MMbyMW/V9lMe55FUVZFyEU24qE7gJn
refNz4tCvDd93PJYT4rEhc+PtRDtomMs/ee+g3IB1Q0ssKUzEsGWn9CKFTgDhj6U
yGU72Xgl1K2e9sKJ3/9K5+OQrQgVO9jSQBroaitmS25EZvb4QRzS3V/m/wduGE8a
V08HuJX7K6XDptaj2wuYxJmb+Zx4RMVc7D9R38zEoPu5yDVIFQSmZtbGSzo2ElyL
93zEiCno9PVIvNCvEayoYA1Mm4hvp7/gmeF8K9aYGHBeMmHJQpixrBRca0zm1en5
TxirD07P7mqENTHUKq9GXciHAmflzBAVYXB7S0zDkQKBgQDrRUNZEN5d9YuteN18
RYdc7bttE5lY1CLtdWera1d1SZ912selsdKBBYl0ukAvVHvjC/XdA/FGfykfK1h1
UV8bWSH3lxaW9wlwn4mtNS4itWY5CaLLlhmUMM3wehSw+iZgTUiNDhhmt5Anj3/H
uSVwumO54+JWC+XjTheIOikAPwKBgQDYPBnbrbEFHp4ZNVArNApyfpefgrnj/ypV
Bs/rJxGtbSe9x92POYmJwHIH44LuU14XQAwSIIKoGwXX66auJ+JPaFf8tAoPQS6j
MxP4YfLesS1sQweEKoYgZ9O+AdyBgOWdubyi063XlM7pUzeCrmxdC5koJ/PQoGOc
8haBAsJGKwKBgQC8qbxKDfbjjeZGY6fo4bCc2p7z50WPL/4aQY2yrs9hZHqU/a4f
tytA/3msuzaBPdRiy9KLO4Adshb9wbqbyXbk7WMJsoUQ5mURhT3YQc8PUjv4/Tso
2uMELObYMm2pRc/EZfUJ+AWlSQo2TyJ+vH/DmBQkmxODQONGlfbU7R637QKBgQC7
aFuA6ajipwafEnXI+/GSCeWfec1irWQjDSRmyhWoGVK4SODdoSBzIzexXp27sMV7
oSbVDxguWj1WRgbQKgEakXSwr9mIHxYsm7hTLZExMJ4NloqNIc3diB8cLsDN/MkF
SlUTSiMBFRe/YUBbIpEIk2TKSNYnmtq6y5Z1ec6mwQKBgDY9sv5xUkON9CjNn4KO
lIs/Ef4id2awMOb1fsTSFTxPjo47UdDQsEF/anxjwc2tYto3WuVpurWxU8Jqknkz
uDtCr8aNVar3d5y3NNFg8oWY1tk9Ha7U05t/PraJRmeduOzy/nWMuZAi+mW9kdRK
WYX5TjM6GzAOrlUr4QdHAfP/
-----END PRIVATE KEY-----


@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCwkqFelkmiEtBR
5iZNfm9fwxAaJZOIa7uIJJO6qkvr0pRzKINqbt+w73ei5dhthFFcZK3+2yT9ac+a
sS2KGXjA3djFQsxqXVR6y8e8kQfgO1MEq1aTHEJKcWgayrqQWbiWaImILKgHccQ/
zS1AFEsk5t0UP8SZi0UrN8PBfAH46BmxWcLAIH5zuJMTUbXXlX+GuMhqdrZhe0DL
TEK9qL9JLRohcL5qLL4ZMCYwoL6uSHy+JHOIi4Vur0ij4Xv+dTIatp7al/uFsV7o
RleDva3PLTGf/M0GPCrmmxEw4CHgYKRPbvrhKr45zmZjcf22VuzFtrZXQZojxUPh
zBUzSAjLAgMBAAECggEAPXHpQdcerI3LfJSQg6sZ6sMgmVi2LGUBD3FbDzwvy1Ku
YhyZDrKimRncEg3V8NZ40aQfgG6WQrFNk1FQvZv7j3Ij+xExvVnZChpb2VzG1tsO
GrPdHrhYwTsRtTETFySBvaWHJqITnvOSDXnC42esdpz4FhHSwnPakB2Ju40ByrG2
M/ljiLpWZC9lMdk+BK7MdjoDOvawCKghovKqxHkxjLVA1CG1sovmm3mTCqO0iOst
b6go/ETb/oSLB+aHV9++bK1Zc3fO26vjz/BDkOqbz66U/74KVqIyOeJy2WarwiDa
sqaRi5c3izwZynhl/eiIDcJJDKD6AyoCHWUbuIdTgQKBgQDW6/Hfb7iO/M+lx4ey
uTxgS5MXOn2ozQFxLbfE5aTdRjPHlq2OhPOK+rmfk0nupEQTPDe7YFwvKaI5kiQQ
R6eJMAcpO4iLGGqjQw1jCBHLo73Ri58v2KEkjOAA+S1jVcWi/nqCPrlpjZqj2640
SKksEUMOfXsPVVbpfEAvqOL0IQKBgQDSUkkdPAALrbaZuOovUurbJoKY6GFHPVJ5
kvrpqNNBYZ12FnEWqJFCTWeF9g3WEjy8XKltxkf8ARxOwXC9P7FTBHX2hXmxRNuE
N7tZ/WML9/9GDlwb4TOQgiIxBOBT5NN7DAS+1api8ik93ZFExD2ufpcBdjVfLxn+
RIIpxVwfawKBgQCirlAUJ9XUbeqjeqftkabw4OPC9mQ9jIfl6owqvwUO9N+m2Rgg
Q+SxM12kO3H/8FkTEkbBT4wXqvT/jO49YG+hOTiCbmzJlL7LO6r7ZhVKRnQdFAl3
xwsaxoOcWQCRK1CBMwz6X44rJqOCGnv/WWysTZirdDHdBmTWMVXIfZbk4QKBgC9M
FekfHxuBOzkinnd5/BrAdEoSqB2vKqbwaMC3GJrxasmtjkz8J35zjb5QcRgdDc+G
PwvStUl0rnr/gWztr+DtdeG0boNw6rS3G8jG9MkyQhPtEsWqRUBQI4RGhnQXV3+q
Wj7YKfMKZj/lXc/LGdvt1+OaQ7JeE0hc+7CNE4R1AoGAGXbXJLj5kGLuq0r2Sj2p
JQPgzp4ZWjQwbkOcTLmGYpn61YjZrlbDDB/3gQ3sQ7q8LYQhgKEpH+cuX0vZixul
V7fj/D8RUxI72+2WlE6rCxYXYmSXEOS2DIx61KdzsL/oSw28dujKxyPRdt8H4uxg
Z7zoolTN1y/WZUxYD2Pxfb4=
-----END PRIVATE KEY-----


@@ -4,7 +4,7 @@ port=5696
certificate_path=/ssl/kmip-cert.pem
key_path=/ssl/kmip-key.pem
ca_path=/ssl/kmip-ca.pem
auth_suite=TLS1.2
auth_suite=Basic
policy_path=/etc/pykmip/policies
enable_tls_client_auth=True
database_path=/pykmip/pykmip.db


@@ -48,7 +48,7 @@ signed_headers = 'host;x-amz-content-sha256;x-amz-date'
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
.format(method, canonical_uri, canonical_querystring, canonical_headers,
signed_headers, payload_hash)
print(canonical_request)
print canonical_request
credential_scope = '{0}/{1}/{2}/aws4_request' \
.format(date_stamp, region, service)


@@ -1,28 +0,0 @@
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
COPY . ${HOME_DIR}/s3
RUN chown -R ${USER} ${HOME_DIR}
RUN pip3 install redis===3.5.3 requests==2.27.1 && \
apt-get install -y git-lfs
USER ${USER}
WORKDIR ${HOME_DIR}/s3
RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \
git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all && \
yarn global remove typescript
# run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed!
RUN ln -sf /scality-kms node_modules
EXPOSE 8000
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/supervisord.conf"


@@ -1,10 +1,3 @@
'use strict'; // eslint-disable-line strict
require('werelogs').stderrUtils.catchAndTimestampStderr(
undefined,
// Do not exit as workers have their own listener that will exit
// But primary don't have another listener
require('cluster').isPrimary ? 1 : null,
);
require('./lib/server.js')();

File diff suppressed because it is too large


@@ -1,41 +1,27 @@
const { auth, errors, policies } = require('arsenal');
const async = require('async');
const { auth, errors } = require('arsenal');
const bucketDelete = require('./bucketDelete');
const bucketDeleteCors = require('./bucketDeleteCors');
const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet');
const bucketGet = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors');
const bucketGetVersioning = require('./bucketGetVersioning');
const bucketGetWebsite = require('./bucketGetWebsite');
const bucketGetLocation = require('./bucketGetLocation');
const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut');
const bucketPutACL = require('./bucketPutACL');
const bucketPutCors = require('./bucketPutCors');
const bucketPutVersioning = require('./bucketPutVersioning');
const bucketPutTagging = require('./bucketPutTagging');
const bucketDeleteTagging = require('./bucketDeleteTagging');
const bucketGetTagging = require('./bucketGetTagging');
const bucketPutWebsite = require('./bucketPutWebsite');
const bucketPutReplication = require('./bucketPutReplication');
const bucketPutLifecycle = require('./bucketPutLifecycle');
const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight');
@@ -43,72 +29,35 @@ const completeMultipartUpload = require('./completeMultipartUpload');
const initiateMultipartUpload = require('./initiateMultipartUpload');
const listMultipartUploads = require('./listMultipartUploads');
const listParts = require('./listParts');
const metadataSearch = require('./metadataSearch');
const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy');
const { objectDelete } = require('./objectDelete');
const objectDelete = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL');
const objectGetLegalHold = require('./objectGetLegalHold');
const objectGetRetention = require('./objectGetRetention');
const objectGetTagging = require('./objectGetTagging');
const objectHead = require('./objectHead');
const objectPut = require('./objectPut');
const objectPutACL = require('./objectPutACL');
const objectPutLegalHold = require('./objectPutLegalHold');
const objectPutTagging = require('./objectPutTagging');
const objectPutPart = require('./objectPutPart');
const objectPutCopyPart = require('./objectPutCopyPart');
const objectPutRetention = require('./objectPutRetention');
const objectRestore = require('./objectRestore');
const prepareRequestContexts
= require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet');
const vault = require('../auth/vault');
const website = require('./website');
const websiteGet = require('./websiteGet');
const websiteHead = require('./websiteHead');
const writeContinue = require('../utilities/writeContinue');
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
const parseCopySource = require('./apiUtils/object/parseCopySource');
const { tagConditionKeyAuth } = require('./apiUtils/authorization/tagConditionKeys');
const { isRequesterASessionUser } = require('./apiUtils/authorization/permissionChecks');
const checkHttpHeadersSize = require('./apiUtils/object/checkHttpHeadersSize');
const monitoringMap = policies.actionMaps.actionMonitoringMapS3;
auth.setHandler(vault);
/* eslint-disable no-param-reassign */
const api = {
callApiMethod(apiMethod, request, response, log, callback) {
// Attach the apiMethod method to the request, so it can be used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod];
if (!actionLog &&
apiMethod !== 'websiteGet' &&
apiMethod !== 'websiteHead' &&
apiMethod !== 'corsPreflight') {
log.error('callApiMethod(): No actionLog for this api method', {
apiMethod,
});
}
log.addDefaultFields({
service: 's3',
action: actionLog,
bucketName: request.bucketName,
});
if (request.objectKey) {
log.addDefaultFields({
objectKey: request.objectKey,
});
}
let returnTagCount = true;
const validationRes = validateQueryAndHeaders(request, log);
@@ -123,7 +72,6 @@ const api = {
// no need to check auth on website or cors preflight requests
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
apiMethod === 'corsPreflight') {
request.actionImplicitDenies = false;
return this[apiMethod](request, log, callback);
}
@@ -136,92 +84,43 @@ const api = {
return process.nextTick(callback, parsingError);
}
const { httpHeadersSizeError } = checkHttpHeadersSize(request.headers);
if (httpHeadersSizeError) {
log.debug('http header size limit exceeded', {
error: httpHeadersSizeError,
});
return process.nextTick(callback, httpHeadersSizeError);
}
const requestContexts = prepareRequestContexts(apiMethod, request,
sourceBucket, sourceObject, sourceVersionId);
// Extract all the _apiMethods and store them in an array
const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : [];
// Attach the names to the current request
// eslint-disable-next-line no-param-reassign
request.apiMethods = apiMethods;
function checkAuthResults(authResults) {
let returnTagCount = true;
const isImplicitDeny = {};
let isOnlyImplicitDeny = true;
return auth.server.doAuth(request, log, (err, userInfo,
authorizationResults, streamingV4Params) => {
if (err) {
log.trace('authentication error', { error: err });
return callback(err);
}
if (authorizationResults) {
if (apiMethod === 'objectGet') {
// first item checks s3:GetObject(Version) action
if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
if (!authorizationResults[0].isAllowed) {
log.trace('get object authorization denial from Vault');
return errors.AccessDenied;
return callback(errors.AccessDenied);
}
// TODO add support for returnTagCount in the bucket policy
// checks
isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
// second item checks s3:GetObject(Version)Tagging action
if (!authResults[1].isAllowed) {
if (!authorizationResults[1].isAllowed) {
log.trace('get tagging authorization denial ' +
'from Vault');
returnTagCount = false;
}
} else {
for (let i = 0; i < authResults.length; i++) {
isImplicitDeny[authResults[i].action] = true;
if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
// Any explicit deny rejects the current API call
for (let i = 0; i < authorizationResults.length; i++) {
if (!authorizationResults[i].isAllowed) {
log.trace('authorization denial from Vault');
return errors.AccessDenied;
}
if (authResults[i].isAllowed) {
// If the action is allowed, the result is not implicit
// Deny.
isImplicitDeny[authResults[i].action] = false;
isOnlyImplicitDeny = false;
return callback(errors.AccessDenied);
}
}
}
// These two APIs cannot use ACLs or Bucket Policies, hence, any
// implicit deny from vault must be treated as an explicit deny.
if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
return errors.AccessDenied;
}
return { returnTagCount, isImplicitDeny };
}
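// Illustrative example (made-up values): for an objectGet request where
// Vault implicitly denies s3:GetObject and denies s3:GetObjectTagging,
// checkAuthResults would return something like
//     { returnTagCount: false, isImplicitDeny: { objectGet: true } }
// i.e. the call is not rejected outright; the implicit deny is
// re-evaluated against ACLs and bucket policies further down the chain.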
return async.waterfall([
next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err });
return next(arsenalError);
}
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
}
if (isRequesterASessionUser(userInfo)) {
authNames.sessionName = userInfo.getShortid().split(':')[1];
}
log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}
// issue 100 Continue to the client
writeContinue(request, response);
const MAX_POST_LENGTH = request.method === 'POST' ?
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return this[apiMethod](userInfo, request, streamingV4Params,
log, callback);
}
const MAX_POST_LENGTH = request.method.toUpperCase() === 'POST' ?
1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
const post = [];
let postLength = 0;
@@ -231,147 +130,85 @@ const api = {
if (postLength <= MAX_POST_LENGTH) {
post.push(chunk);
}
return undefined;
});
request.on('error', err => {
log.trace('error receiving request', {
error: err,
});
return next(errors.InternalError);
return callback(errors.InternalError);
});
request.on('end', () => {
if (postLength > MAX_POST_LENGTH) {
log.error('body length is too long for request type',
{ postLength });
return next(errors.InvalidRequest);
return callback(errors.InvalidRequest);
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
});
return undefined;
},
// Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
authorizationResults,
request,
requestContexts,
apiMethod,
log,
(err, authResultsWithTags) => {
if (err) {
log.trace('tag authentication error', { error: err });
return next(err);
}
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
},
),
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
return callback(err);
}
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) {
return callback(checkedResults);
}
returnTagCount = checkedResults.returnTagCount;
request.actionImplicitDenies = checkedResults.isImplicitDeny;
} else {
// create an object of keys apiMethods with all values to false:
// for backward compatibility, all apiMethods are allowed by default
// thus it is explicitly allowed, so implicit deny is false
request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
acc[curr] = false;
return acc;
}, {});
}
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params,
log, methodCallback, authorizationResults);
}
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (apiMethod === 'objectCopy' ||
apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, methodCallback);
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
return this[apiMethod](userInfo, request,
returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, methodCallback);
return this[apiMethod](userInfo, request, log, callback);
});
return undefined;
}, 's3', requestContexts);
},
bucketDelete,
bucketDeleteCors,
bucketDeleteEncryption,
bucketDeleteWebsite,
bucketGet,
bucketGetACL,
bucketGetCors,
bucketGetObjectLock,
bucketGetVersioning,
bucketGetWebsite,
bucketGetLocation,
bucketGetEncryption,
bucketHead,
bucketPut,
bucketPutACL,
bucketPutCors,
bucketPutVersioning,
bucketPutTagging,
bucketDeleteTagging,
bucketGetTagging,
bucketPutWebsite,
bucketPutReplication,
bucketGetReplication,
bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle,
bucketDeleteLifecycle,
bucketPutPolicy,
bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy,
bucketPutObjectLock,
bucketPutNotification,
bucketGetNotification,
bucketPutEncryption,
corsPreflight,
completeMultipartUpload,
initiateMultipartUpload,
listMultipartUploads,
listParts,
metadataSearch,
multiObjectDelete,
multipartDelete,
objectDelete,
objectDeleteTagging,
objectGet,
objectGetACL,
objectGetLegalHold,
objectGetRetention,
objectGetTagging,
objectCopy,
objectHead,
objectPut,
objectPutACL,
objectPutLegalHold,
objectPutTagging,
objectPutPart,
objectPutCopyPart,
objectPutRetention,
objectRestore,
serviceGet,
websiteGet: website,
websiteHead: website,
websiteGet,
websiteHead,
};
module.exports = api;


@@ -1,29 +0,0 @@
const { errors } = require('arsenal');
const vault = require('../../../auth/vault');
function checkExpectedBucketOwner(headers, bucket, log, cb) {
const expectedOwner = headers['x-amz-expected-bucket-owner'];
if (expectedOwner === undefined) {
return cb();
}
const bucketOwner = bucket.getOwner();
return vault.getAccountIds([bucketOwner], log, (error, res) => {
if (error) {
log.error('error fetching accountId from vault', {
method: 'checkExpectedBucketOwner',
error,
});
}
if (error || res[bucketOwner] !== expectedOwner) {
return cb(errors.AccessDenied);
}
return cb();
});
}
module.exports = {
checkExpectedBucketOwner,
};


@@ -1,23 +1,42 @@
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
const { errors } = require('arsenal');
const { parseCIDR, isValid } = require('ipaddr.js');
const { evaluators } = require('arsenal').policies;
const constants = require('../../../../constants');
const { config } = require('../../../Config');
const {
allAuthedUsersId,
bucketOwnerActions,
logId,
publicId,
arrayOfAllowed,
assumedRoleArnResourceType,
backbeatLifecycleSessionName,
actionsToConsiderAsObjectPut,
} = constants;
const actionMap = {
's3:AbortMultipartUpload': 'multipartDelete',
's3:DeleteBucket': 'bucketDelete',
's3:DeleteBucketPolicy': 'bucketDeletePolicy',
's3:DeleteBucketWebsite': 'bucketDeleteWebsite',
's3:DeleteObject': 'objectDelete',
's3:DeleteObjectTagging': 'objectDeleteTagging',
's3:GetBucketAcl': 'bucketGetACL',
's3:GetBucketCORS': 'bucketGetCors',
's3:GetBucketLocation': 'bucketGetLocation',
's3:GetBucketPolicy': 'bucketGetPolicy',
's3:GetBucketVersioning': 'bucketGetVersioning',
's3:GetBucketWebsite': 'bucketGetWebsite',
's3:GetLifecycleConfiguration': 'bucketGetLifecycle',
's3:GetObject': 'objectGet',
's3:GetObjectAcl': 'objectGetACL',
's3:GetObjectTagging': 'objectGetTagging',
's3:GetReplicationConfiguration': 'bucketGetReplication',
's3:ListBucket': 'bucketHead',
's3:ListBucketMultipartUploads': 'listMultipartUploads',
's3:ListMultipartUploadParts': 'listParts',
's3:PutBucketAcl': 'bucketPutACL',
's3:PutBucketCORS': 'bucketPutCors',
's3:PutBucketPolicy': 'bucketPutPolicy',
's3:PutBucketVersioning': 'bucketPutVersioning',
's3:PutBucketWebsite': 'bucketPutWebsite',
's3:PutLifecycleConfiguration': 'bucketPutLifecycle',
's3:PutObject': 'objectPut',
's3:PutObjectAcl': 'objectPutACL',
's3:PutObjectTagging': 'objectPutTagging',
's3:PutReplicationConfiguration': 'bucketPutReplication',
};
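// Maps S3 policy action names to the CloudServer API method names used
// throughout lib/api, e.g. actionMap['s3:PutObject'] === 'objectPut'.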
// whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
function getServiceAccountProperties(canonicalID) {
const canonicalIDArray = canonicalID.split('/');
@@ -29,109 +48,49 @@ function isServiceAccount(canonicalID) {
return getServiceAccountProperties(canonicalID) !== undefined;
}
function isRequesterASessionUser(authInfo) {
const regexpAssumedRoleArn = /^arn:aws:sts::[0-9]{12}:assumed-role\/.*$/;
return regexpAssumedRoleArn.test(authInfo.getArn());
}
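// Example: the ARN regex above matches STS session ARNs such as
//     arn:aws:sts::123456789012:assumed-role/lifecycle-role/backbeat-lifecycle
// but not IAM user ARNs like arn:aws:iam::123456789012:user/alice.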
function isRequesterNonAccountUser(authInfo) {
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
}
/**
* Checks the access control for a given bucket based on the request type and user's canonical ID.
*
* @param {Bucket} bucket - The bucket to check access control for.
* @param {string} requestType - The list of s3 actions to check within the API call.
* @param {string} canonicalID - The canonical ID of the user making the request.
* @param {string} mainApiCall - The main API call (first item of the requestType).
*
* @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
*/
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
// Same logic applies on the Versioned APIs, so let's simplify it.
let requestTypeParsed = requestType.endsWith('Version') ?
requestType.slice(0, 'Version'.length * -1) : requestType;
requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
'objectPut' : requestTypeParsed;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
if (bucket.getOwner() === canonicalID) {
return true;
}
if (parsedMainApiCall === 'objectGet') {
if (requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (parsedMainApiCall === 'objectPut') {
if (arrayOfAllowed.includes(requestTypeParsed)) {
return true;
}
function checkBucketAcls(bucket, requestType, canonicalID) {
if (constants.bucketOwnerActions.includes(requestType)) {
// only bucket owner can modify or retrieve this property of a bucket
return false;
}
const bucketAcl = bucket.getAcl();
if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') {
if (requestType === 'bucketGet' || requestType === 'bucketHead') {
if (bucketAcl.Canned === 'public-read'
|| bucketAcl.Canned === 'public-read-write'
|| (bucketAcl.Canned === 'authenticated-read'
&& canonicalID !== publicId)) {
&& canonicalID !== constants.publicId)) {
return true;
} else if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.READ.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.READ.indexOf(publicId) > -1
|| (bucketAcl.READ.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'bucketGetACL') {
if (requestType === 'bucketGetACL') {
if ((bucketAcl.Canned === 'log-delivery-write'
&& canonicalID === logId)
&& canonicalID === constants.logId)
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.READ_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.READ_ACP.indexOf(publicId) > -1
|| (bucketAcl.READ_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'bucketPutACL') {
if (requestType === 'bucketPutACL') {
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.WRITE_ACP.indexOf(publicId) > -1
|| (bucketAcl.WRITE_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') {
if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) {
return true;
}
if (requestType === 'objectDelete' || requestType === 'objectPut') {
if (bucketAcl.Canned === 'public-read-write'
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.WRITE.indexOf(publicId) > -1
|| (bucketAcl.WRITE.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
// Note that an account can have the ability to do objectPutACL,
@@ -140,43 +99,21 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket
// authorization check should just return true so we can move on to check
// rights at the object level.
return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
|| requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
}
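// Illustrative usage sketch (bucket object and canonical ID hypothetical):
// versioned actions are normalized before the ACL checks run, and the
// bucket owner is allowed immediately.
// const ownerId = 'abcd1234'; // hypothetical canonical ID
// checkBucketAcls(bucketMD, 'objectGetVersion', ownerId, 'objectGet');
// -> true when bucketMD.getOwner() === ownerId ('objectGetVersion' is
//    first parsed down to 'objectGet')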
function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
isUserUnauthenticated, mainApiCall) {
const bucketOwner = bucket.getOwner();
const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
'objectPut' : requestType;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
// acls don't distinguish between users and accounts, so both should be allowed
if (bucketOwnerActions.includes(requestTypeParsed)
&& (bucketOwner === canonicalID)) {
return true;
}
if (objectMD['owner-id'] === canonicalID) {
return true;
}
// Backward compatibility
if (parsedMainApiCall === 'objectGet') {
if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
&& requestTypeParsed === 'objectGetTagging') {
return true;
}
return (requestType === 'objectPutACL' || requestType === 'objectGetACL' ||
requestType === 'objectGet' || requestType === 'objectHead');
}
function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
if (!objectMD.acl) {
return false;
}
const bucketOwner = bucket.getOwner();
if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
if (requestType === 'objectGet' || requestType === 'objectHead') {
if (objectMD.acl.Canned === 'public-read'
|| objectMD.acl.Canned === 'public-read-write'
|| (objectMD.acl.Canned === 'authenticated-read'
&& canonicalID !== publicId)) {
&& canonicalID !== constants.publicId)) {
return true;
} else if (objectMD.acl.Canned === 'bucket-owner-read'
&& bucketOwner === canonicalID) {
@@ -186,100 +123,55 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.READ.indexOf(canonicalID) > -1) {
return true;
} else if (objectMD.acl.READ.indexOf(publicId) > -1
|| (objectMD.acl.READ.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
if (requestType === 'objectPut' || requestType === 'objectDelete') {
return true;
}
if (requestTypeParsed === 'objectPutACL') {
if (requestType === 'objectPutACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (objectMD.acl.WRITE_ACP.indexOf(publicId) > -1
|| (objectMD.acl.WRITE_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'objectGetACL') {
if (requestType === 'objectGetACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.READ_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (objectMD.acl.READ_ACP.indexOf(publicId) > -1
|| (objectMD.acl.READ_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
// allow public reads on buckets that are whitelisted for anonymous reads
// TODO: remove this after bucket policies are implemented
const bucketAcl = bucket.getAcl();
const allowPublicReads = publicReadBuckets.includes(bucket.getName())
&& bucketAcl.Canned === 'public-read'
&& (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
const allowPublicReads = publicReadBuckets.includes(bucket.getName()) &&
bucketAcl.Canned === 'public-read' &&
(requestType === 'objectGet' || requestType === 'objectHead');
if (allowPublicReads) {
return true;
}
return false;
}
function _checkBucketPolicyActions(requestType, actions, log) {
const mappedAction = actionMaps.actionMapBP[requestType];
// Deny any action that isn't in the list of controlled actions
if (!mappedAction) {
return false;
function _checkActions(requestType, actions, log) {
// if requestType isn't in the list of controlled actions
if (!Object.values(actionMap).includes(requestType)) {
return true;
}
const mappedAction = Object.keys(actionMap)
[Object.values(actionMap).indexOf(requestType)];
return evaluators.isActionApplicable(mappedAction, actions, log);
}
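// Illustrative sketch of the reverse lookup above:
// Object.keys(actionMap)[Object.values(actionMap).indexOf('objectGet')]
// -> 's3:GetObject', which evaluators.isActionApplicable() then matches
// against the policy statement's Action list.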
function _checkBucketPolicyResources(request, resource, log) {
if (!request || (Array.isArray(resource) && resource.length === 0)) {
return true;
}
// build request context from the request!
const requestContext = new RequestContext(request.headers, request.query,
request.bucketName, request.objectKey, null,
request.connection.encrypted, request.resourceType, 's3');
return evaluators.isResourceApplicable(requestContext, resource, log);
}
function _checkBucketPolicyConditions(request, conditions, log) {
const ip = request ? requestUtils.getClientIp(request, config) : undefined;
if (!conditions) {
return true;
}
// build request context from the request!
const requestContext = new RequestContext(request.headers, request.query,
request.bucketName, request.objectKey, ip,
request.connection.encrypted, request.resourceType, 's3', null, null,
null, null, null, null, null, null, null, null, null,
request.objectLockRetentionDays);
return evaluators.meetConditions(requestContext, conditions, log);
}
function _getAccountId(arn) {
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...'
return arn.substr(13, 12);
@@ -324,26 +216,19 @@ function _checkPrincipals(canonicalID, arn, principal) {
return false;
}
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
function checkBucketPolicy(policy, requestType, canonicalID, arn, log) {
let permission = 'defaultDeny';
// if requester is user within bucket owner account, actions should be
// allowed unless explicitly denied (assumes allowed by IAM policy)
if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
permission = 'allow';
}
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
while (copiedStatement.length > 0) {
const s = copiedStatement[0];
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
const actionMatch = _checkActions(requestType, s.Action, log);
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
if (principalMatch && actionMatch && s.Effect === 'Deny') {
// explicit deny trumps any allows, so return immediately
return 'explicitDeny';
}
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
if (principalMatch && actionMatch && s.Effect === 'Allow') {
permission = 'allow';
}
copiedStatement = copiedStatement.splice(1);
@@ -351,141 +236,60 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
return permission;
}
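// Illustrative sketch (hypothetical policy; resource and condition
// matching apply in the variant of checkBucketPolicy that receives the
// request): an explicit Deny always wins over an Allow.
// const policy = { Statement: [
//     { Effect: 'Allow', Principal: '*', Action: 's3:GetObject',
//       Resource: 'arn:aws:s3:::examplebucket/*' },
//     { Effect: 'Deny', Principal: '*', Action: 's3:GetObject',
//       Resource: 'arn:aws:s3:::examplebucket/secret/*' },
// ] };
// A GET on examplebucket/secret/doc matches both statements and yields
// 'explicitDeny'; a GET on examplebucket/public/doc yields 'allow'; a
// request matching no statement leaves the default 'defaultDeny'.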
function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log,
request, aclPermission, results, actionImplicitDenies) {
const bucketPolicy = bucket.getBucketPolicy();
let processedResult = results[requestType];
if (!bucketPolicy) {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
} else {
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
bucketOwner, log, request, actionImplicitDenies);
if (bucketPolicyPermission === 'explicitDeny') {
processedResult = false;
} else if (bucketPolicyPermission === 'allow') {
processedResult = true;
} else {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
}
}
return processedResult;
}
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const mainApiCall = requestTypes[0];
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
function isBucketAuthorized(bucket, requestType, canonicalID, arn, log) {
// Check to see if user is authorized to perform a
// particular action on bucket based on ACLs.
// TODO: Add IAM checks
let requesterIsNotUser = true;
let arn = null;
if (authInfo) {
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn();
if (bucket.getOwner() === canonicalID || isServiceAccount(canonicalID)) {
return true;
}
// if the bucket owner is an account, users should not have default access
if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
const aclPermission = checkBucketAcls(bucket, requestType, canonicalID);
const bucketPolicy = bucket.getBucketPolicy();
if (!bucketPolicy) {
if (constants.bucketOwnerActions.includes(requestType)) {
return false;
}
const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
// In case of error, bucket access is checked with bucketGet.
// For website requests, bucket policy only uses objectGet and ignores bucketGet:
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
// bucketGet is used to check the acl, but is switched to objectGet for the bucket policy check
if (isWebsite && _requestType === 'bucketGet') {
// eslint-disable-next-line no-param-reassign
_requestType = 'objectGet';
actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
return aclPermission;
}
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
request, aclPermission, results, actionImplicitDenies);
});
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
canonicalID, arn, log);
if (bucketPolicyPermission === 'explicitDeny') {
return false;
}
return (aclPermission || (bucketPolicyPermission === 'allow'));
}
function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {},
log, request) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
let arn = null;
if (authInfo) {
arn = authInfo.getArn();
}
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
request, true, results, actionImplicitDenies);
});
}
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
const mainApiCall = requestTypes[0];
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
const parsedMethodName = _requestType.endsWith('Version')
? _requestType.slice(0, -7) : _requestType;
function isObjAuthorized(bucket, objectMD, requestType, canonicalID, arn, log) {
const bucketOwner = bucket.getOwner();
if (!objectMD) {
// check bucket has read access
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
let permission = 'bucketGet';
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
permission = 'objectPut';
return false;
}
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
actionImplicitDenies, isWebsite);
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
&& results[_requestType] === false) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
if (objectMD['owner-id'] === canonicalID) {
return true;
}
return results[_requestType];
}
let requesterIsNotUser = true;
let arn = null;
let isUserUnauthenticated = false;
if (authInfo) {
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn();
isUserUnauthenticated = arn === undefined;
}
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
if (isServiceAccount(canonicalID)) {
return true;
}
// account is authorized if:
// - requestType is included in bucketOwnerActions, and
// - the account is the bucket owner, and
// - the requester is the account itself, not one of its users
if (bucketOwnerActions.includes(parsedMethodName)
&& (bucketOwner === canonicalID)
&& requesterIsNotUser) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
if (constants.bucketOwnerActions.includes(requestType)
&& bucketOwner === canonicalID) {
return true;
}
const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName,
canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall);
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner,
log, request, aclPermission, results, actionImplicitDenies);
});
const aclPermission = checkObjectAcls(bucket, objectMD, requestType,
canonicalID);
const bucketPolicy = bucket.getBucketPolicy();
if (!bucketPolicy) {
return aclPermission;
}
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
canonicalID, arn, log);
if (bucketPolicyPermission === 'explicitDeny') {
return false;
}
return (aclPermission || (bucketPolicyPermission === 'allow'));
}
function _checkResource(resource, bucketArn) {
@@ -514,128 +318,12 @@ function validatePolicyResource(bucketName, policy) {
});
}
function checkIp(value) {
const errString = 'Invalid IP address in Conditions';
const values = Array.isArray(value) ? value : [value];
for (let i = 0; i < values.length; i++) {
// these preliminary checks are validating the provided
// ip address against ipaddr.js, the library we use when
// evaluating IP condition keys. It ensures compatibility,
// but additional checks are required to enforce the right
// notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
// we would accept different ip formats, which is not
// standard in an AWS use case.
try {
try {
parseCIDR(values[i]);
} catch (err) {
isValid(values[i]);
}
} catch (err) {
return errString;
}
// Apply the existing IP validation logic to each element
const validateIpRegex = ip => {
if (constants.ipv4Regex.test(ip)) {
return ip.split('.').every(part => parseInt(part, 10) <= 255);
}
if (constants.ipv6Regex.test(ip)) {
return ip.split(':').every(part => part.length <= 4);
}
return false;
};
if (validateIpRegex(values[i]) !== true) {
return errString;
}
}
// If the function hasn't returned by now, all elements are valid
return null;
}
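// Illustrative results (assuming constants.ipv4Regex accepts an optional
// /prefix suffix, which the CIDR form requires):
// checkIp('192.168.0.0/16')  -> null (valid IPv4 CIDR)
// checkIp('10.0.0.1')        -> null (parseCIDR throws, but isValid() accepts it)
// checkIp('999.168.0.0/16')  -> 'Invalid IP address in Conditions' (octet > 255)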
// This function checks, for all bucket policy conditions, that the values provided
// are valid for the condition type. If not, it returns a relevant MalformedPolicy error string
function validatePolicyConditions(policy) {
const validConditions = [
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
{ conditionKey: 's3:object-lock-remaining-retention-days' },
];
// keys where value type does not seem to be checked by AWS:
// - s3:object-lock-remaining-retention-days
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
return null;
}
// there can be multiple statements in the policy, each with a Condition enclosure
for (let i = 0; i < policy.Statement.length; i++) {
const s = policy.Statement[i];
if (s.Condition) {
const conditionOperators = Object.keys(s.Condition);
// there can be multiple condition operators in the Condition enclosure
// eslint-disable-next-line no-restricted-syntax
for (const conditionOperator of conditionOperators) {
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
const conditionValue = s.Condition[conditionOperator][conditionKey];
const validCondition = validConditions.find(validCondition =>
validCondition.conditionKey === conditionKey
);
// AWS does not return an error if the condition key starts with 'aws:',
// so we reproduce this behaviour
if (!validCondition && !conditionKey.startsWith('aws:')) {
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
}
if (validCondition && validCondition.conditionValueTypeChecker) {
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
if (conditionValueTypeError) {
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
}
}
}
}
}
return null;
}
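// Illustrative statement that passes this validation (bucket name and IP
// range hypothetical):
// {
//     Effect: 'Allow', Principal: '*', Action: 's3:GetObject',
//     Resource: 'arn:aws:s3:::examplebucket/*',
//     Condition: { IpAddress: { 'aws:SourceIp': '192.168.100.0/24' } },
// }
// An unrecognized key such as 's3:prefix' (no 'aws:' prefix, not in
// validConditions) would instead yield a MalformedPolicy error.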
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts'
&& resourceType === assumedRoleArnResourceType
&& sessionName === backbeatLifecycleSessionName);
}
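// Illustrative results, using the arn format documented above:
// isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle') -> true
// isLifecycleSession('arn:aws:iam::257038443293:user/some-user') -> false (service is not 'sts')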
module.exports = {
isBucketAuthorized,
isObjAuthorized,
getServiceAccountProperties,
isServiceAccount,
isRequesterASessionUser,
isRequesterNonAccountUser,
checkBucketAcls,
checkObjectAcls,
validatePolicyResource,
validatePolicyConditions,
isLifecycleSession,
evaluateBucketPolicyWithIAM,
};

View File

@@ -1,20 +1,12 @@
const { policies } = require('arsenal');
const { config } = require('../../../Config');
const { RequestContext, requestUtils } = policies;
const RequestContext = policies.RequestContext;
let apiMethodAfterVersionCheck;
const apiMethodWithVersion = {
objectGetACL: true,
objectPutACL: true,
objectGet: true,
objectDelete: true,
objectPutTagging: true,
objectGetTagging: true,
objectDeleteTagging: true,
objectGetLegalHold: true,
objectPutLegalHold: true,
objectPutRetention: true,
};
const apiMethodWithVersion = { objectGetACL: true, objectPutACL: true,
objectGet: true, objectDelete: true, objectPutTagging: true,
objectGetTagging: true, objectDeleteTagging: true };
const requestUtils = require('../../../utilities/requestUtils');
function isHeaderAcl(headers) {
return headers['x-amz-grant-read'] || headers['x-amz-grant-read-acp'] ||
@@ -43,7 +35,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
// null as the requestContext to Vault so it will only do an authentication
// check.
const ip = requestUtils.getClientIp(request, config);
const ip = requestUtils.getClientIp(request);
function generateRequestContext(apiMethod) {
return new RequestContext(request.headers,
@@ -52,7 +44,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3');
}
if (apiMethod === 'bucketPut') {
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
return null;
}
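// Illustrative sketch: for these API methods prepareRequestContexts()
// returns null, so the caller passes null requestContexts to Vault and
// only an authentication check is performed (no per-resource
// authorization), as the comment above this function explains.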
@@ -65,17 +57,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = [];
if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service,
// for example the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';
@@ -107,63 +89,12 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const objectGetTaggingAction = (request.query &&
request.query.versionId) ? 'objectGetTaggingVersion' :
'objectGetTagging';
if (request.headers['x-amz-version-id']) {
const objectGetVersionAction = 'objectGetVersion';
const getVersionResourceVersion =
generateRequestContext(objectGetVersionAction);
requestContexts.push(getVersionResourceVersion);
}
const getRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
const getTaggingRequestContext =
generateRequestContext(objectGetTaggingAction);
requestContexts.push(getRequestContext, getTaggingRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectGetTagging') {
const objectGetTaggingAction = 'objectGetTagging';
const getTaggingResourceVersion =
generateRequestContext(objectGetTaggingAction);
requestContexts.push(getTaggingResourceVersion);
if (request.headers['x-amz-version-id']) {
const objectGetTaggingVersionAction = 'objectGetTaggingVersion';
const getTaggingVersionResourceVersion =
generateRequestContext(objectGetTaggingVersionAction);
requestContexts.push(getTaggingVersionResourceVersion);
}
} else if (apiMethodAfterVersionCheck === 'objectHead') {
const objectHeadAction = 'objectHead';
const headObjectAction =
generateRequestContext(objectHeadAction);
requestContexts.push(headObjectAction);
if (request.headers['x-amz-version-id']) {
const objectHeadVersionAction = 'objectGetVersion';
const headObjectVersion =
generateRequestContext(objectHeadVersionAction);
requestContexts.push(headObjectVersion);
}
} else if (apiMethodAfterVersionCheck === 'objectPutTagging') {
const putObjectTaggingRequestContext =
generateRequestContext('objectPutTagging');
requestContexts.push(putObjectTaggingRequestContext);
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
} else if (apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const putObjectRequestContext =
generateRequestContext('objectPut');
requestContexts.push(putObjectRequestContext);
const getObjectRequestContext =
generateRequestContext('objectGet');
requestContexts.push(getObjectRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectPut') {
// if put object with version
if (request.headers['x-scal-s3-version-id'] ||
request.headers['x-scal-s3-version-id'] === '') {
const putVersionRequestContext =
generateRequestContext('objectPutVersion');
requestContexts.push(putVersionRequestContext);
} else {
const putRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(putRequestContext);
@@ -173,60 +104,12 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
generateRequestContext('objectPutTagging');
requestContexts.push(putTaggingRequestContext);
}
if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) {
const putLegalHoldStatusAction =
generateRequestContext('objectPutLegalHold');
requestContexts.push(putLegalHoldStatusAction);
}
// if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =
generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext);
}
if (request.headers['x-amz-object-lock-mode']) {
const putObjectLockRequestContext =
generateRequestContext('objectPutRetention');
requestContexts.push(putObjectLockRequestContext);
}
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
}
} else if (apiMethodAfterVersionCheck === 'initiateMultipartUpload' ||
apiMethodAfterVersionCheck === 'objectPutPart' ||
apiMethodAfterVersionCheck === 'completeMultipartUpload'
) {
if (request.headers['x-scal-s3-version-id'] ||
request.headers['x-scal-s3-version-id'] === '') {
const putVersionRequestContext =
generateRequestContext('objectPutVersion');
requestContexts.push(putVersionRequestContext);
} else {
const putRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(putRequestContext);
}
// if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =
generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext);
}
if (request.headers['x-amz-object-lock-mode']) {
const putObjectLockRequestContext =
generateRequestContext('objectPutRetention');
requestContexts.push(putObjectLockRequestContext);
}
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
} else {
const requestContext =
generateRequestContext(apiMethodAfterVersionCheck);

View File

@@ -1,99 +0,0 @@
const async = require('async');
const { auth, s3middleware } = require('arsenal');
const metadata = require('../../../metadata/wrapper');
const { decodeVersionId } = require('../object/versioning');
const { parseTagXml } = s3middleware.tagging;
function makeTagQuery(tags) {
return Object.entries(tags)
.map(i => i.join('='))
.join('&');
}
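// Illustrative sketch (hypothetical tags): the tag set is serialized into
// query-string form for condition key evaluation, e.g.
// makeTagQuery({ project: 'demo', team: 'storage' }) -> 'project=demo&team=storage'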
function updateRequestContextsWithTags(request, requestContexts, apiMethod, log, cb) {
async.waterfall([
next => {
if (request.headers['x-amz-tagging']) {
return next(null, request.headers['x-amz-tagging']);
}
if (request.post && apiMethod === 'objectPutTagging') {
return parseTagXml(request.post, log, (err, tags) => {
if (err) {
log.trace('error parsing request tags');
return next(err);
}
return next(null, makeTagQuery(tags));
});
}
return next(null, null);
},
(requestTagsQuery, next) => {
const objectKey = request.objectKey;
const bucketName = request.bucketName;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return next(decodedVidResult);
}
const reqVersionId = decodedVidResult;
return metadata.getObjectMD(
bucketName, objectKey, { versionId: reqVersionId }, log, (err, objMD) => {
if (err) {
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (err.NoSuchKey) {
return next(null, requestTagsQuery, null);
}
log.trace('error getting request object tags');
return next(err);
}
const existingTagsQuery = objMD.tags && makeTagQuery(objMD.tags);
return next(null, requestTagsQuery, existingTagsQuery);
});
},
], (err, requestTagsQuery, existingTagsQuery) => {
if (err) {
log.trace('error processing tag condition key evaluation');
return cb(err);
}
// FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter
// eslint-disable-next-line no-restricted-syntax
for (const rc of requestContexts) {
rc.setNeedTagEval(true);
if (requestTagsQuery) {
rc.setRequestObjTags(requestTagsQuery);
}
if (existingTagsQuery) {
rc.setExistingObjTag(existingTagsQuery);
}
}
return cb();
});
}
function tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log, cb) {
if (!authorizationResults) {
return cb();
}
if (!authorizationResults.some(authRes => authRes.checkTagConditions)) {
return cb(null, authorizationResults);
}
return updateRequestContextsWithTags(request, requestContexts, apiMethod, log, err => {
if (err) {
return cb(err);
}
return auth.server.doAuth(request, log,
(err, userInfo, authResults) => cb(err, authResults), 's3', requestContexts);
});
}
module.exports = {
tagConditionKeyAuth,
updateRequestContextsWithTags,
makeTagQuery,
};

View File

@@ -6,7 +6,6 @@ const acl = require('../../../metadata/acl');
const BucketInfo = require('arsenal').models.BucketInfo;
const constants = require('../../../../constants');
const createKeyForUserBucket = require('./createKeyForUserBucket');
const { parseBucketEncryptionHeaders } = require('./bucketEncryption');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
@@ -24,7 +23,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
// Get new format usersBucket to see if it exists
return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) {
if (err && !err.NoSuchBucket && !err.BucketAlreadyExists) {
return cb(err);
}
const splitter = usersBucketAttrs ?
@@ -41,7 +40,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
usersBucket : oldUsersBucket;
return metadata.putObjectMD(usersBucketBeingCalled, key,
omVal, {}, log, err => {
if (err?.is?.NoSuchBucket) {
if (err && err.NoSuchBucket) {
// There must be no usersBucket so createBucket
// one using the new format
log.trace('users bucket does not exist, ' +
@@ -61,8 +60,9 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
// from getting a BucketAlreadyExists
// error with respect
// to the usersBucket.
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (err && !err.BucketAlreadyExists) {
if (err &&
err !==
errors.BucketAlreadyExists) {
log.error('error from metadata', {
error: err,
});
@@ -171,13 +171,11 @@ function createBucket(authInfo, bucketName, headers,
authInfo.getAccountDisplayName();
const creationDate = new Date().toJSON();
const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true';
const headerObjectLock = headers['x-amz-bucket-object-lock-enabled'];
const objectLockEnabled
= headerObjectLock && headerObjectLock.toLowerCase() === 'true';
const bucket = new BucketInfo(bucketName, canonicalID, ownerDisplayName,
creationDate, BucketInfo.currentModelVersion(), null, null, null, null,
null, null, null, null, null, null, null, null, null, isNFSEnabled,
null, null, objectLockEnabled);
const bucket = new BucketInfo(bucketName,
canonicalID, ownerDisplayName, creationDate,
BucketInfo.currentModelVersion(), null, null, null,
null, null, null, null, null, null, null, null,
null, isNFSEnabled);
let locationConstraintVal = null;
if (locationConstraint) {
@@ -193,15 +191,6 @@ function createBucket(authInfo, bucketName, headers,
bucket.setVersioningConfiguration({ Status: 'Enabled' });
}
}
if (objectLockEnabled) {
// default versioning configuration AWS sets
// when a bucket is created with object lock
const versioningConfiguration = {
Status: 'Enabled',
MfaDelete: 'Disabled',
};
bucket.setVersioningConfiguration(versioningConfiguration);
}
const parseAclParams = {
headers,
resourceType: 'bucket',
@@ -223,7 +212,6 @@ function createBucket(authInfo, bucketName, headers,
},
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
metadata.getBucket(bucketName, log, (err, data) => {
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (err && err.NoSuchBucket) {
return callback(null, 'NoBucketYet');
}
@@ -248,9 +236,8 @@ function createBucket(authInfo, bucketName, headers,
}
const newBucketMD = results.prepareNewBucketMD;
if (existingBucketMD === 'NoBucketYet') {
const sseConfig = parseBucketEncryptionHeaders(headers);
return bucketLevelServerSideEncryption(
bucketName, sseConfig, log,
bucketName, headers, log,
(err, sseInfo) => {
if (err) {
return cb(err);

View File

@@ -3,7 +3,6 @@ const async = require('async');
const { errors } = require('arsenal');
const abortMultipartUpload = require('../object/abortMultipartUpload');
const { pushMetric } = require('../../../utapi/utilities');
const { splitter, oldSplitter, mpuBucketPrefix } =
require('../../../../constants');
@@ -16,7 +15,6 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
`${mpuBucketPrefix}${destinationBucketName}`;
return metadata.deleteBucket(mpuBucketName, log, err => {
// If the mpu bucket does not exist, just move on
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (err && err.NoSuchBucket) {
return cb();
}
@@ -24,23 +22,14 @@
});
}
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) {
function _deleteOngoingMPUs(authInfo, bucketName, mpus, log, cb) {
async.mapLimit(mpus, 1, (mpu, next) => {
const splitterChar = mpu.key.includes(oldSplitter) ?
oldSplitter : splitter;
// `overview${splitter}${objectKey}${splitter}${uploadId}
const [, objectKey, uploadId] = mpu.key.split(splitterChar);
abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
(err, destBucket, partSizeSum) => {
pushMetric('abortMultipartUpload', log, {
authInfo,
canonicalID: bucketMD.getOwner(),
bucket: bucketName,
keys: [objectKey],
byteLength: partSizeSum,
});
next(err);
}, request);
next);
}, cb);
}
/**
@@ -49,13 +38,11 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log,
* @param {object} bucketMD - bucket attributes/metadata
* @param {string} bucketName - bucket in which objectMetadata is stored
* @param {string} canonicalID - account canonicalID of requester
* @param {object} request - request object given by router
* including normalized headers
* @param {object} log - Werelogs logger
* @param {function} cb - callback from async.waterfall in bucketDelete
* @return {undefined}
*/
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) {
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
log.trace('deleting bucket from metadata');
assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof canonicalID, 'string');
@@ -93,7 +80,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
log, (err, objectsListRes) => {
// If no shadow bucket ever created, no ongoing MPU's, so
// continue with deletion
if (err?.is.NoSuchBucket) {
if (err && err.NoSuchBucket) {
return next();
}
if (err) {
@@ -102,7 +89,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
}
if (objectsListRes.Contents.length) {
return _deleteOngoingMPUs(authInfo, bucketName,
bucketMD, objectsListRes.Contents, request, log, err => {
objectsListRes.Contents, log, err => {
if (err) {
return next(err);
}

View File

@@ -1,255 +0,0 @@
const { errors } = require('arsenal');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const { parseString } = require('xml2js');
/**
* ServerSideEncryptionInfo - user configuration for server side encryption
* @typedef {Object} ServerSideEncryptionInfo
* @property {string} algorithm - Algorithm to use for encryption. Either AES256 or aws:kms.
* @property {string} masterKeyId - Key id for the kms key used to encrypt data keys.
* @property {string} configuredMasterKeyId - User configured master key id.
* @property {boolean} mandatory - Whether a default encryption policy has been enabled.
*/
/**
* @callback ServerSideEncryptionInfo~callback
* @param {Object} error - Instance of Arsenal error
* @param {ServerSideEncryptionInfo} - SSE configuration
*/
/**
* parseEncryptionXml - Parses and validates a ServerSideEncryptionConfiguration xml document
* @param {object} xml - ServerSideEncryptionConfiguration doc
* @param {object} log - logger
* @param {ServerSideEncryptionInfo~callback} cb - callback
* @returns {undefined}
*/
function parseEncryptionXml(xml, log, cb) {
return parseString(xml, (err, parsed) => {
if (err) {
log.trace('xml parsing failed', {
error: err,
method: 'parseEncryptionXml',
});
log.debug('invalid xml', { xml });
return cb(errors.MalformedXML);
}
if (!parsed
|| !parsed.ServerSideEncryptionConfiguration
|| !parsed.ServerSideEncryptionConfiguration.Rule) {
log.trace('error in sse config, invalid ServerSideEncryptionConfiguration section', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const { Rule } = parsed.ServerSideEncryptionConfiguration;
if (!Array.isArray(Rule)
|| Rule.length > 1
|| !Rule[0]
|| !Rule[0].ApplyServerSideEncryptionByDefault
|| !Rule[0].ApplyServerSideEncryptionByDefault[0]) {
log.trace('error in sse config, invalid ApplyServerSideEncryptionByDefault section', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const [encConfig] = Rule[0].ApplyServerSideEncryptionByDefault;
if (!encConfig.SSEAlgorithm || !encConfig.SSEAlgorithm[0]) {
log.trace('error in sse config, no SSEAlgorithm provided', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const [algorithm] = encConfig.SSEAlgorithm;
if (algorithm !== 'AES256' && algorithm !== 'aws:kms') {
log.trace('error in sse config, unknown SSEAlgorithm', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const result = { algorithm, mandatory: true };
if (encConfig.KMSMasterKeyID) {
if (algorithm === 'AES256') {
log.trace('error in sse config, can not specify KMSMasterKeyID when using AES256', {
method: 'parseEncryptionXml',
});
return cb(errors.InvalidArgument.customizeDescription(
'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms'));
}
if (!encConfig.KMSMasterKeyID[0] || typeof encConfig.KMSMasterKeyID[0] !== 'string') {
log.trace('error in sse config, invalid KMSMasterKeyID', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
result.configuredMasterKeyId = encConfig.KMSMasterKeyID[0];
}
return cb(null, result);
});
}
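// Illustrative document accepted by this parser (key id hypothetical):
// const xml = '<ServerSideEncryptionConfiguration><Rule>' +
//     '<ApplyServerSideEncryptionByDefault>' +
//     '<SSEAlgorithm>aws:kms</SSEAlgorithm>' +
//     '<KMSMasterKeyID>arn:aws:kms:us-east-1:123456789012:key/example</KMSMasterKeyID>' +
//     '</ApplyServerSideEncryptionByDefault></Rule>' +
//     '</ServerSideEncryptionConfiguration>';
// parseEncryptionXml(xml, log, cb) calls back with
// { algorithm: 'aws:kms', mandatory: true,
//   configuredMasterKeyId: 'arn:aws:kms:us-east-1:123456789012:key/example' }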
/**
* hydrateEncryptionConfig - Constructs a ServerSideEncryptionInfo object from arguments
* ensuring no invalid or undefined keys are added
*
* @param {string} algorithm - Algorithm to use for encryption. Either AES256 or aws:kms.
* @param {string} configuredMasterKeyId - User configured master key id.
* @param {boolean} [mandatory] - Whether a default encryption policy has been enabled.
* @returns {ServerSideEncryptionInfo} - SSE configuration
*/
function hydrateEncryptionConfig(algorithm, configuredMasterKeyId, mandatory = null) {
if (algorithm !== 'AES256' && algorithm !== 'aws:kms') {
return {
algorithm: null,
};
}
const sseConfig = { algorithm, mandatory };
if (algorithm === 'aws:kms' && configuredMasterKeyId) {
sseConfig.configuredMasterKeyId = configuredMasterKeyId;
}
if (mandatory !== null) {
sseConfig.mandatory = mandatory;
}
return sseConfig;
}
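// Illustrative results (key ids hypothetical):
// hydrateEncryptionConfig('RC4', 'example-key-id')
//     -> { algorithm: null }  (unknown algorithms are rejected)
// hydrateEncryptionConfig('aws:kms', 'example-key-id', true)
//     -> { algorithm: 'aws:kms', mandatory: true,
//          configuredMasterKeyId: 'example-key-id' }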
/**
* parseBucketEncryptionHeaders - retrieves bucket level sse configuration from request headers
* @param {object} headers - Request headers
* @returns {ServerSideEncryptionInfo} - SSE configuration
*/
function parseBucketEncryptionHeaders(headers) {
const sseAlgorithm = headers['x-amz-scal-server-side-encryption'];
const configuredMasterKeyId = headers['x-amz-scal-server-side-encryption-aws-kms-key-id'] || null;
return hydrateEncryptionConfig(sseAlgorithm, configuredMasterKeyId, true);
}
/**
* parseObjectEncryptionHeaders - retrieves object level sse configuration from request headers
* @param {object} headers - Request headers
* @returns {ServerSideEncryptionInfo} - SSE configuration
*/
function parseObjectEncryptionHeaders(headers) {
const sseAlgorithm = headers['x-amz-server-side-encryption'];
const configuredMasterKeyId = headers['x-amz-server-side-encryption-aws-kms-key-id'] || null;
if (sseAlgorithm && sseAlgorithm !== 'AES256' && sseAlgorithm !== 'aws:kms') {
return {
error: errors.InvalidArgument.customizeDescription('The encryption method specified is not supported'),
};
}
if (sseAlgorithm !== 'aws:kms' && configuredMasterKeyId) {
return {
error: errors.InvalidArgument.customizeDescription(
'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms'),
};
}
return { objectSSE: hydrateEncryptionConfig(sseAlgorithm, configuredMasterKeyId) };
}
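// Illustrative results (hypothetical header values):
// parseObjectEncryptionHeaders({
//     'x-amz-server-side-encryption': 'aws:kms',
//     'x-amz-server-side-encryption-aws-kms-key-id': 'example-key-id',
// }) -> { objectSSE: { algorithm: 'aws:kms', mandatory: null,
//                      configuredMasterKeyId: 'example-key-id' } }
// Passing the key id with AES256 instead returns an InvalidArgument error.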
/**
* createDefaultBucketEncryptionMetadata - Creates master key and sets up default server side encryption configuration
* @param {BucketInfo} bucket - bucket metadata
* @param {object} log - werelogs logger
* @param {ServerSideEncryptionInfo~callback} cb - callback
* @returns {undefined}
*/
function createDefaultBucketEncryptionMetadata(bucket, log, cb) {
return kms.bucketLevelEncryption(
bucket.getName(),
{ algorithm: 'AES256', mandatory: false },
log,
(error, sseConfig) => {
if (error) {
return cb(error);
}
bucket.setServerSideEncryption(sseConfig);
return metadata.updateBucket(bucket.getName(), bucket, log, err => cb(err, sseConfig));
});
}
/**
* getObjectSSEConfiguration - resolves the server side encryption configuration to apply to an object
* @param {object} headers - request headers
* @param {BucketInfo} bucket - BucketInfo model
* @param {object} log - werelogs logger
* @param {ServerSideEncryptionInfo~callback} cb - callback
* @returns {undefined}
*/
function getObjectSSEConfiguration(headers, bucket, log, cb) {
const bucketSSE = bucket.getServerSideEncryption();
const { error, objectSSE } = parseObjectEncryptionHeaders(headers);
if (error) {
return cb(error);
}
// If a per object sse algo has been passed through
// x-amz-server-side-encryption
if (objectSSE.algorithm) {
// If aws:kms and a custom key id
// pass it through without updating the bucket md
if (objectSSE.algorithm === 'aws:kms' && objectSSE.configuredMasterKeyId) {
return cb(null, objectSSE);
}
// If the client has not specified a key id,
// and we have a default config, then we reuse
// it and pass it through
if (!objectSSE.configuredMasterKeyId && bucketSSE) {
// The default config's algorithm is overridden with the one passed in the
// request headers. Our implementations of AES256 and aws:kms are the
// same underneath, so this is only a cosmetic change.
const sseConfig = Object.assign({}, bucketSSE, { algorithm: objectSSE.algorithm });
return cb(null, sseConfig);
}
// If the client has not specified a key id, and we
// don't have a default config, generate it
if (!objectSSE.configuredMasterKeyId && !bucketSSE) {
return createDefaultBucketEncryptionMetadata(bucket, log, (error, sseConfig) => {
if (error) {
return cb(error);
}
// Override the algorithm, for the same reasons as above.
Object.assign(sseConfig, { algorithm: objectSSE.algorithm });
return cb(null, sseConfig);
});
}
}
// If the bucket has a default encryption config, and it is mandatory
// (created with putBucketEncryption or legacy headers)
// pass it through
if (bucketSSE && bucketSSE.mandatory) {
return cb(null, bucketSSE);
}
// No encryption config
return cb(null, null);
}
module.exports = {
createDefaultBucketEncryptionMetadata,
getObjectSSEConfiguration,
hydrateEncryptionConfig,
parseEncryptionXml,
parseBucketEncryptionHeaders,
parseObjectEncryptionHeaders,
};

View File

@@ -30,9 +30,6 @@ function bucketShield(bucket, requestType) {
// Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' &&
requestType !== 'initiateMultipartUpload' &&
requestType !== 'objectPutPart' &&
requestType !== 'completeMultipartUpload' &&
requestType !== 'bucketPutACL' &&
requestType !== 'bucketDelete')) {
return true;

View File

@@ -11,16 +11,15 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
// If the object representing the bucket is not in the
// users bucket just continue
if (error?.is.NoSuchKey) {
if (error && error.NoSuchKey) {
return cb(null);
// BACKWARDS COMPATIBILITY: Remove this once no longer
// have old user bucket format
} else if (error?.is.NoSuchBucket) {
} else if (error && error.NoSuchBucket) {
const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
oldSplitter, bucketName);
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
{}, log, error => {
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (error && !error.NoSuchKey) {
log.error('from metadata while deleting user bucket',
{ error });

View File

@@ -1,37 +0,0 @@
const { errors, models } = require('arsenal');
const { NotificationConfiguration } = models;
const { config } = require('../../../Config');
function getNotificationConfiguration(parsedXml) {
const notifConfig = new NotificationConfiguration(parsedXml).getValidatedNotificationConfiguration();
// if notifConfig is an empty object, effectively delete the notification configuration
if (notifConfig.error || Object.keys(notifConfig).length === 0) {
return notifConfig;
}
if (!config.bucketNotificationDestinations) {
return { error: errors.InvalidArgument.customizeDescription(
'Unable to validate the following destination configurations') };
}
const targets = new Set(config.bucketNotificationDestinations.map(t => t.resource));
const notifConfigTargets = notifConfig.queueConfig.map(t => t.queueArn.split(':')[5]);
// getting invalid targets
const invalidTargets = [];
notifConfigTargets.forEach((t, i) => {
if (!targets.has(t)) {
invalidTargets.push({
ArgumentName: notifConfig.queueConfig[i].queueArn,
ArgumentValue: 'The destination queue does not exist',
});
}
});
if (invalidTargets.length > 0) {
const errDesc = 'Unable to validate the following destination configurations';
let error = errors.InvalidArgument.customizeDescription(errDesc);
error = error.addMetadataEntry('invalidArguments', invalidTargets);
return { error };
}
return notifConfig;
}
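// Illustrative sketch (destination name hypothetical): the queue resource
// is the 6th ':'-separated field of the ARN, e.g.
// 'arn:scality:bucketnotif:::target1'.split(':')[5] -> 'target1'
// If config.bucketNotificationDestinations lists a resource 'target1',
// that queueConfig validates; any other name is reported back as an
// invalid target on the InvalidArgument error metadata.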
module.exports = getNotificationConfiguration;

View File

@@ -3,7 +3,7 @@ const async = require('async');
const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const locationConstraintCheck = require('../object/locationConstraintCheck');
const { standardMetadataValidateBucketAndObj } =
const { metadataValidateBucketAndObj } =
require('../../../metadata/metadataUtils');
const services = require('../../../services');
@@ -14,19 +14,17 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
bucketName,
objectKey,
uploadId,
preciseRequestType: request.apiMethods || 'multipartDelete',
request,
preciseRequestType: 'multipartDelete',
};
// For validating the request at the destinationBucket level
// params are the same as validating at the MPU level
// but the requestType is the more general 'objectDelete'
const metadataValParams = Object.assign({}, metadataValMPUparams);
metadataValParams.requestType = 'objectPut';
const authzIdentityResult = request ? request.actionImplicitDenies : false;
async.waterfall([
function checkDestBucketVal(next) {
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
metadataValidateBucketAndObj(metadataValParams, log,
(err, destinationBucket) => {
if (err) {
return next(err, destinationBucket);
@@ -57,14 +55,9 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
next) {
const location = mpuOverviewObj.controllingLocationConstraint;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.abortMPU(objectKey, uploadId, location, bucketName,
request, destBucket, locationConstraintCheck, log,
(err, skipDataDelete) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityAuthzResults;
if (err) {
return next(err, destBucket);
}

View File

@@ -1,25 +0,0 @@
const { errors } = require('arsenal');
const { maxHttpHeadersSize } = require('../../../../constants');
/**
* Checks the size of the HTTP headers
* @param {object} requestHeaders - HTTP request headers
* @return {object} object with error or null
*/
function checkHttpHeadersSize(requestHeaders) {
let httpHeadersSize = 0;
Object.keys(requestHeaders).forEach(header => {
httpHeadersSize += Buffer.byteLength(header, 'utf8') +
Buffer.byteLength(requestHeaders[header], 'utf8');
});
if (httpHeadersSize > maxHttpHeadersSize) {
return {
httpHeadersSizeError: errors.HttpHeadersTooLarge,
};
}
return {};
}
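// Illustrative sketch: both header names and values count toward the total.
// checkHttpHeadersSize({
//     host: 'example.com',                 // 4 + 11 bytes
//     'x-amz-meta-note': 'a'.repeat(9000), // 15 + 9000 bytes
// })
// -> {} while the sum stays at or below maxHttpHeadersSize, and
//    { httpHeadersSizeError: errors.HttpHeadersTooLarge } once it exceeds it.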
module.exports = checkHttpHeadersSize;

View File

@@ -1,247 +0,0 @@
/*
* Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020
*/
const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Get response header "x-amz-restore"
* Called by objectHead.js
* @param {object} objMD - object's metadata
* @returns {string|undefined} x-amz-restore
*/
function getAmzRestoreResHeader(objMD) {
if (objMD.archive &&
objMD.archive.restoreRequestedAt &&
!objMD.archive.restoreCompletedAt) {
// Avoid race condition by relying on the `archive` MD of the object
// and return the right header after a RESTORE request.
// eslint-disable-next-line
return `ongoing-request="true"`;
}
if (objMD['x-amz-restore']) {
if (objMD['x-amz-restore']['expiry-date']) {
const utcDateTime = new Date(objMD['x-amz-restore']['expiry-date']).toUTCString();
// eslint-disable-next-line
return `ongoing-request="${objMD['x-amz-restore']['ongoing-request']}", expiry-date="${utcDateTime}"`;
}
}
return undefined;
}
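// Illustrative results (dates hypothetical):
// getAmzRestoreResHeader({ archive:
//     { restoreRequestedAt: '2020-09-11T00:00:00Z' } })
// -> 'ongoing-request="true"'
// getAmzRestoreResHeader({ 'x-amz-restore':
//     { 'ongoing-request': false, 'expiry-date': '2020-09-21T00:00:00Z' } })
// -> 'ongoing-request="false", expiry-date="Mon, 21 Sep 2020 00:00:00 GMT"'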
/**
* Check if restore can be done.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/
function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage,
// not in cold storage means the location's cold flag either does not exist or is explicitly false
log.debug('The bucket of the object is not in a cold storage location.',
{
isLocationCold,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored
// check that archive.restoreRequestedAt exists and archive.restoreCompletedAt does not yet exist
log.debug('The object is currently being restored.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.RestoreAlreadyInProgress;
}
return undefined;
}
/**
* Check if "put version id" is allowed
*
* @param {ObjectMD} objMD - object metadata
* @param {string} versionId - object's version id
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} - undefined if "put version id" is allowed
*/
function validatePutVersionId(objMD, versionId, log) {
if (!objMD) {
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
log.error('error no object metadata found', { method: 'validatePutVersionId', versionId });
return err;
}
if (objMD.isDeleteMarker) {
log.error('version is a delete marker', { method: 'validatePutVersionId', versionId });
return errors.MethodNotAllowed;
}
const isLocationCold = locationConstraints[objMD.dataStoreName]?.isCold;
if (!isLocationCold) {
log.error('The object data is not stored in a cold storage location.',
{
isLocationCold,
dataStoreName: objMD.dataStoreName,
method: 'validatePutVersionId',
});
return errors.InvalidObjectState;
}
// make sure object archive restoration is in progress
// NOTE: we do not use putObjectVersion to update the restoration period.
if (!objMD.archive || !objMD.archive.restoreRequestedAt || !objMD.archive.restoreRequestedDays
|| objMD.archive.restoreCompletedAt || objMD.archive.restoreWillExpireAt) {
log.error('object archive restoration is not in progress',
{ method: 'validatePutVersionId', versionId });
return errors.InvalidObjectState;
}
return undefined;
}
/**
* Check if the object is already restored, and update the expiration date accordingly:
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored
*/
function _updateObjectExpirationDate(objectMD, log) {
// Check if restoreCompletedAt field exists
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
// checked earlier in the process, so checking again here would create weird states
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
log.debug('The restore status of the object.', {
isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored'
});
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored;
}
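// Illustrative only, not part of the original file: the expiry computation
// above, assuming scaledMsPerDay is the unscaled 24 * 60 * 60 * 1000 ms.
const msPerDay = 24 * 60 * 60 * 1000;
const requestedAt = new Date('2023-01-01T00:00:00Z');
const requestedDays = 2;
const expiry = new Date(requestedAt.getTime() + requestedDays * msPerDay);
console.log(expiry.toUTCString()); // Tue, 03 Jan 2023 00:00:00 GMT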
/**
* Update the restore request information in the object metadata.
*
* @param {ObjectMD} objectMD - objectMD instance
* @param {object} restoreParam - restore request parameters (days)
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} internal error if object MD is not valid
*
*/
function _updateRestoreInfo(objectMD, restoreParam, log) {
if (!objectMD.archive) {
log.debug('objectMD.archive doesn\'t exist', {
objectMD,
method: '_updateRestoreInfo'
});
return errors.InternalError.customizeDescription('Archive metadata is missing.');
}
/* eslint-disable no-param-reassign */
objectMD.archive.restoreRequestedAt = new Date();
objectMD.archive.restoreRequestedDays = restoreParam.days;
objectMD.originOp = 's3:ObjectRestore:Post';
/* eslint-enable no-param-reassign */
if (!ObjectMDArchive.isValid(objectMD.archive)) {
log.debug('archive is not valid', {
archive: objectMD.archive,
method: '_updateRestoreInfo'
});
return errors.InternalError.customizeDescription('Invalid archive metadata.');
}
return undefined;
}
/**
* Start the restore process for an object.
* If x-amz-restore is not present, add it to objectMD (ongoing-request = false).
* Calculate the restore expiry-date and add it to objectMD.
* Called by objectRestore.js.
*
* @param {ObjectMD} objectMD - objectMD instance
* @param {object} restoreParam - restore request parameters (days, tier)
* @param {object} log - werelogs logger
* @param {function} cb - callback
* @return {undefined}
*
*/
function startRestore(objectMD, restoreParam, log, cb) {
log.info('Validating if restore can be done or not.');
const checkResultError = _validateStartRestore(objectMD, log);
if (checkResultError) {
return cb(checkResultError);
}
log.info('Updating restore information.');
const updateResultError = _updateRestoreInfo(objectMD, restoreParam, log);
if (updateResultError) {
return cb(updateResultError);
}
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
return cb(null, isObjectAlreadyRestored);
}
/**
* checks if object data is available or if it's in cold storage
* @param {ObjectMD} objMD Object metadata
* @returns {ArsenalError|null} error if object data is not available
*/
function verifyColdObjectAvailable(objMD) {
// return error when object is cold
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
const err = errors.InvalidObjectState
.customizeDescription('The operation is not valid for the object\'s storage class');
return err;
}
return null;
}
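// Illustrative only, not part of the original file: the four states the
// check above distinguishes.
console.log(verifyColdObjectAvailable({}));
// null: never archived, data available
console.log(verifyColdObjectAvailable({ archive: {} }));
// InvalidObjectState: data lives in the cold backend
console.log(verifyColdObjectAvailable({
    archive: { restoreRequestedAt: '2023-01-01T00:00:00Z' } }));
// InvalidObjectState: restore still in progress
console.log(verifyColdObjectAvailable({
    archive: {
        restoreRequestedAt: '2023-01-01T00:00:00Z',
        restoreCompletedAt: '2023-01-02T00:00:00Z',
    } }));
// null: restored, data readable again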
module.exports = {
startRestore,
getAmzRestoreResHeader,
validatePutVersionId,
verifyColdObjectAvailable,
};
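// Illustrative only, not part of the original file: how objectRestore.js
// (shown later in this diff) consumes startRestore; objectMD, restoreInfo,
// log and next come from the caller's scope.
const coldStorage = require('./coldStorage');
coldStorage.startRestore(objectMD, restoreInfo, log,
    (err, isObjectAlreadyRestored) => {
        // err: InvalidObjectState | RestoreAlreadyInProgress | InternalError
        // isObjectAlreadyRestored: true => expiry extended (HTTP 200),
        // false => new restore scheduled (HTTP 202)
        next(err, isObjectAlreadyRestored);
    });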

View File

@ -5,9 +5,10 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const services = require('../../../services');
const logger = require('../../../utilities/logger');
const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
const { versioningPreprocessing } = require('./versioning');
const removeAWSChunked = require('./removeAWSChunked');
const getReplicationInfo = require('./getReplicationInfo');
const { config } = require('../../../Config');
@ -20,7 +21,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure or GCP.';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, log, requestMethod, callback) {
metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, result) => {
if (err) {
@ -30,7 +31,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
const newDataStoreName = Array.isArray(dataGetInfo) ?
dataGetInfo[0].dataStoreName : null;
return data.batchDelete(dataToDelete, requestMethod,
newDataStoreName, log, err => callback(err, result));
newDataStoreName, deleteLog, err => callback(err, result));
}
return callback(null, result);
});
@ -50,9 +51,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version
@ -60,10 +59,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
overheadField, log, originOp, callback) {
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
log, callback) {
const size = isDeleteMarker ? 0 : request.parsedContentLength;
// although the request method may actually be 'DELETE' if creating a
// delete marker, for our purposes we consider this to be a 'PUT'
@ -116,7 +112,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
isDeleteMarker,
replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo),
overheadField,
log,
};
@ -134,6 +129,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
}
}
if (!isDeleteMarker) {
metadataStoreParams.contentType = request.headers['content-type'];
metadataStoreParams.cacheControl = request.headers['cache-control'];
@ -143,13 +139,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
metadataStoreParams.originOp = originOp;
const defaultObjectLockConfiguration
= bucketMD.getObjectLockConfiguration();
if (defaultObjectLockConfiguration) {
metadataStoreParams.defaultRetention
= defaultObjectLockConfiguration;
}
}
// if creating new delete marker and there is an existing object, copy
@ -158,7 +147,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
// eslint-disable-next-line no-param-reassign
request.headers[constants.objectLocationConstraintHeader] =
objMD[constants.objectLocationConstraintHeader];
metadataStoreParams.originOp = originOp;
}
const backendInfoObj =
@ -189,17 +177,14 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
}
}
if (objMD && objMD.uploadId) {
metadataStoreParams.oldReplayId = objMD.uploadId;
}
/* eslint-disable camelcase */
const dontSkipBackend = externalBackends;
/* eslint-enable camelcase */
const requestLogger =
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
const mdOnlySize = request.headers['x-amz-meta-size'];
return async.waterfall([
function storeData(next) {
if (size === 0) {
@ -215,22 +200,15 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
const md5 = request.headers['x-amz-meta-md5chksum']
? new Buffer(request.headers['x-amz-meta-md5chksum'],
'base64').toString('hex') : null;
const numParts = request.headers['x-amz-meta-md5numparts'];
let _md5;
if (numParts === undefined) {
_md5 = md5;
} else {
_md5 = `${md5}-${numParts}`;
}
const versionId = request.headers['x-amz-meta-version-id'];
const dataGetInfo = {
key: objectKey,
dataStoreName: location,
dataStoreType: locationType,
dataStoreVersionId: versionId,
dataStoreMD5: _md5,
dataStoreMD5: md5,
};
return next(null, dataGetInfo, _md5);
return next(null, dataGetInfo, md5);
}
}
return dataStore(objectKeyContext, cipherBundle, request, size,
@ -264,17 +242,12 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
return next(null, dataGetInfoArr);
},
function getVersioningInfo(infoArr, next) {
// if x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
if (isPutVersion) {
const options = overwritingVersioning(objMD, metadataStoreParams);
return process.nextTick(() => next(null, options, infoArr));
}
return versioningPreprocessing(bucketName, bucketMD,
metadataStoreParams.objectKey, objMD, log, (err, options) => {
if (err) {
// TODO: check AWS error when user requested a specific
// version before any versions have been put
const logLvl = err.is.BadRequest ?
const logLvl = err === errors.BadRequest ?
'debug' : 'error';
log[logLvl]('error getting versioning info', {
error: err,
@ -288,13 +261,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
metadataStoreParams.versionId = options.versionId;
metadataStoreParams.versioning = options.versioning;
metadataStoreParams.isNull = options.isNull;
metadataStoreParams.deleteNullKey = options.deleteNullKey;
if (options.extraMD) {
Object.assign(metadataStoreParams, options.extraMD);
}
metadataStoreParams.nullVersionId = options.nullVersionId;
return _storeInMDandDeleteData(bucketName, infoArr,
cipherBundle, metadataStoreParams,
options.dataToDelete, log, requestMethod, next);
options.dataToDelete, requestLogger, requestMethod, next);
},
], callback);
}

View File

@ -1,18 +0,0 @@
/**
* _bucketRequiresOplogUpdate - check whether deleting objects from a bucket requires an oplog update
* @param {BucketInfo} bucket - bucket object
* @return {boolean} whether objects require oplog updates on deletion, or not
*/
function _bucketRequiresOplogUpdate(bucket) {
// Default behavior is to require an oplog update
if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
return true;
}
// If the bucket has lifecycle configuration or notification configuration
// set, we also require an oplog update
return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
}
module.exports = {
_bucketRequiresOplogUpdate,
};
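// Illustrative only, not part of the original file. Note the function
// returns the configuration object itself when one is set, hence the
// boolean coercion here.
const plainBucket = {
    getLifecycleConfiguration: () => null,
    getNotificationConfiguration: () => null,
};
const lifecycleBucket = {
    getLifecycleConfiguration: () => ({ Rules: [] }),
    getNotificationConfiguration: () => null,
};
console.log(!!_bucketRequiresOplogUpdate(undefined));       // true (default)
console.log(!!_bucketRequiresOplogUpdate(plainBucket));     // false
console.log(!!_bucketRequiresOplogUpdate(lifecycleBucket)); // true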

View File

@ -1,140 +0,0 @@
const { supportedLifecycleRules } = require('arsenal').constants;
const { LifecycleConfiguration } = require('arsenal').models;
const {
LifecycleDateTime,
LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
const {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier,
expireOneDayEarlier,
timeProgressionFactor,
});
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
}
function formatExpirationHeader(date, id) {
return `expiry-date="${date}", rule-id="${encodeURIComponent(id)}"`;
}
// format: x-amz-expiration: expiry-date="Fri, 21 Dec 2012 00:00:00 GMT", rule-id="id"
const AMZ_EXP_HEADER = 'x-amz-expiration';
// format: x-amz-abort-date: "Fri, 21 Dec 2012 00:00:00 GMT"
const AMZ_ABORT_DATE_HEADER = 'x-amz-abort-date';
// format: x-amz-abort-rule-id: "rule id"
const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id';
function _generateExpHeadersObjects(rules, params, datetime) {
const tags = {
TagSet: params.tags
? Object.keys(params.tags)
.map(key => ({ Key: key, Value: params.tags[key] }))
: [],
};
const objectInfo = { Key: params.key };
const filteredRules = lifecycleUtils.filterRules(rules, objectInfo, tags);
const applicable = lifecycleUtils.getApplicableRules(filteredRules, objectInfo, datetime);
if (applicable.Expiration) {
const rule = applicable.Expiration;
if (rule.Days === undefined && rule.Date === undefined) {
return {};
}
if (rule.Date) {
return {
[AMZ_EXP_HEADER]: formatExpirationHeader(rule.Date, rule.ID),
};
}
const date = calculateDate(params.date, rule.Days, datetime);
return {
[AMZ_EXP_HEADER]: formatExpirationHeader(date.toUTCString(), rule.ID),
};
}
return {};
}
function _generateExpHeadersMPU(rules, params, datetime) {
const noTags = { TagSet: [] };
const objectInfo = { Key: params.key };
const filteredRules = lifecycleUtils.filterRules(rules, objectInfo, noTags);
const applicable = lifecycleUtils.getApplicableRules(filteredRules, {}, datetime);
if (applicable.AbortIncompleteMultipartUpload) {
const rule = applicable.AbortIncompleteMultipartUpload;
const date = calculateDate(
params.date,
rule.DaysAfterInitiation,
datetime
);
return {
[AMZ_ABORT_ID_HEADER]: encodeURIComponent(rule.ID),
[AMZ_ABORT_DATE_HEADER]: date.toUTCString(),
};
}
return {};
}
/**
* generate response expiration headers
* @param {object} params - lifecycleConfig, plus objectParams or mpuParams and the isVersionedReq flag
* @param {LifecycleDateTime} datetime - lifecycle datetime object
* @returns {object} - expiration response headers
*/
function generateExpirationHeaders(params, datetime) {
const { lifecycleConfig, objectParams, mpuParams, isVersionedReq } = params;
if (!lifecycleConfig || isVersionedReq) {
return {};
}
const lcfg = LifecycleConfiguration.getConfigJson(lifecycleConfig);
if (objectParams) {
return _generateExpHeadersObjects(lcfg.Rules, objectParams, datetime);
}
if (mpuParams) {
return _generateExpHeadersMPU(lcfg.Rules, mpuParams, datetime);
}
return {};
}
/**
* set response expiration headers on the target header object
* @param {object} headers - target header object
* @param {object} params - same shape as for generateExpirationHeaders
* @returns {undefined}
*/
function setExpirationHeaders(headers, params) {
const expHeaders = generateExpirationHeaders(params, lifecycleDateTime);
Object.assign(headers, expHeaders);
}
module.exports = {
lifecycleDateTime,
generateExpirationHeaders,
setExpirationHeaders,
};
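// Illustrative only, not part of the original file: the x-amz-expiration
// value format produced by formatExpirationHeader above (rule ids are
// URI-encoded).
const date = new Date('2012-12-21T00:00:00Z').toUTCString();
console.log(formatExpirationHeader(date, 'cleanup rule'));
// expiry-date="Fri, 21 Dec 2012 00:00:00 GMT", rule-id="cleanup%20rule"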

View File

@ -23,12 +23,12 @@ function _getStorageClasses(rule) {
}
const { replicationEndpoints } = s3config;
// If no storage class, use the given default endpoint or the sole endpoint
if (replicationEndpoints.length > 0) {
if (replicationEndpoints.length > 1) {
const endPoint =
replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0];
replicationEndpoints.find(endpoint => endpoint.default);
return [endPoint.site];
}
return undefined;
return [replicationEndpoints[0].site];
}
function _getReplicationInfo(rule, replicationConfig, content, operationType,
@ -36,9 +36,6 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
const storageTypes = [];
const backends = [];
const storageClasses = _getStorageClasses(rule);
if (!storageClasses) {
return undefined;
}
storageClasses.forEach(storageClass => {
const storageClassName =
storageClass.endsWith(':preferred_read') ?

View File

@ -1,190 +0,0 @@
const { versioning } = require('arsenal');
const versionIdUtils = versioning.VersionID;
const { lifecycleListing } = require('../../../../constants');
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = lifecycleListing;
function _makeTags(tags) {
const res = [];
Object.entries(tags).forEach(([key, value]) =>
res.push(
{
Key: key,
Value: value,
}
));
return res;
}
function processCurrents(bucketName, listParams, isBucketVersioned, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
IsLatest: true, // for compatibility with AWS ListObjectVersions.
DataStoreName: v.dataStoreName,
ListType: CURRENT_TYPE,
};
// NOTE: current versions listed for lifecycle should include a version id
// if the bucket is versioned.
if (isBucketVersioned) {
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
content.VersionId = versionId;
}
data.Contents.push(content);
});
return data;
}
function _encodeVersionId(vid) {
let versionId = vid;
if (versionId && versionId !== 'null') {
versionId = versionIdUtils.encode(versionId);
}
return versionId;
}
function processNonCurrents(bucketName, listParams, list) {
const nextVersionIdMarker = _encodeVersionId(list.NextVersionIdMarker);
const versionIdMarker = _encodeVersionId(listParams.versionIdMarker);
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
KeyMarker: listParams.keyMarker,
VersionIdMarker: versionIdMarker,
BeforeDate: listParams.beforeDate,
NextKeyMarker: list.NextKeyMarker,
NextVersionIdMarker: nextVersionIdMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
staleDate: v.staleDate, // lowerCamelCase to be compatible with existing lifecycle.
VersionId: versionId,
DataStoreName: v.dataStoreName,
ListType: NON_CURRENT_TYPE,
};
data.Contents.push(content);
});
return data;
}
function processOrphans(bucketName, listParams, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
data.Contents.push({
Key: item.key,
LastModified: v.LastModified,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
VersionId: versionId,
IsLatest: true, // for compatibility with AWS ListObjectVersions.
ListType: ORPHAN_DM_TYPE,
});
});
return data;
}
function getLocationConstraintErrorMessage(locationName) {
return 'value of the location you are attempting to set ' +
`- ${locationName} - is not listed in the locationConstraint config`;
}
/**
* validateMaxScannedEntries - Validates and returns the maximum scanned entries value.
*
* @param {object} params - Query parameters
* @param {object} config - CloudServer configuration
* @param {number} min - Minimum number of entries to be scanned
* @returns {Object} - An object indicating the validation result:
* - isValid (boolean): Whether the validation is successful.
* - maxScannedLifecycleListingEntries (number): The validated maximum scanned entries value if isValid is true.
*/
function validateMaxScannedEntries(params, config, min) {
let maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
if (params['max-scanned-lifecycle-listing-entries']) {
const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10);
if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min ||
maxEntriesParams > maxScannedLifecycleListingEntries) {
return { isValid: false };
}
maxScannedLifecycleListingEntries = maxEntriesParams;
}
return { isValid: true, maxScannedLifecycleListingEntries };
}
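// Illustrative only, not part of the original file: a hypothetical config
// capping scanned entries at 10000, with a minimum of 3.
const fakeConfig = { maxScannedLifecycleListingEntries: 10000 };
console.log(validateMaxScannedEntries({}, fakeConfig, 3));
// { isValid: true, maxScannedLifecycleListingEntries: 10000 }
console.log(validateMaxScannedEntries(
    { 'max-scanned-lifecycle-listing-entries': '500' }, fakeConfig, 3));
// { isValid: true, maxScannedLifecycleListingEntries: 500 }
console.log(validateMaxScannedEntries(
    { 'max-scanned-lifecycle-listing-entries': '2' }, fakeConfig, 3));
// { isValid: false }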
module.exports = {
processCurrents,
processNonCurrents,
processOrphans,
getLocationConstraintErrorMessage,
validateMaxScannedEntries,
};

View File

@ -1,34 +0,0 @@
/**
* Check that none of the keys in the current list, which will be used
* to compose the object, are present in the old object's list.
*
* This method can be used to guard against accidentally removing data
* keys due to instability from the metadata layer, or for replay
* detection in general.
*
* @param {array|string|null} prev - list of keys from the object being
* overwritten
* @param {array|null} curr - list of keys to be used in composing
* current object
* @returns {boolean} true if no key in `curr` is present in `prev`,
* false otherwise
*/
function locationKeysHaveChanged(prev, curr) {
if (!prev || prev.length === 0 || !curr) {
return true;
}
// backwards compatibility check if object is of model version 2
if (typeof prev === 'string') {
return curr.every(v => v.key !== prev);
}
const keysMap = {};
prev.forEach(v => {
if (!keysMap[v.dataStoreType]) {
keysMap[v.dataStoreType] = {};
}
keysMap[v.dataStoreType][v.key] = true;
});
return curr.every(v => !(keysMap[v.dataStoreType] && keysMap[v.dataStoreType][v.key]));
}
module.exports = locationKeysHaveChanged;
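// Illustrative only, not part of the original file: keys are compared per
// data store type, so reusing a key name on a different backend still
// counts as changed.
console.log(locationKeysHaveChanged(null, [{ key: 'k1' }]));  // true
console.log(locationKeysHaveChanged('k1', [{ key: 'k1' }]));  // false
console.log(locationKeysHaveChanged(
    [{ key: 'k1', dataStoreType: 'scality' }],
    [{ key: 'k1', dataStoreType: 'aws_s3' }]));               // true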

View File

@ -0,0 +1,24 @@
/**
* Check the keys in the current list, which will be used to compose the
* object, against the previous object's keys. This guards against
* accidentally removing data keys due to instability from the metadata
* layer. The check returns true if there was no match and false if at
* least one key from the previous list exists in the current list.
* @param {array|string} prev - list of keys from the object being overwritten
* @param {array} curr - list of keys to be used in composing current object
* @returns {boolean} true if no key from `prev` appears in `curr`
*/
function locationKeysSanityCheck(prev, curr) {
if (!prev || prev.length === 0) {
return true;
}
// backwards compatibility check if object is of model version 2
if (typeof prev === 'string') {
return curr.every(v => v.key !== prev);
}
const keysMap = {};
prev.forEach(v => { keysMap[v.key] = true; });
return curr.every(v => !keysMap[v.key]);
}
module.exports = locationKeysSanityCheck;
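// Illustrative only, not part of the original file: unlike
// locationKeysHaveChanged above, this variant matches on the key alone,
// ignoring the data store type.
console.log(locationKeysSanityCheck('k1', [{ key: 'k2' }]));            // true
console.log(locationKeysSanityCheck([{ key: 'k1' }], [{ key: 'k1' }])); // false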

View File

@ -1,348 +0,0 @@
const { errors, auth, policies } = require('arsenal');
const moment = require('moment');
const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period
* @return {string|undefined} the ISO-8601 date until which the object version remains locked
*/
function calculateRetainUntilDate(retention) {
const { days, years } = retention;
if (!days && !years) {
return undefined;
}
const date = moment();
// Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate
= date.add(retainUntilDaysInMs, 'ms');
return retainUntilDate.toISOString();
}
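// Illustrative only, not part of the original file: a 30-day retention
// requested at 2023-01-01T00:00:00Z, assuming scaledMsPerDay is the real
// 86400000 ms.
const lockedUntil = moment('2023-01-01T00:00:00Z')
    .add(30 * 24 * 60 * 60 * 1000, 'ms')
    .toISOString();
console.log(lockedUntil); // 2023-01-31T00:00:00.000Z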
/**
* Validates object lock headers
* @param {object} bucket - bucket metadata
* @param {object} headers - request headers
* @param {object} log - the log request
* @return {object} - object with error if validation fails
*/
function validateHeaders(bucket, headers, log) {
const bucketObjectLockEnabled = bucket.isObjectLockEnabled();
const objectLegalHold = headers['x-amz-object-lock-legal-hold'];
const objectLockDate = headers['x-amz-object-lock-retain-until-date'];
const objectLockMode = headers['x-amz-object-lock-mode'];
// If retention headers or legal hold header present but
// object lock is not enabled on the bucket return error
if ((objectLockDate || objectLockMode || objectLegalHold)
&& !bucketObjectLockEnabled) {
log.trace('bucket is missing ObjectLockConfiguration');
return errors.InvalidRequest.customizeDescription(
'Bucket is missing ObjectLockConfiguration');
}
if ((objectLockMode || objectLockDate) &&
!(objectLockMode && objectLockDate)) {
return errors.InvalidArgument.customizeDescription(
'x-amz-object-lock-retain-until-date and ' +
'x-amz-object-lock-mode must both be supplied',
);
}
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
if (objectLockMode && !validModes.has(objectLockMode)) {
return errors.InvalidArgument.customizeDescription(
'Unknown wormMode directive');
}
const validLegalHolds = new Set(['ON', 'OFF']);
if (objectLegalHold && !validLegalHolds.has(objectLegalHold)) {
return errors.InvalidArgument.customizeDescription(
'Legal hold status must be one of "ON", "OFF"');
}
const currentDate = new Date().toISOString();
if (objectLockMode && objectLockDate <= currentDate) {
return errors.InvalidArgument.customizeDescription(
'The retain until date must be in the future!');
}
return null;
}
/**
* Compares new object retention to bucket default retention
* @param {object} headers - request headers
* @param {object} defaultRetention - bucket retention configuration
* @return {object} - final object lock information to set on object
*/
function compareObjectLockInformation(headers, defaultRetention) {
const objectLockInfoToSave = {};
if (defaultRetention && defaultRetention.rule) {
const defaultMode = defaultRetention.rule.mode;
const defaultTime = calculateRetainUntilDate(defaultRetention.rule);
if (defaultMode && defaultTime) {
objectLockInfoToSave.retentionInfo = {
mode: defaultMode,
date: defaultTime,
};
}
}
if (headers) {
const headerMode = headers['x-amz-object-lock-mode'];
const headerDate = headers['x-amz-object-lock-retain-until-date'];
if (headerMode && headerDate) {
objectLockInfoToSave.retentionInfo = {
mode: headerMode,
date: headerDate,
};
}
const headerLegalHold = headers['x-amz-object-lock-legal-hold'];
if (headerLegalHold) {
const legalHold = headerLegalHold === 'ON';
objectLockInfoToSave.legalHold = legalHold;
}
}
return objectLockInfoToSave;
}
/**
* Sets object retention and/or legal hold information on object's metadata
* @param {object} headers - request headers
* @param {object} md - object metadata
* @param {(object|null)} defaultRetention - bucket retention configuration if
* bucket has any configuration set
* @return {undefined}
*/
function setObjectLockInformation(headers, md, defaultRetention) {
// Stores retention information if object either has its own retention
// configuration or default retention configuration from its bucket
const finalObjectLockInfo =
compareObjectLockInformation(headers, defaultRetention);
if (finalObjectLockInfo.retentionInfo) {
md.setRetentionMode(finalObjectLockInfo.retentionInfo.mode);
md.setRetentionDate(finalObjectLockInfo.retentionInfo.date);
}
if (finalObjectLockInfo.legalHold || finalObjectLockInfo.legalHold === false) {
md.setLegalHold(finalObjectLockInfo.legalHold);
}
}
/**
* Helper class for object lock state checks
*/
class ObjectLockInfo {
/**
*
* @param {object} retentionInfo - The object lock retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | null} retentionInfo.mode - Retention policy mode.
* @param {string} retentionInfo.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} retentionInfo.legalHold - Whether a legal hold is enabled for the object
*/
constructor(retentionInfo) {
this.mode = retentionInfo.mode || null;
this.date = retentionInfo.date || null;
this.legalHold = retentionInfo.legalHold || false;
}
/**
* ObjectLockInfo.isLocked
* @returns {bool} - Whether the retention policy is active and protecting the object
*/
isLocked() {
if (this.legalHold) {
return true;
}
if (!this.mode || !this.date) {
return false;
}
return !this.isExpired();
}
/**
* ObjectLockInfo.isGovernanceMode
* @returns {bool} - true if retention mode is GOVERNANCE
*/
isGovernanceMode() {
return this.mode === 'GOVERNANCE';
}
/**
* ObjectLockInfo.isComplianceMode
* @returns {bool} - True if retention mode is COMPLIANCE
*/
isComplianceMode() {
return this.mode === 'COMPLIANCE';
}
/**
* ObjectLockInfo.isExpired
* @returns {bool} - True if the retention policy has expired
*/
isExpired() {
const now = moment();
return this.date === null || now.isSameOrAfter(this.date);
}
/**
* ObjectLockInfo.isExtended
* @param {string} timestamp - Timestamp in ISO-8601 format
* @returns {bool} - True if the given timestamp is after the policy expiration date or if no expiration date is set
*/
isExtended(timestamp) {
return timestamp !== undefined && (this.date === null || moment(timestamp).isSameOrAfter(this.date));
}
/**
* ObjectLockInfo.canModifyObject
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
*/
canModifyObject(hasGovernanceBypass) {
// can modify object if object is not locked
// cannot modify object in any cases if legal hold is enabled
// if no legal hold, can only modify object if bypassing governance when locked
if (!this.isLocked()) {
return true;
}
return !this.legalHold && this.isGovernanceMode() && !!hasGovernanceBypass;
}
/**
* ObjectLockInfo.canModifyPolicy
* @param {object} policyChanges - Proposed changes to the retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | undefined} policyChanges.mode - Retention policy mode.
* @param {string} policyChanges.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
* @returns {bool} - True if the changes are allowed to be applied to the retention policy
*/
canModifyPolicy(policyChanges, hasGovernanceBypass) {
// If an object does not have a retention policy or it is expired then all changes are allowed
if (!this.isLocked()) {
return true;
}
// The only allowed change in compliance mode is extending the retention period
if (this.isComplianceMode()) {
if (policyChanges.mode === 'COMPLIANCE' && this.isExtended(policyChanges.date)) {
return true;
}
}
if (this.isGovernanceMode()) {
// Extensions are always allowed in governance mode
if (policyChanges.mode === 'GOVERNANCE' && this.isExtended(policyChanges.date)) {
return true;
}
// All other changes in governance mode require a bypass
if (hasGovernanceBypass) {
return true;
}
}
return false;
}
}
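// Illustrative only, not part of the original file: GOVERNANCE retention
// can be bypassed, while a legal hold blocks modification unconditionally.
const governance = new ObjectLockInfo(
    { mode: 'GOVERNANCE', date: '2099-01-01T00:00:00Z' });
console.log(governance.canModifyObject(false)); // false: still locked
console.log(governance.canModifyObject(true));  // true: bypass allowed
const held = new ObjectLockInfo({ legalHold: true });
console.log(held.canModifyObject(true));        // false: legal hold wins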
/**
*
* @param {object} headers - s3 request headers
* @returns {bool} - True if the header is present and equals "true" (case-insensitive)
*/
function hasGovernanceBypassHeader(headers) {
const bypassHeader = headers['x-amz-bypass-governance-retention'] || '';
return bypassHeader.toLowerCase() === 'true';
}
/**
* checkUserGovernanceBypass
*
* Checks for the presence of the s3:BypassGovernanceRetention permission for a given user
*
* @param {object} request - Incoming s3 request
* @param {object} authInfo - s3 authentication info
* @param {object} bucketMD - bucket metadata
* @param {string} objectKey - object key
* @param {object} log - Werelogs logger
* @param {function} cb - callback returns errors.AccessDenied if the authorization fails
* @returns {undefined} -
*/
function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) {
log.trace(
'object in GOVERNANCE mode and is user, checking for attached policies',
{ method: 'checkUserGovernanceBypass' },
);
const authParams = auth.server.extractParams(request, log, 's3', request.query);
const ip = policies.requestUtils.getClientIp(request, config);
const requestContextParams = {
constantParams: {
headers: request.headers,
query: request.query,
generalResource: bucketMD.getName(),
specificResource: { key: objectKey },
requesterIp: ip,
sslEnabled: request.connection.encrypted,
apiMethod: 'bypassGovernanceRetention',
awsService: 's3',
locationConstraint: bucketMD.getLocationConstraint(),
requesterInfo: authInfo,
signatureVersion: authParams.params.data.signatureVersion,
authType: authParams.params.data.authType,
signatureAge: authParams.params.data.signatureAge,
},
};
return vault.checkPolicies(requestContextParams,
authInfo.getArn(), log, (err, authorizationResults) => {
if (err) {
return cb(err);
}
const explicitDenyExists = authorizationResults.some(
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
if (explicitDenyExists) {
log.trace('authorization check failed for user',
{
'method': 'checkUserGovernanceBypass',
's3:BypassGovernanceRetention': false,
});
return cb(errors.AccessDenied);
}
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
authInfo.getCanonicalID(),
authInfo,
actionImplicitDenies,
log,
request);
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
});
}
module.exports = {
calculateRetainUntilDate,
compareObjectLockInformation,
setObjectLockInformation,
validateHeaders,
hasGovernanceBypassHeader,
checkUserGovernanceBypass,
ObjectLockInfo,
};

View File

@ -1,172 +0,0 @@
const async = require('async');
const { errors, s3middleware } = require('arsenal');
const { allowedRestoreObjectRequestTierValues } = require('../../../../constants');
const coldStorage = require('./coldStorage');
const monitoring = require('../../../utilities/monitoringHandler');
const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/**
* Check if tier is supported
* @param {object} restoreInfo - restore information
* @returns {ArsenalError|undefined} NotImplemented error if the tier is not supported
*/
function checkTierSupported(restoreInfo) {
if (!allowedRestoreObjectRequestTierValues.includes(restoreInfo.tier)) {
return errors.NotImplemented;
}
return undefined;
}
/**
* POST Object restore process
*
* @param {MetadataWrapper} metadata - metadata wrapper
* @param {object} mdUtils - utility object used to validate metadata
* @param {AuthInfo} userInfo - Instance of AuthInfo class with requester's info
* @param {IncomingMessage} request - request info
* @param {object} log - Werelogs logger
* @param {function} callback - callback function
* @return {undefined}
*/
function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
const METHOD = 'objectRestore';
const { bucketName, objectKey } = request;
log.debug('processing request', { method: METHOD });
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query',
{
method: METHOD,
versionId: request.query.versionId,
error: decodedVidResult,
});
return process.nextTick(() => callback(decodedVidResult));
}
let isObjectRestored = false;
const mdValueParams = {
authInfo: userInfo,
bucketName,
objectKey,
versionId: decodedVidResult,
requestType: request.apiMethods || 'restoreObject',
/**
* Restoring an already-restored object has no impact on
* storage: only the restore duration is extended. We
* therefore disable the automatic quota evaluation here
* and trigger it manually below.
*/
checkQuota: false,
request,
};
return async.waterfall([
// get metadata of bucket and object
function validateBucketAndObject(next) {
return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
log, (err, bucketMD, objectMD) => {
if (err) {
log.trace('request authorization failed', { method: METHOD, error: err });
return next(err);
}
// Call back error if object metadata could not be obtained
if (!objectMD) {
const err = decodedVidResult ? errors.NoSuchVersion : errors.NoSuchKey;
log.trace('error no object metadata found', { method: METHOD, error: err });
return next(err, bucketMD);
}
// If object metadata is delete marker,
// call back NoSuchKey or MethodNotAllowed depending on specifying versionId
if (objectMD.isDeleteMarker) {
let err = errors.NoSuchKey;
if (decodedVidResult) {
err = errors.MethodNotAllowed;
}
log.trace('version is a delete marker', { method: METHOD, error: err });
return next(err, bucketMD, objectMD);
}
log.info('acquired object metadata', {
'method': METHOD,
});
return next(null, bucketMD, objectMD);
});
},
// generate restore param obj from xml of request body and check tier validity
function parseRequestXmlAndCheckTier(bucketMD, objectMD, next) {
log.trace('parsing object restore information');
return parseRestoreRequestXml(request.post, log, (err, restoreInfo) => {
if (err) {
return next(err, bucketMD, objectMD, restoreInfo);
}
log.info('parsed restore request xml', { method: METHOD, value: restoreInfo });
const checkTierResult = checkTierSupported(restoreInfo);
if (checkTierResult instanceof Error) {
return next(checkTierResult);
}
return next(null, bucketMD, objectMD, restoreInfo);
});
},
// start restore process
function startRestore(bucketMD, objectMD, restoreInfo, next) {
return coldStorage.startRestore(objectMD, restoreInfo, log,
(err, _isObjectRestored) => {
isObjectRestored = _isObjectRestored;
return next(err, bucketMD, objectMD);
});
},
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
log, err => next(err, bucketMD, objectMD));
},
],
(err, bucketMD) => {
// generate CORS response header
const responseHeaders = collectCorsHeaders(request.headers.origin, request.method, bucketMD);
if (err) {
log.trace('error processing request',
{
method: METHOD,
error: err,
});
monitoring.promMetrics(
'POST', bucketName, err.code, 'restoreObject');
return callback(err, err.code, responseHeaders);
}
pushMetric('restoreObject', log, {
userInfo,
bucket: bucketName,
});
if (isObjectRestored) {
monitoring.promMetrics(
'POST', bucketName, '200', 'restoreObject');
return callback(null, 200, responseHeaders);
}
monitoring.promMetrics(
'POST', bucketName, '202', 'restoreObject');
return callback(null, 202, responseHeaders);
});
}
module.exports = objectRestore;

View File

@ -20,23 +20,13 @@ function getPartNumber(query) {
*/
function getPartSize(objMD, partNumber) {
let size;
let locationPartNumber;
if (partNumber && objMD && objMD.location
&& objMD.location.length >= partNumber) {
const locations = [];
for (let i = 0; i < objMD.location.length; i++) {
const { dataStoreETag } = objMD.location[i];
if (dataStoreETag) {
locationPartNumber =
const locationPartNumber =
Number.parseInt(dataStoreETag.split(':')[0], 10);
} else {
/**
* Location objects prior to GA7.1 do not include the
* dataStoreETag field so we cannot find the part range,
* the objects are treated as if they only have 1 part
*/
locationPartNumber = 1;
}
// Get all parts that belong to the requested part number
if (partNumber === locationPartNumber) {
locations.push(objMD.location[i]);
@ -54,21 +44,7 @@ function getPartSize(objMD, partNumber) {
return size;
}
/**
* Gets the part count if the object was put with MPU
* @param {object} objMD - object metadata
* @return {(string|null)} - number of parts of the MPU object (as a string) or null
*/
function getPartCountFromMd5(objMD) {
const md5 = objMD['content-md5'];
if (md5.includes('-')) {
return md5.split('-')[1];
}
return null;
}
module.exports = {
getPartNumber,
getPartSize,
getPartCountFromMd5,
};
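// Illustrative only, not part of the original file: MPU ETags embed the
// part count after a dash.
console.log(getPartCountFromMd5(
    { 'content-md5': '9b2cf535f27731c974343645a3985328-4' })); // '4'
console.log(getPartCountFromMd5(
    { 'content-md5': '9b2cf535f27731c974343645a3985328' }));   // null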

View File

@ -7,12 +7,12 @@ const V4Transform = require('../../../auth/streamingV4/V4Transform');
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable)
* @param {RequestLogger} log - the current request logger
* @param {function} errCb - callback called if an error occurs
* @param {function} cb - callback containing the result for V4Transform
* @return {object|null} - V4Transform object if v4 Auth request, or
* the original stream, or null if the request has no V4 params but
* the type of request requires them
*/
function prepareStream(stream, streamingV4Params, log, errCb) {
function prepareStream(stream, streamingV4Params, log, cb) {
if (stream.headers['x-amz-content-sha256'] ===
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD') {
if (typeof streamingV4Params !== 'object') {
@ -22,7 +22,7 @@ function prepareStream(stream, streamingV4Params, log, errCb) {
// and we should return an error to the client.
return null;
}
const v4Transform = new V4Transform(streamingV4Params, log, errCb);
const v4Transform = new V4Transform(streamingV4Params, log, cb);
stream.pipe(v4Transform);
return v4Transform;
}

View File

@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const {
parseRangeSpec,
parseRange,
} = require('arsenal').network.http.utils;
} = require('arsenal/lib/network/http/utils');
const constants = require('../../../../constants');
const setPartRanges = require('./setPartRanges');

View File

@ -1,4 +1,4 @@
const { errors, jsutil } = require('arsenal');
const { errors } = require('arsenal');
const { data } = require('../../../data/wrapper');
const { prepareStream } = require('./prepareStream');
@ -57,8 +57,7 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) {
*/
function dataStore(objectContext, cipherBundle, stream, size,
streamingV4Params, backendInfo, log, cb) {
const cbOnce = jsutil.once(cb);
const dataStream = prepareStream(stream, streamingV4Params, log, cbOnce);
const dataStream = prepareStream(stream, streamingV4Params, log, cb);
if (!dataStream) {
return process.nextTick(() => cb(errors.InvalidArgument));
}
@ -69,19 +68,19 @@ function dataStore(objectContext, cipherBundle, stream, size,
log.error('error in datastore', {
error: err,
});
return cbOnce(err);
return cb(err);
}
if (!dataRetrievalInfo) {
log.fatal('data put returned neither an error nor a key', {
method: 'storeObject::dataStore',
});
return cbOnce(errors.InternalError);
return cb(errors.InternalError);
}
log.trace('dataStore: backend stored key', {
dataRetrievalInfo,
});
return checkHashMatchMD5(stream, hashedStream,
dataRetrievalInfo, log, cbOnce);
dataRetrievalInfo, log, cb);
});
}

View File

@ -1,32 +0,0 @@
const { errors } = require('arsenal');
const { unsupportedSignatureChecksums, supportedSignatureChecksums } = require('../../../../constants');
function validateChecksumHeaders(headers) {
// If the x-amz-trailer header is present the request is using one of the
// trailing checksum algorithms, which are not supported.
if (headers['x-amz-trailer'] !== undefined) {
return errors.BadRequest.customizeDescription('trailing checksum is not supported');
}
const signatureChecksum = headers['x-amz-content-sha256'];
if (signatureChecksum === undefined) {
return null;
}
if (supportedSignatureChecksums.has(signatureChecksum)) {
return null;
}
// If the value is not one of the possible checksum algorithms
// the only other valid value is the actual sha256 checksum of the payload.
// Do a simple sanity check of the length to guard against future algos.
// If the value is an unknown algo, then it will fail checksum validation.
if (!unsupportedSignatureChecksums.has(signatureChecksum) && signatureChecksum.length === 64) {
return null;
}
return errors.BadRequest.customizeDescription('unsupported checksum algorithm');
}
module.exports = validateChecksumHeaders;
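// Illustrative only, not part of the original file. The 64-character value
// below is the sha256 of an empty payload.
console.log(validateChecksumHeaders(
    { 'x-amz-trailer': 'x-amz-checksum-crc32' }));
// BadRequest: trailing checksum is not supported
console.log(validateChecksumHeaders({
    'x-amz-content-sha256':
        'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
}));
// null: plausible payload digest, accepted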

View File

@ -4,40 +4,13 @@ const async = require('async');
const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config');
const { scaledMsPerDay } = config.getTimeOptions();
const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata
// for null versions that are created before bucket versioning is configured
const nonVersionedObjId =
versionIdUtils.getInfVid(config.replicationGroupId);
/** decodeVID - decode the version id
* @param {string} versionId - version ID
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
* fails due to improper format, otherwise undefined or the decoded version id
*/
function decodeVID(versionId) {
if (versionId === 'null') {
return versionId;
}
let decoded;
const invalidErr = errors.InvalidArgument.customizeDescription('Invalid version id specified');
try {
decoded = versionIdUtils.decode(versionId);
} catch (err) {
return invalidErr;
}
if (decoded instanceof Error) {
return invalidErr;
}
return decoded;
}
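// Illustrative only, not part of the original file: 'null' passes through
// unchanged, while a malformed id maps to InvalidArgument.
console.log(decodeVID('null')); // 'null'
console.log(decodeVID('not-a-version-id').description);
// Invalid version id specified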
/** decodeVersionId - decode the version id from a query object
/** decodedVidResult - decode the version id from a query object
* @param {object} [reqQuery] - request query object
* @param {string} [reqQuery.versionId] - version ID sent in request query
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
@ -47,7 +20,16 @@ function decodeVersionId(reqQuery) {
if (!reqQuery || !reqQuery.versionId) {
return undefined;
}
return decodeVID(reqQuery.versionId);
let versionId = reqQuery.versionId;
if (versionId === 'null') {
return versionId;
}
versionId = versionIdUtils.decode(versionId);
if (versionId instanceof Error) {
return errors.InvalidArgument
.customizeDescription('Invalid version id specified');
}
return versionId;
}
/** getVersionIdResHeader - return encrypted version ID if appropriate
@ -58,7 +40,7 @@ function decodeVersionId(reqQuery) {
*/
function getVersionIdResHeader(verCfg, objectMD) {
if (verCfg) {
if (objectMD.isNull || !objectMD.versionId) {
if (objectMD.isNull || (objectMD && !objectMD.versionId)) {
return 'null';
}
return versionIdUtils.encode(objectMD.versionId);
@ -79,34 +61,17 @@ function checkQueryVersionId(query) {
return undefined;
}
function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb) {
// In compatibility mode, create null versioned keys instead of null keys
let versionId;
let nullVersionMD;
if (config.nullVersionCompatMode) {
versionId = nullVersionId;
nullVersionMD = Object.assign({}, objMD, {
versionId: nullVersionId,
isNull: true,
});
} else {
versionId = 'null';
nullVersionMD = Object.assign({}, objMD, {
versionId: nullVersionId,
isNull: true,
isNull2: true,
});
}
metadata.putObjectMD(bucketName, objKey, nullVersionMD, { versionId }, log, err => {
function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
if (err) {
log.debug('error from metadata storing null version as new version',
{ error: err });
}
cb(err);
cb(err, options);
});
}
/** check existence and get location of null version data for deletion
/** get location of null version data for deletion
* @param {string} bucketName - name of bucket
* @param {string} objKey - name of object key
* @param {object} options - metadata options for getting object MD
@ -117,179 +82,100 @@ function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb)
* @param {function} cb - callback
* @return {undefined} - and call callback with (err, dataToDelete)
*/
function _prepareNullVersionDeletion(bucketName, objKey, options, mst, log, cb) {
const nullOptions = {};
if (!options.deleteData) {
return process.nextTick(cb, null, nullOptions);
}
function _getNullVersionsToDelete(bucketName, objKey, options, mst, log, cb) {
if (options.versionId === mst.versionId) {
// no need to get another key as the master is the target
nullOptions.dataToDelete = mst.objLocation;
return process.nextTick(cb, null, nullOptions);
}
if (options.versionId === 'null') {
// deletion of the null key will be done by the main metadata
// PUT via this option
nullOptions.deleteNullKey = true;
// no need to get delete location, we already have the master's metadata
const dataToDelete = mst.objLocation;
return process.nextTick(cb, null, dataToDelete);
}
return metadata.getObjectMD(bucketName, objKey, options, log,
(err, versionMD) => {
if (err) {
// the null key may not exist, hence it's a normal
// situation to have a NoSuchKey error, in which case
// there is nothing to delete
if (err.is.NoSuchKey) {
log.debug('null version does not exist', {
method: '_prepareNullVersionDeletion',
});
} else {
log.warn('could not get null version metadata', {
log.debug('err from metadata getting specified version', {
error: err,
method: '_prepareNullVersionDeletion',
method: '_getNullVersionsToDelete',
});
}
return cb(err);
}
if (versionMD.location) {
if (!versionMD.location) {
return cb();
}
const dataToDelete = Array.isArray(versionMD.location) ?
versionMD.location : [versionMD.location];
nullOptions.dataToDelete = dataToDelete;
}
return cb(null, nullOptions);
return cb(null, dataToDelete);
});
}
function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
return metadata.deleteObjectMD(bucketName, objKey, options, log, err => {
function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
return _getNullVersionsToDelete(bucketName, objKey, options, mst, log,
(err, nullDataToDelete) => {
if (err) {
log.warn('metadata error deleting null versioned key',
{ bucketName, objKey, error: err, method: '_deleteNullVersionMD' });
}
log.warn('could not find null version metadata', {
error: err,
method: '_deleteNullVersionMD',
});
return cb(err);
}
return metadata.deleteObjectMD(bucketName, objKey, options, log,
err => {
if (err) {
log.warn('metadata error deleting null version',
{ error: err, method: '_deleteNullVersionMD' });
return cb(err);
}
return cb(null, nullDataToDelete);
});
});
}
/**
* Process state from the master version of an object and the bucket
* versioning configuration, return a set of options objects
*
* @param {object} mst - state of master version, as returned by
* getMasterState()
* @param {string} vstat - bucket versioning status: 'Enabled' or 'Suspended'
* @param {boolean} nullVersionCompatMode - if true, behaves in null
* version compatibility mode and returns appropriate values: this mode
* does not attempt to create null keys but creates null versioned keys
* instead
*
* @return {object} result object with the following attributes:
* - {object} options: versioning-related options to pass to the
services.metadataStoreObject() call
* - {object} [options.extraMD]: extra attributes to set in object metadata
* - {string} [nullVersionId]: null version key to create, if needed
* - {object} [delOptions]: options for metadata to delete the null
version key, if needed
*/
function processVersioningState(mst, vstat, nullVersionCompatMode) {
const versioningSuspended = (vstat === 'Suspended');
const masterIsNull = mst.exists && (mst.isNull || !mst.versionId);
if (versioningSuspended) {
// versioning is suspended: overwrite the existing null version
const options = { versionId: '', isNull: true };
if (masterIsNull) {
// if the null version exists, clean it up prior to put
if (mst.objLocation) {
function processVersioningState(mst, vstat, cb) {
const options = {};
const storeOptions = {};
const delOptions = {};
// object does not exist or is not versioned (before versioning)
if (mst.versionId === undefined || mst.isNull) {
// versioning is suspended, overwrite existing master version
if (vstat === 'Suspended') {
options.versionId = '';
options.isNull = true;
options.dataToDelete = mst.objLocation;
// if null version exists, clean it up prior to put
if (mst.isNull) {
delOptions.versionId = mst.versionId;
return cb(null, options, null, delOptions);
}
// backward-compat: a null version key may exist even with
// a null master (due to S3C-7526), if so, delete it (its
// data will be deleted as part of the master cleanup, so
// no "deleteData" param is needed)
//
// "isNull2" attribute is set in master metadata when
// null keys are used, which is used as an optimization to
// avoid having to check the versioned key since there can
// be no more versioned key to clean up
if (mst.isNull && mst.versionId && !mst.isNull2) {
const delOptions = { versionId: mst.versionId };
return { options, delOptions };
return cb(null, options);
}
return { options };
// versioning is enabled, create a new version
options.versioning = true;
if (mst.exists) {
// store master version in a new key
const versionId = mst.isNull ? mst.versionId : nonVersionedObjId;
storeOptions.versionId = versionId;
storeOptions.isNull = true;
options.nullVersionId = versionId;
return cb(null, options, storeOptions);
}
if (mst.nullVersionId) {
// backward-compat: delete the null versioned key and data
const delOptions = { versionId: mst.nullVersionId, deleteData: true };
if (mst.nullUploadId) {
delOptions.replayId = mst.nullUploadId;
return cb(null, options);
}
return { options, delOptions };
// master is versioned and is not a null version
const nullVersionId = mst.nullVersionId;
if (vstat === 'Suspended') {
// versioning is suspended, overwrite the existing master version
options.versionId = '';
options.isNull = true;
if (nullVersionId === undefined) {
return cb(null, options);
}
// clean up the eventual null key's location data prior to put
// NOTE: due to metadata v1 internal format, we cannot guess
// from the master key whether there is an associated null
// key, because the master key may be removed whenever the
// latest version becomes a delete marker. Hence we need to
// pessimistically try to get the null key metadata and delete
// it if it exists.
const delOptions = { versionId: 'null', deleteData: true };
return { options, delOptions };
delOptions.versionId = nullVersionId;
return cb(null, options, null, delOptions);
}
// versioning is enabled, put the new version
options.versioning = true;
options.nullVersionId = nullVersionId;
return cb(null, options);
}
// versioning is enabled: create a new version
const options = { versioning: true };
if (masterIsNull) {
// if master is a null version or a non-versioned key,
// copy it to a new null key
const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId;
if (nullVersionCompatMode) {
options.extraMD = {
nullVersionId,
};
if (mst.uploadId) {
options.extraMD.nullUploadId = mst.uploadId;
}
return { options, nullVersionId };
}
if (mst.isNull && !mst.isNull2) {
// if master null version was put with an older
// Cloudserver (or in compat mode), there is a
// possibility that it also has a null versioned key
// associated, so we need to delete it as we write the
// null key
const delOptions = {
versionId: nullVersionId,
};
return { options, nullVersionId, delOptions };
}
return { options, nullVersionId };
}
// backward-compat: keep a reference to the existing null
// versioned key
if (mst.nullVersionId) {
options.extraMD = {
nullVersionId: mst.nullVersionId,
};
if (mst.nullUploadId) {
options.extraMD.nullUploadId = mst.nullUploadId;
}
}
return { options };
}
/**
* Build the state of the master version from its object metadata
*
* @param {object} objMD - object metadata parsed from JSON
*
* @return {object} state of master version, with the following attributes:
* - {boolean} exists - true if the object exists (i.e. if `objMD` is truish)
* - {string} versionId - version ID of the master key
* - {boolean} isNull - whether the master version is a null version
* - {string} nullVersionId - if not a null version, reference to the
* null version ID
* - {array} objLocation - array of data locations
*/
function getMasterState(objMD) {
if (!objMD) {
return {};
@ -297,11 +183,8 @@ function getMasterState(objMD) {
const mst = {
exists: true,
versionId: objMD.versionId,
uploadId: objMD.uploadId,
isNull: objMD.isNull,
isNull2: objMD.isNull2,
nullVersionId: objMD.nullVersionId,
nullUploadId: objMD.nullUploadId,
};
if (objMD.location) {
mst.objLocation = Array.isArray(objMD.location) ?
@ -323,113 +206,62 @@ function getMasterState(objMD) {
* ('' overwrites the master version)
* options.versioning - (true/undefined) metadata instruction to create new ver
* options.isNull - (true/undefined) whether new version is null or not
* options.nullVersionId - if storing a null version in version history, the
* version id of the null version
* options.deleteNullVersionData - whether to delete the data of the null ver
*/
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
log, callback) {
const mst = getMasterState(objMD);
const vCfg = bucketMD.getVersioningConfiguration();
// bucket is not versioning configured
if (!vCfg) {
const options = { dataToDelete: mst.objLocation };
return process.nextTick(callback, null, options);
}
// bucket is versioning configured
const { options, nullVersionId, delOptions } =
processVersioningState(mst, vCfg.Status, config.nullVersionCompatMode);
return async.series([
function storeNullVersionMD(next) {
if (!nullVersionId) {
return process.nextTick(next);
}
return _storeNullVersionMD(bucketName, objectKey, nullVersionId, objMD, log, next);
},
function prepareNullVersionDeletion(next) {
if (!delOptions) {
return process.nextTick(next);
}
return _prepareNullVersionDeletion(
bucketName, objectKey, delOptions, mst, log,
(err, nullOptions) => {
if (err) {
return next(err);
}
Object.assign(options, nullOptions);
return next();
});
},
function deleteNullVersionMD(next) {
if (delOptions &&
delOptions.versionId &&
delOptions.versionId !== 'null') {
// backward-compat: delete old null versioned key
return _deleteNullVersionMD(
bucketName, objectKey, { versionId: delOptions.versionId }, log, next);
}
return process.nextTick(next);
},
], err => {
// it's possible there was a prior request that deleted the
// null version, so proceed with putting a new version
if (err && err.is.NoSuchKey) {
return callback(null, options);
}
return callback(err, options);
});
}
/** Return options to pass to Metadata layer for version-specific
* operations with the given requested version ID
*
* @param {object} objectMD - object metadata
* @param {boolean} nullVersionCompatMode - if true, behaves in null
* version compatibility mode
* @return {object} options object with params:
* {string} [options.versionId] - specific versionId to update
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
* Metadata backend if we're updating or deleting a new-style null
* version (stored in master or null key), or not a null version.
*/
function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
// Use the internal versionId if it is a "real" null version (not
// non-versioned)
//
// If the target object is non-versioned: do not specify a
// "versionId" attribute nor "isNull"
//
// If the target version is a null version, i.e. has the "isNull"
// attribute:
//
// - send the "isNull=true" param to Metadata if the version is
// already a null key put by a non-compat mode Cloudserver, to
// let Metadata know that the null key is to be updated or
// deleted. This is the case if the "isNull2" metadata attribute
// exists
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that it is a legacy null version
//
// If the target version is not a null version and is versioned:
//
// - send the "isNull=false" param to Metadata in non-compat
// mode (mandatory for v1 format)
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that an existing null version may not be stored in a
// null key
//
if (objectMD.versionId === undefined) {
return {};
}
const options = { versionId: objectMD.versionId };
if (objectMD.isNull) {
if (objectMD.isNull2) {
options.isNull = true;
}
} else if (!nullVersionCompatMode) {
options.isNull = false;
}
return options;
}
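A hedged sketch of the input/output behaviour described by the comment block above (version IDs invented):
getVersionSpecificMetadataOptions({}, false);
// => {} (non-versioned object: no versionId, no isNull)
getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true }, false);
// => { versionId: 'v1' } (legacy null version: isNull hint omitted)
getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true, isNull2: true }, false);
// => { versionId: 'v1', isNull: true } (new-style null key)
getVersionSpecificMetadataOptions({ versionId: 'v2' }, false);
// => { versionId: 'v2', isNull: false }
getVersionSpecificMetadataOptions({ versionId: 'v2' }, true);
// => { versionId: 'v2' } (compat mode: no isNull hint)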
/** preprocessingVersioningDelete - return versioning information for S3 to
@ -438,124 +270,59 @@ function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
* @param {object} bucketMD - bucket metadata
* @param {object} objectMD - obj metadata
* @param {string} [reqVersionId] - specific version ID sent as part of request
* @param {boolean} nullVersionCompatMode - if true, behaves in null version compatibility mode
* @return {object} options object with params:
* {boolean} [options.deleteData=true|undefined] - whether to delete data (if undefined
* means creating a delete marker instead)
* {string} [options.versionId] - specific versionId to delete
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
* Metadata backend if we're deleting a new-style null version (stored
* in master or null key), or not a null version.
*/
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId, nullVersionCompatMode) {
let options = {};
if (bucketMD.getVersioningConfiguration() && reqVersionId) {
options = getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode);
}
if (!bucketMD.getVersioningConfiguration() || reqVersionId) {
// delete data if bucket is non-versioned or the request
// deletes a specific version
options.deleteData = true;
}
return options;
}
/**
* Keep metadatas when the object is restored from cold storage
* but remove the specific ones we don't want to keep
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {undefined}
*/
function restoreMetadata(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
// We need to keep user metadata and tags
Object.keys(objMD).forEach(key => {
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
metadataStoreParams.metaHeaders[key] = objMD[key];
}
});
if (objMD['x-amz-website-redirect-location']) {
if (!metadataStoreParams.headers) {
metadataStoreParams.headers = {};
}
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
}
if (objMD.replicationInfo) {
metadataStoreParams.replicationInfo = objMD.replicationInfo;
}
if (objMD.legalHold) {
metadataStoreParams.legalHold = objMD.legalHold;
}
if (objMD.acl) {
metadataStoreParams.acl = objMD.acl;
}
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.taggingCopy = objMD.tags;
}
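For illustration, a minimal run of restoreMetadata (the header names come from the code above; the values are invented):
const objMD = {
    'x-amz-meta-color': 'blue',
    'x-amz-meta-scal-s3-restore-attempt': '2', // skipped via userMDToSkip
    tags: { project: 'demo' },
};
const metadataStoreParams = { metaHeaders: {} };
restoreMetadata(objMD, metadataStoreParams);
// metadataStoreParams.metaHeaders => { 'x-amz-meta-color': 'blue' }
// metadataStoreParams.taggingCopy => { project: 'demo' }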
/** overwritingVersioning - return versioning information for S3 to handle
* storing version metadata with a specific version id.
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {object} options
* options.versionId - specific versionId to overwrite in metadata
* options.isNull - (true/undefined) whether new version is null or not
* options.nullVersionId - if storing a null version in version history, the
* version id of the null version
*/
function overwritingVersioning(objMD, metadataStoreParams) {
metadataStoreParams.updateMicroVersionId = true;
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
// set correct originOp
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
// update restore
const days = objMD.archive?.restoreRequestedDays;
const now = Date.now();
metadataStoreParams.archive = {
archiveInfo: objMD.archive?.archiveInfo,
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
restoreCompletedAt: new Date(now),
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
};
/* eslint-enable no-param-reassign */
const versionId = objMD.versionId || undefined;
const options = {
versionId,
isNull: objMD.isNull,
};
if (objMD.nullVersionId) {
options.extraMD = {
nullVersionId: objMD.nullVersionId,
};
}
restoreMetadata(objMD, metadataStoreParams);
return options;
}
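The restore expiry arithmetic above reduces to now + days * scaledMsPerDay. A standalone sketch, assuming scaledMsPerDay is one day in milliseconds in production deployments:
const scaledMsPerDay = 24 * 60 * 60 * 1000; // assumed production value
const restoreRequestedDays = 2;             // objMD.archive.restoreRequestedDays
const now = Date.now();
const restoreWillExpireAt = new Date(now + (restoreRequestedDays * scaledMsPerDay));
console.log(restoreWillExpireAt > new Date(now)); // true: expires two days out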
module.exports = {
decodeVersionId,
getVersionIdResHeader,
checkQueryVersionId,
processVersioningState,
getMasterState,
versioningPreprocessing,
getVersionSpecificMetadataOptions,
preprocessingVersioningDelete,
overwritingVersioning,
decodeVID,
};

View File

@ -101,33 +101,8 @@ function validateWebsiteHeader(header) {
header.startsWith('http://') || header.startsWith('https://'));
}
/**
* appendWebsiteIndexDocument - append index to objectKey if necessary
* @param {object} request - normalized request object
* @param {string} indexDocumentSuffix - index document from website config
* @param {boolean} force - flag to force append index
* @return {undefined}
*/
function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
const reqObjectKey = request.objectKey ? request.objectKey : '';
/* eslint-disable no-param-reassign */
// find index document if "directory" sent in request
if (reqObjectKey.endsWith('/')) {
request.objectKey += indexDocumentSuffix;
// find index document if no key provided
} else if (reqObjectKey === '') {
request.objectKey = indexDocumentSuffix;
// force for redirect 302 on folder without trailing / that has an index
} else if (force) {
request.objectKey += `/${indexDocumentSuffix}`;
}
/* eslint-enable no-param-reassign */
}
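A usage sketch for the three cases handled above (object keys invented):
const dirReq = { objectKey: 'photos/' };
appendWebsiteIndexDocument(dirReq, 'index.html');
console.log(dirReq.objectKey); // 'photos/index.html'

const rootReq = { objectKey: '' };
appendWebsiteIndexDocument(rootReq, 'index.html');
console.log(rootReq.objectKey); // 'index.html'

const folderReq = { objectKey: 'photos' };
appendWebsiteIndexDocument(folderReq, 'index.html', true); // 302 redirect case
console.log(folderReq.objectKey); // 'photos/index.html'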
module.exports = {
findRoutingRule,
extractRedirectInfo,
validateWebsiteHeader,
appendWebsiteIndexDocument,
};

View File

@ -1,314 +0,0 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
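Worked examples of the accounting above, with stub buckets standing in for the real BucketInfo class (all byte counts invented):
const versioned = { isVersioningEnabled: () => true };
const nonVersioned = { isVersioningEnabled: () => false };

// Replacing a 100-byte object with 40 bytes in a non-versioned bucket
// accounts only for the 60-byte difference:
processBytesToWrite('objectPut', nonVersioned, undefined, 40, { 'content-length': '100' });
// => -60

// Deleting a specific 100-byte version frees 100 bytes:
processBytesToWrite('objectDelete', versioned, 'v1', 0, { 'content-length': '100' });
// => -100

// Copying a 100-byte object into a versioned bucket adds 100 bytes:
processBytesToWrite('objectCopy', versioned, undefined, 0, { 'content-length': '100' });
// => 100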
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
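The staleness rule in isolation, as a hedged sketch (the 30-second maxStaleness here is invented; the real value comes from QuotaService.maxStaleness):
const maxStaleness = 30 * 1000; // invented; see QuotaService.maxStaleness
const isStale = metric => Boolean(metric.date) &&
    Date.now() - new Date(metric.date).getTime() > maxStaleness;
console.log(isStale({ date: new Date(Date.now() - 60000).toISOString() })); // true
console.log(isStale({ date: new Date().toISOString() }));                   // false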
/**
* Evaluates quotas for a bucket and an account and updates the inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
/**
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
* the incoming bytes, are under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {undefined} - calls back with errors.QuotaExceeded when a quota is exceeded
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
// is checked second. This logic handles these APIs calls by
// ensuring the bytes are positives (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes are compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// the sum of the current utilization metrics only are compared with the
// quota. They include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
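A number-level sketch of the two accounting modes described in the inflight comment inside validateQuotas above (quota and byte counts invented):
const quota = 1000;   // bytes allowed
const used = 900;     // bytesTotal from the utilization service
const incoming = 200; // bytes carried by the current request

// Inflights disabled: the request bytes are added locally for the check.
console.log(used + incoming > quota); // true => QuotaExceeded

// Inflights enabled: the service already folded `incoming` into bytesTotal,
// so the reported total is compared against the quota as-is.
const reportedTotal = used + incoming; // maintained service-side
console.log(reportedTotal > quota);    // true => QuotaExceeded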
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};

View File

@ -1,117 +0,0 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, isBucketVersioned, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processCurrents(bucketName, listParams, isBucketVersioned, list);
pushMetric('listLifecycleCurrents', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleCurrents');
return callback(null, res);
}
/**
* listLifecycleCurrents - Return list of current versions/masters in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
* @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleCurrents(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleCurrents' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
const minEntriesToBeScanned = 1;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleCurrents',
request,
};
const listParams = {
listingType: 'DelimiterCurrent',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
marker: params.marker,
excludedDataStoreName,
maxScannedLifecycleListingEntries,
};
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleCurrents');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, isBucketVersioned, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleCurrents');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, isBucketVersioned, log, callback);
});
});
}
module.exports = {
listLifecycleCurrents,
};
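The max-keys handling above, extracted as a standalone sketch (the listingHardLimit value below is invented for illustration; the real one comes from constants.listingHardLimit):
const listingHardLimit = 10000; // invented; see constants.listingHardLimit
function computeMaxKeys(params) {
    const requestMaxKeys = params['max-keys'] ?
        Number.parseInt(params['max-keys'], 10) : 1000;
    if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
        throw new Error('InvalidArgument');
    }
    return { requestMaxKeys, actualMaxKeys: Math.min(listingHardLimit, requestMaxKeys) };
}
console.log(computeMaxKeys({ 'max-keys': '50000' })); // { requestMaxKeys: 50000, actualMaxKeys: 10000 }
console.log(computeMaxKeys({}));                      // { requestMaxKeys: 1000, actualMaxKeys: 1000 }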

View File

@ -1,127 +0,0 @@
const { errors, versioning } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const versionIdUtils = versioning.VersionID;
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processNonCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processNonCurrents(bucketName, listParams, list);
pushMetric('listLifecycleNonCurrents', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleNonCurrents');
return callback(null, res);
}
/**
* listLifecycleNonCurrents - Return list of non-current versions in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
* @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleNonCurrents' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is the required minimum because we must scan at least three entries to determine version eligibility:
// two entries represent the master key, and the following one represents the non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleNonCurrents',
request,
};
const listParams = {
listingType: 'DelimiterNonCurrent',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
keyMarker: params['key-marker'],
excludedDataStoreName,
maxScannedLifecycleListingEntries,
};
listParams.versionIdMarker = params['version-id-marker'] ?
versionIdUtils.decode(params['version-id-marker']) : undefined;
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!isBucketVersioned) {
log.debug('bucket is not versioned');
return callback(errors.InvalidRequest.customizeDescription(
'bucket is not versioned'), null);
}
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback);
});
});
}
module.exports = {
listLifecycleNonCurrents,
};
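On the version-id-marker decoding above, a hedged round-trip sketch with arsenal's VersionID helpers (the raw version ID is invented; decode returns an Error instance on malformed input, which callers should check):
const { versioning } = require('arsenal');
const raw = '98500086134471999999RG001  1';
const encoded = versioning.VersionID.encode(raw);
const decoded = versioning.VersionID.decode(encoded);
console.log(decoded === raw); // true for a well-formed round trip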

View File

@ -1,112 +0,0 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processOrphans(bucketName, listParams, list);
pushMetric('listLifecycleOrphanDeleteMarkers', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleOrphanDeleteMarkers');
return callback(null, res);
}
/**
* listLifecycleOrphanDeleteMarkers - Return list of expired object delete marker in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
* @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleOrphanDeleteMarkers' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is the required minimum because we must scan at least three entries to determine version eligibility:
// two entries represent the master key, and the following one represents the non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
return callback(errors.InvalidArgument);
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleOrphanDeleteMarkers',
request,
};
const listParams = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
marker: params.marker,
maxScannedLifecycleListingEntries,
};
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!isBucketVersioned) {
log.debug('bucket is not versioned');
return callback(errors.InvalidRequest.customizeDescription(
'bucket is not versioned'), null);
}
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback);
});
});
}
module.exports = {
listLifecycleOrphanDeleteMarkers,
};

Some files were not shown because too many files have changed in this diff.