Compare commits

2 Commits

Author SHA1 Message Date
Salim 33050820d7 ci: npm cache clean 2019-07-31 14:55:07 -07:00
Salim d5b540d2cf bf: update cdmiclient dependency 2019-07-31 14:25:03 -07:00
520 changed files with 8061 additions and 51228 deletions

View File

@ -1,8 +1,7 @@
node_modules node_modules
localData/* localData/*
localMetadata/* localMetadata/*
# Keep the .git/HEAD file in order to properly report version .git
.git/objects
.github .github
.tox .tox
coverage coverage

View File

@ -1,10 +1,6 @@
{ {
"extends": "scality", "extends": "scality",
"plugins": [
"mocha"
],
"rules": { "rules": {
"import/extensions": "off",
"lines-around-directive": "off", "lines-around-directive": "off",
"no-underscore-dangle": "off", "no-underscore-dangle": "off",
"indent": "off", "indent": "off",
@ -45,10 +41,6 @@
"no-restricted-properties": "off", "no-restricted-properties": "off",
"new-parens": "off", "new-parens": "off",
"no-multi-spaces": "off", "no-multi-spaces": "off",
"quote-props": "off", "quote-props": "off"
"mocha/no-exclusive-tests": "error",
},
"parserOptions": {
"ecmaVersion": 2020
} }
} }

View File

@ -48,7 +48,7 @@ Describe the results you expected
- Node.js version, - Node.js version,
- Docker version, - Docker version,
- yarn version, - npm version,
- distribution/OS, - distribution/OS,
- optional: anything else you deem helpful to us. - optional: anything else you deem helpful to us.

View File

@ -1,43 +0,0 @@
---
name: "Setup CI environment"
description: "Setup Cloudserver CI environment"
runs:
using: composite
steps:
- name: Setup etc/hosts
shell: bash
run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
- name: Setup Credentials
shell: bash
run: bash .github/scripts/credentials.bash
- name: Setup job artifacts directory
shell: bash
run: |-
set -exu;
mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: 'yarn'
- name: install dependencies
shell: bash
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- uses: actions/setup-python@v4
with:
python-version: 3.9
- name: Setup python2 test environment
shell: bash
run: |
sudo apt-get install -y libdigest-hmac-perl
pip install 's3cmd==2.3.0'
- name: fix sproxyd.conf permissions
shell: bash
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
- name: ensure fuse kernel module is loaded (for sproxyd)
shell: bash
run: sudo modprobe fuse

View File

@ -1,10 +0,0 @@
---
version: 2
updates:
- package-ecosystem: npm
directory: "/"
schedule:
interval: daily
time: "13:00"
open-pull-requests-limit: 10
target-branch: "development/7.4"

View File

@ -1,36 +0,0 @@
azurebackend_AZURE_STORAGE_ACCESS_KEY
azurebackend_AZURE_STORAGE_ACCOUNT_NAME
azurebackend_AZURE_STORAGE_ENDPOINT
azurebackend2_AZURE_STORAGE_ACCESS_KEY
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
azurebackend2_AZURE_STORAGE_ENDPOINT
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
azurebackendmismatch_AZURE_STORAGE_ENDPOINT
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
azuretest_AZURE_BLOB_ENDPOINT
b2backend_B2_ACCOUNT_ID
b2backend_B2_STORAGE_ACCESS_KEY
GOOGLE_SERVICE_EMAIL
GOOGLE_SERVICE_KEY
AWS_S3_BACKEND_ACCESS_KEY
AWS_S3_BACKEND_SECRET_KEY
AWS_S3_BACKEND_ACCESS_KEY_2
AWS_S3_BACKEND_SECRET_KEY_2
AWS_GCP_BACKEND_ACCESS_KEY
AWS_GCP_BACKEND_SECRET_KEY
AWS_GCP_BACKEND_ACCESS_KEY_2
AWS_GCP_BACKEND_SECRET_KEY_2
b2backend_B2_STORAGE_ENDPOINT
gcpbackend2_GCP_SERVICE_EMAIL
gcpbackend2_GCP_SERVICE_KEY
gcpbackend2_GCP_SERVICE_KEYFILE
gcpbackend_GCP_SERVICE_EMAIL
gcpbackend_GCP_SERVICE_KEY
gcpbackendmismatch_GCP_SERVICE_EMAIL
gcpbackendmismatch_GCP_SERVICE_KEY
gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE

View File

@ -1,92 +0,0 @@
services:
cloudserver:
image: ${CLOUDSERVER_IMAGE}
command: sh -c "yarn start > /artifacts/s3.log"
network_mode: "host"
volumes:
- /tmp/ssl:/ssl
- /tmp/ssl-kmip:/ssl-kmip
- ${HOME}/.aws/credentials:/root/.aws/credentials
- /tmp/artifacts/${JOB_NAME}:/artifacts
environment:
- CI=true
- ENABLE_LOCAL_CACHE=true
- REDIS_HOST=0.0.0.0
- REDIS_PORT=6379
- REPORT_TOKEN=report-token-1
- REMOTE_MANAGEMENT_DISABLE=1
- HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
- DATA_HOST=0.0.0.0
- METADATA_HOST=0.0.0.0
- S3BACKEND
- S3DATA
- S3METADATA
- MPU_TESTING
- S3VAULT
- S3_LOCATION_FILE
- ENABLE_UTAPI_V2
- BUCKET_DENY_FILTER
- S3KMS
- S3KMIP_PORT
- S3KMIP_HOSTS
- S3KMIP_COMPOUND_CREATE
- S3KMIP_BUCKET_ATTRIBUTE_NAME
- S3KMIP_PIPELINE_DEPTH
- S3KMIP_KEY
- S3KMIP_CERT
- S3KMIP_CA
- MONGODB_HOSTS=0.0.0.0:27018
- MONGODB_RS=rs0
- DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file:
- creds.env
depends_on:
- redis
extra_hosts:
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
- "pykmip.local:127.0.0.1"
redis:
image: redis:alpine
network_mode: "host"
squid:
network_mode: "host"
profiles: ['ci-proxy']
image: scality/ci-squid
command: >-
sh -c 'mkdir -p /ssl &&
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
-subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
-keyout /ssl/myca.pem -out /ssl/myca.pem &&
cp /ssl/myca.pem /ssl/CA.pem &&
squid -f /etc/squid/squid.conf -N -z &&
squid -f /etc/squid/squid.conf -NYCd 1'
volumes:
- /tmp/ssl:/ssl
pykmip:
network_mode: "host"
profiles: ['pykmip']
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts
mongo:
network_mode: "host"
profiles: ['mongo', 'ceph']
image: ${MONGODB_IMAGE}
ceph:
network_mode: "host"
profiles: ['ceph']
image: ghcr.io/scality/cloudserver/ci-ceph
sproxyd:
network_mode: "host"
profiles: ['sproxyd']
image: sproxyd-standalone
build: ./sproxyd
user: 0:0
privileged: yes
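
For orientation, the compose profiles above are what the CI jobs later in this diff select when they start services. A minimal sketch of bringing the stack up locally, assuming the interpolated image variables are exported first and that `creds.env` and `~/.aws/credentials` exist as the file expects (image tags below are placeholders):

```shell
# Sketch only: cloudserver and redis always start; the "mongo" profile adds mongo.
export CLOUDSERVER_IMAGE=ghcr.io/scality/cloudserver:local      # placeholder tag
export MONGODB_IMAGE=ghcr.io/scality/cloudserver/ci-mongodb:local  # placeholder tag
export JOB_NAME=local-run
docker compose --profile mongo up -d
```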

View File

@ -1,28 +0,0 @@
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \
CONF_DIR=/conf \
DATA_DIR=/data
# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
mkdir /logs; \
adduser --uid 1000 --disabled-password --gecos "" --quiet --shell /bin/bash scality
# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
chown -R ${USER} ${CONF_DIR} && \
chown -R ${USER} ${DATA_DIR}
# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js
EXPOSE 27017/tcp
EXPOSE 27018
# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]

View File

@ -1,4 +0,0 @@
rs.initiate({
_id: "rs0",
members: [{ _id: 0, host: "127.0.0.1:27018" }]
});

View File

@ -1,10 +0,0 @@
#!/bin/bash
set -exo pipefail
init_RS() {
sleep 5
mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &
mongod --bind_ip_all --config=/conf/mongod.conf
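
A quick way to confirm the replica set actually initialized, assuming the legacy `mongo` shell is available on the host and port 27018 is reachable as configured above (a sketch, not part of the image):

```shell
# Should report the PRIMARY state once initReplicaSet.js has run in the background.
mongo --port 27018 --quiet --eval 'printjson(rs.status().members.map(function (m) { return m.stateStr; }))'
```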

View File

@ -1,15 +0,0 @@
storage:
journal:
enabled: true
engine: wiredTiger
dbPath: "/data/db"
processManagement:
fork: false
net:
port: 27018
bindIp: 0.0.0.0
replication:
replSetName: "rs0"
enableMajorityReadConcern: true
security:
authorization: disabled

View File

@ -1,3 +0,0 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@ -1,26 +0,0 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

View File

@ -1,88 +0,0 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}

View File

@ -1,12 +0,0 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
},
}

View File

@ -1,43 +0,0 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root

View File

@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC6zCCAdOgAwIBAgIUPIpMY95b4HjKAk+FyydZApAEFskwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJDEQMA4GA1UECgwHU2NhbGl0eTEQ
MA4GA1UEAwwHUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AKqLFEsWtfRTxnoZrQe63tq+rQnVgninHMahRmXkzyjK/uNhoKnIh8bXdTC/eCZ6
FBROqBYNL0TJb0HDv1FzcZS1UCUldRqTlvr6wZb0pfrp40fvztsqQgAh1t/Blg5i
Zv5+ESSlNs5rWbFTxtq+FbMW/ERYTrVfnMkBiLg4Gq0HwID9a5jvJatzrrno2s1m
OfZCT3HaE3tMZ6vvYuoamvLNdvdH+9KeTmBCursfNejt0rSGjIqfi6DvFJSayydQ
is5DMSTbCLGdKQmA85VfEQmlQ8v0232WDSd6gVfp2tthDEDHnCbgWkEd1vsTyS85
ubdt5v4CWGOWV+mu3bf8xM0CAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAQEARTjc2zV/ol1/LsSzZy6l1R0uFBmR2KumH+Se1Yq2vKpY
Dv6xmrvmjOUr5RBO77nRhIgdcQA+LyAg8ii2Dfzc8r1RTD+j1bYOxESXctBOBcXM
Chy6FEBydR6m7S8qQyL+caJWO1WZWp2tapcm6sUG1oRVznWtK1/SHKIzOBwsmJ07
79KsCJ6wf9tzD05EDTI2QhAObE9/thy+zc8l8cmv9A6p3jKkx9rwXUttSUqTn0CW
w45bgKg6+DDcrhZ+MATbzuTfhuA4NFUTzK7KeX9sMuOV03Zs8SA3VhAOXmu063M3
0f9X7P/0RmGTTp7GGCqEINcZdbLh3k7CpFb2Ox998Q==
-----END CERTIFICATE-----

View File

@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC2zCCAcOgAwIBAgIUIlE8UAkqQ+6mbJDtrt9kkmi8aJYwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowKTEQMA4GA1UECgwHU2NhbGl0eTEV
MBMGA1UEAwwMcHlrbWlwLmxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAtxr7pq/lnzVeZz4z52Yc3DeaPqjNfRSyW5cPUlT7ABXFb7+tja7K2C7u
DYVK+Q+2yJCQwYJY47aKJB++ewam9t2V8Xy0Z8S+0I2ImCwuyeihaD/f6uJZRzms
ycdECH22BA6tCPlQLnlboRiZzI6rcIvXAbUMvLvFm3nyYIs9qidExRnfyMjISknM
V+83LT5QW4IcHgKYqzdz2ZmOnk+f4wmMmitcivTdIZCL8Z0cxr7BJlOh5JZ/V5uj
WUXeNa+ttW0RKKBlg9T+wj0JvwoJBPZTmsMAy3tI9tjLg3DwGYKsflbFeU2tebXI
gncGFZ/dFxj331GGtq3kz1PzAUYf2wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB1
8HgJ0fu6/pCrDxAm90eESFjmaTFyTN8q00zhq4Cb3zAT9KMWzAygkZ9n4ZFgELPo
7kBE2H6RcDdoBmjVYd8HnBloDdYzYbncKgt5YBvxRaMSF4/l65BM8wjatyXErqnH
QLLTRe5AuF0/F0KtPeDQ2JFVu8dZ35W3fyKGPRsEdVOSCTHROmqpGhZCpscyUP4W
Hb0dBTESQ9mQHw14OCaaahARd0X5WdcA/E+m0fpGqj1rQCXS+PrRcSLe1E1hqPlK
q/hXSXD5nybwipktELvJCbB7l4HmJr2pIpldeR5+ef68Cs8hqs6DRlsJX9sK2ng+
TFe5v6SCarqZ9kFvr6Yp
-----END CERTIFICATE-----

View File

@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC8zCCAdugAwIBAgIUBs6nVXQXhrFbClub3aSLg72/DiYwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJTEQMA4GA1UECgwHU2NhbGl0eTER
MA8GA1UEAwwISm9obiBEb2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQC6neSYoBoWh/i2mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4L
jLLRsPGt9KkJlUhHGWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jt
UDnQUwshj+hRFe0iKAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsH
PVju9yZBYzYhwAMyYFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFip
yR2Fh3WGSwWh45HgMT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+p
SMLm0T4iNxedQWBtDM7ts4EjAgMBAAGjGjAYMBYGA1UdJQEB/wQMMAoGCCsGAQUF
BwMCMA0GCSqGSIb3DQEBCwUAA4IBAQCMi9HEhZc5jHJMj18Wq00fZy4O9XtjCe0J
nntW9tzi3rTQcQWKA7i9uVdDoCg+gMFVxWMvV7luFEUc/VYV1v8hFfbIFygzFsZY
xwv4GQaIwbsgzD+oziia53w0FSuNL0uE0MeKvrt3yzHxCxylHyl+TQd/UdAtAo+k
RL1sI0mBZx5qo6d1J7ZMCxzAGaT7KjnJvziFr/UbfSNnwDsxsUwGaI1ZeAxJN8DI
zTrg3f3lrrmHcauEgKnuQwIqaMZR6veG6RkjtcYSlJYID1irkE6njs7+wivOAkzt
fBt/0PD76FmAI0VArgU/zDB8dGyYzrq39W749LuEfm1TPmlnUtDr
-----END CERTIFICATE-----

View File

@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6neSYoBoWh/i2
mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4LjLLRsPGt9KkJlUhH
GWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jtUDnQUwshj+hRFe0i
KAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsHPVju9yZBYzYhwAMy
YFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFipyR2Fh3WGSwWh45Hg
MT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+pSMLm0T4iNxedQWBt
DM7ts4EjAgMBAAECggEANNXdUeUKXdSzcycPV/ea/c+0XFcy8e9B46lfQTpTqQOx
xD8GbWD1L/gdk6baJgT43+ukEWdSsJbmdtLXti29Ta8OF2VtIDhIbCVtvs3dq3zt
vrvugsiVDr8nkP306qOrKrNIVIFE+igmEmSaXsu/h/33ladxeeV9/s2DC7NOOjWN
Mu4KYr5BBbu3qAavdzbrcz7Sch+GzsYqK/pBounCTQu3o9E4TSUcmcsasWmtHN3u
e6G2UjObdzEW7J0wWvvtJ0wHQUVRueHfqwqKf0dymcZ3xOlx3ZPhKPz5n4F1UGUt
RQaNazqs5SzZpUgDuPw4k8h/aCHK21Yexw/l4+O9KQKBgQD1WZSRK54zFoExBQgt
OZSBNZW3Ibti5lSiF0M0g+66yNZSWfPuABEH0tu5CXopdPDXo4kW8NLGEqQStWTX
RGK0DE9buEL3eebOfjIdS2IZ3t3dX3lMypplVCj4HzAgITlweSH1LLTyAtaaOpwa
jksqfcn5Zw+XGkyc6GBBVaZetQKBgQDCt6Xf/g26+zjvHscjdzsfBhnYvTOrr6+F
xqFFxOEOocGr+mL7UTAs+a9m/6lOWhlagk+m+TIZNL8o3IN7KFTYxPYPxTiewgVE
rIm3JBmPxRiPn01P3HrtjaqfzsXF30j3ele7ix5OxieZq4vsW7ZXP3GZE34a08Ov
12sE1DlvdwKBgQDzpYQOLhyqazzcqzyVfMrnDYmiFVN7QXTmiudobWRUBUIhAcdl
oJdJB7K/rJOuO704x+RJ7dnCbZyWH6EGzZifaGIemXuXO21jvpqR0NyZCGOXhUp2
YfS1j8AntwEZxyS9du2sBjui4gKvomiHTquChOxgSmKHEcznPTTpbN8MyQKBgF5F
LVCZniolkLXsL7tS8VOez4qoZ0i6wP7CYLf3joJX+/z4N023S9yqcaorItvlMRsp
tciAIyoi6F2vDRTmPNXJ3dtav4PVKVnLMs1w89MwOCjoljSQ6Q7zpGTEZenbpWbz
W2BYBS9cLjXu4MpoyInLFINo9YeleLs8TvrCiKAXAoGBANsduqLnlUW/f5zDb5Fe
SB51+KhBjsVIeYmU+8xtur9Z7IxZXK28wpoEsm7LmX7Va5dERjI+tItBiJ5+Unu1
Xs2ljDg35ARKHs0dWBJGpbnZg4dbT6xpIL4YMPXm1Zu++PgRpxPIMn646xqd8GlH
bavm6Km/fXNG58xus+EeLpV5
-----END PRIVATE KEY-----

View File

@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3Gvumr+WfNV5n
PjPnZhzcN5o+qM19FLJblw9SVPsAFcVvv62NrsrYLu4NhUr5D7bIkJDBgljjtook
H757Bqb23ZXxfLRnxL7QjYiYLC7J6KFoP9/q4llHOazJx0QIfbYEDq0I+VAueVuh
GJnMjqtwi9cBtQy8u8WbefJgiz2qJ0TFGd/IyMhKScxX7zctPlBbghweApirN3PZ
mY6eT5/jCYyaK1yK9N0hkIvxnRzGvsEmU6Hkln9Xm6NZRd41r621bREooGWD1P7C
PQm/CgkE9lOawwDLe0j22MuDcPAZgqx+VsV5Ta15tciCdwYVn90XGPffUYa2reTP
U/MBRh/bAgMBAAECggEABCvcMcbuDztzBB0Zp5re63Fk1SqZS9Et4wJE+hYvhaf5
UHtoY8LoohYnnC0+MQBXpKgOdCoZBk8BRKNofnr/UL5pjQ/POFH2GuAujXDsO/NN
wgc6fapcaE/7DLm6ZgsfG2aOMJclaXmgScI6trtFUpIM+t/6A06vyMP1bpeddwPW
Fqu7NvpDiEcTRUGd+z1JooYgUhGgC7peYUx5+9zqFrwoDBKxnUOnz3BkDsXBy3qm
65Vu0BSjuJzf6vVMpNGUHY6JXjopVNWku+JAX0wD+iikOd5sziNVdIj1fnZ+IHIf
7G5h5owHpvSGzJFQ18/g5VHtJdCm+4WQSnbSJRsCAQKBgQDu4IH8yspyeH44fhoS
PAp/OtILqSP+Da0zAp2LbhrOgyzyuSTdEAYyptqjqHS6QkB1Bu1H44FS0BYUxRXc
iu2e9AndiLVCGngsE7TpA/ZVLN1B0LEZEHjM6p4d6zZM6iveKVnPAOkTWTBAgzCt
b31nj4jL8PdlPKQil1AMrOlRAQKBgQDEOwshzIdr2Iy6B/n4CuBViEtwnbAd5f/c
atA9bcfF8kCahokJsI4eCCLgBwDZpYKD+v0AwOBlacF6t6TX+vdlJsi5EP7uxZ22
ILsuWqVm/0H77PACuckc5/qLZoGGC81l0DhnpoeMEb6r/TKOo5xAK1gxdlwNNrq+
nP1zdZnU2wKBgBAS92xFUR4m0YeHpMV5WNN658t1FEDyNqdqE6PgQtmGpi2nG73s
aB5cb/X3TfOCpce6MZlWy8sAyZuYL4Jprte1YDySCHBsS43bvZ64b4kHvdPB8UjY
fOh9GSq2Oy8tysnmSm7NhuGQbNjKeyoQiIXBeNkQW/VqATl6qR5RPFoBAoGACNqV
JQBCd/Y8W0Ry3eM3vgQ5SyqCQMcY5UwYez0Rz3efvJknY72InAhH8o2+VxOlsOjJ
M5iAR3MfHLdeg7Q6J2E5m0gOCJ34ALi3WV8TqXMI+iH1rlnNnjVFU7bbTz4HFXnw
oZSc9w/x53a0KkVtjmOmRg0OGDaI9ILG2MfMmhMCgYB8ZqJtX8qZ2TqKU3XdLZ4z
T2N7xMFuKohWP420r5jKm3Xw85IC+y1SUTB9XGcL79r2eJzmzmdKQ3A3sf3oyUH3
RdYWxtKcZ5PAE8hVRtn1ETZqUgxASGOUn/6w0npkYSOXPU5bc0W6RSLkjES0i+c3
fv3OMNI8qpmQhEjpHHQS1g==
-----END PRIVATE KEY-----

View File

@ -1,35 +0,0 @@
name: Test alerts
on:
push:
branches-ignore:
- 'development/**'
- 'q/*/**'
jobs:
run-alert-tests:
runs-on: ubuntu-latest
strategy:
matrix:
tests:
- name: 1 minute interval tests
file: monitoring/alerts.test.yaml
- name: 10 seconds interval tests
file: monitoring/alerts.10s.test.yaml
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.3
with:
alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }}
alert_inputs: |
namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,25 +0,0 @@
---
name: codeQL
on:
push:
branches: [w/**, q/*]
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
workflow_dispatch:
jobs:
analyze:
name: Static analysis with CodeQL
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v3

View File

@ -1,16 +0,0 @@
---
name: dependency review
on:
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4

View File

@ -1,80 +0,0 @@
---
name: release
run-name: release ${{ inputs.tag }}
on:
workflow_dispatch:
inputs:
tag:
description: 'Tag to be released'
required: true
env:
PROJECT_NAME: ${{ github.event.repository.name }}
jobs:
build-federation-image:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
uses: docker/build-push-action@v5
with:
push: true
context: .
file: images/svc-base/Dockerfile
tags: |
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Push dashboards into the production namespace
run: |
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
dashboard.json:application/grafana-dashboard+json \
alerts.yaml:application/prometheus-alerts+yaml
working-directory: monitoring
- name: Build and push
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create Release
uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ github.token }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}
generate_release_notes: true
target_commitish: ${{ github.sha }}
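
Since this workflow is `workflow_dispatch`-only, it has to be started by hand. One way to do that with the GitHub CLI, assuming the workflow file is named `release.yaml` (not shown in this diff) and using a placeholder tag:

```shell
# Hypothetical manual trigger; the file name and tag value are assumptions.
gh workflow run release.yaml -f tag=1.2.3
```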

View File

@ -1,533 +0,0 @@
---
name: tests
on:
workflow_dispatch:
push:
branches-ignore:
- 'development/**'
- 'q/*/**'
env:
# Secrets
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurebackend_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
b2backend_B2_STORAGE_ACCESS_KEY: >-
${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
gcpbackendmismatch_GCP_SERVICE_KEY: >-
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
# Configs
ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
linting-coverage:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- name: Install python deps
run: pip install flake8
- name: Lint Javascript
run: yarn run --silent lint -- --max-warnings 0
- name: Lint Markdown
run: yarn run --silent lint_md
- name: Lint python
run: flake8 $(git ls-files "*.py")
- name: Lint Yaml
run: yamllint -c yamllint.yml $(git ls-files "*.yml")
- name: Unit Coverage
run: |
set -ex
mkdir -p $CIRCLE_TEST_REPORTS/unit
yarn test
yarn run test_legacy_location
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
CIRCLE_TEST_REPORTS: /tmp
CIRCLE_ARTIFACTS: /tmp
CI_REPORTS: /tmp
- name: Unit Coverage logs
run: find /tmp/unit -exec cat {} \;
- name: preparing junit files for upload
run: |
mkdir -p artifacts/junit
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always()
- name: Upload files to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: artifacts
if: always()
build:
runs-on: ubuntu-20.04
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push cloudserver image
uses: docker/build-push-action@v5
with:
push: true
context: .
provenance: false
tags: |
ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
uses: docker/build-push-action@v5
with:
push: true
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend:
runs-on: ubuntu-latest
needs: build
env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile sproxyd up -d
working-directory: .github/docker
- name: Run multiple backend test
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
mongo-v0-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
mongo-v1-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
file-ft-tests:
strategy:
matrix:
include:
- job-name: file-ft-tests
name: ${{ matrix.job-name }}
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: file
S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
shell: bash
run: |
set -exu
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
- name: Setup CI services
run: docker compose up -d
working-directory: .github/docker
- name: Run file ft tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
utapi-v2-tests:
runs-on: ubuntu-latest
needs: build
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose up -d
working-directory: .github/docker
- name: Run file utapi v2 tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
kmip-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: file
S3VAULT: mem
MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy KMIP certs
run: cp -r ./certs /tmp/ssl-kmip
working-directory: .github/pykmip
- name: Setup CI services
run: docker compose --profile pykmip up -d
working-directory: .github/docker
- name: Run file KMIP tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
ceph-backend-test:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
S3DATA: multiple
S3KMS: file
CI_CEPH: 'true'
MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1
with:
ruby-version: '2.5.9'
- name: Install Ruby dependencies
run: |
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
- name: Install Java dependencies
run: |
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
- name: Setup CI services
run: docker compose --profile ceph up -d
working-directory: .github/docker
env:
S3METADATA: mongodb
- name: Run Ceph multiple backend tests
run: |-
set -ex -o pipefail;
bash .github/ceph/wait_for_ceph.sh
bash wait_for_local_port.bash 27018 40
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
S3METADATA: mem
- name: Run Java tests
run: |-
set -ex -o pipefail;
mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
working-directory: tests/functional/jaws
- name: Run Ruby tests
run: |-
set -ex -o pipefail;
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
working-directory: tests/functional/fog
- name: Run Javascript AWS SDK tests
run: |-
set -ex -o pipefail;
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
S3BACKEND: file
S3VAULT: mem
S3METADATA: mongodb
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
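
The functional-test jobs above all follow the same pattern: start the compose stack, wait for port 8000, then run a yarn script and tee the log. A rough local equivalent of the `file-ft-tests` job, assuming the cloudserver image has already been built and tagged (tags below are placeholders), might look like:

```shell
# Sketch of running the file-backend functional tests outside of CI.
export CLOUDSERVER_IMAGE=ghcr.io/scality/cloudserver:local
export MONGODB_IMAGE=ghcr.io/scality/cloudserver/ci-mongodb:local
export S3BACKEND=file S3VAULT=mem MPU_TESTING=yes JOB_NAME=file-ft-tests
mkdir -p /tmp/artifacts/${JOB_NAME}
(cd .github/docker && docker compose up -d)
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${JOB_NAME}/tests.log
```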

View File

@ -1,60 +1,27 @@
ARG NODE_VERSION=16.20-bullseye-slim FROM node:10-slim
MAINTAINER Giorgio Regni <gr@scality.com>
FROM node:${NODE_VERSION} as builder
WORKDIR /usr/src/app
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
curl \
git \
gnupg2 \
jq \
python3 \
ssh \
wget \
libffi-dev \
zlib1g-dev \
&& apt-get clean \
&& mkdir -p /root/ssh \
&& ssh-keyscan -H github.com > /root/ssh/known_hosts
ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
################################################################################
FROM node:${NODE_VERSION}
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
&& rm -rf /var/lib/apt/lists/*
ENV NO_PROXY localhost,127.0.0.1 ENV NO_PROXY localhost,127.0.0.1
ENV no_proxy localhost,127.0.0.1 ENV no_proxy localhost,127.0.0.1
EXPOSE 8000 EXPOSE 8000
EXPOSE 8002
RUN apt-get update && \ COPY ./package.json /usr/src/app/
apt-get install -y --no-install-recommends \
jq \
tini \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app WORKDIR /usr/src/app
# Keep the .git directory in order to properly report version RUN apt-get update \
COPY . /usr/src/app && apt-get install -y jq python git build-essential --no-install-recommends \
COPY --from=builder /usr/src/app/node_modules ./node_modules/ && npm install --production \
&& rm -rf /var/lib/apt/lists/* \
&& npm cache clear --force \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/npm-*
COPY . /usr/src/app
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"] VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"] ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ] CMD [ "npm", "start" ]

View File

@ -7,16 +7,16 @@ COPY . /usr/src/app
RUN apt-get update \ RUN apt-get update \
&& apt-get install -y jq python git build-essential --no-install-recommends \ && apt-get install -y jq python git build-essential --no-install-recommends \
&& yarn install --production \ && npm install --production \
&& apt-get autoremove --purge -y python git build-essential \ && apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \ && rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \ && npm cache clear \
&& rm -rf ~/.node-gyp \ && rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-* && rm -rf /tmp/npm-*
ENV S3BACKEND mem ENV S3BACKEND mem
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"] ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ] CMD [ "npm", "start" ]
EXPOSE 8000 EXPOSE 8000

View File

@ -1,7 +1,6 @@
# S3 Healthcheck # S3 Healthcheck
Scality S3 exposes a healthcheck route `/live` on the port used Scality S3 exposes a healthcheck route `/_/healthcheck` which returns a
for the metrics (defaults to port 8002) which returns a
response with HTTP code response with HTTP code
- 200 OK - 200 OK
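
Either variant of the route can be probed with curl; the `/live` route sits on the metrics port (documented default 8002), while the older `/_/healthcheck` route is assumed here to be served on the usual S3 port 8000:

```shell
curl -i http://localhost:8002/live             # route on the metrics port
curl -i http://localhost:8000/_/healthcheck    # older route on the S3 port
```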

README.md
View File

@ -1,7 +1,10 @@
# Zenko CloudServer with Vitastor Backend # Zenko CloudServer
![Zenko CloudServer logo](res/scality-cloudserver-logo.png) ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
## Overview ## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -11,71 +14,126 @@ Scality's Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud. backend data storage both on-premise or public in the cloud.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor) CloudServer is useful for Developers, either to run as part of a
backend support. continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage enabled
application on the go.
## Quick Start with Vitastor ## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
Vitastor Backend is in experimental status, however you can already try to ## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
it works too 😊.
Installation instructions: ## Docker
### Install Vitastor [Run your Zenko CloudServer with Docker](https://hub.docker.com/r/scality/s3server/)
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md). ## Contributing
### Install Zenko with Vitastor Backend In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor` ## Installation
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
### Install and Configure MongoDB ### Dependencies
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/). Building and running the Zenko CloudServer requires node.js 6.9.5 and npm v3
. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
### Setup Zenko ### Clone source code
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data` ```shell
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data` git clone https://github.com/scality/S3.git
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
in this file.
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
``` ```
``` ### Install js dependencies
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \ Go to the ./S3 folder,
geesefs --endpoint http://localhost:8000 testbucket mountdir
```shell
npm install
``` ```
# Author & License If you get an error regarding installation of the diskUsage module,
please install g++.
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0) If you get an error regarding level-down bindings, try clearing your npm cache:
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way) ```shell
npm cache clear
```
## Run it with a file backend
```shell
npm start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
npm start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
npm start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
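
For illustration, the same header can be set through the AWS CLI's `--metadata` option, which prefixes keys with `x-amz-meta-`; the bucket, key, and constraint names below are placeholders:

```shell
aws --endpoint-url http://localhost:8000 s3api put-object \
    --bucket mybucket --key myobject --body ./myfile \
    --metadata scal-location-constraint=myLocationConstraint
```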
## Run it with an in-memory backend
```shell
npm run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae

View File

@ -1,2 +1,2 @@
--- ---
theme: jekyll-theme-modernist theme: jekyll-theme-minimal

View File

@ -13,26 +13,20 @@ function _performSearch(host,
port, port,
bucketName, bucketName,
query, query,
listVersions,
accessKey, accessKey,
secretKey, secretKey,
sessionToken,
verbose, ssl) { verbose, ssl) {
const escapedSearch = encodeURIComponent(query); const escapedSearch = encodeURIComponent(query);
const options = { const options = {
host, host,
port, port,
method: 'GET', method: 'GET',
path: `/${bucketName}/?search=${escapedSearch}${listVersions ? '&&versions' : ''}`, path: `/${bucketName}/?search=${escapedSearch}`,
headers: { headers: {
'Content-Length': 0, 'Content-Length': 0,
}, },
rejectUnauthorized: false, rejectUnauthorized: false,
versions: '',
}; };
if (sessionToken) {
options.headers['x-amz-security-token'] = sessionToken;
}
const transport = ssl ? https : http; const transport = ssl ? https : http;
const request = transport.request(options, response => { const request = transport.request(options, response => {
if (verbose) { if (verbose) {
@ -61,9 +55,9 @@ function _performSearch(host,
// generateV4Headers expects request object with path that does not // generateV4Headers expects request object with path that does not
// include query // include query
request.path = `/${bucketName}`; request.path = `/${bucketName}`;
const requestData = listVersions ? { search: query, versions: '' } : { search: query }; auth.client.generateV4Headers(request, { search: query },
auth.client.generateV4Headers(request, requestData, accessKey, secretKey, 's3'); accessKey, secretKey, 's3');
request.path = `/${bucketName}?search=${escapedSearch}${listVersions ? '&&versions' : ''}`; request.path = `/${bucketName}?search=${escapedSearch}`;
if (verbose) { if (verbose) {
logger.info('request headers', { headers: request._headers }); logger.info('request headers', { headers: request._headers });
} }
@ -82,17 +76,15 @@ function searchBucket() {
.version('0.0.1') .version('0.0.1')
.option('-a, --access-key <accessKey>', 'Access key id') .option('-a, --access-key <accessKey>', 'Access key id')
.option('-k, --secret-key <secretKey>', 'Secret access key') .option('-k, --secret-key <secretKey>', 'Secret access key')
.option('-t, --session-token <sessionToken>', 'Session token')
.option('-b, --bucket <bucket>', 'Name of the bucket') .option('-b, --bucket <bucket>', 'Name of the bucket')
.option('-q, --query <query>', 'Search query') .option('-q, --query <query>', 'Search query')
.option('-h, --host <host>', 'Host of the server') .option('-h, --host <host>', 'Host of the server')
.option('-p, --port <port>', 'Port of the server') .option('-p, --port <port>', 'Port of the server')
.option('-s', '--ssl', 'Enable ssl') .option('-s', '--ssl', 'Enable ssl')
.option('-l, --list-versions', 'List all versions of the objects that meet the search query, ' +
'otherwise only list the latest version')
.option('-v, --verbose') .option('-v, --verbose')
.parse(process.argv); .parse(process.argv);
const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } =
const { host, port, accessKey, secretKey, bucket, query, verbose, ssl } =
commander; commander;
if (!host || !port || !accessKey || !secretKey || !bucket || !query) { if (!host || !port || !accessKey || !secretKey || !bucket || !query) {
@ -101,7 +93,7 @@ function searchBucket() {
process.exit(1); process.exit(1);
} }
_performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose, _performSearch(host, port, bucket, query, accessKey, secretKey, verbose,
ssl); ssl);
} }
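
Taken together with the option list above, a typical invocation of the search tool looks roughly like the following; the script path and the example query are assumptions for illustration, not taken from this diff:

```shell
# Hypothetical invocation; -l lists all object versions matching the query.
node bin/search_bucket.js -a accessKey1 -k verySecretKey1 -b mybucket \
    -q "key LIKE 'photos/%'" -h localhost -p 8000 -l -v
```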

View File

@ -1,10 +1,7 @@
{ {
"port": 8000, "port": 8000,
"listenOn": [], "listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001", "replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": { "restEndpoints": {
"localhost": "us-east-1", "localhost": "us-east-1",
"127.0.0.1": "us-east-1", "127.0.0.1": "us-east-1",
@ -42,10 +39,6 @@
"host": "localhost", "host": "localhost",
"port": 8900 "port": 8900
}, },
"workflowEngineOperator": {
"host": "localhost",
"port": 3001
},
"cdmi": { "cdmi": {
"host": "localhost", "host": "localhost",
"port": 81, "port": 81,
@ -96,19 +89,11 @@
"recordLogName": "s3-recordlog" "recordLogName": "s3-recordlog"
}, },
"mongodb": { "mongodb": {
"replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020", "replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020",
"writeConcern": "majority", "writeConcern": "majority",
"replicaSet": "rs0", "replicaSet": "rs0",
"readPreference": "primary", "readPreference": "primary",
"database": "metadata" "database": "metadata"
},
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
}, },
"externalBackends": { "externalBackends": {
"aws_s3": { "aws_s3": {
@ -127,17 +112,5 @@
"maxSockets": null "maxSockets": null
} }
} }
}, }
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
} }

View File

@ -1,71 +0,0 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@ -86,38 +86,34 @@ const constants = {
// In testing, AWS seems to allow up to 88 more bytes, so we do the same. // In testing, AWS seems to allow up to 88 more bytes, so we do the same.
maximumMetaHeadersSize: 2136, maximumMetaHeadersSize: 2136,
// Maximum HTTP headers size allowed
maxHttpHeadersSize: 14122,
// hex digest of sha256 hash of empty string: // hex digest of sha256 hash of empty string:
emptyStringHash: crypto.createHash('sha256') emptyStringHash: crypto.createHash('sha256')
.update('', 'binary').digest('hex'), .update('', 'binary').digest('hex'),
// Queries supported by AWS that we do not currently support. // Queries supported by AWS that we do not currently support.
// Non-bucket queries
unsupportedQueries: [ unsupportedQueries: [
'accelerate', 'accelerate',
'analytics', 'analytics',
'inventory', 'inventory',
'logging', 'logging',
'metrics', 'metrics',
'policyStatus', 'notification',
'publicAccessBlock',
'requestPayment', 'requestPayment',
'restore',
'torrent', 'torrent',
], ],
// Headers supported by AWS that we do not currently support. // Headers supported by AWS that we do not currently support.
unsupportedHeaders: [ unsupportedHeaders: [
'x-amz-server-side-encryption',
'x-amz-server-side-encryption-customer-algorithm', 'x-amz-server-side-encryption-customer-algorithm',
'x-amz-server-side-encryption-aws-kms-key-id',
'x-amz-server-side-encryption-context', 'x-amz-server-side-encryption-context',
'x-amz-server-side-encryption-customer-key', 'x-amz-server-side-encryption-customer-key',
'x-amz-server-side-encryption-customer-key-md5', 'x-amz-server-side-encryption-customer-key-md5',
], ],
// user metadata header to set object locationConstraint // user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-storage-class', objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'], legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties // declare here all existing service accounts and their properties
// (if any, otherwise an empty object) // (if any, otherwise an empty object)
@ -130,7 +126,7 @@ const constants = {
}, },
}, },
/* eslint-disable camelcase */ /* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true }, externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true },
// some of the available data backends (if called directly rather // some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided // than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods. // as a string as first parameter of the get/delete methods.
@ -152,97 +148,6 @@ const constants = {
productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko', productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
// location constraint delimiter // location constraint delimiter
zenkoSeparator: ':', zenkoSeparator: ':',
// user metadata applied on zenko objects
zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
bucketOwnerActions: [
'bucketDeleteCors',
'bucketDeleteLifecycle',
'bucketDeletePolicy',
'bucketDeleteReplication',
'bucketDeleteWebsite',
'bucketGetCors',
'bucketGetLifecycle',
'bucketGetLocation',
'bucketGetPolicy',
'bucketGetReplication',
'bucketGetVersioning',
'bucketGetWebsite',
'bucketPutCors',
'bucketPutLifecycle',
'bucketPutPolicy',
'bucketPutReplication',
'bucketPutVersioning',
'bucketPutWebsite',
'objectDeleteTagging',
'objectGetTagging',
'objectPutTagging',
'objectPutLegalHold',
'objectPutRetention',
],
// response header to be sent when there are invalid
// user metadata in the object's metadata
invalidObjectUserMetadataHeader: 'x-amz-missing-meta',
// Bucket specific queries supported by AWS that we do not currently support
// these queries may or may not be supported at object level
unsupportedBucketQueries: [
],
suppressedUtapiEventFields: [
'object',
'location',
'versionId',
],
allowedUtapiEventFilterFields: [
'operationId',
'location',
'account',
'user',
'bucket',
],
arrayOfAllowed: [
'objectPutTagging',
'objectPutLegalHold',
'objectPutRetention',
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'],
lifecycleListing: {
CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',
ORPHAN_DM_TYPE: 'orphan',
},
multiObjectDeleteConcurrency: 50,
maxScannedLifecycleListingEntries: 10000,
overheadField: [
'content-length',
'owner-id',
'versionId',
'isNull',
'isDeleteMarker',
],
unsupportedSignatureChecksums: new Set([
'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
]),
supportedSignatureChecksums: new Set([
'UNSIGNED-PAYLOAD',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
]),
ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
actionsToConsiderAsObjectPut: [
'initiateMultipartUpload',
'objectPutPart',
'completeMultipartUpload',
],
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
}; };
module.exports = constants; module.exports = constants;

View File

@ -147,14 +147,6 @@ if [[ "$CRR_METRICS_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT" JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
fi fi
if [[ "$WE_OPERATOR_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.host=\"$WE_OPERATOR_HOST\""
fi
if [[ "$WE_OPERATOR_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.port=$WE_OPERATOR_PORT"
fi
if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]" JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi fi
@ -195,14 +187,6 @@ if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi fi
if [[ -n "$BUCKET_DENY_FILTER" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
fi
if [[ "$TESTING_MODE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
fi
if [[ $JQ_FILTERS_CONFIG != "." ]]; then if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json mv config.json.tmp config.json

View File

@ -746,7 +746,7 @@ Operation
Startup Startup
~~~~~~~ ~~~~~~~
The simplest deployment is still to launch with yarn start, this will The simplest deployment is still to launch with npm start, this will
start one instance of the Zenko CloudServer connector and will listen on the start one instance of the Zenko CloudServer connector and will listen on the
locally bound dmd ports 9990 and 9991 (by default, see below). locally bound dmd ports 9990 and 9991 (by default, see below).
@ -755,7 +755,7 @@ command in the Zenko CloudServer directory:
:: ::
yarn run start_dmd npm run start_dmd
This will open two ports: This will open two ports:
@ -770,7 +770,7 @@ elsewhere with:
.. code:: sh .. code:: sh
yarn run start_s3server npm run start_s3server
Configuration Configuration
~~~~~~~~~~~~~ ~~~~~~~~~~~~~

View File

@ -1,146 +0,0 @@
# Bucket Policy Documentation
## Description
Bucket policy is a method of controlling access to a user's buckets and
objects at the resource level.
There are three associated APIs:
- PUT Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html)
- GET Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETpolicy.html)
- DELETE Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html)
More information on bucket policies in general can be found at
https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html.
## Requirements
To prevent loss of access to a bucket, the root owner of a bucket will always
be able to perform any of the three bucket policy-related operations, even
if permission is explicitly denied.
All other users must have permission to perform the desired operation.
## Design
On a PUTBucketPolicy request, the user provides a policy in JSON format.
The policy is evaluated against our policy schema in Arsenal and, once
validated, is stored as part of the bucket's metadata.
On a GETBucketPolicy request, the policy is retrieved from the bucket's
metadata.
On a DELETEBucketPolicy request, the policy is deleted from the bucket's
metadata.
All other APIs are updated to check if a bucket policy is attached to the bucket
the request is made on. If there is a policy, user authorization to perform
the requested action is checked.
### Differences Between Bucket and IAM Policies
IAM policies are attached to an IAM identity and define which actions that
identity is allowed or denied to perform on which resources.
Bucket policies attach only to buckets and define what actions are allowed or
denied for which principals on that bucket. Permissions specified in a bucket
policy apply to all objects in that bucket unless otherwise specified.
Besides their attachment origins, the main structural difference between an
IAM policy and a bucket policy is that a bucket policy requires a "Principal"
element. This field is unnecessary in an IAM policy because the policy is
already attached to the principal it applies to.
### Policy Validation
For general guidelines for bucket policy structure, see examples here:
https://docs.aws.amazon.com/AmazonS3/latest/dev//example-bucket-policies.html.
Each bucket policy statement object requires at least four keys:
"Effect", "Principle", "Resource", and "Action".
"Effect" defines the effect of the policy and can have a string value of either
"Allow" or "Deny".
"Resource" defines to which bucket or list of buckets a policy is attached.
An object within the bucket is also a valid resource. The element value can be
either a single bucket or object ARN string or an array of ARNs.
"Action" lists which action(s) the policy controls. Its value can also be either
a string or array of S3 APIs. Each action is the API name prepended by "s3:".
"Principle" specifies which user(s) are granted or denied access to the bucket
resource. Its value can be a string or an object containing an array of users.
Valid users can be identified with an account ARN, account id, or user ARN.
There are also two optional bucket policy statement keys: Sid and Condition.
"Sid" stands for "statement id". If this key is not included, one will be
generated for the statement.
"Condition" lists the condition under which a statement will take affect.
The possibilities are as follows:
- ArnEquals
- ArnEqualsIfExists
- ArnLike
- ArnLikeIfExists
- ArnNotEquals
- ArnNotEqualsIfExists
- ArnNotLike
- ArnNotLikeIfExists
- BinaryEquals
- BinaryEqualsIfExists
- BinaryNotEquals
- BinaryNotEqualsIfExists
- Bool
- BoolIfExists
- DateEquals
- DateEqualsIfExists
- DateGreaterThan
- DateGreaterThanEquals
- DateGreaterThanEqualsIfExists
- DateGreaterThanIfExists
- DateLessThan
- DateLessThanEquals
- DateLessThanEqualsIfExists
- DateLessThanIfExists
- DateNotEquals
- DateNotEqualsIfExists
- IpAddress
- IpAddressIfExists
- NotIpAddress
- NotIpAddressIfExists
- Null
- NumericEquals
- NumericEqualsIfExists
- NumericGreaterThan
- NumericGreaterThanEquals
- NumericGreaterThanEqualsIfExists
- NumericGreaterThanIfExists
- NumericLessThan
- NumericLessThanEquals
- NumericLessThanEqualsIfExists
- NumericLessThanIfExists
- NumericNotEquals
- NumericNotEqualsIfExists
- StringEquals
- StringEqualsIfExists
- StringEqualsIgnoreCase
- StringEqualsIgnoreCaseIfExists
- StringLike
- StringLikeIfExists
- StringNotEquals
- StringNotEqualsIfExists
- StringNotEqualsIgnoreCase
- StringNotEqualsIgnoreCaseIfExists
- StringNotLike
- StringNotLikeIfExists
The value of the Condition key will be an object containing the desired
condition name as that key. The value of the inner object can be a string, boolean,
number, or object, depending on the condition.
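
To make the structure concrete, here is a minimal, hypothetical example of a complete policy being attached with the AWS SDK for JavaScript. The endpoint and credentials are CloudServer's local defaults; the bucket name, account ID, and IP range are placeholders, not values taken from the test suite or the CloudServer codebase:

```js
const AWS = require('aws-sdk');

// CloudServer's default local endpoint and credentials (see GETTING_STARTED).
const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

// Hypothetical policy: lets one account GET objects in "examplebucket",
// but only from a given IP range. Bucket, account ID, and CIDR are placeholders.
const policy = {
    Version: '2012-10-17',
    Statement: [{
        Sid: 'AllowGetFromOneNetwork',
        Effect: 'Allow',
        Principal: { AWS: 'arn:aws:iam::123456789012:root' },
        Action: 's3:GetObject',
        Resource: 'arn:aws:s3:::examplebucket/*',
        Condition: { IpAddress: { 'aws:SourceIp': '192.0.2.0/24' } },
    }],
};

s3.putBucketPolicy({
    Bucket: 'examplebucket',
    Policy: JSON.stringify(policy),
}, err => {
    if (err) {
        console.error('putBucketPolicy failed:', err);
    } else {
        console.log('bucket policy stored');
    }
});
```

A GETBucketPolicy request on the same bucket would return this document, and a DELETEBucketPolicy request would remove it from the bucket's metadata.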
## Authorization with Multiple Access Control Mechanisms
In the case where multiple access control mechanisms (such as IAM policies,
bucket policies, and ACLs) refer to the same resource, the principle of
least-privilege is applied. Unless an action is explicitly allowed, access will
by default be denied. An explicit DENY in any policy will trump another
policy's ALLOW for an action. The request will only be allowed if at least one
policy specifies an ALLOW, and there is no overriding DENY.
The following diagram illustrates this logic:
![Access_Control_Authorization_Chart](./images/access_control_authorization.png)
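
In code, the combining rule described above can be sketched as follows. This is an illustrative helper only, not CloudServer's actual evaluation code; the `evaluations` array and its 'Allow'/'Deny'/'Neutral' values are assumptions made for the example:

```js
// Illustrative only -- not CloudServer's actual evaluation code. Each entry in
// `evaluations` is the result of one access control mechanism (IAM policy,
// bucket policy, ACL) for the requested action: 'Allow', 'Deny', or 'Neutral'.
function isActionAllowed(evaluations) {
    // An explicit DENY in any policy always wins.
    if (evaluations.includes('Deny')) {
        return false;
    }
    // Otherwise the action is allowed only if at least one policy allows it;
    // with no explicit ALLOW, access is denied by default.
    return evaluations.includes('Allow');
}

console.log(isActionAllowed(['Allow', 'Neutral', 'Neutral']));   // true
console.log(isActionAllowed(['Allow', 'Deny', 'Neutral']));      // false
console.log(isActionAllowed(['Neutral', 'Neutral', 'Neutral'])); // false
```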

View File

@ -295,51 +295,3 @@ Should force path-style requests even though v3 advertises it does by default.
$client->createBucket(array( $client->createBucket(array(
'Bucket' => 'bucketphp', 'Bucket' => 'bucketphp',
)); ));
Go
~~
`AWS Go SDK <https://github.com/aws/aws-sdk-go>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: go
package main
import (
"context"
"fmt"
"log"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
func main() {
os.Setenv("AWS_ACCESS_KEY_ID", "accessKey1")
os.Setenv("AWS_SECRET_ACCESS_KEY", "verySecretKey1")
endpoint := "http://localhost:8000"
timeout := time.Duration(10) * time.Second
sess := session.Must(session.NewSession())
// Create a context with a timeout that will abort the upload if it takes
// more than the passed in timeout.
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
svc := s3.New(sess, &aws.Config{
Region: aws.String(endpoints.UsEast1RegionID),
Endpoint: &endpoint,
})
out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
if err != nil {
log.Fatal(err)
} else {
fmt.Println(out)
}
}

View File

@ -14,7 +14,7 @@ Got an idea? Get started!
In order to contribute, please follow the `Contributing In order to contribute, please follow the `Contributing
Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__. Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__.
If anything is unclear to you, reach out to us on If anything is unclear to you, reach out to us on
`forum <https://forum.zenko.io/>`__ or via a GitHub issue. `slack <https://zenko-io.slack.com/>`__ or via a GitHub issue.
Don't write code? There are other ways to help! Don't write code? There are other ways to help!
----------------------------------------------- -----------------------------------------------

View File

@ -1,6 +1,13 @@
Docker Docker
====== ======
- `Environment Variables <environment-variables>`__
- `Tunables and setup tips <tunables-and-setup-tips>`__
- `Examples for continuous integration with Docker
<continuous-integration-with-docker-hosted-cloudserver>`__
- `Examples for going into production with Docker
<in-production-w-a-Docker-hosted-cloudserver>`__
.. _environment-variables: .. _environment-variables:
Environment Variables Environment Variables
@ -19,7 +26,7 @@ For multiple data backends, a custom locationConfig.json file is required.
This file enables you to set custom regions. You must provide associated This file enables you to set custom regions. You must provide associated
rest_endpoints for each custom region in config.json. rest_endpoints for each custom region in config.json.
`Learn more about multiple-backend configurations <GETTING_STARTED.html#location-configuration>`__ `Learn more about multiple-backend configurations <./GETTING_STARTED#location-configuration>`__
If you are using Scality RING endpoints, refer to your customer documentation. If you are using Scality RING endpoints, refer to your customer documentation.
@ -73,7 +80,8 @@ S3BACKEND=file
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^
For stored file data to persist, you must mount Docker volumes For stored file data to persist, you must mount Docker volumes
for both data and metadata. See :ref:`In Production with a Docker-Hosted CloudServer <in-production-w-a-Docker-hosted-cloudserver>` for both data and metadata. See
`In Production with a Docker-Hosted CloudServer <in-production-w-a-Docker-hosted-cloudserver>`__
S3BACKEND=mem S3BACKEND=mem
^^^^^^^^^^^^^ ^^^^^^^^^^^^^
@ -88,22 +96,11 @@ new.host.com, for example, specify the endpoint with:
.. code-block:: shell .. code-block:: shell
$ docker run -d --name cloudserver -p 8000:8000 -e ENDPOINT=new.host.com zenko/cloudserver $ docker run -d --name cloudserver -p 8000:8000 -e ENDPOINT=new.host.com scality/cloudserver
.. note:: On Unix-like systems (Linux, OS X, etc.) edit /etc/hosts .. note:: On Unix-like systems (Linux, OS X, etc.) edit /etc/hosts
to associate 127.0.0.1 with new.host.com. to associate 127.0.0.1 with new.host.com.
REMOTE_MANAGEMENT_DISABLE
~~~~~~~~~~~~~~~~~~~~~~~~~
CloudServer is a part of `Zenko <https://www.zenko.io/>`__. When you run CloudServer standalone, it still tries to connect to Orbit (the browser-based graphical user interface for Zenko) by default.
Setting this variable to true (1) makes CloudServer default to the accessKey1 and verySecretKey1 credentials and disables the automatic Orbit management:
.. code-block:: shell
$ docker run -d --name cloudserver -p 8000:8000 -e REMOTE_MANAGEMENT_DISABLE=1 zenko/cloudserver
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -117,7 +114,7 @@ environment variables:
.. code-block:: shell .. code-block:: shell
$ docker run -d --name cloudserver -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey \ $ docker run -d --name cloudserver -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey \
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey zenko/cloudserver -e SCALITY_SECRET_ACCESS_KEY=newSecretKey scality/cloudserver
.. note:: This takes precedence over the contents of the authdata.json .. note:: This takes precedence over the contents of the authdata.json
file. The authdata.json file is ignored. file. The authdata.json file is ignored.
@ -134,7 +131,7 @@ provides the most detailed logs.
.. code-block:: shell .. code-block:: shell
$ docker run -d --name cloudserver -p 8000:8000 -e LOG_LEVEL=trace zenko/cloudserver $ docker run -d --name cloudserver -p 8000:8000 -e LOG_LEVEL=trace scality/cloudserver
SSL SSL
~~~ ~~~
@ -159,9 +156,9 @@ If SSL is set true:
.. code:: shell .. code:: shell
$ docker run -d --name cloudserver -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT> \ $ docker run -d --name cloudserver -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT> \
zenko/cloudserver scality/cloudserver
For more information about using CloudServer with SSL, see `Using SSL <GETTING_STARTED.html#Using SSL>`__ For more information about using CloudServer with SSL, see `Using SSL <./GETTING_STARTED#Using SSL>`__
LISTEN\_ADDR LISTEN\_ADDR
~~~~~~~~~~~~ ~~~~~~~~~~~~
@ -172,8 +169,8 @@ servers as standalone services, for example.
.. code:: shell .. code:: shell
docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0 $ docker run -d --name cloudserver-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0 \
scality/s3server yarn run start_dataserver scality/cloudserver npm run start_dataserver
DATA\_HOST and METADATA\_HOST DATA\_HOST and METADATA\_HOST
@ -186,7 +183,7 @@ Zenko CloudServer.
.. code:: shell .. code:: shell
$ docker run -d --name cloudserver -e DATA_HOST=cloudserver-data \ $ docker run -d --name cloudserver -e DATA_HOST=cloudserver-data \
-e METADATA_HOST=cloudserver-metadata zenko/cloudserver yarn run start_s3server -e METADATA_HOST=cloudserver-metadata scality/cloudserver npm run start_s3server
REDIS\_HOST REDIS\_HOST
~~~~~~~~~~~ ~~~~~~~~~~~
@ -197,7 +194,7 @@ localhost.
.. code:: shell .. code:: shell
$ docker run -d --name cloudserver -p 8000:8000 \ $ docker run -d --name cloudserver -p 8000:8000 \
-e REDIS_HOST=my-redis-server.example.com zenko/cloudserver -e REDIS_HOST=my-redis-server.example.com scality/cloudserver
REDIS\_PORT REDIS\_PORT
~~~~~~~~~~~ ~~~~~~~~~~~
@ -208,7 +205,7 @@ than the default 6379.
.. code:: shell .. code:: shell
$ docker run -d --name cloudserver -p 8000:8000 \ $ docker run -d --name cloudserver -p 8000:8000 \
-e REDIS_PORT=6379 zenko/cloudserver -e REDIS_PORT=6379 scality/cloudserver
.. _tunables-and-setup-tips: .. _tunables-and-setup-tips:
@ -228,7 +225,7 @@ and metadata are destroyed when the container is erased.
.. code-block:: shell .. code-block:: shell
$ docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \ $ docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
-p 8000:8000 -d zenko/cloudserver -p 8000:8000 -d scality/cloudserver
This command mounts the ./data host directory to the container This command mounts the ./data host directory to the container
at /usr/src/app/localData and the ./metadata host directory to at /usr/src/app/localData and the ./metadata host directory to
@ -250,7 +247,7 @@ For example:
.. code-block:: shell .. code-block:: shell
$ docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d \ $ docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d \
zenko/cloudserver scality/cloudserver
Specifying a Host Name Specifying a Host Name
~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~
@ -268,7 +265,7 @@ of the ``location_constraints`` listed in your locationConfig.json file
`here <https://github.com/scality/S3/blob/master/locationConfig.json>`__. `here <https://github.com/scality/S3/blob/master/locationConfig.json>`__.
For more information about location configuration, see: For more information about location configuration, see:
`GETTING STARTED <GETTING_STARTED.html#location-configuration>`__ `GETTING STARTED <./GETTING_STARTED#location-configuration>`__
.. code:: json .. code:: json
@ -284,7 +281,7 @@ Next, run CloudServer using a `Docker volume
.. code-block:: shell .. code-block:: shell
$ docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d zenko/cloudserver $ docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/cloudserver
The local ``config.json`` file overrides the default one through a Docker The local ``config.json`` file overrides the default one through a Docker
file mapping. file mapping.
@ -333,11 +330,11 @@ one hosted on AWS), and custom credentials mounted:
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \ -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \ -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
-v ~/.aws/credentials:/root/.aws/credentials \ -v ~/.aws/credentials:/root/.aws/credentials \
-e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver -e S3DATA=multiple -e S3BACKEND=mem scality/cloudserver
To run CloudServer for CI with custom locations, (one in-memory, one To run CloudServer for CI with custom locations, (one in-memory, one
hosted on AWS, and one file), and custom credentials `set as environment hosted on AWS, and one file), and custom credentials `set as environment
variables <GETTING_STARTED.html#scality-access-key-id-and-scality-secret-access-key>`__): variables <./GETTING_STARTED#scality-access-key-id-and-scality-secret-access-key>`__):
.. code-block:: shell .. code-block:: shell
@ -347,7 +344,7 @@ variables <GETTING_STARTED.html#scality-access-key-id-and-scality-secret-access-
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \ -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
-e SCALITY_ACCESS_KEY_ID=accessKey1 \ -e SCALITY_ACCESS_KEY_ID=accessKey1 \
-e SCALITY_SECRET_ACCESS_KEY=verySecretKey1 \ -e SCALITY_SECRET_ACCESS_KEY=verySecretKey1 \
-e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver -e S3DATA=multiple -e S3BACKEND=mem scality/cloudserver
.. _in-production-w-a-Docker-hosted-cloudserver: .. _in-production-w-a-Docker-hosted-cloudserver:
@ -368,4 +365,4 @@ Customize these with:
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \ -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
-v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple \ -v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple \
-e ENDPOINT=custom.endpoint.com \ -e ENDPOINT=custom.endpoint.com \
-p 8000:8000 -d zenko/cloudserver \ -p 8000:8000 -d scality/cloudserver \

View File

@ -4,12 +4,13 @@ Getting Started
.. figure:: ../res/scality-cloudserver-logo.png .. figure:: ../res/scality-cloudserver-logo.png
:alt: Zenko CloudServer logo :alt: Zenko CloudServer logo
|CircleCI| |Scality CI|
Dependencies Dependencies
------------ ------------
Building and running the Scality Zenko CloudServer requires node.js 10.x and Building and running the Scality Zenko CloudServer requires node.js 6.9.5 and
yarn v1.17.x. Up-to-date versions can be found at npm v3. Up-to-date versions can be found at
`Nodesource <https://github.com/nodesource/distributions>`__. `Nodesource <https://github.com/nodesource/distributions>`__.
Installation Installation
@ -21,19 +22,19 @@ Installation
$ git clone https://github.com/scality/cloudserver.git $ git clone https://github.com/scality/cloudserver.git
2. Go to the cloudserver directory and use yarn to install the js dependencies. 2. Go to the cloudserver directory and use npm to install the js dependencies.
.. code-block:: shell .. code-block:: shell
$ cd cloudserver $ cd cloudserver
$ yarn install $ npm install
Running CloudServer with a File Backend Running CloudServer with a File Backend
--------------------------------------- ---------------------------------------
.. code-block:: shell .. code-block:: shell
$ yarn start $ npm start
This starts a Zenko CloudServer on port 8000. Two additional ports, 9990 This starts a Zenko CloudServer on port 8000. Two additional ports, 9990
and 9991, are also open locally for internal transfer of metadata and and 9991, are also open locally for internal transfer of metadata and
@ -53,7 +54,7 @@ absolute paths. Thus, when starting the server:
$ mkdir -m 700 $(pwd)/myFavoriteMetadataPath $ mkdir -m 700 $(pwd)/myFavoriteMetadataPath
$ export S3DATAPATH="$(pwd)/myFavoriteDataPath" $ export S3DATAPATH="$(pwd)/myFavoriteDataPath"
$ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath" $ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
$ yarn start $ npm start
Running CloudServer with Multiple Data Backends Running CloudServer with Multiple Data Backends
----------------------------------------------- -----------------------------------------------
@ -61,7 +62,7 @@ Running CloudServer with Multiple Data Backends
.. code-block:: shell .. code-block:: shell
$ export S3DATA='multiple' $ export S3DATA='multiple'
$ yarn start $ npm start
This starts a Zenko CloudServer on port 8000. This starts a Zenko CloudServer on port 8000.
@ -85,27 +86,16 @@ Run CloudServer with an In-Memory Backend
.. code-block:: shell .. code-block:: shell
$ yarn run mem_backend $ npm run mem_backend
This starts a Zenko CloudServer on port 8000. This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1. The secret key is verySecretKey1. The default access key is accessKey1. The secret key is verySecretKey1.
Run CloudServer with Vault User Management
------------------------------------------
.. code:: shell
export S3VAULT=vault
yarn start
Note: Vault is proprietary and must be accessed separately.
This starts a Zenko CloudServer using Vault for user management.
Run CloudServer for Continuous Integration Testing or in Production with Docker Run CloudServer for Continuous Integration Testing or in Production with Docker
------------------------------------------------------------------------------- -------------------------------------------------------------------------------
Run Cloudserver with `DOCKER <DOCKER.html>`__ `DOCKER <./DOCKER>`__
Testing Testing
~~~~~~~ ~~~~~~~
@ -114,20 +104,20 @@ Run unit tests with the command:
.. code-block:: shell .. code-block:: shell
$ yarn test $ npm test
Run multiple-backend unit tests with: Run multiple-backend unit tests with:
.. code-block:: shell .. code-block:: shell
$ CI=true S3DATA=multiple yarn start $ CI=true S3DATA=multiple npm start
$ yarn run multiple_backend_test $ npm run multiple_backend_test
Run the linter with: Run the linter with:
.. code-block:: shell .. code-block:: shell
$ yarn run lint $ npm run lint
Running Functional Tests Locally Running Functional Tests Locally
-------------------------------- --------------------------------
@ -167,8 +157,8 @@ installed in the environment the tests are running in.
.. code-block:: shell .. code-block:: shell
$ CI=true yarn run mem_backend $ CI=true npm run mem_backend
$ CI=true yarn run ft_test $ CI=true npm run ft_test
.. _Configuration: .. _Configuration:
@ -271,7 +261,7 @@ These variables specify authentication credentials for an account named
.. code-block:: shell .. code-block:: shell
$ SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey yarn start $ SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey npm start
.. _Using_SSL: .. _Using_SSL:
@ -379,7 +369,7 @@ SSL certificates.
Test the Config Test the Config
^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^
If aws-sdk is not installed, run ``$> yarn install aws-sdk``. If aws-sdk is not installed, run ``$> npm install aws-sdk``.
Paste the following script into a file named "test.js": Paste the following script into a file named "test.js":

View File

@ -288,7 +288,7 @@ Deploying Zenko CloudServer with SSL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First, deploy CloudServer with a file backend using `our DockerHub page First, deploy CloudServer with a file backend using `our DockerHub page
<https://hub.docker.com/r/zenko/cloudserver>`__. <https://hub.docker.com/r/scality/cloudserver/>`__.
.. note:: .. note::

View File

@ -1,161 +0,0 @@
# Object Lock Feature Test Plan
## Feature Component Description
Implementing Object Lock will introduce six new APIs:
- putObjectLockConfiguration
- getObjectLockConfiguration
- putObjectRetention
- getObjectRetention
- putObjectLegalHold
- getObjectLegalHold
Along with these APIs, putBucket, putObject, deleteObject, and multiObjectDelete
will be affected. In Arsenal, both the BucketInfo and ObjectMD models will be
updated. Bucket policy and IAM policy permissions will be updated to include
the new API actions.
## Functional Tests
### putBucket tests
- passing option to enable object lock updates bucket metadata and enables
bucket versioning (see the sketch below)
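
As an illustration, the option referred to above maps to the `ObjectLockEnabledForBucket` flag at bucket creation. A minimal sketch with the AWS SDK for JavaScript, using CloudServer's default local endpoint and credentials and a placeholder bucket name:

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',   // default local CloudServer endpoint
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

// The option under test: create the bucket with object lock enabled, which
// also turns on versioning for the bucket.
s3.createBucket({
    Bucket: 'lock-enabled-bucket',       // placeholder bucket name
    ObjectLockEnabledForBucket: true,
}, err => {
    if (err) {
        console.error('createBucket failed:', err);
    } else {
        console.log('bucket created with object lock enabled');
    }
});
```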
### putBucketVersioning tests
- suspending versioning on bucket with object lock enabled returns error
### putObject tests
- putting retention configuration on object should be allowed
- putting invalid retention configuration returns error
### getObject tests
- getting object with retention information should include retention information
### copyObject tests
- copying object with retention information should include retention information
### initiateMultipartUpload tests
- mpu object initiated with retention information should include retention
information
### putObjectLockConfiguration tests
- putting configuration as non-bucket-owner user returns AccessDenied error
- disabling object lock on bucket created with object lock returns error
- enabling object lock on bucket created without object lock returns
InvalidBucketState error
- enabling object lock with token on bucket created without object lock succeeds
- putting valid object lock configuration when bucket does not have object
lock enabled returns error (InvalidRequest?)
- putting valid object lock configuration updates bucket metadata (see the
  sketch after this list)
- putting invalid object lock configuration returns error
- ObjectLockEnabled !== "Enabled"
- Rule object doesn't contain DefaultRetention key
- Mode !== "GOVERNANCE" or "COMPLIANCE"
- Days are not an integer
- Years are not an integer
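
To make the rules above concrete, a valid configuration is shaped roughly as follows. This is an illustrative AWS-SDK-for-JavaScript sketch with placeholder values (bucket name, retention period), not an excerpt from the test suite:

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',   // default local CloudServer endpoint
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

// A configuration that passes the checks above: ObjectLockEnabled is "Enabled",
// the Rule contains a DefaultRetention, Mode is GOVERNANCE or COMPLIANCE, and
// Days (or Years) is an integer.
s3.putObjectLockConfiguration({
    Bucket: 'lock-enabled-bucket',       // placeholder bucket name
    ObjectLockConfiguration: {
        ObjectLockEnabled: 'Enabled',
        Rule: {
            DefaultRetention: {
                Mode: 'GOVERNANCE',
                Days: 30,
            },
        },
    },
}, err => {
    if (err) {
        console.error('putObjectLockConfiguration failed:', err);
    } else {
        console.log('object lock configuration stored');
    }
});
```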
### getObjectLockConfiguration tests
- getting configuration as non-bucket-owner user returns AccessDenied error
- getting configuration when none is set returns
ObjectLockConfigurationNotFoundError error
- getting configuration returns correct object lock configuration for bucket
### putObjectRetention
- putting retention as non-bucket-owner user returns AccessDenied error
- putting retention on object in bucket without object lock enabled returns
InvalidRequest error
- putting valid retention period updates object metadata
### getObjectRetention
- getting retention as non-bucket-owner user returns AccessDenied error
- getting retention when none is set returns NoSuchObjectLockConfiguration
error
- getting retention returns correct object retention period
### putObjectLegalHold
- putting legal hold as non-bucket-owner user returns AccessDenied error
- putting legal hold on object in bucket without object lock enabled returns
InvalidRequest error
- putting valid legal hold updates object metadata
### getObjectLegalHold
- getting legal hold as non-bucket-owner user returns AccessDenied error
- getting legal hold when none is set returns NoSuchObjectLockConfiguration
error
- getting legal hold returns correct object legal hold
## End to End Tests
### Scenarios
- Create bucket with object lock enabled. Put object. Put object lock
configuration. Put another object.
- Ensure object put before configuration does not have retention period set
- Ensure object put after configuration does have retention period set
- Create bucket without object lock. Put object. Enable object lock with token
and put object lock configuration. Put another object.
- Ensure object put before configuration does not have retention period set
- Ensure object put after configuration does have retention period set
- Create bucket with object lock enabled and put configuration with COMPLIANCE
mode. Put object.
- Ensure object cannot be deleted (returns AccessDenied error).
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled and put configuration with GOVERNANCE
mode. Put object.
- Ensure user without permission cannot delete object
- Ensure user without permission cannot overwrite object
- Ensure user with permission can delete object
- Ensure user with permission can overwrite object
- Ensure user with permission can lengthen retention period
- Ensure user with permission cannot shorten retention period
- Create bucket with object lock enabled and put configuration. Edit bucket
metadata so retention period is expired. Put object.
- Ensure object can be deleted.
- Ensure object can be overwritten.
- Create bucket with object lock enabled and put configuration. Edit bucket
metadata so retention period is expired. Put object. Put new retention
period on object.
- Ensure object cannot be deleted.
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled and put configuration. Put object.
Edit object metadata so retention period is past expiration.
- Ensure object can be deleted.
- Ensure object can be overwritten.
- Create bucket with object lock enabled and put configuration. Edit bucket
metadata so retention period is expired. Put object. Put legal hold
on object.
- Ensure object cannot be deleted.
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled and put configuration. Put object.
Check object retention. Change bucket object lock configuration.
- Ensure object retention period has not changed with bucket configuration.
- Create bucket with object lock enabled. Put object with legal hold.
- Ensure object cannot be deleted.
- Ensure object cannot be overwritten.
- Create bucket with object lock enabled. Put object with legal hold. Remove
legal hold.
- Ensure object can be deleted.
- Ensure object can be overwritten.

View File

@ -1,73 +0,0 @@
# Cloudserver Release Plan
## Docker Image Generation
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:
* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudserver-dashboard
With every CI build, the CI pushes images tagged with the
developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
the build chain, and so on.
Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```
## Release Process
To release a production image:
* Create a PR to bump the package version
Update Cloudserver's `package.json` by bumping it to the relevant next
version in a new PR. For example, if the last released version was
`8.4.7`, the next version would be `8.4.8`.
```js
{
"name": "cloudserver",
"version": "8.4.8", <--- Here
[...]
}
```
* Review & merge the PR
* Create the release on GitHub
* Go to the Releases tab (https://github.com/scality/cloudserver/releases);
* Click on the `Draft a new release` button;
* In the `tag` field, type the name of the release (`8.4.8`), and confirm
to create the tag on publish;
* Click on the `Generate release notes` button to fill in the fields;
* Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case);
* Click `Publish release` to create the GitHub release and Git tag
Notes:
* the Git tag will be created automatically.
* this should be done as soon as the PR is merged, so that the tag
is put on the "version bump" commit.
* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force)
* Branch Name: The one used for the tag earlier. In this example `development/8.4`
* Override Stage: 'release'
* Extra properties:
* name: `'tag'`, value: `[release version]`, in this example `'8.4.8'`
* Release the release version on Jira
* Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
* Create a next version
* Name: `[next version]`, in this example `8.4.9`
* Click `...` and select `Release` on the recently released version (`8.4.8`)
* Fill in the field to move incomplete version to the next one

View File

@ -6,7 +6,7 @@ Using Public Clouds as data backends
Introduction Introduction
------------ ------------
As stated in our `GETTING STARTED guide <GETTING_STARTED.html#location-configuration>`__, As stated in our `GETTING STARTED guide <../GETTING_STARTED/#location-configuration>`__,
new data backends can be added by creating a region (also called location new data backends can be added by creating a region (also called location
constraint) with the right endpoint and credentials. constraint) with the right endpoint and credentials.
This section of the documentation shows you how to set up our currently This section of the documentation shows you how to set up our currently
@ -139,7 +139,7 @@ to start the server and start writing data to AWS S3 through CloudServer.
.. code:: shell .. code:: shell
# Start the server locally # Start the server locally
$> S3DATA=multiple yarn start $> S3DATA=multiple npm start
Run the server as a docker container with the ability to write to AWS S3 Run the server as a docker container with the ability to write to AWS S3
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -306,7 +306,7 @@ to start the server and start writing data to MS Azure through CloudServer.
.. code:: shell .. code:: shell
# Start the server locally # Start the server locally
$> S3DATA=multiple yarn start $> S3DATA=multiple npm start
Run the server as a docker container with the ability to write to MS Azure Run the server as a docker container with the ability to write to MS Azure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -18,12 +18,12 @@ service:
$ git clone https://github.com/scality/cloudserver $ git clone https://github.com/scality/cloudserver
$ cd cloudserver $ cd cloudserver
$ yarn install $ npm install
$ yarn start $ npm start
.. tip:: .. tip::
Some optional dependencies may fail, resulting in you seeing `yarn Some optional dependencies may fail, resulting in you seeing `NPM
WARN` messages; these can safely be ignored. Refer to the User WARN` messages; these can safely be ignored. Refer to the User
documentation for all available options. documentation for all available options.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 23 KiB

13
eve/get_product_version.sh Executable file
View File

@ -0,0 +1,13 @@
#!/bin/bash
# This script uses bash arrays, so it must run under bash rather than plain sh.
LOCAL_BRANCH=$(git branch | grep \* | cut -d ' ' -f2)
BRANCHES=(development q stabilization)
for branch in "${BRANCHES[@]}"; do
    # Only report a real version on release-style branches.
    if echo "${LOCAL_BRANCH}/" | grep -q "^${branch}" ; then
        # .git/HEAD points at the checked-out ref; keep only its last path component.
        sed 's/.*\///' .git/HEAD
        exit 0
    fi
done
echo 0.0.0

435
eve/main.yml Normal file
View File

@ -0,0 +1,435 @@
---
version: 0.2
branches:
feature/*, documentation/*, improvement/*, bugfix/*, w/*, q/*, hotfix/*:
stage: pre-merge
development/*:
stage: post-merge
models:
- env: &global-env
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackend_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key_2)s
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name_2)s
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint_2)s
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azuretest_AZURE_BLOB_ENDPOINT: "%(secret:azure_storage_endpoint)s"
b2backend_B2_ACCOUNT_ID: "%(secret:b2backend_b2_account_id)s"
b2backend_B2_STORAGE_ACCESS_KEY: >-
%(secret:b2backend_b2_storage_access_key)s
GOOGLE_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
GOOGLE_SERVICE_KEY: "%(secret:gcp_service_key)s"
AWS_S3_BACKEND_ACCESS_KEY: "%(secret:aws_s3_backend_access_key)s"
AWS_S3_BACKEND_SECRET_KEY: "%(secret:aws_s3_backend_secret_key)s"
AWS_S3_BACKEND_ACCESS_KEY_2: "%(secret:aws_s3_backend_access_key_2)s"
AWS_S3_BACKEND_SECRET_KEY_2: "%(secret:aws_s3_backend_secret_key_2)s"
AWS_GCP_BACKEND_ACCESS_KEY: "%(secret:aws_gcp_backend_access_key)s"
AWS_GCP_BACKEND_SECRET_KEY: "%(secret:aws_gcp_backend_secret_key)s"
AWS_GCP_BACKEND_ACCESS_KEY_2: "%(secret:aws_gcp_backend_access_key_2)s"
AWS_GCP_BACKEND_SECRET_KEY_2: "%(secret:aws_gcp_backend_secret_key_2)s"
b2backend_B2_STORAGE_ENDPOINT: "%(secret:b2backend_b2_storage_endpoint)s"
gcpbackend2_GCP_SERVICE_EMAIL: "%(secret:gcp2_service_email)s"
gcpbackend2_GCP_SERVICE_KEY: "%(secret:gcp2_service_key)s"
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackend_GCP_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
gcpbackend_GCP_SERVICE_KEY: "%(secret:gcp_service_key)s"
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
%(secret:gcpbackendmismatch_gcp_service_email)s
gcpbackendmismatch_GCP_SERVICE_KEY: >-
%(secret:gcpbackendmismatch_gcp_service_key)s
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
- env: &mongo-vars
S3BACKEND: "mem"
MPU_TESTING: "yes"
S3METADATA: mongodb
- env: &multiple-backend-vars
S3BACKEND: "mem"
S3DATA: "multiple"
- env: &file-mem-mpu
S3BACKEND: "file"
S3VAULT: "mem"
MPU_TESTING: "yes"
- Git: &clone
name: Pull repo
repourl: '%(prop:git_reference)s'
shallow: True
retryFetch: True
haltOnFailure: True
- ShellCommand: &credentials
name: Setup Credentials
command: bash eve/workers/build/credentials.bash
haltOnFailure: True
env: *global-env
- ShellCommand: &npm-install
name: install modules
command: npm cache clean --force && npm install
haltOnFailure: True
- Upload: &upload-artifacts
source: /artifacts
urls:
- "*"
- ShellCommand: &follow-s3-log
logfiles:
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &follow-s3-ceph-logs
logfiles:
ceph:
filename: /artifacts/ceph.log
follow: true
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &add-hostname
name: add hostname
command: |
echo "127.0.0.1 testrequestbucket.localhost" >> /etc/hosts
echo \
"127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" \
>> /etc/hosts
haltOnFailure: True
- ShellCommand: &setup-junit-upload
name: preparing junit files for upload
command: |
mkdir -p artifacts/junit
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
alwaysRun: true
- Upload: &upload-junits
source: artifacts
urls:
- "*"
alwaysRun: true
stages:
pre-merge:
worker:
type: local
steps:
- TriggerStages:
name: Launch all workers
stage_names:
- linting-coverage
- file-ft-tests
- multiple-backend-test
- mongo-ft-tests
- ceph-backend-tests
- kmip-ft-tests
waitForFinish: True
haltOnFailure: True
linting-coverage:
worker:
type: docker
path: eve/workers/build
volumes: &default_volumes
- '/home/eve/workspace'
steps:
- Git: *clone
- ShellCommand: *npm-install
- ShellCommand: *add-hostname
- ShellCommand: *credentials
- ShellCommand:
name: Linting
command: |
set -ex
npm run --silent lint -- --max-warnings 0
npm run --silent lint_md
flake8 $(git ls-files "*.py")
yamllint $(git ls-files "*.yml")
- ShellCommand:
name: Unit Coverage
command: |
set -ex
unset HTTP_PROXY HTTPS_PROXY NO_PROXY
unset http_proxy https_proxy no_proxy
mkdir -p $CIRCLE_TEST_REPORTS/unit
npm test
npm run test_legacy_location
env: &shared-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
CIRCLE_TEST_REPORTS: /tmp
CIRCLE_ARTIFACTS: /tmp
CI_REPORTS: /tmp
- ShellCommand:
name: Unit Coverage logs
command: find /tmp/unit -exec cat {} \;
- ShellCommand: *setup-junit-upload
- Upload: *upload-junits
multiple-backend-test:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2560Mi"
s3Mem: "2560Mi"
env:
<<: *multiple-backend-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *npm-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash wait_for_local_port.bash 8000 40
npm run multiple_backend_test
npm run ft_awssdk_external_backends"
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- ShellCommand:
command: mvn test
workdir: build/tests/functional/jaws
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand:
command: rspec tests.rb
workdir: build/tests/functional/fog
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
ceph-backend-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
ceph: eve/workers/ceph
vars:
aggressorMem: "2500Mi"
s3Mem: "2560Mi"
redis: enabled
env:
<<: *multiple-backend-vars
<<: *global-env
CI_CEPH: "true"
MPU_TESTING: "yes"
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *npm-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash eve/workers/ceph/wait_for_ceph.sh
bash wait_for_local_port.bash 8000 40
npm run multiple_backend_test"
env:
<<: *multiple-backend-vars
<<: *global-env
<<: *follow-s3-ceph-logs
- ShellCommand:
command: mvn test
workdir: build/tests/functional/jaws
<<: *follow-s3-ceph-logs
env:
<<: *multiple-backend-vars
- ShellCommand:
command: rspec tests.rb
workdir: build/tests/functional/fog
<<: *follow-s3-ceph-logs
env:
<<: *multiple-backend-vars
- ShellCommand:
command: |
npm run ft_awssdk &&
npm run ft_s3cmd
env:
<<: *file-mem-mpu
<<: *global-env
S3_LOCATION_FILE: "/kube_pod-prod-cloudserver-backend-0/\
build/tests/locationConfig/locationConfigCeph.json"
<<: *follow-s3-ceph-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
mongo-ft-tests:
worker: &s3-pod
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2Gi"
s3Mem: "1664Mi"
redis: enabled
env:
<<: *mongo-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *npm-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
npm run ft_test
<<: *follow-s3-log
env:
<<: *mongo-vars
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
file-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "3Gi"
s3Mem: "2560Mi"
redis: enabled
env:
<<: *file-mem-mpu
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *npm-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
npm run ft_test
<<: *follow-s3-log
env:
<<: *file-mem-mpu
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
kmip-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
pykmip: eve/workers/pykmip
vars:
aggressorMem: "2Gi"
s3Mem: "1664Mi"
redis: enabled
pykmip: enabled
env:
<<: *mongo-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *npm-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 5696 40
npm run ft_kmip
logfiles:
pykmip:
filename: /artifacts/pykmip.log
follow: true
s3:
filename: /artifacts/s3.log
follow: true
env:
<<: *mongo-vars
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
post-merge:
worker:
type: local
steps:
- Git: *clone
- ShellCommand: &docker_login
name: Private Registry Login
command: >
docker login
-u '%(secret:private_registry_username)s'
-p '%(secret:private_registry_password)s'
'%(secret:private_registry_url)s'
- ShellCommand:
name: Dockerhub Login
command: >
docker login
-u '%(secret:dockerhub_ro_user)s'
-p '%(secret:dockerhub_ro_password)s'
- SetProperty: &docker_image_name
name: Set docker image name property
property: docker_image_name
value:
"%(secret:private_registry_url)s/zenko/cloudserver:\
%(prop:commit_short_revision)s"
- ShellCommand:
name: Build docker image
command: >-
docker build
--no-cache
-t %(prop:docker_image_name)s
.
- ShellCommand:
name: Tag images
command: |
docker tag %(prop:docker_image_name)s zenko/cloudserver:$TAG
docker tag %(prop:docker_image_name)s zenko/cloudserver:latest
env:
TAG: "latest-%(prop:product_version)s"
- ShellCommand:
name: Push image
command: |
docker push %(prop:docker_image_name)s
docker push zenko/cloudserver:latest-%(prop:product_version)s
docker push zenko/cloudserver:latest

View File

@ -0,0 +1,57 @@
FROM buildpack-deps:xenial-curl
#
# Install packages needed by the buildchain
#
ENV LANG C.UTF-8
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
&& apt-get update \
&& cat /tmp/*packages.list | xargs apt-get install -y \
&& git clone https://github.com/tj/n.git \
&& make -C ./n \
&& n 10 latest \
&& pip install pip==9.0.1 \
&& rm -rf ./n \
&& rm -rf /var/lib/apt/lists/* \
&& rm -f /tmp/packages.list
#
# Add user eve
#
RUN adduser -u 1042 --home /home/eve --disabled-password --gecos "" eve \
&& adduser eve sudo \
&& sed -ri 's/(%sudo.*)ALL$/\1NOPASSWD:ALL/' /etc/sudoers
#
# Install Dependencies
#
# Install RVM and gems
ENV RUBY_VERSION="2.4.1"
COPY ./gems.list /tmp/
RUN cat /tmp/gems.list | xargs gem install
#RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 \
# && curl -sSL https://get.rvm.io | bash -s stable --ruby=$RUBY_VERSION \
# && usermod -a -G rvm eve
#RUN /bin/bash -l -c "\
# source /usr/local/rvm/scripts/rvm \
# && cat /tmp/gems.list | xargs gem install \
# && rm /tmp/gems.list"
# Install Pip packages
COPY ./pip_packages.list /tmp/
RUN cat /tmp/pip_packages.list | xargs pip install \
&& rm -f /tmp/pip_packages.list \
&& mkdir /home/eve/.aws \
&& chown eve /home/eve/.aws
#
# Run buildbot-worker on startup
#
ARG BUILDBOT_VERSION
RUN pip install buildbot-worker==$BUILDBOT_VERSION
CMD ["/bin/bash", "-l", "-c", "buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS && buildbot-worker start --nodaemon"]

View File

@ -0,0 +1,13 @@
ca-certificates
git
gnupg
libffi-dev
libssl-dev
python-pip
python2.7
python2.7-dev
software-properties-common
sudo
tcl
wget
procps

View File

@ -2,9 +2,9 @@
set -x #echo on set -x #echo on
set -e #exit at the first error set -e #exit at the first error
mkdir -p $HOME/.aws mkdir -p ~/.aws
cat >>$HOME/.aws/credentials <<EOF cat >>/root/.aws/credentials <<EOF
[default] [default]
aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY

View File

@ -0,0 +1,4 @@
fog-aws:1.3.0
json
mime-types:3.1
rspec:3.5

View File

@ -0,0 +1,3 @@
flake8
s3cmd==1.6.1
yamllint

View File

@ -0,0 +1,11 @@
build-essential
curl
default-jdk
libdigest-hmac-perl
lsof
maven
netcat
redis-server
ruby-full
yarn=1.7.0-1
zlib1g-dev

233
eve/workers/pod.yaml Normal file
View File

@ -0,0 +1,233 @@
---
apiVersion: v1
kind: Pod
metadata:
name: "proxy-ci-test-pod"
spec:
restartPolicy: Never
terminationGracePeriodSeconds: 10
hostAliases:
- ip: "127.0.0.1"
hostnames:
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
- "testrequestbucket.localhost"
- "pykmip.local"
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
initContainers:
- name: kmip-certs-installer
image: {{ images.pykmip }}
command: [ 'sh', '-c', 'cp /ssl/* /ssl-kmip/']
volumeMounts:
- name: kmip-certs
readOnly: false
mountPath: /ssl-kmip
{%- endif %}
containers:
{% if vars.env.S3METADATA is defined and vars.env.S3METADATA == "mongodb" -%}
- name: mongo
image: scality/ci-mongo:3.6.8
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 500m
memory: 1Gi
{%- endif %}
- name: aggressor
image: {{ images.aggressor }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: "1"
memory: {{ vars.aggressorMem }}
limits:
cpu: "1"
memory: {{ vars.aggressorMem }}
volumeMounts:
- name: creds
readOnly: false
mountPath: /root/.aws
- name: artifacts
readOnly: true
mountPath: /artifacts
command:
- bash
- -lc
- |
buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS
buildbot-worker start --nodaemon
env:
- name: CI
value: "true"
- name: ENABLE_LOCAL_CACHE
value: "true"
- name: REPORT_TOKEN
value: "report-token-1"
- name: REMOTE_MANAGEMENT_DISABLE
value: "1"
{% for key, value in vars.env.items() %}
- name: {{ key }}
value: "{{ value }}"
{% endfor %}
- name: s3
image: {{ images.s3 }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: "2"
memory: {{ vars.s3Mem }}
limits:
cpu: "2"
memory: {{ vars.s3Mem }}
volumeMounts:
- name: creds
readOnly: false
mountPath: /root/.aws
- name: certs
readOnly: true
mountPath: /tmp
- name: artifacts
readOnly: false
mountPath: /artifacts
- name: kmip-certs
readOnly: false
mountPath: /ssl-kmip
command:
- bash
- -ec
- |
sleep 10 # wait for mongo
/usr/src/app/docker-entrypoint.sh npm start | tee -a /artifacts/s3.log
env:
{% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is not defined -%}
- name: S3_LOCATION_FILE
value: "/usr/src/app/tests/locationConfig/locationConfigTests.json"
{%- endif %}
{% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
- name: S3_LOCATION_FILE
value: "/usr/src/app/tests/locationConfig/locationConfigCeph.json"
{%- endif %}
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
- name: S3KMS
value: kmip
- name: S3KMIP_PORT
value: "5696"
- name: S3KMIP_HOSTS
value: "pykmip.local"
- name: S3KMIP_COMPOUND_CREATE
value: "false"
- name: S3KMIP_BUCKET_ATTRIBUTE_NAME
value: ''
- name: S3KMIP_PIPELINE_DEPTH
value: "8"
- name: S3KMIP_KEY
value: /ssl-kmip/kmip-client-key.pem
- name: S3KMIP_CERT
value: /ssl-kmip/kmip-client-cert.pem
- name: S3KMIP_CA
value: /ssl-kmip/kmip-ca.pem
{%- endif %}
- name: CI
value: "true"
- name: ENABLE_LOCAL_CACHE
value: "true"
- name: MONGODB_HOSTS
value: "localhost:27018"
- name: MONGODB_RS
value: "rs0"
- name: REDIS_HOST
value: "localhost"
- name: REDIS_PORT
value: "6379"
- name: REPORT_TOKEN
value: "report-token-1"
- name: REMOTE_MANAGEMENT_DISABLE
value: "1"
- name: HEALTHCHECKS_ALLOWFROM
value: "0.0.0.0/0"
{% for key, value in vars.env.items() %}
- name: {{ key }}
value: "{{ value }}"
{% endfor %}
{% if vars.redis is defined and vars.redis == "enabled" -%}
- name: redis
image: redis:alpine
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 200m
memory: 128Mi
limits:
cpu: 200m
memory: 128Mi
{%- endif %}
{% if vars.env.CI_PROXY is defined and vars.env.CI_PROXY == "true" -%}
- name: squid
image: scality/ci-squid
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 250m
memory: 128Mi
limits:
cpu: 250m
memory: 128Mi
volumeMounts:
- name: certs
readOnly: false
mountPath: /ssl
command:
- sh
- -exc
- |
mkdir -p /ssl
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
-subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
-keyout /ssl/myca.pem -out /ssl/myca.pem
cp /ssl/myca.pem /ssl/CA.pem
squid -f /etc/squid/squid.conf -N -z
squid -f /etc/squid/squid.conf -NYCd 1
{%- endif %}
{% if vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
- name: ceph
image: {{ images.ceph }}
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 1536Mi
limits:
cpu: 500m
memory: 1536Mi
volumeMounts:
- name: artifacts
readOnly: false
mountPath: /artifacts
{%- endif %}
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
- name: pykmip
image: {{ images.pykmip }}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: artifacts
readOnly: false
mountPath: /artifacts
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 100m
memory: 128Mi
{%- endif %}
volumes:
- name: creds
emptyDir: {}
- name: certs
emptyDir: {}
- name: artifacts
emptyDir: {}
- name: kmip-certs
emptyDir: {}
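The manifest above is parameterized with Jinja-style placeholders ({{ images.s3 }}, vars.env, conditional blocks), which the CI presumably renders before submitting the pod to Kubernetes. The sketch below is an illustration only: it assumes a plain Jinja2 render, and the variable values and entry point are made up for the example rather than taken from the CI configuration.

from jinja2 import Template

# Render eve/workers/pod.yaml with illustrative values for the template variables.
with open("eve/workers/pod.yaml") as f:
    template = Template(f.read())

manifest = template.render(
    images={"aggressor": "registry.example/aggressor:latest",
            "s3": "registry.example/s3:latest",
            "pykmip": "registry.example/pykmip:latest",
            "ceph": "registry.example/ceph:latest"},
    vars={"aggressorMem": "2Gi",
          "s3Mem": "2Gi",
          "redis": "enabled",
          "env": {"S3BACKEND": "file", "S3DATA": "file"}},
)
print(manifest)  # plain Kubernetes YAML, ready for `kubectl apply -f -`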

View File

@ -1,4 +1,5 @@
-FROM python:3.10-alpine
+FROM python:3-alpine
RUN apk add --no-cache \
    libressl && \
@ -7,14 +8,8 @@ RUN apk add --no-cache \
    libffi-dev \
    libressl-dev \
    sqlite-dev \
-   build-base \
-   curl
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-ENV PATH="/root/.cargo/bin:${PATH}"
-RUN pip3 install -U pip && \
-   pip3 install pykmip requests && \
+   build-base && \
+   pip install pykmip requests && \
    apk del .build-deps && \
    mkdir /pykmip

View File

@ -30,9 +30,7 @@ def create_rsa_private_key(key_size=2048, public_exponent=65537):
    return private_key
-def create_self_signed_certificate(subject_name,
-                                   private_key,
-                                   days_valid=36500):
+def create_self_signed_certificate(subject_name, private_key, days_valid=365):
    subject = x509.Name([
        x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
        x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name)
@ -61,7 +59,7 @@ def create_certificate(subject_name,
                       private_key,
                       signing_certificate,
                       signing_key,
-                      days_valid=36500,
+                      days_valid=365,
                       client_auth=False):
    subject = x509.Name([
        x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
        x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name)
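For context, the two helpers touched by this diff generate the RSA key and the self-signed root certificate used as KMIP test fixtures. The following is a minimal, self-contained sketch of what they do with the cryptography package; the field values and day count are illustrative, not the project's exact ones.

import datetime
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa

def create_rsa_private_key(key_size=2048, public_exponent=65537):
    return rsa.generate_private_key(public_exponent=public_exponent, key_size=key_size)

def create_self_signed_certificate(subject_name, private_key, days_valid=365):
    subject = x509.Name([
        x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
        x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name),
    ])
    now = datetime.datetime.utcnow()
    return (x509.CertificateBuilder()
            .subject_name(subject)
            .issuer_name(subject)  # self-signed: issuer equals subject
            .public_key(private_key.public_key())
            .serial_number(x509.random_serial_number())
            .not_valid_before(now)
            .not_valid_after(now + datetime.timedelta(days=days_valid))
            .sign(private_key, hashes.SHA256()))

key = create_rsa_private_key()
cert = create_self_signed_certificate(u"Root CA", key)
print(cert.public_bytes(serialization.Encoding.PEM).decode())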

View File

@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC6TCCAdGgAwIBAgIUO54wXmqIJGCKxQAH4jhGQXa6ZHIwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAeFw0xOTA0
MDIxODE2MjBaFw0yMDA0MDExODE2MjBaMCQxEDAOBgNVBAoMB1NjYWxpdHkxEDAO
BgNVBAMMB1Jvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDL
T3oapzN3ZEWh5cfe5PlOTgewZ55j7Xjz5ZEWNjPWYmBJfh0+5dntK+c1HvtEL/oa
6vnliPbb3kcl01eoOgmWX7ZgRwWsSb05otBSXJ040eJ8IFKw5Pp8OiWS3wXNusBs
HI/exrGdDTukqarhTBuscbBVtVd/IdQNQZRxB14ci1DjD+i3zBv/oRfrDUbXoBDJ
/ucyCICMthqWzFI509FU6DD1554xvDOoryhCOTHfQFcEWgSln8HaiELlJk9D7164
9qRse2R0s0STTrDgclbQpvt8gJfsWpTuRhjEFe0MKmWWhuYfA+o8eHNvCqQKdRwH
QLx9q1fCwi6Czz7aO8lTAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
hvcNAQELBQADggEBAITJfBlmABEwO/d7320ukoQyV/i2dhC/y2C/mKxuzPXPU8hR
FsBC6LA2oJbatSTSuG3YmHcf8/0mj3o4fCgz2+B7rJLH9WXd8lZdz8CRMwsyVmFY
aI3NvtMc0tV+4W1pxmxBs5IDITLAYIxuTm6kowH9jy85bAnGDYjGK9Hr84keWJIg
a2z7DGhL1HEd6tqJvhEFJFLL4VqDB9vEdvILnav3D3EkKU6lQ0Bvi2XO4t8rOclm
lEfYU7taNgCAYc9y/KcQ13jAokjZxmT8Bhep+Xq4BAHDakzqgD7USUnSUZ0inu8e
2bZItCXIJq/wD5ysOyeT490qVJ6F/8LKS1HYsUE=
-----END CERTIFICATE-----
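This PEM block and the two that follow appear to be static TLS fixtures for the KMIP tests: a root CA, a server certificate for pykmip.local, and a client certificate. An expired fixture typically surfaces as TLS handshake failures in the KMIP suite, so printing the validity window is a quick check. A minimal sketch, assuming the cryptography package and a local copy of the file (the path below is illustrative):

from cryptography import x509

# Print subject and expiry of a PEM certificate fixture.
with open("kmip-ca.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

print("subject:  ", cert.subject.rfc4514_string())
print("not after:", cert.not_valid_after)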

View File

@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC2TCCAcGgAwIBAgIUG83im4Ny72RE536mxXOtJxzjOgAwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAeFw0xOTA0
MDIxODE2MjBaFw0yMDA0MDExODE2MjBaMCkxEDAOBgNVBAoMB1NjYWxpdHkxFTAT
BgNVBAMMDHB5a21pcC5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBALCSoV6WSaIS0FHmJk1+b1/DEBolk4hru4gkk7qqS+vSlHMog2pu37Dvd6Ll
2G2EUVxkrf7bJP1pz5qxLYoZeMDd2MVCzGpdVHrLx7yRB+A7UwSrVpMcQkpxaBrK
upBZuJZoiYgsqAdxxD/NLUAUSyTm3RQ/xJmLRSs3w8F8AfjoGbFZwsAgfnO4kxNR
tdeVf4a4yGp2tmF7QMtMQr2ov0ktGiFwvmosvhkwJjCgvq5IfL4kc4iLhW6vSKPh
e/51Mhq2ntqX+4WxXuhGV4O9rc8tMZ/8zQY8KuabETDgIeBgpE9u+uEqvjnOZmNx
/bZW7MW2tldBmiPFQ+HMFTNICMsCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAhcJt
1605IDKAlCisolU49574gjJv6OdMSOuMF2ZDibK0mIqta65+t9eoRSmwSzysTyWe
6dDt0BRwxzEwuGN8B4DHe5jV8+9NHq8wh/ImyfohX75xSvgnCW2aRuA0qZhi4qCa
pZjy+rZu7Xa10hAmQ8lllYmLrNyRld8dL0eyL3sYzxb5SWX/60kJ4Fo/OkDAOzBo
7P5PKNlCthHL0ND/jV1jqrr+822xWGzGeN4vvqMYMvR71J+dBTCkJj5lRW3MailR
R2zu+W7idIxbs4Gh2JZ4LwPrWG63KhA7Hc+4sMSdrw3Mcp5IMpwIjllBJlllNbKE
x8ARszTHSgP6WtzvaQ==
-----END CERTIFICATE-----

View File

@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC8TCCAdmgAwIBAgIUaGl4Kplsv0uUwIgJKaZByGk6JMwwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAeFw0xOTA0
MDIxODE2MjBaFw0yMDA0MDExODE2MjBaMCUxEDAOBgNVBAoMB1NjYWxpdHkxETAP
BgNVBAMMCEpvaG4gRG9lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
xrms0paQYVeXg/Gra21DQNW9t6lm7/CzsYZRKcqWnALVhg1Ol6NzZYxhx4ezYlga
ztHnKwFUReOqNYWl2Cgivxwav9lpZp4W7LvHemfOcSmy+7fX8ttEs9df/1p9uMu+
AtnE0yiqWrDYQAu2gSQMWv+SjdnTOrOuz88//6CgxKzE6ccC4EKkJuxIr5Vqy+Up
OgF8wZSnx6JuW+o20XhodCLBac/diwvltODif8FHIhkzR948PfGKAofIaV0s2eri
Kfsc7bRDLnUjYLGeEk/PxjydpCy4e+U+fctTvrRxi0KVqnbxz3+B0nx2Cp0IW/DM
RP09Lfx7uQvieiKaqLJElQIDAQABoxowGDAWBgNVHSUBAf8EDDAKBggrBgEFBQcD
AjANBgkqhkiG9w0BAQsFAAOCAQEAk5Xh1EpxMsWhlorkrBadYtkTqsiC4UIBEJvT
hqU2eb3Fom39gSoKTFSJEO2mopMCq34TRG6klkxfMQRxzQxWAmAHAu0BLeWcJ0Rx
FCxZZ5CexZYAH2yJBQDvvTfFCXZ6VmpHfDa+7Z9DBNYm3WPuDROWgnwiTqrtVmu7
3HeBi4KG/DE6tC6QxI+A9Ofj3wcfv25et1NdBQnNMoMmjyWIGlEmhNShgtasNScV
mp+9LxZUFxeVDx3Qnw+wo/bwbyjYGh+osI+7RHpwXobmSQxxC8Vs+hZlKWGIh47b
DU2ONZAO4565Kppp48mTcgmq96IQFLoIK9XY3CkLsowS7IWczQ==
-----END CERTIFICATE-----

View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDGuazSlpBhV5eD
8atrbUNA1b23qWbv8LOxhlEpypacAtWGDU6Xo3NljGHHh7NiWBrO0ecrAVRF46o1
haXYKCK/HBq/2Wlmnhbsu8d6Z85xKbL7t9fy20Sz11//Wn24y74C2cTTKKpasNhA
C7aBJAxa/5KN2dM6s67Pzz//oKDErMTpxwLgQqQm7EivlWrL5Sk6AXzBlKfHom5b
6jbReGh0IsFpz92LC+W04OJ/wUciGTNH3jw98YoCh8hpXSzZ6uIp+xzttEMudSNg
sZ4ST8/GPJ2kLLh75T59y1O+tHGLQpWqdvHPf4HSfHYKnQhb8MxE/T0t/Hu5C+J6
IpqoskSVAgMBAAECggEAJJ4aBkPQHt+w/5MMbyMW/V9lMe55FUVZFyEU24qE7gJn
refNz4tCvDd93PJYT4rEhc+PtRDtomMs/ee+g3IB1Q0ssKUzEsGWn9CKFTgDhj6U
yGU72Xgl1K2e9sKJ3/9K5+OQrQgVO9jSQBroaitmS25EZvb4QRzS3V/m/wduGE8a
V08HuJX7K6XDptaj2wuYxJmb+Zx4RMVc7D9R38zEoPu5yDVIFQSmZtbGSzo2ElyL
93zEiCno9PVIvNCvEayoYA1Mm4hvp7/gmeF8K9aYGHBeMmHJQpixrBRca0zm1en5
TxirD07P7mqENTHUKq9GXciHAmflzBAVYXB7S0zDkQKBgQDrRUNZEN5d9YuteN18
RYdc7bttE5lY1CLtdWera1d1SZ912selsdKBBYl0ukAvVHvjC/XdA/FGfykfK1h1
UV8bWSH3lxaW9wlwn4mtNS4itWY5CaLLlhmUMM3wehSw+iZgTUiNDhhmt5Anj3/H
uSVwumO54+JWC+XjTheIOikAPwKBgQDYPBnbrbEFHp4ZNVArNApyfpefgrnj/ypV
Bs/rJxGtbSe9x92POYmJwHIH44LuU14XQAwSIIKoGwXX66auJ+JPaFf8tAoPQS6j
MxP4YfLesS1sQweEKoYgZ9O+AdyBgOWdubyi063XlM7pUzeCrmxdC5koJ/PQoGOc
8haBAsJGKwKBgQC8qbxKDfbjjeZGY6fo4bCc2p7z50WPL/4aQY2yrs9hZHqU/a4f
tytA/3msuzaBPdRiy9KLO4Adshb9wbqbyXbk7WMJsoUQ5mURhT3YQc8PUjv4/Tso
2uMELObYMm2pRc/EZfUJ+AWlSQo2TyJ+vH/DmBQkmxODQONGlfbU7R637QKBgQC7
aFuA6ajipwafEnXI+/GSCeWfec1irWQjDSRmyhWoGVK4SODdoSBzIzexXp27sMV7
oSbVDxguWj1WRgbQKgEakXSwr9mIHxYsm7hTLZExMJ4NloqNIc3diB8cLsDN/MkF
SlUTSiMBFRe/YUBbIpEIk2TKSNYnmtq6y5Z1ec6mwQKBgDY9sv5xUkON9CjNn4KO
lIs/Ef4id2awMOb1fsTSFTxPjo47UdDQsEF/anxjwc2tYto3WuVpurWxU8Jqknkz
uDtCr8aNVar3d5y3NNFg8oWY1tk9Ha7U05t/PraJRmeduOzy/nWMuZAi+mW9kdRK
WYX5TjM6GzAOrlUr4QdHAfP/
-----END PRIVATE KEY-----

View File

@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCwkqFelkmiEtBR
5iZNfm9fwxAaJZOIa7uIJJO6qkvr0pRzKINqbt+w73ei5dhthFFcZK3+2yT9ac+a
sS2KGXjA3djFQsxqXVR6y8e8kQfgO1MEq1aTHEJKcWgayrqQWbiWaImILKgHccQ/
zS1AFEsk5t0UP8SZi0UrN8PBfAH46BmxWcLAIH5zuJMTUbXXlX+GuMhqdrZhe0DL
TEK9qL9JLRohcL5qLL4ZMCYwoL6uSHy+JHOIi4Vur0ij4Xv+dTIatp7al/uFsV7o
RleDva3PLTGf/M0GPCrmmxEw4CHgYKRPbvrhKr45zmZjcf22VuzFtrZXQZojxUPh
zBUzSAjLAgMBAAECggEAPXHpQdcerI3LfJSQg6sZ6sMgmVi2LGUBD3FbDzwvy1Ku
YhyZDrKimRncEg3V8NZ40aQfgG6WQrFNk1FQvZv7j3Ij+xExvVnZChpb2VzG1tsO
GrPdHrhYwTsRtTETFySBvaWHJqITnvOSDXnC42esdpz4FhHSwnPakB2Ju40ByrG2
M/ljiLpWZC9lMdk+BK7MdjoDOvawCKghovKqxHkxjLVA1CG1sovmm3mTCqO0iOst
b6go/ETb/oSLB+aHV9++bK1Zc3fO26vjz/BDkOqbz66U/74KVqIyOeJy2WarwiDa
sqaRi5c3izwZynhl/eiIDcJJDKD6AyoCHWUbuIdTgQKBgQDW6/Hfb7iO/M+lx4ey
uTxgS5MXOn2ozQFxLbfE5aTdRjPHlq2OhPOK+rmfk0nupEQTPDe7YFwvKaI5kiQQ
R6eJMAcpO4iLGGqjQw1jCBHLo73Ri58v2KEkjOAA+S1jVcWi/nqCPrlpjZqj2640
SKksEUMOfXsPVVbpfEAvqOL0IQKBgQDSUkkdPAALrbaZuOovUurbJoKY6GFHPVJ5
kvrpqNNBYZ12FnEWqJFCTWeF9g3WEjy8XKltxkf8ARxOwXC9P7FTBHX2hXmxRNuE
N7tZ/WML9/9GDlwb4TOQgiIxBOBT5NN7DAS+1api8ik93ZFExD2ufpcBdjVfLxn+
RIIpxVwfawKBgQCirlAUJ9XUbeqjeqftkabw4OPC9mQ9jIfl6owqvwUO9N+m2Rgg
Q+SxM12kO3H/8FkTEkbBT4wXqvT/jO49YG+hOTiCbmzJlL7LO6r7ZhVKRnQdFAl3
xwsaxoOcWQCRK1CBMwz6X44rJqOCGnv/WWysTZirdDHdBmTWMVXIfZbk4QKBgC9M
FekfHxuBOzkinnd5/BrAdEoSqB2vKqbwaMC3GJrxasmtjkz8J35zjb5QcRgdDc+G
PwvStUl0rnr/gWztr+DtdeG0boNw6rS3G8jG9MkyQhPtEsWqRUBQI4RGhnQXV3+q
Wj7YKfMKZj/lXc/LGdvt1+OaQ7JeE0hc+7CNE4R1AoGAGXbXJLj5kGLuq0r2Sj2p
JQPgzp4ZWjQwbkOcTLmGYpn61YjZrlbDDB/3gQ3sQ7q8LYQhgKEpH+cuX0vZixul
V7fj/D8RUxI72+2WlE6rCxYXYmSXEOS2DIx61KdzsL/oSw28dujKxyPRdt8H4uxg
Z7zoolTN1y/WZUxYD2Pxfb4=
-----END PRIVATE KEY-----

View File

@ -4,7 +4,7 @@ port=5696
certificate_path=/ssl/kmip-cert.pem
key_path=/ssl/kmip-key.pem
ca_path=/ssl/kmip-ca.pem
-auth_suite=TLS1.2
+auth_suite=Basic
policy_path=/etc/pykmip/policies
enable_tls_client_auth=True
database_path=/pykmip/pykmip.db
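This is the PyKMIP server configuration used by the pykmip container; the S3 container reaches it through the S3KMIP_* environment variables set in the pod manifest above. As a rough illustration of the client side, here is a minimal sketch using PyKMIP's high-level client; the constructor arguments are assumed from PyKMIP's documented "pie" API rather than taken from this repository.

from kmip.core import enums
from kmip.pie.client import ProxyKmipClient

# TLS material matches what the pod mounts under /ssl-kmip.
client = ProxyKmipClient(
    hostname="pykmip.local",
    port=5696,
    cert="/ssl-kmip/kmip-client-cert.pem",
    key="/ssl-kmip/kmip-client-key.pem",
    ca="/ssl-kmip/kmip-ca.pem",
)

with client:
    # Ask the KMIP server to create a 256-bit AES key and return its UID.
    key_uid = client.create(enums.CryptographicAlgorithm.AES, 256)
    print("created KMIP-managed AES key:", key_uid)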

View File

@ -48,7 +48,7 @@ signed_headers = 'host;x-amz-content-sha256;x-amz-date'
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
    .format(method, canonical_uri, canonical_querystring, canonical_headers,
            signed_headers, payload_hash)
-print(canonical_request)
+print canonical_request
credential_scope = '{0}/{1}/{2}/aws4_request' \
    .format(date_stamp, region, service)
@ -76,4 +76,4 @@ headers = {
endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring
r = requests.get(endpoint, headers=headers)
-print(r.text)
+print (r.text)
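The script this hunk touches builds an AWS Signature Version 4 request by hand (canonical request, credential scope, signed request). For reference, the part not visible in the hunk, deriving the signing key and producing the final signature, follows the standard SigV4 recipe. A minimal sketch with variable names mirroring the script and purely illustrative inputs:

import hashlib
import hmac

def sign(key, msg):
    # One step of the SigV4 key-derivation chain: HMAC-SHA256 over msg.
    return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()

def get_signature_key(secret_key, date_stamp, region, service):
    k_date = sign(('AWS4' + secret_key).encode('utf-8'), date_stamp)
    k_region = sign(k_date, region)
    k_service = sign(k_region, service)
    return sign(k_service, 'aws4_request')

# Illustrative values only.
date_stamp, region, service = '20190731', 'us-east-1', 's3'
string_to_sign = 'AWS4-HMAC-SHA256\n20190731T120000Z\n' \
    '{0}/{1}/{2}/aws4_request\n'.format(date_stamp, region, service) + '0' * 64
signing_key = get_signature_key('fake-secret-key', date_stamp, region, service)
signature = hmac.new(signing_key, string_to_sign.encode('utf-8'),
                     hashlib.sha256).hexdigest()
print(signature)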

View File

@ -1,28 +0,0 @@
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
COPY . ${HOME_DIR}/s3
RUN chown -R ${USER} ${HOME_DIR}
RUN pip3 install redis===3.5.3 requests==2.27.1 && \
apt-get install -y git-lfs
USER ${USER}
WORKDIR ${HOME_DIR}/s3
RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \
git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all && \
yarn global remove typescript
# run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed!
RUN ln -sf /scality-kms node_modules
EXPOSE 8000
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/supervisord.conf"

View File

@ -1,10 +1,3 @@
'use strict'; // eslint-disable-line strict
-require('werelogs').stderrUtils.catchAndTimestampStderr(
-    undefined,
-    // Do not exit as workers have their own listener that will exit
-    // But primary don't have another listener
-    require('cluster').isPrimary ? 1 : null,
-);
require('./lib/server.js')();

File diff suppressed because it is too large

View File

@ -1,41 +1,27 @@
const { auth, errors, policies } = require('arsenal'); const { auth, errors } = require('arsenal');
const async = require('async');
const bucketDelete = require('./bucketDelete'); const bucketDelete = require('./bucketDelete');
const bucketDeleteCors = require('./bucketDeleteCors'); const bucketDeleteCors = require('./bucketDeleteCors');
const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite'); const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle'); const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy'); const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota'); const bucketGet = require('./bucketGet');
const { bucketGet } = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL'); const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors'); const bucketGetCors = require('./bucketGetCors');
const bucketGetVersioning = require('./bucketGetVersioning'); const bucketGetVersioning = require('./bucketGetVersioning');
const bucketGetWebsite = require('./bucketGetWebsite'); const bucketGetWebsite = require('./bucketGetWebsite');
const bucketGetLocation = require('./bucketGetLocation'); const bucketGetLocation = require('./bucketGetLocation');
const bucketGetLifecycle = require('./bucketGetLifecycle'); const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy'); const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead'); const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut'); const { bucketPut } = require('./bucketPut');
const bucketPutACL = require('./bucketPutACL'); const bucketPutACL = require('./bucketPutACL');
const bucketPutCors = require('./bucketPutCors'); const bucketPutCors = require('./bucketPutCors');
const bucketPutVersioning = require('./bucketPutVersioning'); const bucketPutVersioning = require('./bucketPutVersioning');
const bucketPutTagging = require('./bucketPutTagging');
const bucketDeleteTagging = require('./bucketDeleteTagging');
const bucketGetTagging = require('./bucketGetTagging');
const bucketPutWebsite = require('./bucketPutWebsite'); const bucketPutWebsite = require('./bucketPutWebsite');
const bucketPutReplication = require('./bucketPutReplication'); const bucketPutReplication = require('./bucketPutReplication');
const bucketPutLifecycle = require('./bucketPutLifecycle'); const bucketPutLifecycle = require('./bucketPutLifecycle');
const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy'); const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication'); const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication'); const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight'); const corsPreflight = require('./corsPreflight');
@ -43,75 +29,40 @@ const completeMultipartUpload = require('./completeMultipartUpload');
const initiateMultipartUpload = require('./initiateMultipartUpload'); const initiateMultipartUpload = require('./initiateMultipartUpload');
const listMultipartUploads = require('./listMultipartUploads'); const listMultipartUploads = require('./listMultipartUploads');
const listParts = require('./listParts'); const listParts = require('./listParts');
const metadataSearch = require('./metadataSearch');
const { multiObjectDelete } = require('./multiObjectDelete'); const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete'); const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy'); const objectCopy = require('./objectCopy');
const { objectDelete } = require('./objectDelete'); const objectDelete = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging'); const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet'); const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL'); const objectGetACL = require('./objectGetACL');
const objectGetLegalHold = require('./objectGetLegalHold');
const objectGetRetention = require('./objectGetRetention');
const objectGetTagging = require('./objectGetTagging'); const objectGetTagging = require('./objectGetTagging');
const objectHead = require('./objectHead'); const objectHead = require('./objectHead');
const objectPut = require('./objectPut'); const objectPut = require('./objectPut');
const objectPutACL = require('./objectPutACL'); const objectPutACL = require('./objectPutACL');
const objectPutLegalHold = require('./objectPutLegalHold');
const objectPutTagging = require('./objectPutTagging'); const objectPutTagging = require('./objectPutTagging');
const objectPutPart = require('./objectPutPart'); const objectPutPart = require('./objectPutPart');
const objectPutCopyPart = require('./objectPutCopyPart'); const objectPutCopyPart = require('./objectPutCopyPart');
const objectPutRetention = require('./objectPutRetention');
const objectRestore = require('./objectRestore');
const prepareRequestContexts const prepareRequestContexts
= require('./apiUtils/authorization/prepareRequestContexts'); = require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet'); const serviceGet = require('./serviceGet');
const vault = require('../auth/vault'); const vault = require('../auth/vault');
const website = require('./website'); const websiteGet = require('./websiteGet');
const websiteHead = require('./websiteHead');
const writeContinue = require('../utilities/writeContinue'); const writeContinue = require('../utilities/writeContinue');
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders'); const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
const parseCopySource = require('./apiUtils/object/parseCopySource'); const parseCopySource = require('./apiUtils/object/parseCopySource');
const { tagConditionKeyAuth } = require('./apiUtils/authorization/tagConditionKeys');
const { isRequesterASessionUser } = require('./apiUtils/authorization/permissionChecks');
const checkHttpHeadersSize = require('./apiUtils/object/checkHttpHeadersSize');
const monitoringMap = policies.actionMaps.actionMonitoringMapS3;
auth.setHandler(vault); auth.setHandler(vault);
/* eslint-disable no-param-reassign */ /* eslint-disable no-param-reassign */
const api = { const api = {
callApiMethod(apiMethod, request, response, log, callback) { callApiMethod(apiMethod, request, response, log, callback) {
// Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod];
if (!actionLog &&
apiMethod !== 'websiteGet' &&
apiMethod !== 'websiteHead' &&
apiMethod !== 'corsPreflight') {
log.error('callApiMethod(): No actionLog for this api method', {
apiMethod,
});
}
log.addDefaultFields({
service: 's3',
action: actionLog,
bucketName: request.bucketName,
});
if (request.objectKey) {
log.addDefaultFields({
objectKey: request.objectKey,
});
}
let returnTagCount = true; let returnTagCount = true;
const validationRes = validateQueryAndHeaders(request, log); const validationRes =
validateQueryAndHeaders(request.method, request.query,
request.headers, log);
if (validationRes.error) { if (validationRes.error) {
log.debug('request query / header validation failed', { log.debug('request query / header validation failed', {
error: validationRes.error, error: validationRes.error,
@ -123,7 +74,6 @@ const api = {
// no need to check auth on website or cors preflight requests // no need to check auth on website or cors preflight requests
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' || if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
apiMethod === 'corsPreflight') { apiMethod === 'corsPreflight') {
request.actionImplicitDenies = false;
return this[apiMethod](request, log, callback); return this[apiMethod](request, log, callback);
} }
@ -136,242 +86,131 @@ const api = {
return process.nextTick(callback, parsingError); return process.nextTick(callback, parsingError);
} }
const { httpHeadersSizeError } = checkHttpHeadersSize(request.headers);
if (httpHeadersSizeError) {
log.debug('http header size limit exceeded', {
error: httpHeadersSizeError,
});
return process.nextTick(callback, httpHeadersSizeError);
}
const requestContexts = prepareRequestContexts(apiMethod, request, const requestContexts = prepareRequestContexts(apiMethod, request,
sourceBucket, sourceObject, sourceVersionId); sourceBucket, sourceObject, sourceVersionId);
// Extract all the _apiMethods and store them in an array return auth.server.doAuth(request, log, (err, userInfo,
const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : []; authorizationResults, streamingV4Params) => {
// Attach the names to the current request
// eslint-disable-next-line no-param-reassign
request.apiMethods = apiMethods;
function checkAuthResults(authResults) {
let returnTagCount = true;
const isImplicitDeny = {};
let isOnlyImplicitDeny = true;
if (apiMethod === 'objectGet') {
// first item checks s3:GetObject(Version) action
if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
log.trace('get object authorization denial from Vault');
return errors.AccessDenied;
}
// TODO add support for returnTagCount in the bucket policy
// checks
isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
// second item checks s3:GetObject(Version)Tagging action
if (!authResults[1].isAllowed) {
log.trace('get tagging authorization denial ' +
'from Vault');
returnTagCount = false;
}
} else {
for (let i = 0; i < authResults.length; i++) {
isImplicitDeny[authResults[i].action] = true;
if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
// Any explicit deny rejects the current API call
log.trace('authorization denial from Vault');
return errors.AccessDenied;
}
if (authResults[i].isAllowed) {
// If the action is allowed, the result is not implicit
// Deny.
isImplicitDeny[authResults[i].action] = false;
isOnlyImplicitDeny = false;
}
}
}
// These two APIs cannot use ACLs or Bucket Policies, hence, any
// implicit deny from vault must be treated as an explicit deny.
if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
return errors.AccessDenied;
}
return { returnTagCount, isImplicitDeny };
}
return async.waterfall([
next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err });
return next(arsenalError);
}
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
}
if (isRequesterASessionUser(userInfo)) {
authNames.sessionName = userInfo.getShortid().split(':')[1];
}
log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}
// issue 100 Continue to the client
writeContinue(request, response);
const MAX_POST_LENGTH = request.method === 'POST' ?
1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
const post = [];
let postLength = 0;
request.on('data', chunk => {
postLength += chunk.length;
// Sanity check on post length
if (postLength <= MAX_POST_LENGTH) {
post.push(chunk);
}
});
request.on('error', err => {
log.trace('error receiving request', {
error: err,
});
return next(errors.InternalError);
});
request.on('end', () => {
if (postLength > MAX_POST_LENGTH) {
log.error('body length is too long for request type',
{ postLength });
return next(errors.InvalidRequest);
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
});
return undefined;
},
// Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
authorizationResults,
request,
requestContexts,
apiMethod,
log,
(err, authResultsWithTags) => {
if (err) {
log.trace('tag authentication error', { error: err });
return next(err);
}
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
},
),
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) { if (err) {
log.trace('authentication error', { error: err });
return callback(err); return callback(err);
} }
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) { if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults); if (apiMethod === 'objectGet') {
if (checkedResults instanceof Error) { // first item checks s3:GetObject(Version) action
return callback(checkedResults); if (!authorizationResults[0].isAllowed) {
log.trace('get object authorization denial from Vault');
return callback(errors.AccessDenied);
}
// second item checks s3:GetObject(Version)Tagging action
if (!authorizationResults[1].isAllowed) {
log.trace('get tagging authorization denial ' +
'from Vault');
returnTagCount = false;
}
} else {
for (let i = 0; i < authorizationResults.length; i++) {
if (!authorizationResults[i].isAllowed) {
log.trace('authorization denial from Vault');
return callback(errors.AccessDenied);
}
}
} }
returnTagCount = checkedResults.returnTagCount;
request.actionImplicitDenies = checkedResults.isImplicitDeny;
} else {
// create an object of keys apiMethods with all values to false:
// for backward compatibility, all apiMethods are allowed by default
// thus it is explicitly allowed, so implicit deny is false
request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
acc[curr] = false;
return acc;
}, {});
} }
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5, // issue 100 Continue to the client
(hook, done) => hook(err, done), writeContinue(request, response);
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params, return this[apiMethod](userInfo, request, streamingV4Params,
log, methodCallback, authorizationResults); log, callback);
} }
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { const MAX_POST_LENGTH = request.method.toUpperCase() === 'POST' ?
return this[apiMethod](userInfo, request, sourceBucket, 1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
sourceObject, sourceVersionId, log, methodCallback); const post = [];
} let postLength = 0;
if (apiMethod === 'objectGet') { request.on('data', chunk => {
return this[apiMethod](userInfo, request, returnTagCount, log, callback); postLength += chunk.length;
} // Sanity check on post length
return this[apiMethod](userInfo, request, log, methodCallback); if (postLength <= MAX_POST_LENGTH) {
}); post.push(chunk);
}
return undefined;
});
request.on('error', err => {
log.trace('error receiving request', {
error: err,
});
return callback(errors.InternalError);
});
request.on('end', () => {
if (postLength > MAX_POST_LENGTH) {
log.error('body length is too long for request type',
{ postLength });
return callback(errors.InvalidRequest);
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
if (apiMethod === 'objectCopy' ||
apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request,
returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
});
return undefined;
}, 's3', requestContexts);
}, },
bucketDelete, bucketDelete,
bucketDeleteCors, bucketDeleteCors,
bucketDeleteEncryption,
bucketDeleteWebsite, bucketDeleteWebsite,
bucketGet, bucketGet,
bucketGetACL, bucketGetACL,
bucketGetCors, bucketGetCors,
bucketGetObjectLock,
bucketGetVersioning, bucketGetVersioning,
bucketGetWebsite, bucketGetWebsite,
bucketGetLocation, bucketGetLocation,
bucketGetEncryption,
bucketHead, bucketHead,
bucketPut, bucketPut,
bucketPutACL, bucketPutACL,
bucketPutCors, bucketPutCors,
bucketPutVersioning, bucketPutVersioning,
bucketPutTagging,
bucketDeleteTagging,
bucketGetTagging,
bucketPutWebsite, bucketPutWebsite,
bucketPutReplication, bucketPutReplication,
bucketGetReplication, bucketGetReplication,
bucketDeleteReplication, bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle, bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle, bucketGetLifecycle,
bucketDeleteLifecycle, bucketDeleteLifecycle,
bucketPutPolicy, bucketPutPolicy,
bucketGetPolicy, bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy, bucketDeletePolicy,
bucketPutObjectLock,
bucketPutNotification,
bucketGetNotification,
bucketPutEncryption,
corsPreflight, corsPreflight,
completeMultipartUpload, completeMultipartUpload,
initiateMultipartUpload, initiateMultipartUpload,
listMultipartUploads, listMultipartUploads,
listParts, listParts,
metadataSearch,
multiObjectDelete, multiObjectDelete,
multipartDelete, multipartDelete,
objectDelete, objectDelete,
objectDeleteTagging, objectDeleteTagging,
objectGet, objectGet,
objectGetACL, objectGetACL,
objectGetLegalHold,
objectGetRetention,
objectGetTagging, objectGetTagging,
objectCopy, objectCopy,
objectHead, objectHead,
objectPut, objectPut,
objectPutACL, objectPutACL,
objectPutLegalHold,
objectPutTagging, objectPutTagging,
objectPutPart, objectPutPart,
objectPutCopyPart, objectPutCopyPart,
objectPutRetention,
objectRestore,
serviceGet, serviceGet,
websiteGet: website, websiteGet,
websiteHead: website, websiteHead,
}; };
module.exports = api; module.exports = api;

View File

@ -0,0 +1,154 @@
const constants = require('../../../../constants');
// whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
function getServiceAccountProperties(canonicalID) {
const canonicalIDArray = canonicalID.split('/');
const serviceName = canonicalIDArray[canonicalIDArray.length - 1];
return constants.serviceAccountProperties[serviceName];
}
function isServiceAccount(canonicalID) {
return getServiceAccountProperties(canonicalID) !== undefined;
}
function isBucketAuthorized(bucket, requestType, canonicalID) {
// Check to see if user is authorized to perform a
// particular action on bucket based on ACLs.
// TODO: Add IAM checks and bucket policy checks.
if (bucket.getOwner() === canonicalID || isServiceAccount(canonicalID)) {
return true;
} else if (requestType === 'bucketOwnerAction') {
// only bucket owner can modify or retrieve this property of a bucket
return false;
}
const bucketAcl = bucket.getAcl();
if (requestType === 'bucketGet' || requestType === 'bucketHead') {
if (bucketAcl.Canned === 'public-read'
|| bucketAcl.Canned === 'public-read-write'
|| (bucketAcl.Canned === 'authenticated-read'
&& canonicalID !== constants.publicId)) {
return true;
} else if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.READ.indexOf(canonicalID) > -1) {
return true;
}
}
if (requestType === 'bucketGetACL') {
if ((bucketAcl.Canned === 'log-delivery-write'
&& canonicalID === constants.logId)
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.READ_ACP.indexOf(canonicalID) > -1) {
return true;
}
}
if (requestType === 'bucketPutACL') {
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true;
}
}
if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) {
return true;
}
if (requestType === 'objectDelete' || requestType === 'objectPut') {
if (bucketAcl.Canned === 'public-read-write'
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) {
return true;
}
}
// Note that an account can have the ability to do objectPutACL,
// objectGetACL, objectHead or objectGet even if the account has no rights
// to the bucket holding the object. So, if the request type is
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket
// authorization check should just return true so can move on to check
// rights at the object level.
return (requestType === 'objectPutACL' || requestType === 'objectGetACL' ||
requestType === 'objectGet' || requestType === 'objectHead');
}
function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
const bucketOwner = bucket.getOwner();
if (!objectMD) {
return false;
}
if (objectMD['owner-id'] === canonicalID) {
return true;
}
if (isServiceAccount(canonicalID)) {
return true;
}
// account is authorized if:
// - requesttype is "bucketOwnerAction" (example: for objectTagging) and
// - account is the bucket owner
if (requestType === 'bucketOwnerAction' && bucketOwner === canonicalID) {
return true;
}
if (requestType === 'objectGet' || requestType === 'objectHead') {
if (objectMD.acl.Canned === 'public-read'
|| objectMD.acl.Canned === 'public-read-write'
|| (objectMD.acl.Canned === 'authenticated-read'
&& canonicalID !== constants.publicId)) {
return true;
} else if (objectMD.acl.Canned === 'bucket-owner-read'
&& bucketOwner === canonicalID) {
return true;
} else if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.READ.indexOf(canonicalID) > -1) {
return true;
}
}
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if (requestType === 'objectPut' || requestType === 'objectDelete') {
return true;
}
if (requestType === 'objectPutACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true;
}
}
if (requestType === 'objectGetACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.READ_ACP.indexOf(canonicalID) > -1) {
return true;
}
}
// allow public reads on buckets that are whitelisted for anonymous reads
// TODO: remove this after bucket policies are implemented
const bucketAcl = bucket.getAcl();
const allowPublicReads = publicReadBuckets.includes(bucket.getName()) &&
bucketAcl.Canned === 'public-read' &&
(requestType === 'objectGet' || requestType === 'objectHead');
if (allowPublicReads) {
return true;
}
return false;
}
module.exports = {
isBucketAuthorized,
isObjAuthorized,
getServiceAccountProperties,
isServiceAccount,
};

View File

@ -1,29 +0,0 @@
const { errors } = require('arsenal');
const vault = require('../../../auth/vault');
function checkExpectedBucketOwner(headers, bucket, log, cb) {
const expectedOwner = headers['x-amz-expected-bucket-owner'];
if (expectedOwner === undefined) {
return cb();
}
const bucketOwner = bucket.getOwner();
return vault.getAccountIds([bucketOwner], log, (error, res) => {
if (error) {
log.error('error fetch accountId from vault', {
method: 'checkExpectedBucketOwner',
error,
});
}
if (error || res[bucketOwner] !== expectedOwner) {
return cb(errors.AccessDenied);
}
return cb();
});
}
module.exports = {
checkExpectedBucketOwner,
};

View File

@ -1,641 +0,0 @@
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
const { errors } = require('arsenal');
const { parseCIDR, isValid } = require('ipaddr.js');
const constants = require('../../../../constants');
const { config } = require('../../../Config');
const {
allAuthedUsersId,
bucketOwnerActions,
logId,
publicId,
arrayOfAllowed,
assumedRoleArnResourceType,
backbeatLifecycleSessionName,
actionsToConsiderAsObjectPut,
} = constants;
// whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
function getServiceAccountProperties(canonicalID) {
const canonicalIDArray = canonicalID.split('/');
const serviceName = canonicalIDArray[canonicalIDArray.length - 1];
return constants.serviceAccountProperties[serviceName];
}
function isServiceAccount(canonicalID) {
return getServiceAccountProperties(canonicalID) !== undefined;
}
function isRequesterASessionUser(authInfo) {
const regexpAssumedRoleArn = /^arn:aws:sts::[0-9]{12}:assumed-role\/.*$/;
return regexpAssumedRoleArn.test(authInfo.getArn());
}
function isRequesterNonAccountUser(authInfo) {
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
}
/**
* Checks the access control for a given bucket based on the request type and user's canonical ID.
*
* @param {Bucket} bucket - The bucket to check access control for.
* @param {string} requestType - The list of s3 actions to check within the API call.
* @param {string} canonicalID - The canonical ID of the user making the request.
* @param {string} mainApiCall - The main API call (first item of the requestType).
*
* @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
*/
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
// Same logic applies on the Versioned APIs, so let's simplify it.
let requestTypeParsed = requestType.endsWith('Version') ?
requestType.slice(0, 'Version'.length * -1) : requestType;
requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
'objectPut' : requestTypeParsed;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
if (bucket.getOwner() === canonicalID) {
return true;
}
if (parsedMainApiCall === 'objectGet') {
if (requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (parsedMainApiCall === 'objectPut') {
if (arrayOfAllowed.includes(requestTypeParsed)) {
return true;
}
}
const bucketAcl = bucket.getAcl();
if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') {
if (bucketAcl.Canned === 'public-read'
|| bucketAcl.Canned === 'public-read-write'
|| (bucketAcl.Canned === 'authenticated-read'
&& canonicalID !== publicId)) {
return true;
} else if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.READ.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.READ.indexOf(publicId) > -1
|| (bucketAcl.READ.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'bucketGetACL') {
if ((bucketAcl.Canned === 'log-delivery-write'
&& canonicalID === logId)
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.READ_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.READ_ACP.indexOf(publicId) > -1
|| (bucketAcl.READ_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'bucketPutACL') {
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.WRITE_ACP.indexOf(publicId) > -1
|| (bucketAcl.WRITE_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') {
if (bucketAcl.Canned === 'public-read-write'
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) {
return true;
} else if (bucketAcl.WRITE.indexOf(publicId) > -1
|| (bucketAcl.WRITE.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
// Note that an account can have the ability to do objectPutACL,
// objectGetACL, objectHead or objectGet even if the account has no rights
// to the bucket holding the object. So, if the request type is
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket
// authorization check should just return true so can move on to check
// rights at the object level.
return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
|| requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
}
function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
isUserUnauthenticated, mainApiCall) {
const bucketOwner = bucket.getOwner();
const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
'objectPut' : requestType;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
// acls don't distinguish between users and accounts, so both should be allowed
if (bucketOwnerActions.includes(requestTypeParsed)
&& (bucketOwner === canonicalID)) {
return true;
}
if (objectMD['owner-id'] === canonicalID) {
return true;
}
// Backward compatibility
if (parsedMainApiCall === 'objectGet') {
if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
&& requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (!objectMD.acl) {
return false;
}
if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
if (objectMD.acl.Canned === 'public-read'
|| objectMD.acl.Canned === 'public-read-write'
|| (objectMD.acl.Canned === 'authenticated-read'
&& canonicalID !== publicId)) {
return true;
} else if (objectMD.acl.Canned === 'bucket-owner-read'
&& bucketOwner === canonicalID) {
return true;
} else if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.READ.indexOf(canonicalID) > -1) {
return true;
} else if (objectMD.acl.READ.indexOf(publicId) > -1
|| (objectMD.acl.READ.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
return true;
}
if (requestTypeParsed === 'objectPutACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (objectMD.acl.WRITE_ACP.indexOf(publicId) > -1
|| (objectMD.acl.WRITE_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
if (requestTypeParsed === 'objectGetACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
|| objectMD.acl.READ_ACP.indexOf(canonicalID) > -1) {
return true;
} else if (objectMD.acl.READ_ACP.indexOf(publicId) > -1
|| (objectMD.acl.READ_ACP.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
&& canonicalID !== publicId)
|| objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
return true;
}
}
// allow public reads on buckets that are whitelisted for anonymous reads
// TODO: remove this after bucket policies are implemented
const bucketAcl = bucket.getAcl();
const allowPublicReads = publicReadBuckets.includes(bucket.getName())
&& bucketAcl.Canned === 'public-read'
&& (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
if (allowPublicReads) {
return true;
}
return false;
}
function _checkBucketPolicyActions(requestType, actions, log) {
const mappedAction = actionMaps.actionMapBP[requestType];
// Deny any action that isn't in list of controlled actions
if (!mappedAction) {
return false;
}
return evaluators.isActionApplicable(mappedAction, actions, log);
}
function _checkBucketPolicyResources(request, resource, log) {
if (!request || (Array.isArray(resource) && resource.length === 0)) {
return true;
}
// build request context from the request!
const requestContext = new RequestContext(request.headers, request.query,
request.bucketName, request.objectKey, null,
request.connection.encrypted, request.resourceType, 's3');
return evaluators.isResourceApplicable(requestContext, resource, log);
}
function _checkBucketPolicyConditions(request, conditions, log) {
const ip = request ? requestUtils.getClientIp(request, config) : undefined;
if (!conditions) {
return true;
}
// build request context from the request!
const requestContext = new RequestContext(request.headers, request.query,
request.bucketName, request.objectKey, ip,
request.connection.encrypted, request.resourceType, 's3', null, null,
null, null, null, null, null, null, null, null, null,
request.objectLockRetentionDays);
return evaluators.meetConditions(requestContext, conditions, log);
}
function _getAccountId(arn) {
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
return arn.substr(13, 12);
}
function _isAccountId(principal) {
return (principal.length === 12 && /^\d+$/.test(principal));
}
function _checkPrincipal(requester, principal) {
if (principal === '*') {
return true;
}
if (principal === requester) {
return true;
}
if (_isAccountId(principal)) {
return _getAccountId(requester) === principal;
}
if (principal.endsWith('root')) {
return _getAccountId(requester) === _getAccountId(principal);
}
return false;
}
function _checkPrincipals(canonicalID, arn, principal) {
if (principal === '*') {
return true;
}
if (principal.CanonicalUser) {
if (Array.isArray(principal.CanonicalUser)) {
return principal.CanonicalUser.some(p => _checkPrincipal(canonicalID, p));
}
return _checkPrincipal(canonicalID, principal.CanonicalUser);
}
if (principal.AWS) {
if (Array.isArray(principal.AWS)) {
return principal.AWS.some(p => _checkPrincipal(arn, p));
}
return _checkPrincipal(arn, principal.AWS);
}
return false;
}
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
let permission = 'defaultDeny';
// if requester is user within bucket owner account, actions should be
// allowed unless explicitly denied (assumes allowed by IAM policy)
if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
permission = 'allow';
}
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
while (copiedStatement.length > 0) {
const s = copiedStatement[0];
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
// explicit deny trumps any allows, so return immediately
return 'explicitDeny';
}
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
permission = 'allow';
}
copiedStatement = copiedStatement.splice(1);
}
return permission;
}
function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log,
request, aclPermission, results, actionImplicitDenies) {
const bucketPolicy = bucket.getBucketPolicy();
let processedResult = results[requestType];
if (!bucketPolicy) {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
} else {
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
bucketOwner, log, request, actionImplicitDenies);
if (bucketPolicyPermission === 'explicitDeny') {
processedResult = false;
} else if (bucketPolicyPermission === 'allow') {
processedResult = true;
} else {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
}
}
return processedResult;
}
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const mainApiCall = requestTypes[0];
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
// Check to see if user is authorized to perform a
// particular action on bucket based on ACLs.
// TODO: Add IAM checks
let requesterIsNotUser = true;
let arn = null;
if (authInfo) {
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn();
}
// if the bucket owner is an account, users should not have default access
if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
// In case of error bucket access is checked with bucketGet
// For website, bucket policy only uses objectGet and ignores bucketGet
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
// bucketGet should be used to check acl but switched to objectGet for bucket policy
if (isWebsite && _requestType === 'bucketGet') {
// eslint-disable-next-line no-param-reassign
_requestType = 'objectGet';
actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
}
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
request, aclPermission, results, actionImplicitDenies);
});
}
function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {},
log, request) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
let arn = null;
if (authInfo) {
arn = authInfo.getArn();
}
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
request, true, results, actionImplicitDenies);
});
}
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
const mainApiCall = requestTypes[0];
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
const parsedMethodName = _requestType.endsWith('Version')
? _requestType.slice(0, -7) : _requestType;
const bucketOwner = bucket.getOwner();
if (!objectMD) {
// check bucket has read access
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
let permission = 'bucketGet';
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
permission = 'objectPut';
}
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
actionImplicitDenies, isWebsite);
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
&& results[_requestType] === false) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
}
return results[_requestType];
}
let requesterIsNotUser = true;
let arn = null;
let isUserUnauthenticated = false;
if (authInfo) {
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn();
isUserUnauthenticated = arn === undefined;
}
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
// account is authorized if:
// - requesttype is included in bucketOwnerActions and
// - account is the bucket owner
// - requester is account, not user
if (bucketOwnerActions.includes(parsedMethodName)
&& (bucketOwner === canonicalID)
&& requesterIsNotUser) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName,
canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall);
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner,
log, request, aclPermission, results, actionImplicitDenies);
});
}
function _checkResource(resource, bucketArn) {
if (resource === bucketArn) {
return true;
}
if (resource.includes('/')) {
const rSubs = resource.split('/');
return rSubs[0] === bucketArn;
}
return false;
}
// the resources specified in the bucket policy should contain the bucket name
function validatePolicyResource(bucketName, policy) {
const bucketArn = `arn:aws:s3:::${bucketName}`;
return policy.Statement.every(s => {
if (Array.isArray(s.Resource)) {
return s.Resource.every(r => _checkResource(r, bucketArn));
}
if (typeof s.Resource === 'string') {
return _checkResource(s.Resource, bucketArn);
}
return false;
});
}
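// Editor's illustrative sketch, not part of the file shown here: the bucket
// name and policy below are hypothetical. A statement resource is accepted
// only if it is the bucket ARN itself or a key path under that bucket.
const examplePolicyResourceCheck = {
    Statement: [
        { Resource: 'arn:aws:s3:::examplebucket' },
        { Resource: ['arn:aws:s3:::examplebucket/*'] },
    ],
};
validatePolicyResource('examplebucket', examplePolicyResourceCheck); // => true
validatePolicyResource('otherbucket', examplePolicyResourceCheck);   // => false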
function checkIp(value) {
const errString = 'Invalid IP address in Conditions';
const values = Array.isArray(value) ? value : [value];
for (let i = 0; i < values.length; i++) {
// These preliminary checks validate the provided IP address
// against ipaddr.js, the library we use when evaluating IP
// condition keys. This ensures compatibility, but additional
// checks are required to enforce the expected notation
// (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise we would
// accept IP formats that are not standard in an AWS use case.
try {
try {
parseCIDR(values[i]);
} catch (err) {
isValid(values[i]);
}
} catch (err) {
return errString;
}
// Apply the existing IP validation logic to each element
const validateIpRegex = ip => {
if (constants.ipv4Regex.test(ip)) {
return ip.split('.').every(part => parseInt(part, 10) <= 255);
}
if (constants.ipv6Regex.test(ip)) {
return ip.split(':').every(part => part.length <= 4);
}
return false;
};
if (validateIpRegex(values[i]) !== true) {
return errString;
}
}
// If the function hasn't returned by now, all elements are valid
return null;
}
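// Editor's illustrative sketch, not part of the file shown here; it assumes
// constants.ipv4Regex/ipv6Regex accept the CIDR notation described in the
// comment above. Values that cannot be parsed yield the error string.
checkIp('192.168.1.0/24');          // expected: null (valid IPv4 CIDR)
checkIp(['not-an-ip', '10.0.0.1']); // expected: 'Invalid IP address in Conditions'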
// This function checks, for every bucket policy condition, whether the values
// provided are valid for the condition type. If not, it returns a relevant
// MalformedPolicy error string.
function validatePolicyConditions(policy) {
const validConditions = [
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
{ conditionKey: 's3:object-lock-remaining-retention-days' },
];
// keys where value type does not seem to be checked by AWS:
// - s3:object-lock-remaining-retention-days
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
return null;
}
// there can be multiple statements in the policy, each with a Condition enclosure
for (let i = 0; i < policy.Statement.length; i++) {
const s = policy.Statement[i];
if (s.Condition) {
const conditionOperators = Object.keys(s.Condition);
// there can be multiple condition operations in the Condition enclosure
// eslint-disable-next-line no-restricted-syntax
for (const conditionOperator of conditionOperators) {
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
const conditionValue = s.Condition[conditionOperator][conditionKey];
const validCondition = validConditions.find(validCondition =>
validCondition.conditionKey === conditionKey
);
// AWS does not return an error if the condition key starts with 'aws:',
// so we reproduce this behaviour
if (!validCondition && !conditionKey.startsWith('aws:')) {
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
}
if (validCondition && validCondition.conditionValueTypeChecker) {
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
if (conditionValueTypeError) {
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
}
}
}
}
}
return null;
}
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts'
&& resourceType === assumedRoleArnResourceType
&& sessionName === backbeatLifecycleSessionName);
}
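// Editor's illustrative sketch, not part of the file shown here, reusing the
// example ARN from the docblock above; expected results shown as comments.
const exampleLifecycleArn =
    'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle';
isLifecycleSession(exampleLifecycleArn); // => true
isLifecycleSession(null);                // => false (no arn provided)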
module.exports = {
isBucketAuthorized,
isObjAuthorized,
getServiceAccountProperties,
isServiceAccount,
isRequesterASessionUser,
isRequesterNonAccountUser,
checkBucketAcls,
checkObjectAcls,
validatePolicyResource,
validatePolicyConditions,
isLifecycleSession,
evaluateBucketPolicyWithIAM,
};

View File

@ -1,20 +1,10 @@
 const { policies } = require('arsenal');
-const { config } = require('../../../Config');
-const { RequestContext, requestUtils } = policies;
+const RequestContext = policies.RequestContext;
 let apiMethodAfterVersionCheck;
-const apiMethodWithVersion = {
-    objectGetACL: true,
-    objectPutACL: true,
-    objectGet: true,
-    objectDelete: true,
-    objectPutTagging: true,
-    objectGetTagging: true,
-    objectDeleteTagging: true,
-    objectGetLegalHold: true,
-    objectPutLegalHold: true,
-    objectPutRetention: true,
-};
+const apiMethodWithVersion = { objectGetACL: true, objectPutACL: true,
+    objectGet: true, objectDelete: true, objectPutTagging: true,
+    objectGetTagging: true, objectDeleteTagging: true };
 function isHeaderAcl(headers) {
     return headers['x-amz-grant-read'] || headers['x-amz-grant-read-acp'] ||
@ -43,7 +33,8 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
// null as the requestContext to Vault so it will only do an authentication // null as the requestContext to Vault so it will only do an authentication
// check. // check.
const ip = requestUtils.getClientIp(request, config); const ip = request.headers['x-forwarded-for'] ||
request.socket.remoteAddress;
function generateRequestContext(apiMethod) { function generateRequestContext(apiMethod) {
return new RequestContext(request.headers, return new RequestContext(request.headers,
@ -52,7 +43,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3'); apiMethod, 's3');
} }
if (apiMethod === 'bucketPut') { if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
return null; return null;
} }
@ -65,17 +56,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = []; const requestContexts = [];
if (apiMethod === 'multiObjectDelete') { if (apiMethodAfterVersionCheck === 'objectCopy'
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') { || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' : const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet'; 'objectGet';
@ -107,126 +88,27 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const objectGetTaggingAction = (request.query && const objectGetTaggingAction = (request.query &&
request.query.versionId) ? 'objectGetTaggingVersion' : request.query.versionId) ? 'objectGetTaggingVersion' :
'objectGetTagging'; 'objectGetTagging';
if (request.headers['x-amz-version-id']) {
const objectGetVersionAction = 'objectGetVersion';
const getVersionResourceVersion =
generateRequestContext(objectGetVersionAction);
requestContexts.push(getVersionResourceVersion);
}
const getRequestContext = const getRequestContext =
generateRequestContext(apiMethodAfterVersionCheck); generateRequestContext(apiMethodAfterVersionCheck);
const getTaggingRequestContext = const getTaggingRequestContext =
generateRequestContext(objectGetTaggingAction); generateRequestContext(objectGetTaggingAction);
requestContexts.push(getRequestContext, getTaggingRequestContext); requestContexts.push(getRequestContext, getTaggingRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectGetTagging') {
const objectGetTaggingAction = 'objectGetTagging';
const getTaggingResourceVersion =
generateRequestContext(objectGetTaggingAction);
requestContexts.push(getTaggingResourceVersion);
if (request.headers['x-amz-version-id']) {
const objectGetTaggingVersionAction = 'objectGetTaggingVersion';
const getTaggingVersionResourceVersion =
generateRequestContext(objectGetTaggingVersionAction);
requestContexts.push(getTaggingVersionResourceVersion);
}
} else if (apiMethodAfterVersionCheck === 'objectHead') {
const objectHeadAction = 'objectHead';
const headObjectAction =
generateRequestContext(objectHeadAction);
requestContexts.push(headObjectAction);
if (request.headers['x-amz-version-id']) {
const objectHeadVersionAction = 'objectGetVersion';
const headObjectVersion =
generateRequestContext(objectHeadVersionAction);
requestContexts.push(headObjectVersion);
}
} else if (apiMethodAfterVersionCheck === 'objectPutTagging') {
const putObjectTaggingRequestContext =
generateRequestContext('objectPutTagging');
requestContexts.push(putObjectTaggingRequestContext);
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
} else if (apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const putObjectRequestContext =
generateRequestContext('objectPut');
requestContexts.push(putObjectRequestContext);
const getObjectRequestContext =
generateRequestContext('objectGet');
requestContexts.push(getObjectRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectPut') { } else if (apiMethodAfterVersionCheck === 'objectPut') {
// if put object with version const putRequestContext =
if (request.headers['x-scal-s3-version-id'] || generateRequestContext(apiMethodAfterVersionCheck);
request.headers['x-scal-s3-version-id'] === '') { requestContexts.push(putRequestContext);
const putVersionRequestContext = // if put object (versioning) with tag set
generateRequestContext('objectPutVersion'); if (request.headers['x-amz-tagging']) {
requestContexts.push(putVersionRequestContext); const putTaggingRequestContext =
} else { generateRequestContext('objectPutTagging');
const putRequestContext = requestContexts.push(putTaggingRequestContext);
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(putRequestContext);
// if put object (versioning) with tag set
if (request.headers['x-amz-tagging']) {
const putTaggingRequestContext =
generateRequestContext('objectPutTagging');
requestContexts.push(putTaggingRequestContext);
}
if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) {
const putLegalHoldStatusAction =
generateRequestContext('objectPutLegalHold');
requestContexts.push(putLegalHoldStatusAction);
}
// if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =
generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext);
}
if (request.headers['x-amz-object-lock-mode']) {
const putObjectLockRequestContext =
generateRequestContext('objectPutRetention');
requestContexts.push(putObjectLockRequestContext);
}
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
} }
} else if (apiMethodAfterVersionCheck === 'initiateMultipartUpload' ||
apiMethodAfterVersionCheck === 'objectPutPart' ||
apiMethodAfterVersionCheck === 'completeMultipartUpload'
) {
if (request.headers['x-scal-s3-version-id'] ||
request.headers['x-scal-s3-version-id'] === '') {
const putVersionRequestContext =
generateRequestContext('objectPutVersion');
requestContexts.push(putVersionRequestContext);
} else {
const putRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(putRequestContext);
}
// if put object (versioning) with ACL // if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) { if (isHeaderAcl(request.headers)) {
const putAclRequestContext = const putAclRequestContext =
generateRequestContext('objectPutACL'); generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext); requestContexts.push(putAclRequestContext);
} }
if (request.headers['x-amz-object-lock-mode']) {
const putObjectLockRequestContext =
generateRequestContext('objectPutRetention');
requestContexts.push(putObjectLockRequestContext);
}
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
} else { } else {
const requestContext = const requestContext =
generateRequestContext(apiMethodAfterVersionCheck); generateRequestContext(apiMethodAfterVersionCheck);

View File

@ -1,99 +0,0 @@
const async = require('async');
const { auth, s3middleware } = require('arsenal');
const metadata = require('../../../metadata/wrapper');
const { decodeVersionId } = require('../object/versioning');
const { parseTagXml } = s3middleware.tagging;
function makeTagQuery(tags) {
return Object.entries(tags)
.map(i => i.join('='))
.join('&');
}
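// Editor's illustrative sketch, not part of the file shown here: makeTagQuery
// serializes a tag map into the query-string form used for tag condition keys.
// The tag names and values below are hypothetical.
makeTagQuery({ color: 'blue', project: 'demo' }); // => 'color=blue&project=demo'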
function updateRequestContextsWithTags(request, requestContexts, apiMethod, log, cb) {
async.waterfall([
next => {
if (request.headers['x-amz-tagging']) {
return next(null, request.headers['x-amz-tagging']);
}
if (request.post && apiMethod === 'objectPutTagging') {
return parseTagXml(request.post, log, (err, tags) => {
if (err) {
log.trace('error parsing request tags');
return next(err);
}
return next(null, makeTagQuery(tags));
});
}
return next(null, null);
},
(requestTagsQuery, next) => {
const objectKey = request.objectKey;
const bucketName = request.bucketName;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return next(decodedVidResult);
}
const reqVersionId = decodedVidResult;
return metadata.getObjectMD(
bucketName, objectKey, { versionId: reqVersionId }, log, (err, objMD) => {
if (err) {
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (err.NoSuchKey) {
return next(null, requestTagsQuery, null);
}
log.trace('error getting request object tags');
return next(err);
}
const existingTagsQuery = objMD.tags && makeTagQuery(objMD.tags);
return next(null, requestTagsQuery, existingTagsQuery);
});
},
], (err, requestTagsQuery, existingTagsQuery) => {
if (err) {
log.trace('error processing tag condition key evaluation');
return cb(err);
}
// FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter
// eslint-disable-next-line no-restricted-syntax
for (const rc of requestContexts) {
rc.setNeedTagEval(true);
if (requestTagsQuery) {
rc.setRequestObjTags(requestTagsQuery);
}
if (existingTagsQuery) {
rc.setExistingObjTag(existingTagsQuery);
}
}
return cb();
});
}
function tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log, cb) {
if (!authorizationResults) {
return cb();
}
if (!authorizationResults.some(authRes => authRes.checkTagConditions)) {
return cb(null, authorizationResults);
}
return updateRequestContextsWithTags(request, requestContexts, apiMethod, log, err => {
if (err) {
return cb(err);
}
return auth.server.doAuth(request, log,
(err, userInfo, authResults) => cb(err, authResults), 's3', requestContexts);
});
}
module.exports = {
tagConditionKeyAuth,
updateRequestContextsWithTags,
makeTagQuery,
};

View File

@ -6,11 +6,10 @@ const acl = require('../../../metadata/acl');
const BucketInfo = require('arsenal').models.BucketInfo; const BucketInfo = require('arsenal').models.BucketInfo;
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const createKeyForUserBucket = require('./createKeyForUserBucket'); const createKeyForUserBucket = require('./createKeyForUserBucket');
const { parseBucketEncryptionHeaders } = require('./bucketEncryption');
const metadata = require('../../../metadata/wrapper'); const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper'); const kms = require('../../../kms/wrapper');
const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior'); const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
const { isServiceAccount } = require('../authorization/permissionChecks'); const { isServiceAccount } = require('../authorization/aclChecks');
const usersBucket = constants.usersBucket; const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket; const oldUsersBucket = constants.oldUsersBucket;
@ -24,7 +23,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
// Get new format usersBucket to see if it exists // Get new format usersBucket to see if it exists
return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => { return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) { if (err && !err.NoSuchBucket && !err.BucketAlreadyExists) {
return cb(err); return cb(err);
} }
const splitter = usersBucketAttrs ? const splitter = usersBucketAttrs ?
@ -41,7 +40,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
usersBucket : oldUsersBucket; usersBucket : oldUsersBucket;
return metadata.putObjectMD(usersBucketBeingCalled, key, return metadata.putObjectMD(usersBucketBeingCalled, key,
omVal, {}, log, err => { omVal, {}, log, err => {
if (err?.is?.NoSuchBucket) { if (err && err.NoSuchBucket) {
// There must be no usersBucket so createBucket // There must be no usersBucket so createBucket
// one using the new format // one using the new format
log.trace('users bucket does not exist, ' + log.trace('users bucket does not exist, ' +
@ -61,8 +60,9 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
// from getting a BucketAlreadyExists // from getting a BucketAlreadyExists
// error with respect // error with respect
// to the usersBucket. // to the usersBucket.
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver if (err &&
if (err && !err.BucketAlreadyExists) { err !==
errors.BucketAlreadyExists) {
log.error('error from metadata', { log.error('error from metadata', {
error: err, error: err,
}); });
@ -171,13 +171,11 @@ function createBucket(authInfo, bucketName, headers,
authInfo.getAccountDisplayName(); authInfo.getAccountDisplayName();
const creationDate = new Date().toJSON(); const creationDate = new Date().toJSON();
const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true'; const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true';
const headerObjectLock = headers['x-amz-bucket-object-lock-enabled']; const bucket = new BucketInfo(bucketName,
const objectLockEnabled canonicalID, ownerDisplayName, creationDate,
= headerObjectLock && headerObjectLock.toLowerCase() === 'true'; BucketInfo.currentModelVersion(), null, null, null,
const bucket = new BucketInfo(bucketName, canonicalID, ownerDisplayName, null, null, null, null, null, null, null, null,
creationDate, BucketInfo.currentModelVersion(), null, null, null, null, null, isNFSEnabled);
null, null, null, null, null, null, null, null, null, isNFSEnabled,
null, null, objectLockEnabled);
let locationConstraintVal = null; let locationConstraintVal = null;
if (locationConstraint) { if (locationConstraint) {
@ -193,15 +191,6 @@ function createBucket(authInfo, bucketName, headers,
bucket.setVersioningConfiguration({ Status: 'Enabled' }); bucket.setVersioningConfiguration({ Status: 'Enabled' });
} }
} }
if (objectLockEnabled) {
// default versioning configuration AWS sets
// when a bucket is created with object lock
const versioningConfiguration = {
Status: 'Enabled',
MfaDelete: 'Disabled',
};
bucket.setVersioningConfiguration(versioningConfiguration);
}
const parseAclParams = { const parseAclParams = {
headers, headers,
resourceType: 'bucket', resourceType: 'bucket',
@ -223,7 +212,6 @@ function createBucket(authInfo, bucketName, headers,
}, },
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) { getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
metadata.getBucket(bucketName, log, (err, data) => { metadata.getBucket(bucketName, log, (err, data) => {
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (err && err.NoSuchBucket) { if (err && err.NoSuchBucket) {
return callback(null, 'NoBucketYet'); return callback(null, 'NoBucketYet');
} }
@ -248,9 +236,8 @@ function createBucket(authInfo, bucketName, headers,
} }
const newBucketMD = results.prepareNewBucketMD; const newBucketMD = results.prepareNewBucketMD;
if (existingBucketMD === 'NoBucketYet') { if (existingBucketMD === 'NoBucketYet') {
const sseConfig = parseBucketEncryptionHeaders(headers);
return bucketLevelServerSideEncryption( return bucketLevelServerSideEncryption(
bucketName, sseConfig, log, bucketName, headers, log,
(err, sseInfo) => { (err, sseInfo) => {
if (err) { if (err) {
return cb(err); return cb(err);

View File

@ -3,7 +3,6 @@ const async = require('async');
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const abortMultipartUpload = require('../object/abortMultipartUpload'); const abortMultipartUpload = require('../object/abortMultipartUpload');
const { pushMetric } = require('../../../utapi/utilities');
const { splitter, oldSplitter, mpuBucketPrefix } = const { splitter, oldSplitter, mpuBucketPrefix } =
require('../../../../constants'); require('../../../../constants');
@ -16,7 +15,6 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
`${mpuBucketPrefix}${destinationBucketName}`; `${mpuBucketPrefix}${destinationBucketName}`;
return metadata.deleteBucket(mpuBucketName, log, err => { return metadata.deleteBucket(mpuBucketName, log, err => {
// If the mpu bucket does not exist, just move on // If the mpu bucket does not exist, just move on
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (err && err.NoSuchBucket) { if (err && err.NoSuchBucket) {
return cb(); return cb();
} }
@ -24,23 +22,14 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
}); });
} }
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) { function _deleteOngoingMPUs(authInfo, bucketName, mpus, log, cb) {
async.mapLimit(mpus, 1, (mpu, next) => { async.mapLimit(mpus, 1, (mpu, next) => {
const splitterChar = mpu.key.includes(oldSplitter) ? const splitterChar = mpu.key.includes(oldSplitter) ?
oldSplitter : splitter; oldSplitter : splitter;
// `overview${splitter}${objectKey}${splitter}${uploadId} // `overview${splitter}${objectKey}${splitter}${uploadId}
const [, objectKey, uploadId] = mpu.key.split(splitterChar); const [, objectKey, uploadId] = mpu.key.split(splitterChar);
abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log, abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
(err, destBucket, partSizeSum) => { next);
pushMetric('abortMultipartUpload', log, {
authInfo,
canonicalID: bucketMD.getOwner(),
bucket: bucketName,
keys: [objectKey],
byteLength: partSizeSum,
});
next(err);
}, request);
}, cb); }, cb);
} }
/** /**
@ -49,13 +38,11 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log,
* @param {object} bucketMD - bucket attributes/metadata * @param {object} bucketMD - bucket attributes/metadata
* @param {string} bucketName - bucket in which objectMetadata is stored * @param {string} bucketName - bucket in which objectMetadata is stored
* @param {string} canonicalID - account canonicalID of requester * @param {string} canonicalID - account canonicalID of requester
* @param {object} request - request object given by router
* including normalized headers
* @param {object} log - Werelogs logger * @param {object} log - Werelogs logger
* @param {function} cb - callback from async.waterfall in bucketDelete * @param {function} cb - callback from async.waterfall in bucketDelete
* @return {undefined} * @return {undefined}
*/ */
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) { function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
log.trace('deleting bucket from metadata'); log.trace('deleting bucket from metadata');
assert.strictEqual(typeof bucketName, 'string'); assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof canonicalID, 'string'); assert.strictEqual(typeof canonicalID, 'string');
@ -93,7 +80,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
log, (err, objectsListRes) => { log, (err, objectsListRes) => {
// If no shadow bucket ever created, no ongoing MPU's, so // If no shadow bucket ever created, no ongoing MPU's, so
// continue with deletion // continue with deletion
if (err?.is.NoSuchBucket) { if (err && err.NoSuchBucket) {
return next(); return next();
} }
if (err) { if (err) {
@ -102,7 +89,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
} }
if (objectsListRes.Contents.length) { if (objectsListRes.Contents.length) {
return _deleteOngoingMPUs(authInfo, bucketName, return _deleteOngoingMPUs(authInfo, bucketName,
bucketMD, objectsListRes.Contents, request, log, err => { objectsListRes.Contents, log, err => {
if (err) { if (err) {
return next(err); return next(err);
} }

View File

@ -1,255 +0,0 @@
const { errors } = require('arsenal');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const { parseString } = require('xml2js');
/**
* ServerSideEncryptionInfo - user configuration for server side encryption
* @typedef {Object} ServerSideEncryptionInfo
* @property {string} algorithm - Algorithm to use for encryption. Either AES256 or aws:kms.
* @property {string} masterKeyId - Key id for the kms key used to encrypt data keys.
* @property {string} configuredMasterKeyId - User configured master key id.
* @property {boolean} mandatory - Whether a default encryption policy has been enabled.
*/
/**
* @callback ServerSideEncryptionInfo~callback
* @param {Object} error - Instance of Arsenal error
* @param {ServerSideEncryptionInfo} - SSE configuration
*/
/**
* parseEncryptionXml - Parses and validates a ServerSideEncryptionConfiguration xml document
* @param {object} xml - ServerSideEncryptionConfiguration doc
* @param {object} log - logger
* @param {ServerSideEncryptionInfo~callback} cb - callback
* @returns {undefined}
*/
function parseEncryptionXml(xml, log, cb) {
return parseString(xml, (err, parsed) => {
if (err) {
log.trace('xml parsing failed', {
error: err,
method: 'parseEncryptionXml',
});
log.debug('invalid xml', { xml });
return cb(errors.MalformedXML);
}
if (!parsed
|| !parsed.ServerSideEncryptionConfiguration
|| !parsed.ServerSideEncryptionConfiguration.Rule) {
log.trace('error in sse config, invalid ServerSideEncryptionConfiguration section', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const { Rule } = parsed.ServerSideEncryptionConfiguration;
if (!Array.isArray(Rule)
|| Rule.length > 1
|| !Rule[0]
|| !Rule[0].ApplyServerSideEncryptionByDefault
|| !Rule[0].ApplyServerSideEncryptionByDefault[0]) {
log.trace('error in sse config, invalid ApplyServerSideEncryptionByDefault section', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const [encConfig] = Rule[0].ApplyServerSideEncryptionByDefault;
if (!encConfig.SSEAlgorithm || !encConfig.SSEAlgorithm[0]) {
log.trace('error in sse config, no SSEAlgorithm provided', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const [algorithm] = encConfig.SSEAlgorithm;
if (algorithm !== 'AES256' && algorithm !== 'aws:kms') {
log.trace('error in sse config, unknown SSEAlgorithm', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
const result = { algorithm, mandatory: true };
if (encConfig.KMSMasterKeyID) {
if (algorithm === 'AES256') {
log.trace('error in sse config, cannot specify KMSMasterKeyID when using AES256', {
method: 'parseEncryptionXml',
});
return cb(errors.InvalidArgument.customizeDescription(
'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms'));
}
if (!encConfig.KMSMasterKeyID[0] || typeof encConfig.KMSMasterKeyID[0] !== 'string') {
log.trace('error in sse config, invalid KMSMasterKeyID', {
method: 'parseEncryptionXml',
});
return cb(errors.MalformedXML);
}
result.configuredMasterKeyId = encConfig.KMSMasterKeyID[0];
}
return cb(null, result);
});
}
/**
* hydrateEncryptionConfig - Constructs a ServerSideEncryptionInfo object from arguments
* ensuring no invalid or undefined keys are added
*
* @param {string} algorithm - Algorithm to use for encryption. Either AES256 or aws:kms.
* @param {string} configuredMasterKeyId - User configured master key id.
* @param {boolean} [mandatory] - Whether a default encryption policy has been enabled.
* @returns {ServerSideEncryptionInfo} - SSE configuration
*/
function hydrateEncryptionConfig(algorithm, configuredMasterKeyId, mandatory = null) {
if (algorithm !== 'AES256' && algorithm !== 'aws:kms') {
return {
algorithm: null,
};
}
const sseConfig = { algorithm, mandatory };
if (algorithm === 'aws:kms' && configuredMasterKeyId) {
sseConfig.configuredMasterKeyId = configuredMasterKeyId;
}
if (mandatory !== null) {
sseConfig.mandatory = mandatory;
}
return sseConfig;
}
/**
* parseBucketEncryptionHeaders - retrieves bucket level sse configuration from request headers
* @param {object} headers - Request headers
* @returns {ServerSideEncryptionInfo} - SSE configuration
*/
function parseBucketEncryptionHeaders(headers) {
const sseAlgorithm = headers['x-amz-scal-server-side-encryption'];
const configuredMasterKeyId = headers['x-amz-scal-server-side-encryption-aws-kms-key-id'] || null;
return hydrateEncryptionConfig(sseAlgorithm, configuredMasterKeyId, true);
}
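// Editor's illustrative sketch, not part of the file shown here: the header
// value below is an example. A bucket-level legacy encryption header always
// produces a mandatory default configuration.
parseBucketEncryptionHeaders({ 'x-amz-scal-server-side-encryption': 'AES256' });
// => { algorithm: 'AES256', mandatory: true }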
/**
* parseObjectEncryptionHeaders - retrieves object level sse configuration from request headers
* @param {object} headers - Request headers
* @returns {ServerSideEncryptionInfo} - SSE configuration
*/
function parseObjectEncryptionHeaders(headers) {
const sseAlgorithm = headers['x-amz-server-side-encryption'];
const configuredMasterKeyId = headers['x-amz-server-side-encryption-aws-kms-key-id'] || null;
if (sseAlgorithm && sseAlgorithm !== 'AES256' && sseAlgorithm !== 'aws:kms') {
return {
error: errors.InvalidArgument.customizeDescription('The encryption method specified is not supported'),
};
}
if (sseAlgorithm !== 'aws:kms' && configuredMasterKeyId) {
return {
error: errors.InvalidArgument.customizeDescription(
'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms'),
};
}
return { objectSSE: hydrateEncryptionConfig(sseAlgorithm, configuredMasterKeyId) };
}
/**
* createDefaultBucketEncryptionMetadata - Creates master key and sets up default server side encryption configuration
* @param {BucketInfo} bucket - bucket metadata
* @param {object} log - werelogs logger
* @param {ServerSideEncryptionInfo~callback} cb - callback
* @returns {undefined}
*/
function createDefaultBucketEncryptionMetadata(bucket, log, cb) {
return kms.bucketLevelEncryption(
bucket.getName(),
{ algorithm: 'AES256', mandatory: false },
log,
(error, sseConfig) => {
if (error) {
return cb(error);
}
bucket.setServerSideEncryption(sseConfig);
return metadata.updateBucket(bucket.getName(), bucket, log, err => cb(err, sseConfig));
});
}
/**
* getObjectSSEConfiguration - resolves the server side encryption configuration to apply to an object
* @param {object} headers - request headers
* @param {BucketInfo} bucket - BucketInfo model
* @param {object} log - werelogs logger
* @param {ServerSideEncryptionInfo~callback} cb - callback
* @returns {undefined}
*/
function getObjectSSEConfiguration(headers, bucket, log, cb) {
const bucketSSE = bucket.getServerSideEncryption();
const { error, objectSSE } = parseObjectEncryptionHeaders(headers);
if (error) {
return cb(error);
}
// If a per object sse algo has been passed through
// x-amz-server-side-encryption
if (objectSSE.algorithm) {
// If aws:kms and a custom key id
// pass it through without updating the bucket md
if (objectSSE.algorithm === 'aws:kms' && objectSSE.configuredMasterKeyId) {
return cb(null, objectSSE);
}
// If the client has not specified a key id,
// and we have a default config, then we reuse
// it and pass it through
if (!objectSSE.configuredMasterKeyId && bucketSSE) {
// The default config's algorithm is overridden with the one passed in the
// request headers. Our implementations of AES256 and aws:kms are the
// same underneath, so this is only a cosmetic change.
const sseConfig = Object.assign({}, bucketSSE, { algorithm: objectSSE.algorithm });
return cb(null, sseConfig);
}
// If the client has not specified a key id, and we
// don't have a default config, generate it
if (!objectSSE.configuredMasterKeyId && !bucketSSE) {
return createDefaultBucketEncryptionMetadata(bucket, log, (error, sseConfig) => {
if (error) {
return cb(error);
}
// Override the algorithm, for the same reasons as above.
Object.assign(sseConfig, { algorithm: objectSSE.algorithm });
return cb(null, sseConfig);
});
}
}
// If the bucket has a default encryption config, and it is mandatory
// (created with putBucketEncryption or legacy headers)
// pass it through
if (bucketSSE && bucketSSE.mandatory) {
return cb(null, bucketSSE);
}
// No encryption config
return cb(null, null);
}
module.exports = {
createDefaultBucketEncryptionMetadata,
getObjectSSEConfiguration,
hydrateEncryptionConfig,
parseEncryptionXml,
parseBucketEncryptionHeaders,
parseObjectEncryptionHeaders,
};

View File

@ -1,5 +1,4 @@
const invisiblyDelete = require('./invisiblyDelete'); const invisiblyDelete = require('./invisiblyDelete');
const constants = require('../../../../constants');
/** /**
* Checks whether to proceed with a request based on the bucket flags * Checks whether to proceed with a request based on the bucket flags
@ -9,17 +8,9 @@ const constants = require('../../../../constants');
* @return {boolean} true if the bucket should be shielded, false otherwise * @return {boolean} true if the bucket should be shielded, false otherwise
*/ */
function bucketShield(bucket, requestType) { function bucketShield(bucket, requestType) {
const invisiblyDeleteRequests = constants.bucketOwnerActions.concat( const invisiblyDeleteRequests = ['bucketGet', 'bucketHead',
[ 'bucketGetACL', 'bucketOwnerAction', 'objectGet', 'objectGetACL',
'bucketGet', 'objectHead', 'objectPutACL', 'objectDelete'];
'bucketHead',
'bucketGetACL',
'objectGet',
'objectGetACL',
'objectHead',
'objectPutACL',
'objectDelete',
]);
if (invisiblyDeleteRequests.indexOf(requestType) > -1 && if (invisiblyDeleteRequests.indexOf(requestType) > -1 &&
bucket.hasDeletedFlag()) { bucket.hasDeletedFlag()) {
invisiblyDelete(bucket.getName(), bucket.getOwner()); invisiblyDelete(bucket.getName(), bucket.getOwner());
@ -30,9 +21,6 @@ function bucketShield(bucket, requestType) {
// Otherwise return an error to the client // Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) && if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' && (requestType !== 'objectPut' &&
requestType !== 'initiateMultipartUpload' &&
requestType !== 'objectPutPart' &&
requestType !== 'completeMultipartUpload' &&
requestType !== 'bucketPutACL' && requestType !== 'bucketPutACL' &&
requestType !== 'bucketDelete')) { requestType !== 'bucketDelete')) {
return true; return true;

View File

@ -11,16 +11,15 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => { metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
// If the object representing the bucket is not in the // If the object representing the bucket is not in the
// users bucket just continue // users bucket just continue
if (error?.is.NoSuchKey) { if (error && error.NoSuchKey) {
return cb(null); return cb(null);
// BACKWARDS COMPATIBILITY: Remove this once no longer // BACKWARDS COMPATIBILITY: Remove this once no longer
// have old user bucket format // have old user bucket format
} else if (error?.is.NoSuchBucket) { } else if (error && error.NoSuchBucket) {
const keyForUserBucket2 = createKeyForUserBucket(canonicalID, const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
oldSplitter, bucketName); oldSplitter, bucketName);
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2, return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
{}, log, error => { {}, log, error => {
// TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
if (error && !error.NoSuchKey) { if (error && !error.NoSuchKey) {
log.error('from metadata while deleting user bucket', log.error('from metadata while deleting user bucket',
{ error }); { error });

View File

@ -1,37 +0,0 @@
const { errors, models } = require('arsenal');
const { NotificationConfiguration } = models;
const { config } = require('../../../Config');
function getNotificationConfiguration(parsedXml) {
const notifConfig = new NotificationConfiguration(parsedXml).getValidatedNotificationConfiguration();
// if notifConfig is empty object, effectively delete notification configuration
if (notifConfig.error || Object.keys(notifConfig).length === 0) {
return notifConfig;
}
if (!config.bucketNotificationDestinations) {
return { error: errors.InvalidArgument.customizeDescription(
'Unable to validate the following destination configurations') };
}
const targets = new Set(config.bucketNotificationDestinations.map(t => t.resource));
const notifConfigTargets = notifConfig.queueConfig.map(t => t.queueArn.split(':')[5]);
// getting invalid targets
const invalidTargets = [];
notifConfigTargets.forEach((t, i) => {
if (!targets.has(t)) {
invalidTargets.push({
ArgumentName: notifConfig.queueConfig[i].queueArn,
ArgumentValue: 'The destination queue does not exist',
});
}
});
if (invalidTargets.length > 0) {
const errDesc = 'Unable to validate the following destination configurations';
let error = errors.InvalidArgument.customizeDescription(errDesc);
error = error.addMetadataEntry('invalidArguments', invalidTargets);
return { error };
}
return notifConfig;
}
module.exports = getNotificationConfiguration;

View File

@ -3,7 +3,7 @@ const async = require('async');
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper'); const { data } = require('../../../data/wrapper');
const locationConstraintCheck = require('../object/locationConstraintCheck'); const locationConstraintCheck = require('../object/locationConstraintCheck');
const { standardMetadataValidateBucketAndObj } = const { metadataValidateBucketAndObj } =
require('../../../metadata/metadataUtils'); require('../../../metadata/metadataUtils');
const services = require('../../../services'); const services = require('../../../services');
@ -14,19 +14,17 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
bucketName, bucketName,
objectKey, objectKey,
uploadId, uploadId,
preciseRequestType: request.apiMethods || 'multipartDelete', requestType: 'deleteMPU',
request,
}; };
// For validating the request at the destinationBucket level // For validating the request at the destinationBucket level
// params are the same as validating at the MPU level // params are the same as validating at the MPU level
// but the requestType is the more general 'objectDelete' // but the requestType is the more general 'objectDelete'
const metadataValParams = Object.assign({}, metadataValMPUparams); const metadataValParams = Object.assign({}, metadataValMPUparams);
metadataValParams.requestType = 'objectPut'; metadataValParams.requestType = 'objectPut';
const authzIdentityResult = request ? request.actionImplicitDenies : false;
async.waterfall([ async.waterfall([
function checkDestBucketVal(next) { function checkDestBucketVal(next) {
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log, metadataValidateBucketAndObj(metadataValParams, log,
(err, destinationBucket) => { (err, destinationBucket) => {
if (err) { if (err) {
return next(err, destinationBucket); return next(err, destinationBucket);
@ -57,14 +55,9 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
next) { next) {
const location = mpuOverviewObj.controllingLocationConstraint; const location = mpuOverviewObj.controllingLocationConstraint;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.abortMPU(objectKey, uploadId, location, bucketName, return data.abortMPU(objectKey, uploadId, location, bucketName,
request, destBucket, locationConstraintCheck, log, request, destBucket, locationConstraintCheck, log,
(err, skipDataDelete) => { (err, skipDataDelete) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityAuthzResults;
if (err) { if (err) {
return next(err, destBucket); return next(err, destBucket);
} }

View File

@ -1,25 +0,0 @@
const { errors } = require('arsenal');
const { maxHttpHeadersSize } = require('../../../../constants');
/**
* Checks the size of the HTTP headers
* @param {object} requestHeaders - HTTP request headers
* @return {object} object with error or null
*/
function checkHttpHeadersSize(requestHeaders) {
let httpHeadersSize = 0;
Object.keys(requestHeaders).forEach(header => {
httpHeadersSize += Buffer.byteLength(header, 'utf8') +
Buffer.byteLength(requestHeaders[header], 'utf8');
});
if (httpHeadersSize > maxHttpHeadersSize) {
return {
httpHeadersSizeError: errors.HttpHeadersTooLarge,
};
}
return {};
}
module.exports = checkHttpHeadersSize;

View File

@ -1,38 +0,0 @@
const { maximumMetaHeadersSize,
invalidObjectUserMetadataHeader } = require('../../../../constants');
/**
* Checks the size of the user metadata in the object metadata and removes
* them from the response if the size of the user metadata is larger than
* the maximum size allowed. A custom metadata key is added to the response
* with the number of user metadata keys not returned as its value
* @param {object} responseMetadata - response metadata
* @return {object} responseMetaHeaders headers with object metadata to include
* in response to client
*/
function checkUserMetadataSize(responseMetadata) {
let userMetadataSize = 0;
// collect the user metadata keys from the object metadata
const userMetadataHeaders = Object.keys(responseMetadata)
.filter(key => key.startsWith('x-amz-meta-'));
// compute the size of all user metadata key and its value
userMetadataHeaders.forEach(header => {
userMetadataSize += header.length + responseMetadata[header].length;
});
// check the size computed against the maximum allowed
// if the computed size is greater, then remove all the
// user metadata from the response object
if (userMetadataSize > maximumMetaHeadersSize) {
const md = Object.assign({}, responseMetadata);
userMetadataHeaders.forEach(header => {
delete md[header];
});
// add the prescribed/custom metadata with number of user metadata
// as its value
md[invalidObjectUserMetadataHeader] = userMetadataHeaders.length;
return md;
}
return responseMetadata;
}
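// Editor's illustrative sketch, not part of the file shown here: when the
// combined x-amz-meta-* size stays under maximumMetaHeadersSize, the response
// metadata is returned untouched; the headers below are hypothetical.
checkUserMetadataSize({ 'content-length': '10', 'x-amz-meta-color': 'blue' });
// => same object, user metadata kept (size is well under the limit)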
module.exports = checkUserMetadataSize;

View File

@ -1,247 +0,0 @@
/*
* Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020
*/
const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Get response header "x-amz-restore"
* Called by objectHead.js
* @param {object} objMD - object's metadata
* @returns {string|undefined} x-amz-restore
*/
function getAmzRestoreResHeader(objMD) {
if (objMD.archive &&
objMD.archive.restoreRequestedAt &&
!objMD.archive.restoreCompletedAt) {
// Avoid race condition by relying on the `archive` MD of the object
// and return the right header after a RESTORE request.
// eslint-disable-next-line
return `ongoing-request="true"`;
}
if (objMD['x-amz-restore']) {
if (objMD['x-amz-restore']['expiry-date']) {
const utcDateTime = new Date(objMD['x-amz-restore']['expiry-date']).toUTCString();
// eslint-disable-next-line
return `ongoing-request="${objMD['x-amz-restore']['ongoing-request']}", expiry-date="${utcDateTime}"`;
}
}
return undefined;
}
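// Editor's illustrative sketch, not part of the file shown here: while a
// restore is in flight only the ongoing-request flag is reported; the objMD
// shape below is a minimal hypothetical example.
getAmzRestoreResHeader({ archive: { restoreRequestedAt: '2020-09-11T00:00:00Z' } });
// => 'ongoing-request="true"'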
/**
* Check if restore can be done.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/
function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage;
// not in cold storage means the location's cold flag either does not exist or is explicitly false
log.debug('The bucket of the object is not in a cold storage location.',
{
isLocationCold,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored,
// i.e. archive.restoreRequestedAt exists and archive.restoreCompletedAt does not yet exist
log.debug('The object is currently being restored.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.RestoreAlreadyInProgress;
}
return undefined;
}
/**
* Check if "put version id" is allowed
*
* @param {ObjectMD} objMD - object metadata
* @param {string} versionId - object's version id
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} - undefined if "put version id" is allowed
*/
function validatePutVersionId(objMD, versionId, log) {
if (!objMD) {
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
log.error('error no object metadata found', { method: 'validatePutVersionId', versionId });
return err;
}
if (objMD.isDeleteMarker) {
log.error('version is a delete marker', { method: 'validatePutVersionId', versionId });
return errors.MethodNotAllowed;
}
const isLocationCold = locationConstraints[objMD.dataStoreName]?.isCold;
if (!isLocationCold) {
log.error('The object data is not stored in a cold storage location.',
{
isLocationCold,
dataStoreName: objMD.dataStoreName,
method: 'validatePutVersionId',
});
return errors.InvalidObjectState;
}
// make sure object archive restoration is in progress
// NOTE: we do not use putObjectVersion to update the restoration period.
if (!objMD.archive || !objMD.archive.restoreRequestedAt || !objMD.archive.restoreRequestedDays
|| objMD.archive.restoreCompletedAt || objMD.archive.restoreWillExpireAt) {
log.error('object archive restoration is not in progress',
{ method: 'validatePutVersionId', versionId });
return errors.InvalidObjectState;
}
return undefined;
}
/**
* Check if the object is already restored, and update the expiration date accordingly:
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored
*/
function _updateObjectExpirationDate(objectMD, log) {
// Check if restoreCompletedAt field exists
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
// checked earlier in the process, so checking again here would create weird states
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
log.debug('The restore status of the object.', {
isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored'
});
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored;
}
/**
* update restore expiration date.
*
* @param {ObjectMD} objectMD - objectMD instance
* @param {object} restoreParam - restore param
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} internal error if object MD is not valid
*
*/
function _updateRestoreInfo(objectMD, restoreParam, log) {
if (!objectMD.archive) {
log.debug('objectMD.archive doesn\'t exist', {
objectMD,
method: '_updateRestoreInfo'
});
return errors.InternalError.customizeDescription('Archive metadata is missing.');
}
/* eslint-disable no-param-reassign */
objectMD.archive.restoreRequestedAt = new Date();
objectMD.archive.restoreRequestedDays = restoreParam.days;
objectMD.originOp = 's3:ObjectRestore:Post';
/* eslint-enable no-param-reassign */
if (!ObjectMDArchive.isValid(objectMD.archive)) {
log.debug('archive is not valid', {
archive: objectMD.archive,
method: '_updateRestoreInfo'
});
return errors.InternalError.customizeDescription('Invalid archive metadata.');
}
return undefined;
}
/**
* Start restoring an object.
* If x-amz-restore does not exist, add it to objectMD (x-amz-restore = false),
* calculate the restore expiry-date and add it to objectMD.
* Called by objectRestore.js
*
* @param {ObjectMD} objectMD - ObjectMD instance
* @param {object} restoreParam - restore request parameters (e.g. days)
* @param {object} log - werelogs logger
* @param {function} cb - callback
* @return {undefined}
*
*/
function startRestore(objectMD, restoreParam, log, cb) {
log.info('Validating if restore can be done or not.');
const checkResultError = _validateStartRestore(objectMD, log);
if (checkResultError) {
return cb(checkResultError);
}
log.info('Updating restore information.');
const updateResultError = _updateRestoreInfo(objectMD, restoreParam, log);
if (updateResultError) {
return cb(updateResultError);
}
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
return cb(null, isObjectAlreadyRestored);
}
/**
* checks if object data is available or if it's in cold storage
* @param {ObjectMD} objMD Object metadata
* @returns {ArsenalError|null} error if object data is not available
*/
function verifyColdObjectAvailable(objMD) {
// return error when object is cold
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
const err = errors.InvalidObjectState
.customizeDescription('The operation is not valid for the object\'s storage class');
return err;
}
return null;
}
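// Editor's illustrative sketch, not part of the file shown here: an object
// whose archive metadata has no restoreRequestedAt is still in the cold
// backend, so reads are rejected with InvalidObjectState.
verifyColdObjectAvailable({ archive: {} });          // => InvalidObjectState error
verifyColdObjectAvailable({ 'content-length': 10 }); // => null (no archive metadata)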
module.exports = {
startRestore,
getAmzRestoreResHeader,
validatePutVersionId,
verifyColdObjectAvailable,
};

View File

@ -5,9 +5,10 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper'); const { data } = require('../../../data/wrapper');
const services = require('../../../services'); const services = require('../../../services');
const logger = require('../../../utilities/logger');
const { dataStore } = require('./storeObject'); const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck'); const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning'); const { versioningPreprocessing } = require('./versioning');
const removeAWSChunked = require('./removeAWSChunked'); const removeAWSChunked = require('./removeAWSChunked');
const getReplicationInfo = require('./getReplicationInfo'); const getReplicationInfo = require('./getReplicationInfo');
const { config } = require('../../../Config'); const { config } = require('../../../Config');
@ -20,7 +21,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure or GCP.'; 'a versioned object to a location-constraint of type Azure or GCP.';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle, function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, log, requestMethod, callback) { metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
services.metadataStoreObject(bucketName, dataGetInfo, services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, result) => { cipherBundle, metadataStoreParams, (err, result) => {
if (err) { if (err) {
@ -30,7 +31,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
const newDataStoreName = Array.isArray(dataGetInfo) ? const newDataStoreName = Array.isArray(dataGetInfo) ?
dataGetInfo[0].dataStoreName : null; dataGetInfo[0].dataStoreName : null;
return data.batchDelete(dataToDelete, requestMethod, return data.batchDelete(dataToDelete, requestMethod,
newDataStoreName, log, err => callback(err, result)); newDataStoreName, deleteLog, err => callback(err, result));
} }
return callback(null, result); return callback(null, result);
}); });
@ -50,9 +51,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* @param {(object|null)} streamingV4Params - if v4 auth, object containing * @param {(object|null)} streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and * accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable) * credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance * @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function * @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) - * @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version * result.contentMD5 - content md5 of new object or version
@ -60,10 +59,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/ */
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params, canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
overheadField, log, originOp, callback) { log, callback) {
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
const size = isDeleteMarker ? 0 : request.parsedContentLength; const size = isDeleteMarker ? 0 : request.parsedContentLength;
// although the request method may actually be 'DELETE' if creating a // although the request method may actually be 'DELETE' if creating a
// delete marker, for our purposes we consider this to be a 'PUT' // delete marker, for our purposes we consider this to be a 'PUT'
@ -116,24 +112,8 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
isDeleteMarker, isDeleteMarker,
replicationInfo: getReplicationInfo( replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo), objectKey, bucketMD, false, size, null, null, authInfo),
overheadField,
log, log,
}; };
// For Azure BlobStorage API compatibility
// If an object already exists, copy/repair its creation-time:
// creation-time must remain static after an object is created
// --> EVEN FOR VERSIONS <--
if (objMD) {
if (objMD['creation-time']) {
metadataStoreParams.creationTime = objMD['creation-time'];
} else {
// If creation-time is not set (for old objects)
// fall back to the last modified and store it back to the db
metadataStoreParams.creationTime = objMD['last-modified'];
}
}
if (!isDeleteMarker) { if (!isDeleteMarker) {
metadataStoreParams.contentType = request.headers['content-type']; metadataStoreParams.contentType = request.headers['content-type'];
metadataStoreParams.cacheControl = request.headers['cache-control']; metadataStoreParams.cacheControl = request.headers['cache-control'];
@ -143,13 +123,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
removeAWSChunked(request.headers['content-encoding']); removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires; metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging']; metadataStoreParams.tagging = request.headers['x-amz-tagging'];
metadataStoreParams.originOp = originOp;
const defaultObjectLockConfiguration
= bucketMD.getObjectLockConfiguration();
if (defaultObjectLockConfiguration) {
metadataStoreParams.defaultRetention
= defaultObjectLockConfiguration;
}
} }
// if creating new delete marker and there is an existing object, copy // if creating new delete marker and there is an existing object, copy
@ -158,7 +131,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
request.headers[constants.objectLocationConstraintHeader] = request.headers[constants.objectLocationConstraintHeader] =
objMD[constants.objectLocationConstraintHeader]; objMD[constants.objectLocationConstraintHeader];
metadataStoreParams.originOp = originOp;
} }
const backendInfoObj = const backendInfoObj =
@ -189,49 +161,35 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
} }
} }
if (objMD && objMD.uploadId) {
metadataStoreParams.oldReplayId = objMD.uploadId;
}
/* eslint-disable camelcase */ /* eslint-disable camelcase */
const dontSkipBackend = externalBackends; const dontSkipBackend = externalBackends;
/* eslint-enable camelcase */ /* eslint-enable camelcase */
const requestLogger =
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
const mdOnlyHeader = request.headers['x-amz-meta-mdonly']; const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
const mdOnlySize = request.headers['x-amz-meta-size']; const mdOnlySize = request.headers['x-amz-meta-size'];
return async.waterfall([ return async.waterfall([
function storeData(next) { function storeData(next) {
if (size === 0) { if (size === 0 && !dontSkipBackend[locationType]) {
if (!dontSkipBackend[locationType]) { metadataStoreParams.contentMD5 = constants.emptyFileMd5;
metadataStoreParams.contentMD5 = constants.emptyFileMd5; return next(null, null, null);
return next(null, null, null); }
} // Handle mdOnlyHeader as a metadata only operation. If
// Handle mdOnlyHeader as a metadata only operation. If // the object in question is actually 0 byte or has a body size
// the object in question is actually 0 byte or has a body size // then handle normally.
// then handle normally. if (mdOnlyHeader === 'true' && mdOnlySize > 0 && size === 0) {
if (mdOnlyHeader === 'true' && mdOnlySize > 0) { log.debug('metadata only operation x-amz-meta-mdonly');
log.debug('metadata only operation x-amz-meta-mdonly'); const md5 = new Buffer(request.headers
const md5 = request.headers['x-amz-meta-md5chksum'] ['x-amz-meta-md5chksum'], 'base64').toString('hex');
? new Buffer(request.headers['x-amz-meta-md5chksum'], const dataGetInfo = {
'base64').toString('hex') : null; key: objectKey,
const numParts = request.headers['x-amz-meta-md5numparts']; dataStoreName: location,
let _md5; dataStoreType: locationType,
if (numParts === undefined) { dataStoreVersionId: request.headers['x-amz-version-id'],
_md5 = md5; dataStoreMD5: md5,
} else { };
_md5 = `${md5}-${numParts}`; return next(null, dataGetInfo, md5);
}
const versionId = request.headers['x-amz-meta-version-id'];
const dataGetInfo = {
key: objectKey,
dataStoreName: location,
dataStoreType: locationType,
dataStoreVersionId: versionId,
dataStoreMD5: _md5,
};
return next(null, dataGetInfo, _md5);
}
} }
return dataStore(objectKeyContext, cipherBundle, request, size, return dataStore(objectKeyContext, cipherBundle, request, size,
streamingV4Params, backendInfo, log, next); streamingV4Params, backendInfo, log, next);
@ -264,17 +222,12 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
return next(null, dataGetInfoArr); return next(null, dataGetInfoArr);
}, },
function getVersioningInfo(infoArr, next) { function getVersioningInfo(infoArr, next) {
// if x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
if (isPutVersion) {
const options = overwritingVersioning(objMD, metadataStoreParams);
return process.nextTick(() => next(null, options, infoArr));
}
return versioningPreprocessing(bucketName, bucketMD, return versioningPreprocessing(bucketName, bucketMD,
metadataStoreParams.objectKey, objMD, log, (err, options) => { metadataStoreParams.objectKey, objMD, log, (err, options) => {
if (err) { if (err) {
// TODO: check AWS error when user requested a specific // TODO: check AWS error when user requested a specific
// version before any versions have been put // version before any versions have been put
const logLvl = err.is.BadRequest ? const logLvl = err === errors.BadRequest ?
'debug' : 'error'; 'debug' : 'error';
log[logLvl]('error getting versioning info', { log[logLvl]('error getting versioning info', {
error: err, error: err,
@ -288,13 +241,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
metadataStoreParams.versionId = options.versionId; metadataStoreParams.versionId = options.versionId;
metadataStoreParams.versioning = options.versioning; metadataStoreParams.versioning = options.versioning;
metadataStoreParams.isNull = options.isNull; metadataStoreParams.isNull = options.isNull;
metadataStoreParams.deleteNullKey = options.deleteNullKey; metadataStoreParams.nullVersionId = options.nullVersionId;
if (options.extraMD) {
Object.assign(metadataStoreParams, options.extraMD);
}
return _storeInMDandDeleteData(bucketName, infoArr, return _storeInMDandDeleteData(bucketName, infoArr,
cipherBundle, metadataStoreParams, cipherBundle, metadataStoreParams,
options.dataToDelete, log, requestMethod, next); options.dataToDelete, requestLogger, requestMethod, next);
}, },
], callback); ], callback);
} }
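A minimal standalone sketch of the metadata-only (x-amz-meta-mdonly) branch shown in the waterfall above: the stored MD5 is derived from the x-amz-meta-md5chksum header instead of streaming a payload to a data backend. The function name and shape below are illustrative, not part of the codebase.
function buildMdOnlyDataGetInfo(headers, objectKey, location, locationType) {
    // Buffer.from() is the non-deprecated spelling of `new Buffer(...)`
    const md5 = Buffer.from(headers['x-amz-meta-md5chksum'], 'base64')
        .toString('hex');
    return {
        key: objectKey,
        dataStoreName: location,
        dataStoreType: locationType,
        dataStoreVersionId: headers['x-amz-version-id'],
        dataStoreMD5: md5,
    };
}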


@ -1,18 +0,0 @@
/**
* _bucketRequiresOplogUpdate - whether deleting an object from the bucket requires an oplog update
* @param {BucketInfo} bucket - bucket object
* @return {boolean} whether objects require oplog updates on deletion, or not
*/
function _bucketRequiresOplogUpdate(bucket) {
// Default behavior is to require an oplog update
if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
return true;
}
// If the bucket has lifecycle configuration or notification configuration
// set, we also require an oplog update
return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
}
module.exports = {
_bucketRequiresOplogUpdate,
};
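A brief usage sketch (the bucket stub is illustrative, not the real BucketInfo model): deletions only skip the oplog when the bucket exposes both getters and neither configuration is set.
const bucketStub = {
    getLifecycleConfiguration: () => null,
    getNotificationConfiguration: () => null,
};
_bucketRequiresOplogUpdate(bucketStub); // => null (falsy: oplog update can be skipped)
_bucketRequiresOplogUpdate({});         // => true (getters missing: default to updating)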


@ -1,140 +0,0 @@
const { supportedLifecycleRules } = require('arsenal').constants;
const { LifecycleConfiguration } = require('arsenal').models;
const {
LifecycleDateTime,
LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
const {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier,
expireOneDayEarlier,
timeProgressionFactor,
});
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
}
function formatExpirationHeader(date, id) {
return `expiry-date="${date}", rule-id="${encodeURIComponent(id)}"`;
}
// format: x-amz-expiration: expiry-date="Fri, 21 Dec 2012 00:00:00 GMT", rule-id="id"
const AMZ_EXP_HEADER = 'x-amz-expiration';
// format: x-amz-abort-date: "Fri, 21 Dec 2012 00:00:00 GMT"
const AMZ_ABORT_DATE_HEADER = 'x-amz-abort-date';
// format: x-amz-abort-rule-id: "rule id"
const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id';
function _generateExpHeadersObjects(rules, params, datetime) {
const tags = {
TagSet: params.tags
? Object.keys(params.tags)
.map(key => ({ Key: key, Value: params.tags[key] }))
: [],
};
const objectInfo = { Key: params.key };
const filteredRules = lifecycleUtils.filterRules(rules, objectInfo, tags);
const applicable = lifecycleUtils.getApplicableRules(filteredRules, objectInfo, datetime);
if (applicable.Expiration) {
const rule = applicable.Expiration;
if (rule.Days === undefined && rule.Date === undefined) {
return {};
}
if (rule.Date) {
return {
[AMZ_EXP_HEADER]: formatExpirationHeader(rule.Date, rule.ID),
};
}
const date = calculateDate(params.date, rule.Days, datetime);
return {
[AMZ_EXP_HEADER]: formatExpirationHeader(date.toUTCString(), rule.ID),
};
}
return {};
}
function _generateExpHeadresMPU(rules, params, datetime) {
const noTags = { TagSet: [] };
const objectInfo = { Key: params.key };
const filteredRules = lifecycleUtils.filterRules(rules, objectInfo, noTags);
const applicable = lifecycleUtils.getApplicableRules(filteredRules, {}, datetime);
if (applicable.AbortIncompleteMultipartUpload) {
const rule = applicable.AbortIncompleteMultipartUpload;
const date = calculateDate(
params.date,
rule.DaysAfterInitiation,
datetime
);
return {
[AMZ_ABORT_ID_HEADER]: encodeURIComponent(rule.ID),
[AMZ_ABORT_DATE_HEADER]: date.toUTCString(),
};
}
return {};
}
/**
* generate response expiration headers
* @param {object} params - params
* @param {LifecycleDateTime} datetime - lifecycle datetime object
* @returns {object} - expiration response headers
*/
function generateExpirationHeaders(params, datetime) {
const { lifecycleConfig, objectParams, mpuParams, isVersionedReq } = params;
if (!lifecycleConfig || isVersionedReq) {
return {};
}
const lcfg = LifecycleConfiguration.getConfigJson(lifecycleConfig);
if (objectParams) {
return _generateExpHeadersObjects(lcfg.Rules, objectParams, datetime);
}
if (mpuParams) {
return _generateExpHeadresMPU(lcfg.Rules, mpuParams, datetime);
}
return {};
}
/**
* set response expiration headers to target header object
* @param {object} headers - target header object
* @param {object} params - params
* @returns {undefined}
*/
function setExpirationHeaders(headers, params) {
const expHeaders = generateExpirationHeaders(params, lifecycleDateTime);
Object.assign(headers, expHeaders);
}
module.exports = {
lifecycleDateTime,
generateExpirationHeaders,
setExpirationHeaders,
};
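Standalone sketch of the x-amz-expiration value the helpers above produce for a rule expressed in days; the rule, dates, and msPerDay constant are illustrative (the real code scales days through config.getTimeOptions()).
const msPerDay = 24 * 60 * 60 * 1000;
function expirationHeaderFor(lastModified, days, ruleId) {
    const date = new Date(new Date(lastModified).getTime() + (days * msPerDay));
    return `expiry-date="${date.toUTCString()}", rule-id="${encodeURIComponent(ruleId)}"`;
}
// expirationHeaderFor('2012-12-14T00:00:00.000Z', 7, 'id')
// => 'expiry-date="Fri, 21 Dec 2012 00:00:00 GMT", rule-id="id"'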


@ -1,6 +1,6 @@
const s3config = require('../../../Config').config; const s3config = require('../../../Config').config;
const { isServiceAccount, getServiceAccountProperties } = const { isServiceAccount, getServiceAccountProperties } =
require('../authorization/permissionChecks'); require('../authorization/aclChecks');
const { replicationBackends } = require('arsenal').constants; const { replicationBackends } = require('arsenal').constants;
function _getBackend(objectMD, site) { function _getBackend(objectMD, site) {
@ -23,12 +23,12 @@ function _getStorageClasses(rule) {
} }
const { replicationEndpoints } = s3config; const { replicationEndpoints } = s3config;
// If no storage class, use the given default endpoint or the sole endpoint // If no storage class, use the given default endpoint or the sole endpoint
if (replicationEndpoints.length > 0) { if (replicationEndpoints.length > 1) {
const endPoint = const endPoint =
replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0]; replicationEndpoints.find(endpoint => endpoint.default);
return [endPoint.site]; return [endPoint.site];
} }
return undefined; return [replicationEndpoints[0].site];
} }
function _getReplicationInfo(rule, replicationConfig, content, operationType, function _getReplicationInfo(rule, replicationConfig, content, operationType,
@ -36,9 +36,6 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
const storageTypes = []; const storageTypes = [];
const backends = []; const backends = [];
const storageClasses = _getStorageClasses(rule); const storageClasses = _getStorageClasses(rule);
if (!storageClasses) {
return undefined;
}
storageClasses.forEach(storageClass => { storageClasses.forEach(storageClass => {
const storageClassName = const storageClassName =
storageClass.endsWith(':preferred_read') ? storageClass.endsWith(':preferred_read') ?
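One side of this hunk picks the endpoint flagged as default and falls back to the first configured endpoint; a standalone sketch of that selection (the endpoint list is illustrative):
function defaultReplicationSite(replicationEndpoints) {
    if (replicationEndpoints.length > 0) {
        const endPoint =
            replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0];
        return [endPoint.site];
    }
    return undefined;
}
// defaultReplicationSite([{ site: 'a' }, { site: 'b', default: true }]) => ['b']
// defaultReplicationSite([{ site: 'a' }])                               => ['a']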


@ -1,190 +0,0 @@
const { versioning } = require('arsenal');
const versionIdUtils = versioning.VersionID;
const { lifecycleListing } = require('../../../../constants');
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = lifecycleListing;
function _makeTags(tags) {
const res = [];
Object.entries(tags).forEach(([key, value]) =>
res.push(
{
Key: key,
Value: value,
}
));
return res;
}
function processCurrents(bucketName, listParams, isBucketVersioned, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
IsLatest: true, // for compatibility with AWS ListObjectVersions.
DataStoreName: v.dataStoreName,
ListType: CURRENT_TYPE,
};
// NOTE: The current versions listed to be lifecycle should include version id
// if the bucket is versioned.
if (isBucketVersioned) {
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
content.VersionId = versionId;
}
data.Contents.push(content);
});
return data;
}
function _encodeVersionId(vid) {
let versionId = vid;
if (versionId && versionId !== 'null') {
versionId = versionIdUtils.encode(versionId);
}
return versionId;
}
function processNonCurrents(bucketName, listParams, list) {
const nextVersionIdMarker = _encodeVersionId(list.NextVersionIdMarker);
const versionIdMarker = _encodeVersionId(listParams.versionIdMarker);
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
KeyMarker: listParams.keyMarker,
VersionIdMarker: versionIdMarker,
BeforeDate: listParams.beforeDate,
NextKeyMarker: list.NextKeyMarker,
NextVersionIdMarker: nextVersionIdMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
staleDate: v.staleDate, // lowerCamelCase to be compatible with existing lifecycle.
VersionId: versionId,
DataStoreName: v.dataStoreName,
ListType: NON_CURRENT_TYPE,
};
data.Contents.push(content);
});
return data;
}
function processOrphans(bucketName, listParams, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
data.Contents.push({
Key: item.key,
LastModified: v.LastModified,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
VersionId: versionId,
IsLatest: true, // for compatibility with AWS ListObjectVersions.
ListType: ORPHAN_DM_TYPE,
});
});
return data;
}
function getLocationConstraintErrorMessage(locationName) {
return 'value of the location you are attempting to set ' +
`- ${locationName} - is not listed in the locationConstraint config`;
}
/**
* validateMaxScannedEntries - Validates and returns the maximum scanned entries value.
*
* @param {object} params - Query parameters
* @param {object} config - CloudServer configuration
* @param {number} min - Minimum number of entries to be scanned
* @returns {Object} - An object indicating the validation result:
* - isValid (boolean): Whether the validation is successful.
* - maxScannedLifecycleListingEntries (number): The validated maximum scanned entries value if isValid is true.
*/
function validateMaxScannedEntries(params, config, min) {
let maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
if (params['max-scanned-lifecycle-listing-entries']) {
const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10);
if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min ||
maxEntriesParams > maxScannedLifecycleListingEntries) {
return { isValid: false };
}
maxScannedLifecycleListingEntries = maxEntriesParams;
}
return { isValid: true, maxScannedLifecycleListingEntries };
}
module.exports = {
processCurrents,
processNonCurrents,
processOrphans,
getLocationConstraintErrorMessage,
validateMaxScannedEntries,
};
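Usage sketch for validateMaxScannedEntries (values are illustrative): the query parameter must parse as an integer between the given minimum and the configured ceiling, otherwise the listing request is rejected.
const cfg = { maxScannedLifecycleListingEntries: 10000 };
validateMaxScannedEntries({ 'max-scanned-lifecycle-listing-entries': '5000' }, cfg, 3);
// => { isValid: true, maxScannedLifecycleListingEntries: 5000 }
validateMaxScannedEntries({ 'max-scanned-lifecycle-listing-entries': '2' }, cfg, 3);
// => { isValid: false } (below the minimum)
validateMaxScannedEntries({}, cfg, 3);
// => { isValid: true, maxScannedLifecycleListingEntries: 10000 }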


@ -1,34 +0,0 @@
/**
* Check if all keys that exist in the current list which will be used
* in composing object are not present in the old object's list.
*
* This method can be used to check against accidentally removing data
* keys due to instability from the metadata layer, or for replay
* detection in general.
*
* @param {array|string|null} prev - list of keys from the object being
* overwritten
* @param {array|null} curr - list of keys to be used in composing
* current object
* @returns {boolean} true if no key in `curr` is present in `prev`,
* false otherwise
*/
function locationKeysHaveChanged(prev, curr) {
if (!prev || prev.length === 0 || !curr) {
return true;
}
// backwards compatibility check if object is of model version 2
if (typeof prev === 'string') {
return curr.every(v => v.key !== prev);
}
const keysMap = {};
prev.forEach(v => {
if (!keysMap[v.dataStoreType]) {
keysMap[v.dataStoreType] = {};
}
keysMap[v.dataStoreType][v.key] = true;
});
return curr.every(v => !(keysMap[v.dataStoreType] && keysMap[v.dataStoreType][v.key]));
}
module.exports = locationKeysHaveChanged;
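Usage sketch (keys are illustrative): the lookup map is keyed by dataStoreType first, so reusing a key name on a different backend still counts as changed.
const prevKeys = [{ key: 'k1', dataStoreType: 'aws_s3' }];
locationKeysHaveChanged(prevKeys, [{ key: 'k1', dataStoreType: 'aws_s3' }]); // => false
locationKeysHaveChanged(prevKeys, [{ key: 'k1', dataStoreType: 'azure' }]);  // => true
locationKeysHaveChanged(null, [{ key: 'k1', dataStoreType: 'aws_s3' }]);     // => true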


@ -0,0 +1,24 @@
/**
* Check keys that exist in the current list which will be used in composing
* object. This method checks against accidentally removing data keys due to
* instability from the metadata layer. The check returns true if there was no
* match and false if at least one key from the previous list exists in the
* current list
* @param {array|string} prev - list of keys from the object being overwritten
* @param {array} curr - list of keys to be used in composing current object
* @returns {boolean} true if no key from `prev` exists in `curr`, false otherwise
*/
function locationKeysSanityCheck(prev, curr) {
if (!prev || prev.length === 0) {
return true;
}
// backwards compatibility check if object is of model version 2
if (typeof prev === 'string') {
return curr.every(v => v.key !== prev);
}
const keysMap = {};
prev.forEach(v => { keysMap[v.key] = true; });
return curr.every(v => !keysMap[v.key]);
}
module.exports = locationKeysSanityCheck;
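Usage sketch (keys are illustrative): unlike locationKeysHaveChanged above, this variant keys the map on the data key alone, so the same key on a different backend is still treated as a match.
const previousKeys = [{ key: 'k1', dataStoreType: 'aws_s3' }];
locationKeysSanityCheck(previousKeys, [{ key: 'k1', dataStoreType: 'azure' }]);  // => false
locationKeysSanityCheck(previousKeys, [{ key: 'k2', dataStoreType: 'aws_s3' }]); // => true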


@ -1,348 +0,0 @@
const { errors, auth, policies } = require('arsenal');
const moment = require('moment');
const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period
* @return {string|undefined} the ISO-8601 date until which the object version remains locked
*/
function calculateRetainUntilDate(retention) {
const { days, years } = retention;
if (!days && !years) {
return undefined;
}
const date = moment();
// Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate
= date.add(retainUntilDaysInMs, 'ms');
return retainUntilDate.toISOString();
}
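Standalone sketch of the retention date arithmetic above, assuming scaledMsPerDay is a plain 24-hour day (the real value comes from config.getTimeOptions()); names and dates are illustrative.
const dayMs = 24 * 60 * 60 * 1000;
function retainUntil(now, { days, years }) {
    if (!days && !years) {
        return undefined;
    }
    const retainDays = days || (years * 365);
    return new Date(now.getTime() + (retainDays * dayMs)).toISOString();
}
// retainUntil(new Date('2019-07-31T00:00:00Z'), { days: 10 })
// => '2019-08-10T00:00:00.000Z'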
/**
* Validates object lock headers
* @param {object} bucket - bucket metadata
* @param {object} headers - request headers
* @param {object} log - request logger
* @return {object|null} - error object if validation fails, null otherwise
*/
function validateHeaders(bucket, headers, log) {
const bucketObjectLockEnabled = bucket.isObjectLockEnabled();
const objectLegalHold = headers['x-amz-object-lock-legal-hold'];
const objectLockDate = headers['x-amz-object-lock-retain-until-date'];
const objectLockMode = headers['x-amz-object-lock-mode'];
// If retention headers or legal hold header present but
// object lock is not enabled on the bucket return error
if ((objectLockDate || objectLockMode || objectLegalHold)
&& !bucketObjectLockEnabled) {
log.trace('bucket is missing ObjectLockConfiguration');
return errors.InvalidRequest.customizeDescription(
'Bucket is missing ObjectLockConfiguration');
}
if ((objectLockMode || objectLockDate) &&
!(objectLockMode && objectLockDate)) {
return errors.InvalidArgument.customizeDescription(
'x-amz-object-lock-retain-until-date and ' +
'x-amz-object-lock-mode must both be supplied',
);
}
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
if (objectLockMode && !validModes.has(objectLockMode)) {
return errors.InvalidArgument.customizeDescription(
'Unknown wormMode directive');
}
const validLegalHolds = new Set(['ON', 'OFF']);
if (objectLegalHold && !validLegalHolds.has(objectLegalHold)) {
return errors.InvalidArgument.customizeDescription(
'Legal hold status must be one of "ON", "OFF"');
}
const currentDate = new Date().toISOString();
if (objectLockMode && objectLockDate <= currentDate) {
return errors.InvalidArgument.customizeDescription(
'The retain until date must be in the future!');
}
return null;
}
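Illustrative outcomes of validateHeaders (the bucket stub, headers, and logger are hypothetical): retention headers must come in pairs and require object lock to be enabled on the bucket.
const lockedBucket = { isObjectLockEnabled: () => true };
const requestLog = { trace: () => {} };
validateHeaders(lockedBucket, { 'x-amz-object-lock-mode': 'GOVERNANCE' }, requestLog);
// => InvalidArgument (mode supplied without a retain-until date)
validateHeaders(lockedBucket, {
    'x-amz-object-lock-mode': 'GOVERNANCE',
    'x-amz-object-lock-retain-until-date': '2099-01-01T00:00:00.000Z',
}, requestLog);
// => null (headers are valid)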
/**
* Compares new object retention to bucket default retention
* @param {object} headers - request headers
* @param {object} defaultRetention - bucket retention configuration
* @return {object} - final object lock information to set on object
*/
function compareObjectLockInformation(headers, defaultRetention) {
const objectLockInfoToSave = {};
if (defaultRetention && defaultRetention.rule) {
const defaultMode = defaultRetention.rule.mode;
const defaultTime = calculateRetainUntilDate(defaultRetention.rule);
if (defaultMode && defaultTime) {
objectLockInfoToSave.retentionInfo = {
mode: defaultMode,
date: defaultTime,
};
}
}
if (headers) {
const headerMode = headers['x-amz-object-lock-mode'];
const headerDate = headers['x-amz-object-lock-retain-until-date'];
if (headerMode && headerDate) {
objectLockInfoToSave.retentionInfo = {
mode: headerMode,
date: headerDate,
};
}
const headerLegalHold = headers['x-amz-object-lock-legal-hold'];
if (headerLegalHold) {
const legalHold = headerLegalHold === 'ON';
objectLockInfoToSave.legalHold = legalHold;
}
}
return objectLockInfoToSave;
}
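Precedence sketch (values are illustrative): explicit request headers win over the bucket's default retention rule, and the legal-hold header is folded to a boolean.
const defaultRetention = { rule: { mode: 'COMPLIANCE', days: 30 } };
compareObjectLockInformation({
    'x-amz-object-lock-mode': 'GOVERNANCE',
    'x-amz-object-lock-retain-until-date': '2030-01-01T00:00:00.000Z',
    'x-amz-object-lock-legal-hold': 'OFF',
}, defaultRetention);
// => { retentionInfo: { mode: 'GOVERNANCE', date: '2030-01-01T00:00:00.000Z' },
//      legalHold: false }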
/**
* Sets object retention and/or legal hold information on object's metadata
* @param {object} headers - request headers
* @param {object} md - object metadata
* @param {(object|null)} defaultRetention - bucket retention configuration if
* bucket has any configuration set
* @return {undefined}
*/
function setObjectLockInformation(headers, md, defaultRetention) {
// Stores retention information if object either has its own retention
// configuration or default retention configuration from its bucket
const finalObjectLockInfo =
compareObjectLockInformation(headers, defaultRetention);
if (finalObjectLockInfo.retentionInfo) {
md.setRetentionMode(finalObjectLockInfo.retentionInfo.mode);
md.setRetentionDate(finalObjectLockInfo.retentionInfo.date);
}
if (finalObjectLockInfo.legalHold || finalObjectLockInfo.legalHold === false) {
md.setLegalHold(finalObjectLockInfo.legalHold);
}
}
/**
* Helper class for object lock state checks
*/
class ObjectLockInfo {
/**
*
* @param {object} retentionInfo - The object lock retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | null} retentionInfo.mode - Retention policy mode.
* @param {string} retentionInfo.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} retentionInfo.legalHold - Whether a legal hold is enabled for the object
*/
constructor(retentionInfo) {
this.mode = retentionInfo.mode || null;
this.date = retentionInfo.date || null;
this.legalHold = retentionInfo.legalHold || false;
}
/**
* ObjectLockInfo.isLocked
* @returns {bool} - Whether the retention policy is active and protecting the object
*/
isLocked() {
if (this.legalHold) {
return true;
}
if (!this.mode || !this.date) {
return false;
}
return !this.isExpired();
}
/**
* ObjectLockInfo.isGovernanceMode
* @returns {bool} - true if retention mode is GOVERNANCE
*/
isGovernanceMode() {
return this.mode === 'GOVERNANCE';
}
/**
* ObjectLockInfo.isComplianceMode
* @returns {bool} - True if retention mode is COMPLIANCE
*/
isComplianceMode() {
return this.mode === 'COMPLIANCE';
}
/**
* ObjectLockInfo.isExpired
* @returns {bool} - True if the retention policy has expired
*/
isExpired() {
const now = moment();
return this.date === null || now.isSameOrAfter(this.date);
}
/**
* ObjectLockInfo.isExtended
* @param {string} timestamp - Timestamp in ISO-8601 format
* @returns {bool} - True if the given timestamp is after the policy expiration date or if no expiration date is set
*/
isExtended(timestamp) {
return timestamp !== undefined && (this.date === null || moment(timestamp).isSameOrAfter(this.date));
}
/**
* ObjectLockInfo.canModifyObject
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
*/
canModifyObject(hasGovernanceBypass) {
// can modify object if object is not locked
// cannot modify object in any cases if legal hold is enabled
// if no legal hold, can only modify object if bypassing governance when locked
if (!this.isLocked()) {
return true;
}
return !this.legalHold && this.isGovernanceMode() && !!hasGovernanceBypass;
}
/**
* ObjectLockInfo.canModifyPolicy
* @param {object} policyChanges - Proposed changes to the retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | undefined} policyChanges.mode - Retention policy mode.
* @param {string} policyChanges.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
* @returns {bool} - True if the changes are allowed to be applied to the retention policy
*/
canModifyPolicy(policyChanges, hasGovernanceBypass) {
// If an object does not have a retention policy or it is expired then all changes are allowed
if (!this.isLocked()) {
return true;
}
// The only allowed change in compliance mode is extending the retention period
if (this.isComplianceMode()) {
if (policyChanges.mode === 'COMPLIANCE' && this.isExtended(policyChanges.date)) {
return true;
}
}
if (this.isGovernanceMode()) {
// Extensions are always allowed in governance mode
if (policyChanges.mode === 'GOVERNANCE' && this.isExtended(policyChanges.date)) {
return true;
}
// All other changes in governance mode require a bypass
if (hasGovernanceBypass) {
return true;
}
}
return false;
}
}
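Usage sketch for ObjectLockInfo (dates are illustrative): an unexpired GOVERNANCE lock only allows overwrites with the bypass flag, while extending the retention period is always allowed in governance mode.
const lock = new ObjectLockInfo({
    mode: 'GOVERNANCE',
    date: '2099-01-01T00:00:00.000Z',
    legalHold: false,
});
lock.isLocked();             // => true
lock.canModifyObject(false); // => false
lock.canModifyObject(true);  // => true (governance bypass)
lock.canModifyPolicy({ mode: 'GOVERNANCE', date: '2100-01-01T00:00:00.000Z' }, false);
// => true (retention period extension)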
/**
*
* @param {object} headers - s3 request headers
* @returns {bool} - True if the header is present and equals "true" (case-insensitive)
*/
function hasGovernanceBypassHeader(headers) {
const bypassHeader = headers['x-amz-bypass-governance-retention'] || '';
return bypassHeader.toLowerCase() === 'true';
}
/**
* checkUserGovernanceBypass
*
* Checks for the presence of the s3:BypassGovernanceRetention permission for a given user
*
* @param {object} request - Incoming s3 request
* @param {object} authInfo - s3 authentication info
* @param {object} bucketMD - bucket metadata
* @param {string} objectKey - object key
* @param {object} log - Werelogs logger
* @param {function} cb - callback returns errors.AccessDenied if the authorization fails
* @returns {undefined} -
*/
function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) {
log.trace(
'object in GOVERNANCE mode and is user, checking for attached policies',
{ method: 'checkUserPolicyGovernanceBypass' },
);
const authParams = auth.server.extractParams(request, log, 's3', request.query);
const ip = policies.requestUtils.getClientIp(request, config);
const requestContextParams = {
constantParams: {
headers: request.headers,
query: request.query,
generalResource: bucketMD.getName(),
specificResource: { key: objectKey },
requesterIp: ip,
sslEnabled: request.connection.encrypted,
apiMethod: 'bypassGovernanceRetention',
awsService: 's3',
locationConstraint: bucketMD.getLocationConstraint(),
requesterInfo: authInfo,
signatureVersion: authParams.params.data.signatureVersion,
authType: authParams.params.data.authType,
signatureAge: authParams.params.data.signatureAge,
},
};
return vault.checkPolicies(requestContextParams,
authInfo.getArn(), log, (err, authorizationResults) => {
if (err) {
return cb(err);
}
const explicitDenyExists = authorizationResults.some(
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
if (explicitDenyExists) {
log.trace('authorization check failed for user',
{
'method': 'checkUserPolicyGovernanceBypass',
's3:BypassGovernanceRetention': false,
});
return cb(errors.AccessDenied);
}
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
authInfo.getCanonicalID(),
authInfo,
actionImplicitDenies,
log,
request);
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
});
}
module.exports = {
calculateRetainUntilDate,
compareObjectLockInformation,
setObjectLockInformation,
validateHeaders,
hasGovernanceBypassHeader,
checkUserGovernanceBypass,
ObjectLockInfo,
};
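Shape sketch (results are illustrative): a simplified fold equivalent to the reduce in checkUserGovernanceBypass, showing how Vault authorization results become the actionImplicitDenies map; an explicit deny (isAllowed === false and not implicit) short-circuits to AccessDenied before this map is evaluated.
const authorizationResults = [
    { action: 'bypassGovernanceRetention', isAllowed: true, isImplicit: false },
    { action: 'objectDelete', isAllowed: false, isImplicit: true },
];
const actionImplicitDenies = authorizationResults.reduce((acc, curr) => {
    acc[curr.action] = curr.isImplicit;
    return acc;
}, {});
// => { bypassGovernanceRetention: false, objectDelete: true }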

Some files were not shown because too many files have changed in this diff.