Compare commits
2 Commits

developmen...feature/ZE

Author | SHA1 | Date
---|---|---
jeremyds | 812751a5fb |
jeremyds | 0cee70f3f9 |

@@ -1,9 +1,3 @@
node_modules
localData/*
localMetadata/*
# Keep the .git/HEAD file in order to properly report version
.git/objects
.github
.tox
coverage
.DS_Store

.eslintrc
@@ -1,54 +1 @@
{
    "extends": "scality",
    "plugins": [
        "mocha"
    ],
    "rules": {
        "import/extensions": "off",
        "lines-around-directive": "off",
        "no-underscore-dangle": "off",
        "indent": "off",
        "object-curly-newline": "off",
        "operator-linebreak": "off",
        "function-paren-newline": "off",
        "import/newline-after-import": "off",
        "prefer-destructuring": "off",
        "implicit-arrow-linebreak": "off",
        "no-bitwise": "off",
        "dot-location": "off",
        "comma-dangle": "off",
        "no-undef-init": "off",
        "global-require": "off",
        "import/no-dynamic-require": "off",
        "class-methods-use-this": "off",
        "no-plusplus": "off",
        "no-else-return": "off",
        "object-property-newline": "off",
        "import/order": "off",
        "no-continue": "off",
        "no-tabs": "off",
        "lines-between-class-members": "off",
        "prefer-spread": "off",
        "no-lonely-if": "off",
        "no-useless-escape": "off",
        "no-restricted-globals": "off",
        "no-buffer-constructor": "off",
        "import/no-extraneous-dependencies": "off",
        "space-unary-ops": "off",
        "no-useless-return": "off",
        "no-unexpected-multiline": "off",
        "no-mixed-operators": "off",
        "newline-per-chained-call": "off",
        "operator-assignment": "off",
        "spaced-comment": "off",
        "comma-style": "off",
        "no-restricted-properties": "off",
        "new-parens": "off",
        "no-multi-spaces": "off",
        "quote-props": "off",
        "mocha/no-exclusive-tests": "error",
    },
    "parserOptions": {
        "ecmaVersion": 2020
    }
}

{ "extends": "scality" }

@@ -48,7 +48,7 @@ Describe the results you expected

- Node.js version,
- Docker version,
- yarn version,
- npm version,
- distribution/OS,
- optional: anything else you deem helpful to us.

@@ -1,43 +0,0 @@
---
name: "Setup CI environment"
description: "Setup Cloudserver CI environment"

runs:
  using: composite
  steps:
    - name: Setup etc/hosts
      shell: bash
      run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
    - name: Setup Credentials
      shell: bash
      run: bash .github/scripts/credentials.bash
    - name: Setup job artifacts directory
      shell: bash
      run: |-
        set -exu;
        mkdir -p /tmp/artifacts/${JOB_NAME}/;
    - uses: actions/setup-node@v4
      with:
        node-version: '16'
        cache: 'yarn'
    - name: install dependencies
      shell: bash
      run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
    - uses: actions/cache@v3
      with:
        path: ~/.cache/pip
        key: ${{ runner.os }}-pip
    - uses: actions/setup-python@v4
      with:
        python-version: 3.9
    - name: Setup python2 test environment
      shell: bash
      run: |
        sudo apt-get install -y libdigest-hmac-perl
        pip install 's3cmd==2.3.0'
    - name: fix sproxyd.conf permissions
      shell: bash
      run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
    - name: ensure fuse kernel module is loaded (for sproxyd)
      shell: bash
      run: sudo modprobe fuse

@@ -1,25 +0,0 @@
FROM ceph/daemon:v3.2.1-stable-3.2-mimic-centos-7

ENV CEPH_DAEMON demo
ENV CEPH_DEMO_DAEMONS mon,mgr,osd,rgw

ENV CEPH_DEMO_UID zenko
ENV CEPH_DEMO_ACCESS_KEY accessKey1
ENV CEPH_DEMO_SECRET_KEY verySecretKey1
ENV CEPH_DEMO_BUCKET zenkobucket

ENV CEPH_PUBLIC_NETWORK 0.0.0.0/0
ENV MON_IP 0.0.0.0
ENV NETWORK_AUTO_DETECT 4
ENV RGW_CIVETWEB_PORT 8001

RUN rm /etc/yum.repos.d/tcmu-runner.repo

ADD ./entrypoint-wrapper.sh /
RUN chmod +x /entrypoint-wrapper.sh && \
    yum install -y python-pip && \
    yum clean all && \
    pip install awscli && \
    rm -rf /root/.cache/pip

ENTRYPOINT [ "/entrypoint-wrapper.sh" ]

@@ -1,37 +0,0 @@
#!/bin/sh

touch /artifacts/ceph.log
mkfifo /tmp/entrypoint_output
# We run this in the background so that we can tail the RGW log after init,
# because entrypoint.sh never returns

# The next line will be needed when ceph builds 3.2.2 so I'll leave it here
# bash /opt/ceph-container/bin/entrypoint.sh > /tmp/entrypoint_output &

bash /entrypoint.sh > /tmp/entrypoint_output &
entrypoint_pid="$!"
while read -r line; do
  echo $line
  # When we find this line server has started
  if [ -n "$(echo $line | grep 'Creating bucket')" ]; then
    break
  fi
done < /tmp/entrypoint_output

# Make our buckets - CEPH_DEMO_BUCKET is set to force the "Creating bucket" message, but unused
s3cmd mb s3://cephbucket s3://cephbucket2

mkdir /root/.aws
cat > /root/.aws/credentials <<EOF
[default]
aws_access_key_id = accessKey1
aws_secret_access_key = verySecretKey1
EOF

# Enable versioning on them
for bucket in cephbucket cephbucket2; do
  echo "Enabling versioning for $bucket"
  aws --endpoint http://127.0.0.1:8001 s3api put-bucket-versioning --bucket $bucket --versioning Status=Enabled
done
tail -f /var/log/ceph/client.rgw.*.log | tee -a /artifacts/ceph.log
wait $entrypoint_pid

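The FIFO loop above is a general wait-for-log-line pattern: keep the child process running, echo its output, and break once a readiness marker appears. A minimal Python sketch of the same idea, where the "./server" command is a hypothetical stand-in for Ceph's entrypoint.sh:

import subprocess

# Hypothetical long-running command that prints "Creating bucket" once ready.
proc = subprocess.Popen(
    ["./server"],                # assumption: any command logging to stdout
    stdout=subprocess.PIPE,
    text=True,
)

# Echo output until the readiness marker appears, mirroring the FIFO loop.
for line in proc.stdout:
    print(line, end="")
    if "Creating bucket" in line:
        break                    # server is up; it keeps running in the background

# ... interact with the now-ready server here, then: proc.wait()
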
@@ -1,11 +0,0 @@
#!/bin/sh

# This script is needed because RADOS Gateway
# will open the port before beginning to serve traffic
# causing wait_for_local_port.bash to exit immediately

echo 'Waiting for ceph'
while [ -z "$(curl 127.0.0.1:8001 2>/dev/null)" ]; do
  sleep 1
  echo -n "."
done

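The script draws a useful distinction: a service can accept TCP connections before it actually answers HTTP requests, so it polls for a response body rather than an open port. A minimal Python equivalent, assuming only the RGW URL from the Dockerfile above:

import time
import urllib.error
import urllib.request

URL = "http://127.0.0.1:8001"   # RGW port from the ceph Dockerfile

print("Waiting for ceph")
while True:
    try:
        with urllib.request.urlopen(URL, timeout=2) as resp:
            if resp.read():        # got a real response body
                break
    except urllib.error.HTTPError:
        break                      # any HTTP status means RGW is serving
    except (urllib.error.URLError, OSError):
        pass                       # connection refused/reset: not serving yet
    time.sleep(1)
    print(".", end="", flush=True)
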
@@ -1,10 +0,0 @@
---
version: 2
updates:
  - package-ecosystem: npm
    directory: "/"
    schedule:
      interval: daily
      time: "13:00"
    open-pull-requests-limit: 10
    target-branch: "development/7.4"

@@ -1,36 +0,0 @@
azurebackend_AZURE_STORAGE_ACCESS_KEY
azurebackend_AZURE_STORAGE_ACCOUNT_NAME
azurebackend_AZURE_STORAGE_ENDPOINT
azurebackend2_AZURE_STORAGE_ACCESS_KEY
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
azurebackend2_AZURE_STORAGE_ENDPOINT
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
azurebackendmismatch_AZURE_STORAGE_ENDPOINT
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
azuretest_AZURE_BLOB_ENDPOINT
b2backend_B2_ACCOUNT_ID
b2backend_B2_STORAGE_ACCESS_KEY
GOOGLE_SERVICE_EMAIL
GOOGLE_SERVICE_KEY
AWS_S3_BACKEND_ACCESS_KEY
AWS_S3_BACKEND_SECRET_KEY
AWS_S3_BACKEND_ACCESS_KEY_2
AWS_S3_BACKEND_SECRET_KEY_2
AWS_GCP_BACKEND_ACCESS_KEY
AWS_GCP_BACKEND_SECRET_KEY
AWS_GCP_BACKEND_ACCESS_KEY_2
AWS_GCP_BACKEND_SECRET_KEY_2
b2backend_B2_STORAGE_ENDPOINT
gcpbackend2_GCP_SERVICE_EMAIL
gcpbackend2_GCP_SERVICE_KEY
gcpbackend2_GCP_SERVICE_KEYFILE
gcpbackend_GCP_SERVICE_EMAIL
gcpbackend_GCP_SERVICE_KEY
gcpbackendmismatch_GCP_SERVICE_EMAIL
gcpbackendmismatch_GCP_SERVICE_KEY
gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE

@@ -1,92 +0,0 @@
services:
  cloudserver:
    image: ${CLOUDSERVER_IMAGE}
    command: sh -c "yarn start > /artifacts/s3.log"
    network_mode: "host"
    volumes:
      - /tmp/ssl:/ssl
      - /tmp/ssl-kmip:/ssl-kmip
      - ${HOME}/.aws/credentials:/root/.aws/credentials
      - /tmp/artifacts/${JOB_NAME}:/artifacts
    environment:
      - CI=true
      - ENABLE_LOCAL_CACHE=true
      - REDIS_HOST=0.0.0.0
      - REDIS_PORT=6379
      - REPORT_TOKEN=report-token-1
      - REMOTE_MANAGEMENT_DISABLE=1
      - HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
      - DATA_HOST=0.0.0.0
      - METADATA_HOST=0.0.0.0
      - S3BACKEND
      - S3DATA
      - S3METADATA
      - MPU_TESTING
      - S3VAULT
      - S3_LOCATION_FILE
      - ENABLE_UTAPI_V2
      - BUCKET_DENY_FILTER
      - S3KMS
      - S3KMIP_PORT
      - S3KMIP_HOSTS
      - S3KMIP_COMPOUND_CREATE
      - S3KMIP_BUCKET_ATTRIBUTE_NAME
      - S3KMIP_PIPELINE_DEPTH
      - S3KMIP_KEY
      - S3KMIP_CERT
      - S3KMIP_CA
      - MONGODB_HOSTS=0.0.0.0:27018
      - MONGODB_RS=rs0
      - DEFAULT_BUCKET_KEY_FORMAT
      - METADATA_MAX_CACHED_BUCKETS
      - ENABLE_NULL_VERSION_COMPAT_MODE
      - SCUBA_HOST
      - SCUBA_PORT
      - SCUBA_HEALTHCHECK_FREQUENCY
      - S3QUOTA
      - QUOTA_ENABLE_INFLIGHTS
    env_file:
      - creds.env
    depends_on:
      - redis
    extra_hosts:
      - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
      - "pykmip.local:127.0.0.1"
  redis:
    image: redis:alpine
    network_mode: "host"
  squid:
    network_mode: "host"
    profiles: ['ci-proxy']
    image: scality/ci-squid
    command: >-
      sh -c 'mkdir -p /ssl &&
      openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
      -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
      -keyout /ssl/myca.pem -out /ssl/myca.pem &&
      cp /ssl/myca.pem /ssl/CA.pem &&
      squid -f /etc/squid/squid.conf -N -z &&
      squid -f /etc/squid/squid.conf -NYCd 1'
    volumes:
      - /tmp/ssl:/ssl
  pykmip:
    network_mode: "host"
    profiles: ['pykmip']
    image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
    volumes:
      - /tmp/artifacts/${JOB_NAME}:/artifacts
  mongo:
    network_mode: "host"
    profiles: ['mongo', 'ceph']
    image: ${MONGODB_IMAGE}
  ceph:
    network_mode: "host"
    profiles: ['ceph']
    image: ghcr.io/scality/cloudserver/ci-ceph
  sproxyd:
    network_mode: "host"
    profiles: ['sproxyd']
    image: sproxyd-standalone
    build: ./sproxyd
    user: 0:0
    privileged: yes

@@ -1,28 +0,0 @@
FROM mongo:5.0.21

ENV USER=scality \
    HOME_DIR=/home/scality \
    CONF_DIR=/conf \
    DATA_DIR=/data

# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
    mkdir /logs; \
    adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality

# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
    chown -R ${USER} ${CONF_DIR} && \
    chown -R ${USER} ${DATA_DIR}

# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js

EXPOSE 27017/tcp
EXPOSE 27018

# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]

@@ -1,4 +0,0 @@
rs.initiate({
    _id: "rs0",
    members: [{ _id: 0, host: "127.0.0.1:27018" }]
});

@@ -1,10 +0,0 @@
#!/bin/bash
set -exo pipefail

init_RS() {
  sleep 5
  mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &

mongod --bind_ip_all --config=/conf/mongod.conf

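The script starts mongod in the foreground and initiates the single-member replica set from a backgrounded helper. A minimal readiness check for that setup, assuming pymongo is installed; the port 27018 and the "rs0" name come from mongod.conf below:

from pymongo import MongoClient

# Connect directly to the single member started by mongo-run.sh;
# directConnection avoids waiting for replica-set discovery.
client = MongoClient("mongodb://127.0.0.1:27018", directConnection=True)

# replSetGetStatus fails until rs.initiate() (run in the background by
# init_RS) has completed, so it doubles as a simple readiness probe.
status = client.admin.command("replSetGetStatus")
print(status["set"], [m["stateStr"] for m in status["members"]])
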
@@ -1,15 +0,0 @@
storage:
  journal:
    enabled: true
  engine: wiredTiger
  dbPath: "/data/db"
processManagement:
  fork: false
net:
  port: 27018
  bindIp: 0.0.0.0
replication:
  replSetName: "rs0"
  enableMajorityReadConcern: true
security:
  authorization: disabled

@@ -1,3 +0,0 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

@@ -1,26 +0,0 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;

#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;

fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;

fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;

fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;

# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

@@ -1,88 +0,0 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
    worker_connections 1000;
    reuse_port on;
    multi_accept on;
}
worker_rlimit_nofile 20000;
http {
    root /var/www/;
    upstream sproxyds {
        least_conn;
        keepalive 40;
        server 127.0.0.1:20000;
    }
    server {
        client_max_body_size 0;
        client_body_timeout 150;
        client_header_timeout 150;
        postpone_output 0;
        client_body_postpone_size 0;
        keepalive_requests 1100;
        keepalive_timeout 300s;
        server_tokens off;
        default_type application/octet-stream;
        gzip off;
        tcp_nodelay on;
        tcp_nopush on;
        sendfile on;
        listen 81;
        server_name localhost;
        rewrite ^/arc/(.*)$ /dc1/$1 permanent;
        location ~* ^/proxy/(.*)$ {
            rewrite ^/proxy/(.*)$ /$1 last;
        }
        allow 127.0.0.1;

        deny all;
        set $usermd '-';
        set $sentusermd '-';
        set $elapsed_ms '-';
        set $now '-';
        log_by_lua '
            if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
                ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
            end
            if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
                ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
            end
            local elapsed_ms = tonumber(ngx.var.request_time)
            if not ( elapsed_ms == nil) then
                elapsed_ms = elapsed_ms * 1000
                ngx.var.elapsed_ms = tostring(elapsed_ms)
            end
            local time = tonumber(ngx.var.msec) * 1000
            ngx.var.now = time
        ';
        log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
            '"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
            '"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
            '"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
            '"contentType":"$content_type","s3Address":"$remote_addr",'
            '"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
            '"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
            '"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
            '"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
            '"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
            '"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
        access_log /dev/stdout irm;
        error_log /dev/stdout error;
        location / {
            proxy_request_buffering off;
            fastcgi_request_buffering off;
            fastcgi_no_cache 1;
            fastcgi_cache_bypass 1;
            fastcgi_buffering off;
            fastcgi_ignore_client_abort on;
            fastcgi_keep_conn on;
            include fastcgi_params;
            fastcgi_pass sproxyds;
            fastcgi_next_upstream error timeout;
            fastcgi_send_timeout 285s;
            fastcgi_read_timeout 285s;
        }
    }
}

@@ -1,12 +0,0 @@
{
    "general": {
        "ring": "DATA",
        "port": 20000,
        "syslog_facility": "local0"
    },
    "ring_driver:0": {
        "alias": "dc1",
        "type": "local",
        "queue_path": "/tmp/ring-objs"
    },
}

@@ -1,43 +0,0 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2

[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock

[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock

[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root

[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root

@@ -1,29 +0,0 @@
FROM python:3.10-alpine

RUN apk add --no-cache \
    libressl && \
    apk add --no-cache --virtual .build-deps \
    python3-dev \
    libffi-dev \
    libressl-dev \
    sqlite-dev \
    build-base \
    curl

RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

RUN pip3 install -U pip && \
    pip3 install pykmip requests && \
    apk del .build-deps && \
    mkdir /pykmip


ADD ./bin /usr/local/bin
ADD ./certs /ssl
ADD policy.json /etc/pykmip/policies/policy.json
ADD server.conf /etc/pykmip/server.conf
ADD docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh

ENTRYPOINT ["/docker-entrypoint.sh"]

@@ -1,156 +0,0 @@
#!/usr/bin/env python

from cryptography import x509
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

import datetime
import argparse
import sys


def get_args():
    parser = argparse.ArgumentParser(
        prog=sys.argv[0],
        description='Tool to generate a x509 CA root, server and client certs')
    parser.add_argument('-c', '--common-name', action='store',
                        default='localhost',
                        help='Set the common name for the server-side cert')
    return parser.parse_args()


def create_rsa_private_key(key_size=2048, public_exponent=65537):
    private_key = rsa.generate_private_key(
        public_exponent=public_exponent,
        key_size=key_size,
        backend=backends.default_backend()
    )
    return private_key


def create_self_signed_certificate(subject_name,
                                   private_key,
                                   days_valid=36500):
    subject = x509.Name([
        x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
        x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name)
    ])
    certificate = x509.CertificateBuilder().subject_name(
        subject
    ).issuer_name(
        subject
    ).public_key(
        private_key.public_key()
    ).serial_number(
        x509.random_serial_number()
    ).not_valid_before(
        datetime.datetime.utcnow()
    ).not_valid_after(
        datetime.datetime.utcnow() + datetime.timedelta(days=days_valid)
    ).add_extension(
        x509.BasicConstraints(True, None),
        critical=True
    ).sign(private_key, hashes.SHA256(), backends.default_backend())

    return certificate


def create_certificate(subject_name,
                       private_key,
                       signing_certificate,
                       signing_key,
                       days_valid=36500,
                       client_auth=False):
    subject = x509.Name([
        x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
        x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name)
    ])
    builder = x509.CertificateBuilder().subject_name(
        subject
    ).issuer_name(
        signing_certificate.subject
    ).public_key(
        private_key.public_key()
    ).serial_number(
        x509.random_serial_number()
    ).not_valid_before(
        datetime.datetime.utcnow()
    ).not_valid_after(
        datetime.datetime.utcnow() + datetime.timedelta(days=days_valid)
    )

    if client_auth:
        builder = builder.add_extension(
            x509.ExtendedKeyUsage([x509.ExtendedKeyUsageOID.CLIENT_AUTH]),
            critical=True
        )

    certificate = builder.sign(
        signing_key,
        hashes.SHA256(),
        backends.default_backend()
    )
    return certificate


def main(common_name):
    root_key = create_rsa_private_key()
    root_certificate = create_self_signed_certificate(
        u"Root CA",
        root_key
    )

    server_key = create_rsa_private_key()
    server_certificate = create_certificate(
        common_name,
        server_key,
        root_certificate,
        root_key
    )

    john_doe_client_key = create_rsa_private_key()
    john_doe_client_certificate = create_certificate(
        u"John Doe",
        john_doe_client_key,
        root_certificate,
        root_key,
        client_auth=True
    )

    with open("certs/kmip-ca.pem", "wb") as f:
        f.write(
            root_certificate.public_bytes(
                serialization.Encoding.PEM
            )
        )
    with open("certs/kmip-key.pem", "wb") as f:
        f.write(server_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ))
    with open("certs/kmip-cert.pem", "wb") as f:
        f.write(
            server_certificate.public_bytes(
                serialization.Encoding.PEM
            )
        )
    with open("certs/kmip-client-key.pem", "wb") as f:
        f.write(john_doe_client_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ))
    with open("certs/kmip-client-cert.pem", "wb") as f:
        f.write(
            john_doe_client_certificate.public_bytes(
                serialization.Encoding.PEM
            )
        )


if __name__ == '__main__':
    args = get_args()
    main(args.common_name)

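A small verification sketch for the files this script writes (paths as above), assuming it has already been run and the cryptography package is available; it checks that the server certificate really chains to the generated root:

from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import padding

# Load the CA and server certificates written by create_certs.py above.
with open("certs/kmip-ca.pem", "rb") as f:
    ca = x509.load_pem_x509_certificate(f.read())
with open("certs/kmip-cert.pem", "rb") as f:
    server = x509.load_pem_x509_certificate(f.read())

# The server cert's issuer should be the CA's subject ("Root CA"), and the
# CA's RSA public key should verify the server cert's signature.
assert server.issuer == ca.subject
ca.public_key().verify(
    server.signature,
    server.tbs_certificate_bytes,
    padding.PKCS1v15(),
    server.signature_hash_algorithm,
)
print("server certificate chains to", ca.subject.rfc4514_string())
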
@@ -1,26 +0,0 @@
#!/usr/bin/env python

# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging  # noqa: E402

logging.basicConfig(level=logging.DEBUG)

from kmip.services.server import server  # noqa: E402

if __name__ == '__main__':
    print('Starting PyKMIP server on 0.0.0.0:5696')
    server.main()

@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC6zCCAdOgAwIBAgIUPIpMY95b4HjKAk+FyydZApAEFskwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJDEQMA4GA1UECgwHU2NhbGl0eTEQ
MA4GA1UEAwwHUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AKqLFEsWtfRTxnoZrQe63tq+rQnVgninHMahRmXkzyjK/uNhoKnIh8bXdTC/eCZ6
FBROqBYNL0TJb0HDv1FzcZS1UCUldRqTlvr6wZb0pfrp40fvztsqQgAh1t/Blg5i
Zv5+ESSlNs5rWbFTxtq+FbMW/ERYTrVfnMkBiLg4Gq0HwID9a5jvJatzrrno2s1m
OfZCT3HaE3tMZ6vvYuoamvLNdvdH+9KeTmBCursfNejt0rSGjIqfi6DvFJSayydQ
is5DMSTbCLGdKQmA85VfEQmlQ8v0232WDSd6gVfp2tthDEDHnCbgWkEd1vsTyS85
ubdt5v4CWGOWV+mu3bf8xM0CAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAQEARTjc2zV/ol1/LsSzZy6l1R0uFBmR2KumH+Se1Yq2vKpY
Dv6xmrvmjOUr5RBO77nRhIgdcQA+LyAg8ii2Dfzc8r1RTD+j1bYOxESXctBOBcXM
Chy6FEBydR6m7S8qQyL+caJWO1WZWp2tapcm6sUG1oRVznWtK1/SHKIzOBwsmJ07
79KsCJ6wf9tzD05EDTI2QhAObE9/thy+zc8l8cmv9A6p3jKkx9rwXUttSUqTn0CW
w45bgKg6+DDcrhZ+MATbzuTfhuA4NFUTzK7KeX9sMuOV03Zs8SA3VhAOXmu063M3
0f9X7P/0RmGTTp7GGCqEINcZdbLh3k7CpFb2Ox998Q==
-----END CERTIFICATE-----

@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC2zCCAcOgAwIBAgIUIlE8UAkqQ+6mbJDtrt9kkmi8aJYwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowKTEQMA4GA1UECgwHU2NhbGl0eTEV
MBMGA1UEAwwMcHlrbWlwLmxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAtxr7pq/lnzVeZz4z52Yc3DeaPqjNfRSyW5cPUlT7ABXFb7+tja7K2C7u
DYVK+Q+2yJCQwYJY47aKJB++ewam9t2V8Xy0Z8S+0I2ImCwuyeihaD/f6uJZRzms
ycdECH22BA6tCPlQLnlboRiZzI6rcIvXAbUMvLvFm3nyYIs9qidExRnfyMjISknM
V+83LT5QW4IcHgKYqzdz2ZmOnk+f4wmMmitcivTdIZCL8Z0cxr7BJlOh5JZ/V5uj
WUXeNa+ttW0RKKBlg9T+wj0JvwoJBPZTmsMAy3tI9tjLg3DwGYKsflbFeU2tebXI
gncGFZ/dFxj331GGtq3kz1PzAUYf2wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB1
8HgJ0fu6/pCrDxAm90eESFjmaTFyTN8q00zhq4Cb3zAT9KMWzAygkZ9n4ZFgELPo
7kBE2H6RcDdoBmjVYd8HnBloDdYzYbncKgt5YBvxRaMSF4/l65BM8wjatyXErqnH
QLLTRe5AuF0/F0KtPeDQ2JFVu8dZ35W3fyKGPRsEdVOSCTHROmqpGhZCpscyUP4W
Hb0dBTESQ9mQHw14OCaaahARd0X5WdcA/E+m0fpGqj1rQCXS+PrRcSLe1E1hqPlK
q/hXSXD5nybwipktELvJCbB7l4HmJr2pIpldeR5+ef68Cs8hqs6DRlsJX9sK2ng+
TFe5v6SCarqZ9kFvr6Yp
-----END CERTIFICATE-----

@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC8zCCAdugAwIBAgIUBs6nVXQXhrFbClub3aSLg72/DiYwDQYJKoZIhvcNAQEL
BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJTEQMA4GA1UECgwHU2NhbGl0eTER
MA8GA1UEAwwISm9obiBEb2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQC6neSYoBoWh/i2mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4L
jLLRsPGt9KkJlUhHGWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jt
UDnQUwshj+hRFe0iKAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsH
PVju9yZBYzYhwAMyYFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFip
yR2Fh3WGSwWh45HgMT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+p
SMLm0T4iNxedQWBtDM7ts4EjAgMBAAGjGjAYMBYGA1UdJQEB/wQMMAoGCCsGAQUF
BwMCMA0GCSqGSIb3DQEBCwUAA4IBAQCMi9HEhZc5jHJMj18Wq00fZy4O9XtjCe0J
nntW9tzi3rTQcQWKA7i9uVdDoCg+gMFVxWMvV7luFEUc/VYV1v8hFfbIFygzFsZY
xwv4GQaIwbsgzD+oziia53w0FSuNL0uE0MeKvrt3yzHxCxylHyl+TQd/UdAtAo+k
RL1sI0mBZx5qo6d1J7ZMCxzAGaT7KjnJvziFr/UbfSNnwDsxsUwGaI1ZeAxJN8DI
zTrg3f3lrrmHcauEgKnuQwIqaMZR6veG6RkjtcYSlJYID1irkE6njs7+wivOAkzt
fBt/0PD76FmAI0VArgU/zDB8dGyYzrq39W749LuEfm1TPmlnUtDr
-----END CERTIFICATE-----

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6neSYoBoWh/i2
mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4LjLLRsPGt9KkJlUhH
GWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jtUDnQUwshj+hRFe0i
KAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsHPVju9yZBYzYhwAMy
YFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFipyR2Fh3WGSwWh45Hg
MT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+pSMLm0T4iNxedQWBt
DM7ts4EjAgMBAAECggEANNXdUeUKXdSzcycPV/ea/c+0XFcy8e9B46lfQTpTqQOx
xD8GbWD1L/gdk6baJgT43+ukEWdSsJbmdtLXti29Ta8OF2VtIDhIbCVtvs3dq3zt
vrvugsiVDr8nkP306qOrKrNIVIFE+igmEmSaXsu/h/33ladxeeV9/s2DC7NOOjWN
Mu4KYr5BBbu3qAavdzbrcz7Sch+GzsYqK/pBounCTQu3o9E4TSUcmcsasWmtHN3u
e6G2UjObdzEW7J0wWvvtJ0wHQUVRueHfqwqKf0dymcZ3xOlx3ZPhKPz5n4F1UGUt
RQaNazqs5SzZpUgDuPw4k8h/aCHK21Yexw/l4+O9KQKBgQD1WZSRK54zFoExBQgt
OZSBNZW3Ibti5lSiF0M0g+66yNZSWfPuABEH0tu5CXopdPDXo4kW8NLGEqQStWTX
RGK0DE9buEL3eebOfjIdS2IZ3t3dX3lMypplVCj4HzAgITlweSH1LLTyAtaaOpwa
jksqfcn5Zw+XGkyc6GBBVaZetQKBgQDCt6Xf/g26+zjvHscjdzsfBhnYvTOrr6+F
xqFFxOEOocGr+mL7UTAs+a9m/6lOWhlagk+m+TIZNL8o3IN7KFTYxPYPxTiewgVE
rIm3JBmPxRiPn01P3HrtjaqfzsXF30j3ele7ix5OxieZq4vsW7ZXP3GZE34a08Ov
12sE1DlvdwKBgQDzpYQOLhyqazzcqzyVfMrnDYmiFVN7QXTmiudobWRUBUIhAcdl
oJdJB7K/rJOuO704x+RJ7dnCbZyWH6EGzZifaGIemXuXO21jvpqR0NyZCGOXhUp2
YfS1j8AntwEZxyS9du2sBjui4gKvomiHTquChOxgSmKHEcznPTTpbN8MyQKBgF5F
LVCZniolkLXsL7tS8VOez4qoZ0i6wP7CYLf3joJX+/z4N023S9yqcaorItvlMRsp
tciAIyoi6F2vDRTmPNXJ3dtav4PVKVnLMs1w89MwOCjoljSQ6Q7zpGTEZenbpWbz
W2BYBS9cLjXu4MpoyInLFINo9YeleLs8TvrCiKAXAoGBANsduqLnlUW/f5zDb5Fe
SB51+KhBjsVIeYmU+8xtur9Z7IxZXK28wpoEsm7LmX7Va5dERjI+tItBiJ5+Unu1
Xs2ljDg35ARKHs0dWBJGpbnZg4dbT6xpIL4YMPXm1Zu++PgRpxPIMn646xqd8GlH
bavm6Km/fXNG58xus+EeLpV5
-----END PRIVATE KEY-----

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3Gvumr+WfNV5n
PjPnZhzcN5o+qM19FLJblw9SVPsAFcVvv62NrsrYLu4NhUr5D7bIkJDBgljjtook
H757Bqb23ZXxfLRnxL7QjYiYLC7J6KFoP9/q4llHOazJx0QIfbYEDq0I+VAueVuh
GJnMjqtwi9cBtQy8u8WbefJgiz2qJ0TFGd/IyMhKScxX7zctPlBbghweApirN3PZ
mY6eT5/jCYyaK1yK9N0hkIvxnRzGvsEmU6Hkln9Xm6NZRd41r621bREooGWD1P7C
PQm/CgkE9lOawwDLe0j22MuDcPAZgqx+VsV5Ta15tciCdwYVn90XGPffUYa2reTP
U/MBRh/bAgMBAAECggEABCvcMcbuDztzBB0Zp5re63Fk1SqZS9Et4wJE+hYvhaf5
UHtoY8LoohYnnC0+MQBXpKgOdCoZBk8BRKNofnr/UL5pjQ/POFH2GuAujXDsO/NN
wgc6fapcaE/7DLm6ZgsfG2aOMJclaXmgScI6trtFUpIM+t/6A06vyMP1bpeddwPW
Fqu7NvpDiEcTRUGd+z1JooYgUhGgC7peYUx5+9zqFrwoDBKxnUOnz3BkDsXBy3qm
65Vu0BSjuJzf6vVMpNGUHY6JXjopVNWku+JAX0wD+iikOd5sziNVdIj1fnZ+IHIf
7G5h5owHpvSGzJFQ18/g5VHtJdCm+4WQSnbSJRsCAQKBgQDu4IH8yspyeH44fhoS
PAp/OtILqSP+Da0zAp2LbhrOgyzyuSTdEAYyptqjqHS6QkB1Bu1H44FS0BYUxRXc
iu2e9AndiLVCGngsE7TpA/ZVLN1B0LEZEHjM6p4d6zZM6iveKVnPAOkTWTBAgzCt
b31nj4jL8PdlPKQil1AMrOlRAQKBgQDEOwshzIdr2Iy6B/n4CuBViEtwnbAd5f/c
atA9bcfF8kCahokJsI4eCCLgBwDZpYKD+v0AwOBlacF6t6TX+vdlJsi5EP7uxZ22
ILsuWqVm/0H77PACuckc5/qLZoGGC81l0DhnpoeMEb6r/TKOo5xAK1gxdlwNNrq+
nP1zdZnU2wKBgBAS92xFUR4m0YeHpMV5WNN658t1FEDyNqdqE6PgQtmGpi2nG73s
aB5cb/X3TfOCpce6MZlWy8sAyZuYL4Jprte1YDySCHBsS43bvZ64b4kHvdPB8UjY
fOh9GSq2Oy8tysnmSm7NhuGQbNjKeyoQiIXBeNkQW/VqATl6qR5RPFoBAoGACNqV
JQBCd/Y8W0Ry3eM3vgQ5SyqCQMcY5UwYez0Rz3efvJknY72InAhH8o2+VxOlsOjJ
M5iAR3MfHLdeg7Q6J2E5m0gOCJ34ALi3WV8TqXMI+iH1rlnNnjVFU7bbTz4HFXnw
oZSc9w/x53a0KkVtjmOmRg0OGDaI9ILG2MfMmhMCgYB8ZqJtX8qZ2TqKU3XdLZ4z
T2N7xMFuKohWP420r5jKm3Xw85IC+y1SUTB9XGcL79r2eJzmzmdKQ3A3sf3oyUH3
RdYWxtKcZ5PAE8hVRtn1ETZqUgxASGOUn/6w0npkYSOXPU5bc0W6RSLkjES0i+c3
fv3OMNI8qpmQhEjpHHQS1g==
-----END PRIVATE KEY-----

@@ -1,3 +0,0 @@
#!/bin/sh

python3 /usr/local/bin/run_server.py 2>&1 | tee -a /artifacts/pykmip.log

@@ -1,168 +0,0 @@
{
    "example": {
        "preset": {
            "CERTIFICATE": {
                "LOCATE": "ALLOW_ALL",
                "CHECK": "ALLOW_ALL",
                "GET": "ALLOW_ALL",
                "GET_ATTRIBUTES": "ALLOW_ALL",
                "GET_ATTRIBUTE_LIST": "ALLOW_ALL",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_ALL",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            },
            "SYMMETRIC_KEY": {
                "REKEY": "ALLOW_OWNER",
                "REKEY_KEY_PAIR": "ALLOW_OWNER",
                "DERIVE_KEY": "ALLOW_OWNER",
                "LOCATE": "ALLOW_OWNER",
                "CHECK": "ALLOW_OWNER",
                "GET": "ALLOW_OWNER",
                "GET_ATTRIBUTES": "ALLOW_OWNER",
                "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_OWNER",
                "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            },
            "PUBLIC_KEY": {
                "LOCATE": "ALLOW_ALL",
                "CHECK": "ALLOW_ALL",
                "GET": "ALLOW_ALL",
                "GET_ATTRIBUTES": "ALLOW_ALL",
                "GET_ATTRIBUTE_LIST": "ALLOW_ALL",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_ALL",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            },
            "PRIVATE_KEY": {
                "REKEY": "ALLOW_OWNER",
                "REKEY_KEY_PAIR": "ALLOW_OWNER",
                "DERIVE_KEY": "ALLOW_OWNER",
                "LOCATE": "ALLOW_OWNER",
                "CHECK": "ALLOW_OWNER",
                "GET": "ALLOW_OWNER",
                "GET_ATTRIBUTES": "ALLOW_OWNER",
                "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_OWNER",
                "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            },
            "SPLIT_KEY": {
                "REKEY": "ALLOW_OWNER",
                "REKEY_KEY_PAIR": "ALLOW_OWNER",
                "DERIVE_KEY": "ALLOW_OWNER",
                "LOCATE": "ALLOW_OWNER",
                "CHECK": "ALLOW_OWNER",
                "GET": "ALLOW_OWNER",
                "GET_ATTRIBUTES": "ALLOW_OWNER",
                "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_OWNER",
                "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            },
            "TEMPLATE": {
                "LOCATE": "ALLOW_OWNER",
                "GET": "ALLOW_OWNER",
                "GET_ATTRIBUTES": "ALLOW_OWNER",
                "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER"
            },
            "SECRET_DATA": {
                "REKEY": "ALLOW_OWNER",
                "REKEY_KEY_PAIR": "ALLOW_OWNER",
                "DERIVE_KEY": "ALLOW_OWNER",
                "LOCATE": "ALLOW_OWNER",
                "CHECK": "ALLOW_OWNER",
                "GET": "ALLOW_OWNER",
                "GET_ATTRIBUTES": "ALLOW_OWNER",
                "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_OWNER",
                "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            },
            "OPAQUE_DATA": {
                "REKEY": "ALLOW_OWNER",
                "REKEY_KEY_PAIR": "ALLOW_OWNER",
                "DERIVE_KEY": "ALLOW_OWNER",
                "LOCATE": "ALLOW_OWNER",
                "CHECK": "ALLOW_OWNER",
                "GET": "ALLOW_OWNER",
                "GET_ATTRIBUTES": "ALLOW_OWNER",
                "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_OWNER",
                "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            },
            "PGP_KEY": {
                "REKEY": "ALLOW_OWNER",
                "REKEY_KEY_PAIR": "ALLOW_OWNER",
                "DERIVE_KEY": "ALLOW_OWNER",
                "LOCATE": "ALLOW_OWNER",
                "CHECK": "ALLOW_OWNER",
                "GET": "ALLOW_OWNER",
                "GET_ATTRIBUTES": "ALLOW_OWNER",
                "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
                "ADD_ATTRIBUTE": "ALLOW_OWNER",
                "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
                "DELETE_ATTRIBUTE": "ALLOW_OWNER",
                "OBTAIN_LEASE": "ALLOW_OWNER",
                "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
                "ACTIVATE": "ALLOW_OWNER",
                "REVOKE": "ALLOW_OWNER",
                "DESTROY": "ALLOW_OWNER",
                "ARCHIVE": "ALLOW_OWNER",
                "RECOVER": "ALLOW_OWNER"
            }
        }
    }
}

@@ -1,15 +0,0 @@
[server]
hostname=0.0.0.0
port=5696
certificate_path=/ssl/kmip-cert.pem
key_path=/ssl/kmip-key.pem
ca_path=/ssl/kmip-ca.pem
auth_suite=TLS1.2
policy_path=/etc/pykmip/policies
enable_tls_client_auth=True
database_path=/pykmip/pykmip.db
tls_cipher_suites=
    TLS_RSA_WITH_AES_128_CBC_SHA256
    TLS_RSA_WITH_AES_256_CBC_SHA256
    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
logging_level=DEBUG

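For reference, a minimal PyKMIP client sketch matching this server configuration; since enable_tls_client_auth is set, the client presents the "John Doe" certificate signed by the same root CA. The certs/ paths and the pykmip.local host name are assumptions taken from the files and docker-compose entries above:

from kmip.core import enums
from kmip.pie import client

# Connect with mutual TLS using the certs generated by create_certs.py.
with client.ProxyKmipClient(
        hostname="pykmip.local",
        port=5696,
        cert="certs/kmip-client-cert.pem",
        key="certs/kmip-client-key.pem",
        ca="certs/kmip-ca.pem") as c:
    # Create a 256-bit AES key and print its server-assigned UID.
    key_id = c.create(enums.CryptographicAlgorithm.AES, 256)
    print("created AES key", key_id)
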
@@ -1,35 +0,0 @@
name: Test alerts

on:
  push:
    branches-ignore:
      - 'development/**'
      - 'q/*/**'

jobs:
  run-alert-tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        tests:
          - name: 1 minute interval tests
            file: monitoring/alerts.test.yaml

          - name: 10 seconds interval tests
            file: monitoring/alerts.10s.test.yaml

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Render and test ${{ matrix.tests.name }}
        uses: scality/action-prom-render-test@1.0.3
        with:
          alert_file_path: monitoring/alerts.yaml
          test_file_path: ${{ matrix.tests.file }}
          alert_inputs: |
            namespace=zenko
            service=artesca-data-connector-s3api-metrics
            reportJob=artesca-data-ops-report-handler
            replicas=3
          github_token: ${{ secrets.GITHUB_TOKEN }}

@@ -1,25 +0,0 @@
---
name: codeQL

on:
  push:
    branches: [w/**, q/*]
  pull_request:
    branches: [development/*, stabilization/*, hotfix/*]
  workflow_dispatch:

jobs:
  analyze:
    name: Static analysis with CodeQL
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: javascript, python, ruby

      - name: Build and analyze
        uses: github/codeql-action/analyze@v3

@@ -1,16 +0,0 @@
---
name: dependency review

on:
  pull_request:
    branches: [development/*, stabilization/*, hotfix/*]

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@v4

      - name: 'Dependency Review'
        uses: actions/dependency-review-action@v4

@@ -1,80 +0,0 @@
---
name: release
run-name: release ${{ inputs.tag }}

on:
  workflow_dispatch:
    inputs:
      tag:
        description: 'Tag to be released'
        required: true

env:
  PROJECT_NAME: ${{ github.event.repository.name }}

jobs:
  build-federation-image:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Build and push image for federation
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .
          file: images/svc-base/Dockerfile
          tags: |
            ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
          cache-from: type=gha,scope=federation
          cache-to: type=gha,mode=max,scope=federation

  release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}

      - name: Push dashboards into the production namespace
        run: |
          oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
            dashboard.json:application/grafana-dashboard+json \
            alerts.yaml:application/prometheus-alerts+yaml
        working-directory: monitoring

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Create Release
        uses: softprops/action-gh-release@v2
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          name: Release ${{ github.event.inputs.tag }}
          tag_name: ${{ github.event.inputs.tag }}
          generate_release_notes: true
          target_commitish: ${{ github.sha }}

@@ -1,533 +0,0 @@
---
name: tests

on:
  workflow_dispatch:

  push:
    branches-ignore:
      - 'development/**'
      - 'q/*/**'

env:
  # Secrets
  azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackend_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
  azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
  azurebackend2_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
  azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
  b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
  b2backend_B2_STORAGE_ACCESS_KEY: >-
    ${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
  GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
  AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
  AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
  AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
  AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
  AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
  AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
  b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
  gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
  gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
  gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
  gcpbackendmismatch_GCP_SERVICE_KEY: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
  gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  # Configs
  ENABLE_LOCAL_CACHE: "true"
  REPORT_TOKEN: "report-token-1"
  REMOTE_MANAGEMENT_DISABLE: "1"
  # https://github.com/git-lfs/git-lfs/issues/5749
  GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
  linting-coverage:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '16'
          cache: yarn
      - name: install dependencies
        run: yarn install --frozen-lockfile --network-concurrency 1
      - uses: actions/setup-python@v5
        with:
          python-version: '3.9'
      - uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip
      - name: Install python deps
        run: pip install flake8
      - name: Lint Javascript
        run: yarn run --silent lint -- --max-warnings 0
      - name: Lint Markdown
        run: yarn run --silent lint_md
      - name: Lint python
        run: flake8 $(git ls-files "*.py")
      - name: Lint Yaml
        run: yamllint -c yamllint.yml $(git ls-files "*.yml")
      - name: Unit Coverage
        run: |
          set -ex
          mkdir -p $CIRCLE_TEST_REPORTS/unit
          yarn test
          yarn run test_legacy_location
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
          CIRCLE_TEST_REPORTS: /tmp
          CIRCLE_ARTIFACTS: /tmp
          CI_REPORTS: /tmp
      - name: Unit Coverage logs
        run: find /tmp/unit -exec cat {} \;
      - name: preparing junit files for upload
        run: |
          mkdir -p artifacts/junit
          find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
        if: always()
      - name: Upload files to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: artifacts
        if: always()

  build:
    runs-on: ubuntu-20.04
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Build and push cloudserver image
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .
          provenance: false
          tags: |
            ghcr.io/${{ github.repository }}:${{ github.sha }}
          labels: |
            git.repository=${{ github.repository }}
            git.commit-sha=${{ github.sha }}
          cache-from: type=gha,scope=cloudserver
          cache-to: type=gha,mode=max,scope=cloudserver
      - name: Build and push pykmip image
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .github/pykmip
          tags: |
            ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
          labels: |
            git.repository=${{ github.repository }}
            git.commit-sha=${{ github.sha }}
          cache-from: type=gha,scope=pykmip
          cache-to: type=gha,mode=max,scope=pykmip
      - name: Build and push MongoDB
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .github/docker/mongodb
          tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
          cache-from: type=gha,scope=mongodb
          cache-to: type=gha,mode=max,scope=mongodb

  multiple-backend:
    runs-on: ubuntu-latest
    needs: build
    env:
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      S3BACKEND: mem
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      S3DATA: multiple
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Login to Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile sproxyd up -d
        working-directory: .github/docker
      - name: Run multiple backend test
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          bash wait_for_local_port.bash 81 40
          yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  mongo-v0-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: mem
      MPU_TESTING: "yes"
      S3METADATA: mongodb
      S3KMS: file
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      DEFAULT_BUCKET_KEY_FORMAT: v0
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile mongo up -d
        working-directory: .github/docker
      - name: Run functional tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  mongo-v1-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: mem
      MPU_TESTING: "yes"
      S3METADATA: mongodb
      S3KMS: file
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      DEFAULT_BUCKET_KEY_FORMAT: v1
      METADATA_MAX_CACHED_BUCKETS: 1
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile mongo up -d
        working-directory: .github/docker
      - name: Run functional tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
          yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  file-ft-tests:
    strategy:
      matrix:
        include:
          - job-name: file-ft-tests
    name: ${{ matrix.job-name }}
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      MPU_TESTING: "yes"
      JOB_NAME: ${{ matrix.job-name }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup matrix job artifacts directory
        shell: bash
        run: |
          set -exu
          mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
      - name: Setup CI services
        run: docker compose up -d
        working-directory: .github/docker
      - name: Run file ft tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  utapi-v2-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      ENABLE_UTAPI_V2: t
      S3BACKEND: mem
      BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose up -d
        working-directory: .github/docker
      - name: Run file utapi v2 tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  quota-tests:
    runs-on: ubuntu-latest
    needs: build
    strategy:
      matrix:
        inflights:
          - name: "With Inflights"
            value: "true"
          - name: "Without Inflights"
            value: "false"
    env:
      S3METADATA: mongodb
      S3BACKEND: mem
      S3QUOTA: scuba
      QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
      SCUBA_HOST: localhost
      SCUBA_PORT: 8100
      SCUBA_HEALTHCHECK_FREQUENCY: 100
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile mongo up -d
        working-directory: .github/docker
      - name: Run quota tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()

  kmip-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      MPU_TESTING: "yes"
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Copy KMIP certs
        run: cp -r ./certs /tmp/ssl-kmip
        working-directory: .github/pykmip
      - name: Setup CI services
        run: docker compose --profile pykmip up -d
        working-directory: .github/docker
      - name: Run file KMIP tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          bash wait_for_local_port.bash 5696 40
          yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
|
||||
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||
source: /tmp/artifacts
|
||||
if: always()
|
||||
|
||||
ceph-backend-test:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
env:
|
||||
S3BACKEND: mem
|
||||
S3DATA: multiple
|
||||
S3KMS: file
|
||||
CI_CEPH: 'true'
|
||||
MPU_TESTING: "yes"
|
||||
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
|
||||
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
|
||||
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
|
||||
JOB_NAME: ${{ github.job }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Login to GitHub Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ github.token }}
|
||||
- name: Setup CI environment
|
||||
uses: ./.github/actions/setup-ci
|
||||
- uses: ruby/setup-ruby@v1
|
||||
with:
|
||||
ruby-version: '2.5.9'
|
||||
- name: Install Ruby dependencies
|
||||
run: |
|
||||
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
|
||||
- name: Install Java dependencies
|
||||
run: |
|
||||
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
|
||||
- name: Setup CI services
|
||||
run: docker compose --profile ceph up -d
|
||||
working-directory: .github/docker
|
||||
env:
|
||||
S3METADATA: mongodb
|
||||
- name: Run Ceph multiple backend tests
|
||||
run: |-
|
||||
set -ex -o pipefail;
|
||||
bash .github/ceph/wait_for_ceph.sh
|
||||
bash wait_for_local_port.bash 27018 40
|
||||
bash wait_for_local_port.bash 8000 40
|
||||
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
|
||||
env:
|
||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
|
||||
S3METADATA: mem
|
||||
- name: Run Java tests
|
||||
run: |-
|
||||
set -ex -o pipefail;
|
||||
mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
|
||||
working-directory: tests/functional/jaws
|
||||
- name: Run Ruby tests
|
||||
run: |-
|
||||
set -ex -o pipefail;
|
||||
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
|
||||
working-directory: tests/functional/fog
|
||||
- name: Run Javascript AWS SDK tests
|
||||
run: |-
|
||||
set -ex -o pipefail;
|
||||
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
|
||||
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
|
||||
env:
|
||||
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
|
||||
S3BACKEND: file
|
||||
S3VAULT: mem
|
||||
S3METADATA: mongodb
|
||||
- name: Upload logs to artifacts
|
||||
uses: scality/action-artifacts@v4
|
||||
with:
|
||||
method: upload
|
||||
url: https://artifacts.scality.net
|
||||
user: ${{ secrets.ARTIFACTS_USER }}
|
||||
password: ${{ secrets.ARTIFACTS_PASSWORD }}
|
||||
source: /tmp/artifacts
|
||||
if: always()
|
|
@ -28,8 +28,3 @@ _build

# Dependency directory
# https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git
node_modules
yarn.lock
.tox

# Junit directory
junit
Dockerfile
@ -1,60 +1,28 @@
ARG NODE_VERSION=16.20-bullseye-slim

FROM node:${NODE_VERSION} as builder

WORKDIR /usr/src/app

RUN apt-get update \
    && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        curl \
        git \
        gnupg2 \
        jq \
        python3 \
        ssh \
        wget \
        libffi-dev \
        zlib1g-dev \
    && apt-get clean \
    && mkdir -p /root/ssh \
    && ssh-keyscan -H github.com > /root/ssh/known_hosts

ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1

################################################################################
FROM node:${NODE_VERSION}

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        jq \
    && rm -rf /var/lib/apt/lists/*

ENV NO_PROXY localhost,127.0.0.1
ENV no_proxy localhost,127.0.0.1

EXPOSE 8000
EXPOSE 8002

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        jq \
        tini \
    && rm -rf /var/lib/apt/lists/*
FROM node:6-slim
MAINTAINER Giorgio Regni <gr@scality.com>

WORKDIR /usr/src/app

# Keep the .git directory in order to properly report version
COPY . /usr/src/app
COPY --from=builder /usr/src/app/node_modules ./node_modules/
COPY ./package.json .

RUN apt-get update \
    && apt-get install -y jq python git build-essential --no-install-recommends \
    && npm install --production \
    && apt-get autoremove --purge -y python git build-essential \
    && rm -rf /var/lib/apt/lists/* \
    && npm cache clear \
    && rm -rf ~/.node-gyp \
    && rm -rf /tmp/npm-*

COPY ./ ./

VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]

ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
ENV NO_PROXY localhost,127.0.0.1
ENV no_proxy localhost,127.0.0.1

CMD [ "yarn", "start" ]
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "npm", "start" ]

EXPOSE 8000
@ -7,16 +7,16 @@ COPY . /usr/src/app

RUN apt-get update \
    && apt-get install -y jq python git build-essential --no-install-recommends \
    && yarn install --production \
    && npm install --production \
    && apt-get autoremove --purge -y python git build-essential \
    && rm -rf /var/lib/apt/lists/* \
    && yarn cache clean \
    && npm cache clear \
    && rm -rf ~/.node-gyp \
    && rm -rf /tmp/yarn-*
    && rm -rf /tmp/npm-*

ENV S3BACKEND mem

ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
CMD [ "npm", "start" ]

EXPOSE 8000
@ -1,7 +1,6 @@
# S3 Healthcheck

Scality S3 exposes a healthcheck route `/live` on the port used
for the metrics (defaults to port 8002) which returns a
Scality S3 exposes a healthcheck route `/_/healthcheck` which returns a
response with HTTP code

- 200 OK
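
Either route is easy to exercise by hand; a quick sketch with `curl` (ports are the defaults mentioned above, and which route exists depends on which side of this change you are running):

```shell
# /live variant, served on the metrics listener
curl -i http://localhost:8002/live
# /_/healthcheck variant, served on the S3 port
curl -i http://localhost:8000/_/healthcheck
```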
README.md
@ -1,7 +1,12 @@
# Zenko CloudServer with Vitastor Backend
# Zenko CloudServer

![Zenko CloudServer logo](res/scality-cloudserver-logo.png)

[![CircleCI][badgepub]](https://circleci.com/gh/scality/S3)
[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/S3)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/scality/s3server/)
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)

## Overview

CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible

@ -11,71 +16,126 @@ Scality’s Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud.

This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
backend support.
CloudServer is useful for Developers, either to run as part of a
continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage enabled
applications on the go.

## Quick Start with Vitastor
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)

Vitastor Backend is in experimental status, however you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
it works too 😊.
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)

Installation instructions:
## Docker

### Install Vitastor
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/scality/s3server/)

Refer to the [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
## Contributing

### Install Zenko with Vitastor Backend
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).

- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit=dev` or just `npm install`
- Clone the Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build the Vitastor node.js binding by running `npm install` in the `node-binding` subdirectory of the Vitastor repository.
  You need `node-gyp` and `vitastor-client-dev` (the Vitastor client library) for it to succeed.
- Symlink the Vitastor module into Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
## Installation

### Install and Configure MongoDB
### Dependencies

Refer to the [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
Building and running the Zenko CloudServer requires node.js 6.9.5 and npm v3.
Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).

### Setup Zenko
### Clone source code

- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve the ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json` and adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
  and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
  access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
  You should put correct values for `pool_id` (the pool ID from the second step) and `metadata_image` (from the third step)
  in this file. A consolidated sketch of these steps follows this list.

Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc.)
instead of "locations" (zones like us-east-1) as it was in the original Zenko CloudServer.
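
A consolidated sketch of the setup steps above (names such as `s3-data` and `s3-volume-meta` are simply the examples used in this README; take `pool_id` from the `ls-pools` output):

```shell
# create the data pool and the volume-metadata image
vitastor-cli create-pool s3-data
vitastor-cli ls-pools --detail s3-data   # note the pool ID
vitastor-cli create -s 10G s3-volume-meta

# copy the example configs and edit them
cp config.json.vitastor config.json             # adjust to match your domain
cp authdata.json.example authdata.json          # set S3 access & secret keys
cp locationConfig.json.vitastor locationConfig.json
# then set pool_id and metadata_image in locationConfig.json
```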

### Start Zenko

Start the S3 server with: `node index.js`

If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.

Now you can access your S3 with `s3cmd` or `geesefs`:

```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```
```shell
git clone https://github.com/scality/S3.git
```

```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```

### Install js dependencies

Go to the ./S3 folder,

```shell
npm install
```

# Author & License
If you get an error regarding installation of the diskUsage module,
please install g++.

- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
  (a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
If you get an error regarding level-down bindings, try clearing your npm cache:

```shell
npm cache clear
```

## Run it with a file backend

```shell
npm start
```

This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.

The default access key is accessKey1 with
a secret key of verySecretKey1.

By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:

```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
npm start
```

## Run it with multiple data backends

```shell
export S3DATA='multiple'
npm start
```

This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.

With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:

```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```

If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.

See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
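
As a sketch, the location-constraint header can be sent from any client that supports user metadata; with the AWS CLI it would look like this (bucket, key, and file names are placeholders, and `--metadata` adds the `x-amz-meta-` prefix automatically):

```shell
aws s3api put-object --endpoint-url http://localhost:8000 \
    --bucket mybucket --key mykey --body ./myfile \
    --metadata scal-location-constraint=myLocationConstraint
```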

## Run it with an in-memory backend

```shell
npm run mem_backend
```

This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.

[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
@ -1,2 +1,2 @@
---
theme: jekyll-theme-modernist
theme: jekyll-theme-minimal
@ -13,26 +13,20 @@ function _performSearch(host,
    port,
    bucketName,
    query,
    listVersions,
    accessKey,
    secretKey,
    sessionToken,
    verbose, ssl) {
    const escapedSearch = encodeURIComponent(query);
    const options = {
        host,
        port,
        method: 'GET',
        path: `/${bucketName}/?search=${escapedSearch}${listVersions ? '&&versions' : ''}`,
        path: `/${bucketName}/?search=${escapedSearch}`,
        headers: {
            'Content-Length': 0,
        },
        rejectUnauthorized: false,
        versions: '',
    };
    if (sessionToken) {
        options.headers['x-amz-security-token'] = sessionToken;
    }
    const transport = ssl ? https : http;
    const request = transport.request(options, response => {
        if (verbose) {

@ -61,9 +55,9 @@ function _performSearch(host,
    // generateV4Headers expects request object with path that does not
    // include query
    request.path = `/${bucketName}`;
    const requestData = listVersions ? { search: query, versions: '' } : { search: query };
    auth.client.generateV4Headers(request, requestData, accessKey, secretKey, 's3');
    request.path = `/${bucketName}?search=${escapedSearch}${listVersions ? '&&versions' : ''}`;
    auth.client.generateV4Headers(request, { search: query },
        accessKey, secretKey, 's3');
    request.path = `/${bucketName}?search=${escapedSearch}`;
    if (verbose) {
        logger.info('request headers', { headers: request._headers });
    }

@ -82,17 +76,15 @@ function searchBucket() {
        .version('0.0.1')
        .option('-a, --access-key <accessKey>', 'Access key id')
        .option('-k, --secret-key <secretKey>', 'Secret access key')
        .option('-t, --session-token <sessionToken>', 'Session token')
        .option('-b, --bucket <bucket>', 'Name of the bucket')
        .option('-q, --query <query>', 'Search query')
        .option('-h, --host <host>', 'Host of the server')
        .option('-p, --port <port>', 'Port of the server')
        .option('-s', '--ssl', 'Enable ssl')
        .option('-l, --list-versions', 'List all versions of the objects that meet the search query, ' +
            'otherwise only list the latest version')
        .option('-v, --verbose')
        .parse(process.argv);
    const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } =

    const { host, port, accessKey, secretKey, bucket, query, verbose, ssl } =
        commander;

    if (!host || !port || !accessKey || !secretKey || !bucket || !query) {

@ -101,7 +93,7 @@ function searchBucket() {
        process.exit(1);
    }

    _performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose,
    _performSearch(host, port, bucket, query, accessKey, secretKey, verbose,
        ssl);
}
@ -1,10 +1,7 @@
{
    "port": 8000,
    "listenOn": [],
    "metricsPort": 8002,
    "metricsListenOn": [],
    "replicationGroupId": "RG001",
    "workers": 4,
    "restEndpoints": {
        "localhost": "us-east-1",
        "127.0.0.1": "us-east-1",

@ -42,10 +39,6 @@
        "host": "localhost",
        "port": 8900
    },
    "workflowEngineOperator": {
        "host": "localhost",
        "port": 3001
    },
    "cdmi": {
        "host": "localhost",
        "port": 81,

@ -53,7 +46,7 @@
        "readonly": true
    },
    "bucketd": {
        "bootstrap": ["localhost:9000"]
        "bootstrap": ["localhost"]
    },
    "vaultd": {
        "host": "localhost",

@ -75,10 +68,6 @@
        "host": "localhost",
        "port": 9991
    },
    "pfsClient": {
        "host": "localhost",
        "port": 9992
    },
    "metadataDaemon": {
        "bindAddress": "localhost",
        "port": 9990

@ -87,57 +76,15 @@
        "bindAddress": "localhost",
        "port": 9991
    },
    "pfsDaemon": {
        "bindAddress": "localhost",
        "port": 9992
    },
    "recordLog": {
        "enabled": true,
        "recordLogName": "s3-recordlog"
    },
    "mongodb": {
        "replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020",
        "writeConcern": "majority",
        "replicaSet": "rs0",
        "readPreference": "primary",
        "database": "metadata"
    },
    "authdata": "authdata.json",
    "backends": {
        "auth": "file",
        "data": "file",
        "metadata": "mongodb",
        "kms": "file",
        "quota": "none"
    },
    "externalBackends": {
        "aws_s3": {
            "httpAgent": {
                "keepAlive": false,
                "keepAliveMsecs": 1000,
                "maxFreeSockets": 256,
                "maxSockets": null
            }
        },
        "gcp": {
            "httpAgent": {
                "keepAlive": true,
                "keepAliveMsecs": 1000,
                "maxFreeSockets": 256,
                "maxSockets": null
            }
        }
    },
    "requests": {
        "viaProxy": false,
        "trustedProxyCIDRs": [],
        "extractClientIPFromHeader": ""
    },
    "bucketNotificationDestinations": [
        {
            "resource": "target1",
            "type": "dummy",
            "host": "localhost:6000"
        }
    ]
        "replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020",
        "writeConcern": "majority",
        "replicaSet": "rs0",
        "readPreference": "primary",
        "database": "metadata"
    }
}
@ -1,71 +0,0 @@
{
    "port": 8000,
    "listenOn": [],
    "metricsPort": 8002,
    "metricsListenOn": [],
    "replicationGroupId": "RG001",
    "restEndpoints": {
        "localhost": "STANDARD",
        "127.0.0.1": "STANDARD",
        "yourhostname.ru": "STANDARD"
    },
    "websiteEndpoints": [
        "static.yourhostname.ru"
    ],
    "replicationEndpoints": [ {
        "site": "zenko",
        "servers": ["127.0.0.1:8000"],
        "default": true
    } ],
    "log": {
        "logLevel": "info",
        "dumpLevel": "error"
    },
    "healthChecks": {
        "allowFrom": ["127.0.0.1/8", "::1"]
    },
    "backends": {
        "metadata": "mongodb"
    },
    "mongodb": {
        "replicaSetHosts": "127.0.0.1:27017",
        "writeConcern": "majority",
        "replicaSet": "rs0",
        "readPreference": "primary",
        "database": "s3",
        "authCredentials": {
            "username": "s3",
            "password": ""
        }
    },
    "externalBackends": {
        "aws_s3": {
            "httpAgent": {
                "keepAlive": false,
                "keepAliveMsecs": 1000,
                "maxFreeSockets": 256,
                "maxSockets": null
            }
        },
        "gcp": {
            "httpAgent": {
                "keepAlive": true,
                "keepAliveMsecs": 1000,
                "maxFreeSockets": 256,
                "maxSockets": null
            }
        }
    },
    "requests": {
        "viaProxy": false,
        "trustedProxyCIDRs": [],
        "extractClientIPFromHeader": ""
    },
    "bucketNotificationDestinations": [
        {
            "resource": "target1",
            "type": "dummy",
            "host": "localhost:6000"
        }
    ]
}
constants.js
@ -86,51 +86,40 @@ const constants = {
    // In testing, AWS seems to allow up to 88 more bytes, so we do the same.
    maximumMetaHeadersSize: 2136,

    // Maximum HTTP headers size allowed
    maxHttpHeadersSize: 14122,

    // hex digest of sha256 hash of empty string:
    emptyStringHash: crypto.createHash('sha256')
        .update('', 'binary').digest('hex'),

    // Queries supported by AWS that we do not currently support.
    // Non-bucket queries
    unsupportedQueries: [
        'accelerate',
        'analytics',
        'inventory',
        'list-type',
        'logging',
        'metrics',
        'policyStatus',
        'publicAccessBlock',
        'notification',
        'policy',
        'requestPayment',
        'restore',
        'torrent',
    ],

    // Headers supported by AWS that we do not currently support.
    unsupportedHeaders: [
        'x-amz-server-side-encryption',
        'x-amz-server-side-encryption-customer-algorithm',
        'x-amz-server-side-encryption-aws-kms-key-id',
        'x-amz-server-side-encryption-context',
        'x-amz-server-side-encryption-customer-key',
        'x-amz-server-side-encryption-customer-key-md5',
    ],

    // user metadata header to set object locationConstraint
    objectLocationConstraintHeader: 'x-amz-storage-class',
    lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
    objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
    legacyLocations: ['sproxyd', 'legacy'],
    // declare here all existing service accounts and their properties
    // (if any, otherwise an empty object)
    serviceAccountProperties: {
        replication: {},
        lifecycle: {},
        gc: {},
        'md-ingestion': {
            canReplicate: true,
        },
    },
    /* eslint-disable camelcase */
    externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
    externalBackends: { aws_s3: true, azure: true, gcp: true },
    replicationBackends: { aws_s3: true, azure: true, gcp: true },
    // some of the available data backends (if called directly rather
    // than through the multiple backend gateway) need a key provided
    // as a string as first parameter of the get/delete methods.

@ -149,100 +138,6 @@ const constants = {
    azureAccountNameRegex: /^[a-z0-9]{3,24}$/,
    base64Regex: new RegExp('^(?:[A-Za-z0-9+/]{4})*' +
        '(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$'),
    productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
    // location constraint delimiter
    zenkoSeparator: ':',
    // user metadata applied on zenko objects
    zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
    bucketOwnerActions: [
        'bucketDeleteCors',
        'bucketDeleteLifecycle',
        'bucketDeletePolicy',
        'bucketDeleteReplication',
        'bucketDeleteWebsite',
        'bucketGetCors',
        'bucketGetLifecycle',
        'bucketGetLocation',
        'bucketGetPolicy',
        'bucketGetReplication',
        'bucketGetVersioning',
        'bucketGetWebsite',
        'bucketPutCors',
        'bucketPutLifecycle',
        'bucketPutPolicy',
        'bucketPutReplication',
        'bucketPutVersioning',
        'bucketPutWebsite',
        'objectDeleteTagging',
        'objectGetTagging',
        'objectPutTagging',
        'objectPutLegalHold',
        'objectPutRetention',
    ],
    // response header to be sent when there are invalid
    // user metadata in the object's metadata
    invalidObjectUserMetadataHeader: 'x-amz-missing-meta',
    // Bucket specific queries supported by AWS that we do not currently support
    // these queries may or may not be supported at object level
    unsupportedBucketQueries: [
    ],
    suppressedUtapiEventFields: [
        'object',
        'location',
        'versionId',
    ],
    allowedUtapiEventFilterFields: [
        'operationId',
        'location',
        'account',
        'user',
        'bucket',
    ],
    arrayOfAllowed: [
        'objectPutTagging',
        'objectPutLegalHold',
        'objectPutRetention',
    ],
    allowedUtapiEventFilterStates: ['allow', 'deny'],
    allowedRestoreObjectRequestTierValues: ['Standard'],
    lifecycleListing: {
        CURRENT_TYPE: 'current',
        NON_CURRENT_TYPE: 'noncurrent',
        ORPHAN_DM_TYPE: 'orphan',
    },
    multiObjectDeleteConcurrency: 50,
    maxScannedLifecycleListingEntries: 10000,
    overheadField: [
        'content-length',
        'owner-id',
        'versionId',
        'isNull',
        'isDeleteMarker',
    ],
    unsupportedSignatureChecksums: new Set([
        'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
        'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
        'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
        'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
    ]),
    supportedSignatureChecksums: new Set([
        'UNSIGNED-PAYLOAD',
        'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
    ]),
    ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
    ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
    // The AWS assumed Role resource type
    assumedRoleArnResourceType: 'assumed-role',
    // Session name of the backbeat lifecycle assumed role session.
    backbeatLifecycleSessionName: 'backbeat-lifecycle',
    actionsToConsiderAsObjectPut: [
        'initiateMultipartUpload',
        'objectPutPart',
        'completeMultipartUpload',
    ],
    // if requester is not bucket owner, bucket policy actions should be denied with
    // MethodNotAllowed error
    onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
};

module.exports = constants;
@ -4,30 +4,16 @@ const arsenal = require('arsenal');
const { config } = require('./lib/Config.js');
const logger = require('./lib/utilities/logger');

process.on('uncaughtException', err => {
    logger.fatal('caught error', {
        error: err.message,
        stack: err.stack,
        workerId: this.worker ? this.worker.id : undefined,
        workerPid: this.worker ? this.worker.process.pid : undefined,
    });
    process.exit(1);
});

if (config.backends.data === 'file' ||
    (config.backends.data === 'multiple' &&
     config.backends.metadata !== 'scality')) {
    const dataServer = new arsenal.network.rest.RESTServer({
        bindAddress: config.dataDaemon.bindAddress,
        port: config.dataDaemon.port,
        dataStore: new arsenal.storage.data.file.DataFileStore({
            dataPath: config.dataDaemon.dataPath,
            log: config.log,
            noSync: config.dataDaemon.noSync,
            noCache: config.dataDaemon.noCache,
        }),
        log: config.log,
    });
    const dataServer = new arsenal.network.rest.RESTServer(
        { bindAddress: config.dataDaemon.bindAddress,
          port: config.dataDaemon.port,
          dataStore: new arsenal.storage.data.file.DataFileStore(
              { dataPath: config.dataDaemon.dataPath,
                log: config.log }),
          log: config.log });
    dataServer.setup(err => {
        if (err) {
            logger.error('Error initializing REST data server',
@ -71,14 +71,9 @@ fi
if [[ "$LISTEN_ADDR" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsDaemon.bindAddress=\"$LISTEN_ADDR\""
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .listenOn=[\"$LISTEN_ADDR:8000\"]"
fi

if [[ "$REPLICATION_GROUP_ID" ]] ; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .replicationGroupId=\"$REPLICATION_GROUP_ID\""
fi

if [[ "$DATA_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataClient.host=\"$DATA_HOST\""
fi

@ -87,10 +82,6 @@ if [[ "$METADATA_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
fi

if [[ "$PFSD_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsClient.host=\"$PFSD_HOST\""
fi

if [[ "$MONGODB_HOSTS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
fi

@ -147,62 +138,10 @@ if [[ "$CRR_METRICS_PORT" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
fi

if [[ "$WE_OPERATOR_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.host=\"$WE_OPERATOR_HOST\""
fi

if [[ "$WE_OPERATOR_PORT" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.port=$WE_OPERATOR_PORT"
fi

if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi

# external backends http(s) agent config

# AWS
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAlive=$AWS_S3_HTTPAGENT_KEEPALIVE"
fi

if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAliveMsecs=$AWS_S3_HTTPAGENT_KEEPALIVE_MS"
fi

if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi

if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxFreeSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi

# GCP
if [[ "$GCP_HTTPAGENT_KEEPALIVE" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAlive=$GCP_HTTPAGENT_KEEPALIVE"
fi

if [[ "$GCP_HTTPAGENT_KEEPALIVE_MS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAliveMsecs=$GCP_HTTPAGENT_KEEPALIVE_MS"
fi

if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi

if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi

if [[ -n "$BUCKET_DENY_FILTER" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
fi

if [[ "$TESTING_MODE" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
fi

if [[ $JQ_FILTERS_CONFIG != "." ]]; then
    jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
    mv config.json.tmp config.json
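# Illustration (not part of the script): with e.g. MONGODB_HOSTS=mongo1:27017 set,
# the filter accumulated above makes this final step behave like:
#
#   jq '. | .mongodb.replicaSetHosts="mongo1:27017"' config.json > config.json.tmp
#   mv config.json.tmp config.json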
@ -66,7 +66,7 @@ The second section, `"Implementation of Bucket Versioning in
API" <#implementation-of-bucket-versioning-in-api>`__, describes the way
the metadata options are used in the API within S3 actions to create new
versions, update their metadata, and delete them. The management of null
versions and creation of delete markers is also described in this
versions and creation of delete markers are also described in this
section.

Implementation of Bucket Versioning in Metadata

@ -746,7 +746,7 @@ Operation
Startup
~~~~~~~

The simplest deployment is still to launch with yarn start, this will
The simplest deployment is still to launch with npm start, this will
start one instance of the Zenko CloudServer connector and will listen on the
locally bound dmd ports 9990 and 9991 (by default, see below).

@ -755,7 +755,7 @@ command in the Zenko CloudServer directory:

::

    yarn run start_dmd
    npm run start_dmd

This will open two ports:

@ -770,7 +770,7 @@ elsewhere with:

.. code:: sh

    yarn run start_s3server
    npm run start_s3server

Configuration
~~~~~~~~~~~~~
@ -1,146 +0,0 @@
# Bucket Policy Documentation

## Description

Bucket policy is a method of controlling access to a user's account at the
resource level.
There are three associated APIs:

- PUT Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html)
- GET Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETpolicy.html)
- DELETE Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html)

More information on bucket policies in general can be found at
https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html.
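
A minimal sketch of exercising the three APIs with the AWS CLI (the endpoint, bucket name, and `policy.json` file are placeholders for illustration):

```shell
aws s3api put-bucket-policy --endpoint-url http://localhost:8000 \
    --bucket mybucket --policy file://policy.json
aws s3api get-bucket-policy --endpoint-url http://localhost:8000 --bucket mybucket
aws s3api delete-bucket-policy --endpoint-url http://localhost:8000 --bucket mybucket
```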

## Requirements

To prevent loss of access to a bucket, the root owner of a bucket will always
be able to perform any of the three bucket policy-related operations, even
if permission is explicitly denied.
All other users must have permission to perform the desired operation.

## Design

On a PUTBucketPolicy request, the user provides a policy in JSON format.
The policy is evaluated against our policy schema in Arsenal and, once
validated, is stored as part of the bucket's metadata.
On a GETBucketPolicy request, the policy is retrieved from the bucket's
metadata.
On a DELETEBucketPolicy request, the policy is deleted from the bucket's
metadata.

All other APIs are updated to check if a bucket policy is attached to the bucket
the request is made on. If there is a policy, user authorization to perform
the requested action is checked.

### Differences Between Bucket and IAM Policies

IAM policies are attached to an IAM identity and define what actions that
identity is allowed to or denied from doing on what resource.
Bucket policies attach only to buckets and define what actions are allowed or
denied for which principals on that bucket. Permissions specified in a bucket
policy apply to all objects in that bucket unless otherwise specified.

Besides their attachment origins, the main structural difference between
IAM policy and bucket policy is the requirement of a "Principal" element in
bucket policies. This field is redundant in IAM policies.

### Policy Validation

For general guidelines for bucket policy structure, see examples here:
https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html.

Each bucket policy statement object requires at least four keys:
"Effect", "Principal", "Resource", and "Action".

"Effect" defines the effect of the policy and can have a string value of either
"Allow" or "Deny".
"Resource" defines to which bucket or list of buckets a policy is attached.
An object within the bucket is also a valid resource. The element value can be
either a single bucket or object ARN string or an array of ARNs.
"Action" lists which action(s) the policy controls. Its value can also be either
a string or array of S3 APIs. Each action is the API name prepended by "s3:".
"Principal" specifies which user(s) are granted or denied access to the bucket
resource. Its value can be a string or an object containing an array of users.
Valid users can be identified with an account ARN, account id, or user ARN.

There are also two optional bucket policy statement keys: Sid and Condition.

"Sid" stands for "statement id". If this key is not included, one will be
generated for the statement.
"Condition" lists the condition under which a statement will take effect.
The possibilities are as follows:

- ArnEquals
- ArnEqualsIfExists
- ArnLike
- ArnLikeIfExists
- ArnNotEquals
- ArnNotEqualsIfExists
- ArnNotLike
- ArnNotLikeIfExists
- BinaryEquals
- BinaryEqualsIfExists
- BinaryNotEquals
- BinaryNotEqualsIfExists
- Bool
- BoolIfExists
- DateEquals
- DateEqualsIfExists
- DateGreaterThan
- DateGreaterThanEquals
- DateGreaterThanEqualsIfExists
- DateGreaterThanIfExists
- DateLessThan
- DateLessThanEquals
- DateLessThanEqualsIfExists
- DateLessThanIfExists
- DateNotEquals
- DateNotEqualsIfExists
- IpAddress
- IpAddressIfExists
- NotIpAddress
- NotIpAddressIfExists
- Null
- NumericEquals
- NumericEqualsIfExists
- NumericGreaterThan
- NumericGreaterThanEquals
- NumericGreaterThanEqualsIfExists
- NumericGreaterThanIfExists
- NumericLessThan
- NumericLessThanEquals
- NumericLessThanEqualsIfExists
- NumericLessThanIfExists
- NumericNotEquals
- NumericNotEqualsIfExists
- StringEquals
- StringEqualsIfExists
- StringEqualsIgnoreCase
- StringEqualsIgnoreCaseIfExists
- StringLike
- StringLikeIfExists
- StringNotEquals
- StringNotEqualsIfExists
- StringNotEqualsIgnoreCase
- StringNotEqualsIgnoreCaseIfExists
- StringNotLike
- StringNotLikeIfExists

The value of the Condition key will be an object containing the desired
condition name as that key. The value of the inner object can be a string, boolean,
number, or object, depending on the condition.
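
Putting the pieces together, a hypothetical statement using the four required keys plus the optional `Sid` and a `Condition` (the account ID, bucket name, and IP range below are invented for illustration):

```shell
cat > policy.json <<'EOF'
{
    "Version": "2012-10-17",
    "Statement": [{
        "Sid": "AllowPutFromTrustedRange",
        "Effect": "Allow",
        "Principal": { "AWS": "arn:aws:iam::123456789012:root" },
        "Action": "s3:PutObject",
        "Resource": "arn:aws:s3:::mybucket/*",
        "Condition": { "IpAddress": { "aws:SourceIp": "10.0.0.0/24" } }
    }]
}
EOF
```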

## Authorization with Multiple Access Control Mechanisms

In the case where multiple access control mechanisms (such as IAM policies,
bucket policies, and ACLs) refer to the same resource, the principle of
least-privilege is applied. Unless an action is explicitly allowed, access will
by default be denied. An explicit DENY in any policy will trump another
policy's ALLOW for an action. The request will only be allowed if at least one
policy specifies an ALLOW, and there is no overriding DENY.

The following diagram illustrates this logic:

![Access_Control_Authorization_Chart](./images/access_control_authorization.png)
@ -295,51 +295,3 @@ Should force path-style requests even though v3 advertises it does by default.
    $client->createBucket(array(
        'Bucket' => 'bucketphp',
    ));

Go
~~

`AWS Go SDK <https://github.com/aws/aws-sdk-go>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: go

    package main

    import (
        "context"
        "fmt"
        "log"
        "os"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/endpoints"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        os.Setenv("AWS_ACCESS_KEY_ID", "accessKey1")
        os.Setenv("AWS_SECRET_ACCESS_KEY", "verySecretKey1")
        endpoint := "http://localhost:8000"
        timeout := time.Duration(10) * time.Second
        sess := session.Must(session.NewSession())

        // Create a context with a timeout that will abort the upload if it takes
        // more than the passed in timeout.
        ctx, cancel := context.WithTimeout(context.Background(), timeout)
        defer cancel()

        svc := s3.New(sess, &aws.Config{
            Region:   aws.String(endpoints.UsEast1RegionID),
            Endpoint: &endpoint,
        })

        out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
        if err != nil {
            log.Fatal(err)
        } else {
            fmt.Println(out)
        }
    }
@ -14,7 +14,7 @@ Got an idea? Get started!
In order to contribute, please follow the `Contributing
Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__.
If anything is unclear to you, reach out to us on
`forum <https://forum.zenko.io/>`__ or via a GitHub issue.
`slack <https://zenko-io.slack.com/>`__ or via a GitHub issue.

Don't write code? There are other ways to help!
-----------------------------------------------
docs/DOCKER.rst
@ -1,7 +1,11 @@
Docker
======

.. _environment-variables:
- `Environment Variables <#environment-variables>`__
- `Tunables and setup tips <#tunables-and-setup-tips>`__
- `Examples for continuous integration with
  Docker <#continuous-integration-with-docker-hosted CloudServer>`__
- `Examples for going in production with Docker <#in-production-with-docker-hosted CloudServer>`__

Environment Variables
---------------------

@ -11,23 +15,21 @@ S3DATA

S3DATA=multiple
^^^^^^^^^^^^^^^

This variable enables running CloudServer with multiple data backends, defined
Allows you to run Scality Zenko CloudServer with multiple data backends, defined
as regions.
When using multiple data backends, a custom ``locationConfig.json`` file is
mandatory. It will allow you to set custom regions. You will then need to
provide associated rest_endpoints for each custom region in your
``config.json`` file.
`Learn more about multiple backends configuration <../GETTING_STARTED/#location-configuration>`__

For multiple data backends, a custom locationConfig.json file is required.
This file enables you to set custom regions. You must provide associated
rest_endpoints for each custom region in config.json.
If you are using Scality RING endpoints, please refer to your customer
documentation.

`Learn more about multiple-backend configurations <GETTING_STARTED.html#location-configuration>`__

If you are using Scality RING endpoints, refer to your customer documentation.
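
A hedged example of such a run, mounting a custom ``locationConfig.json`` into the container (the mount path and image name mirror other examples on this page; adjust them to your build):

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e S3DATA=multiple \
        -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json zenko/cloudserver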
|
||||
|
||||
Running CloudServer with an AWS S3-Hosted Backend
|
||||
"""""""""""""""""""""""""""""""""""""""""""""""""
|
||||
|
||||
To run CloudServer with an S3 AWS backend, add a new section to the
|
||||
``locationConfig.json`` file with the ``aws_s3`` location type:
|
||||
Running it with an AWS S3 hosted backend
|
||||
""""""""""""""""""""""""""""""""""""""""
|
||||
To run CloudServer with an S3 AWS backend, you will have to add a new section
|
||||
to your ``locationConfig.json`` file with the ``aws_s3`` location type:
|
||||
|
||||
.. code:: json
|
||||
|
||||
|
@ -43,9 +45,10 @@ To run CloudServer with an S3 AWS backend, add a new section to the
|
|||
}
|
||||
(...)
|
||||
|
||||
Edit your AWS credentials file to enable your preferred command-line tool.
|
||||
This file must mention credentials for all backends in use. You can use
|
||||
several profiles if multiple profiles are configured.
|
||||
You will also have to edit your AWS credentials file to be able to use your
|
||||
command line tool of choice. This file should mention credentials for all the
|
||||
backends you're using. You can use several profiles when using multiple
|
||||
profiles.
|
||||
|
||||
.. code:: json
|
||||
|
||||
|
@ -56,124 +59,110 @@ several profiles if multiple profiles are configured.
|
|||
aws_access_key_id={{YOUR_ACCESS_KEY}}
|
||||
aws_secret_access_key={{YOUR_SECRET_KEY}}
|
||||
|
||||
As with locationConfig.json, the AWS credentials file must be mounted at
|
||||
run time: ``-v ~/.aws/credentials:/root/.aws/credentials`` on Unix-like
|
||||
systems (Linux, OS X, etc.), or
|
||||
Just as you need to mount your locationConfig.json, you will need to mount your
|
||||
AWS credentials file at run time:
|
||||
``-v ~/.aws/credentials:/root/.aws/credentials`` on Linux, OS X, or Unix or
|
||||
``-v C:\Users\USERNAME\.aws\credential:/root/.aws/credentials`` on Windows
|
||||
|
||||
.. note:: One account cannot copy to another account with a source and
|
||||
destination on real AWS unless the account associated with the
|
||||
accessKey/secretKey pairs used for the destination bucket has source
|
||||
bucket access privileges. To enable this, update ACLs directly on AWS.
|
||||
NOTE: One account can't copy to another account with a source and
|
||||
destination on real AWS unless the account associated with the
|
||||
access Key/secret Key pairs used for the destination bucket has rights
|
||||
to get in the source bucket. ACL's would have to be updated
|
||||
on AWS directly to enable this.
|
||||
|
||||
S3BACKEND
|
||||
~~~~~~~~~
|
||||
|
||||
S3BACKEND=file
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
For stored file data to persist, you must mount Docker volumes
|
||||
for both data and metadata. See :ref:`In Production with a Docker-Hosted CloudServer <in-production-w-a-Docker-hosted-cloudserver>`
|
||||
When storing file data, for it to be persistent you must mount docker volumes
|
||||
for both data and metadata. See `this section <#using-docker-volumes-in-production>`__
|
||||
|
||||
S3BACKEND=mem
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
This is ideal for testing: no data remains after the container is shut down.
|
||||
This is ideal for testing - no data will remain after container is shutdown.
|
||||
|
||||
ENDPOINT
|
||||
~~~~~~~~
|
||||
|
||||
This variable specifies the endpoint. To direct CloudServer requests to
|
||||
new.host.com, for example, specify the endpoint with:
|
||||
This variable specifies your endpoint. If you have a domain such as
|
||||
new.host.com, by specifying that here, you and your users can direct s3
|
||||
server requests to new.host.com.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ docker run -d --name cloudserver -p 8000:8000 -e ENDPOINT=new.host.com zenko/cloudserver
|
||||
$ docker run -d --name s3server -p 8000:8000 -e ENDPOINT=new.host.com scality/s3server
|
||||
|
||||
.. note:: On Unix-like systems (Linux, OS X, etc.) edit /etc/hosts
|
||||
to associate 127.0.0.1 with new.host.com.
|
||||
|
||||
REMOTE_MANAGEMENT_DISABLE
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
CloudServer is a part of `Zenko <https://www.zenko.io/>`__. When you run CloudServer standalone it will still try to connect to Orbit by default (browser-based graphical user interface for Zenko).
|
||||
|
||||
Setting this variable to true(1) will default to accessKey1 and verySecretKey1 for credentials and disable the automatic Orbit management:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ docker run -d --name cloudserver -p 8000:8000 -e REMOTE_MANAGEMENT_DISABLE=1 zenko/cloudserver
|
||||
Note: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
|
||||
permissions, make sure to associate 127.0.0.1 with ``new.host.com``
|
||||
|
||||
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
These variables specify authentication credentials for an account named
|
||||
“CustomAccount”.
|
||||
"CustomAccount".
|
||||
|
||||
Set account credentials for multiple accounts by editing conf/authdata.json
|
||||
(see below for further details). To specify one set for personal use, set these
|
||||
environment variables:
|
||||
You can set credentials for many accounts by editing
|
||||
``conf/authdata.json`` (see below for further info), but if you just
|
||||
want to specify one set of your own, you can use these environment
|
||||
variables.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ docker run -d --name cloudserver -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey \
|
||||
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey zenko/cloudserver
|
||||
docker run -d --name s3server -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey
|
||||
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey scality/s3server
|
||||
|
||||
.. note:: This takes precedence over the contents of the authdata.json
|
||||
file. The authdata.json file is ignored.
|
||||
|
||||
.. note:: The ACCESS_KEY and SECRET_KEY environment variables are
|
||||
deprecated.
|
||||
Note: Anything in the ``authdata.json`` file will be ignored. Note: The
|
||||
old ``ACCESS_KEY`` and ``SECRET_KEY`` environment variables are now
|
||||
deprecated
|
||||
|
||||
LOG\_LEVEL
|
||||
~~~~~~~~~~
|
||||
|
||||
This variable changes the log level. There are three levels: info, debug,
|
||||
and trace. The default is info. Debug provides more detailed logs, and trace
|
||||
provides the most detailed logs.
|
||||
This variable allows you to change the log level: info, debug or trace.
|
||||
The default is info. Debug will give you more detailed logs and trace
|
||||
will give you the most detailed.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ docker run -d --name cloudserver -p 8000:8000 -e LOG_LEVEL=trace zenko/cloudserver
|
||||
$ docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
|
||||
|
||||
SSL
~~~

Set to true, this variable runs CloudServer with SSL.

If SSL is set true:

* The ENDPOINT environment variable must also be specified.

* On Unix-like systems (Linux, OS X, etc.), 127.0.0.1 must be associated with
  ``<YOUR_ENDPOINT>`` in ``/etc/hosts``.

.. Warning:: Self-signed certs with a CA generated within the container are
   suitable for testing purposes only. Clients cannot trust them, and they may
   disappear altogether on a container upgrade. The best security practice for
   production environments is to use an extra container, such as
   haproxy/nginx/stunnel, for SSL/TLS termination and to pull certificates
   from a mounted volume, limiting what an exploit on either component
   can expose.

.. code:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT> \
    zenko/cloudserver

For more information about using CloudServer with SSL, see `Using SSL
<GETTING_STARTED.html#Using SSL>`__.
LISTEN\_ADDR
~~~~~~~~~~~~

This variable causes CloudServer and its data and metadata components to
listen on the specified address. This allows starting the data or metadata
servers as standalone services, for example.

.. code-block:: shell

    $ docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0 \
    scality/s3server yarn run start_dataserver
DATA\_HOST and METADATA\_HOST
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

These variables configure the data and metadata servers to use,
usually when they are running on another host and only starting the stateless
Zenko CloudServer.

.. code-block:: shell

    $ docker run -d --name cloudserver -e DATA_HOST=cloudserver-data \
    -e METADATA_HOST=cloudserver-metadata zenko/cloudserver yarn run start_s3server
REDIS\_HOST
~~~~~~~~~~~

Use this variable to connect to the Redis cache server on a host other than
localhost.

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 \
    -e REDIS_HOST=my-redis-server.example.com zenko/cloudserver
REDIS\_PORT
~~~~~~~~~~~

Use this variable to connect to the Redis cache server on a port other
than the default 6379.

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 \
    -e REDIS_PORT=6379 zenko/cloudserver

.. _tunables-and-setup-tips:

Tunables and Setup Tips
-----------------------
Using Docker Volumes
~~~~~~~~~~~~~~~~~~~~

CloudServer runs with a file backend by default, meaning that data is
stored inside the CloudServer's Docker container.

For data and metadata to persist, they **must** be hosted in Docker
volumes outside the CloudServer's Docker container. Otherwise, the data
and metadata are destroyed when the container is erased.

.. code-block:: shell

    $ docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
    -p 8000:8000 -d zenko/cloudserver

This command mounts the ``./data`` host directory to the container
at ``/usr/src/app/localData`` and the ``./metadata`` host directory to
the container at ``/usr/src/app/localMetaData``.

.. tip:: These host directories can be mounted to any accessible mount
   point, such as /mnt/data and /mnt/metadata, for example.
Adding, Modifying, or Deleting Accounts or Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Create a customized ``authdata.json`` file locally, based on the provided
   ``/conf/authdata.json``.

2. Use `Docker volumes <https://docs.docker.com/storage/volumes/>`__
   to override the default ``authdata.json`` through a Docker file mapping.

For example:

.. code-block:: shell

    $ docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d \
    zenko/cloudserver
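A minimal ``authdata.json`` sketch, modeled on the ``conf/authdata.json``
shipped with the repository (every value here is a placeholder to replace
with your own):

.. code:: json

    {
        "accounts": [{
            "name": "CustomAccount",
            "email": "custom.account@example.com",
            "arn": "arn:aws:iam::123456789012:root",
            "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
            "shortid": "123456789012",
            "keys": [{
                "access": "accessKey1",
                "secret": "verySecretKey1"
            }]
        }]
    }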
Specifying a Host Name
~~~~~~~~~~~~~~~~~~~~~~

To specify a host name (for example, s3.domain.name), provide your own
`config.json <https://github.com/scality/cloudserver/blob/master/config.json>`__
file using `Docker volumes <https://docs.docker.com/storage/volumes/>`__.

First, add a new key-value pair to the restEndpoints section of your
config.json. Make the key the host name you want, and the value the default
location\_constraint for this endpoint.

For example, ``cloudserver.example.com`` is mapped to ``us-east-1``, which is
one of the ``location_constraints`` listed in your locationConfig.json file
`here <https://github.com/scality/S3/blob/master/locationConfig.json>`__.

For more information about location configuration, see
`GETTING STARTED <GETTING_STARTED.html#location-configuration>`__.

.. code:: json

    "restEndpoints": {
        "localhost": "file",
        "127.0.0.1": "file",
        ...
        "cloudserver.example.com": "us-east-1"
    },

Next, run CloudServer using a `Docker volume
<https://docs.docker.com/engine/tutorials/dockervolumes/>`__:

.. code-block:: shell

    $ docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d zenko/cloudserver

The local ``config.json`` file overrides the default one through a Docker
file mapping.
Running as an Unprivileged User
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

CloudServer runs as root by default.

To change this, modify the dockerfile and specify a user before the
entry point.

The user must exist within the container, and must own the
/usr/src/app directory for CloudServer to run.

For example, the following dockerfile lines can be modified:

.. code-block:: shell

    USER scality
    ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
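A fuller sketch of such a modification (the ``scality`` user name and the
UID/GID value 1001 are illustrative assumptions, not values mandated by the
project; a Debian-based image is assumed for ``groupadd``/``useradd``):

.. code-block:: shell

    # Create an unprivileged user and give it ownership of the app directory
    RUN groupadd -r -g 1001 scality \
        && useradd -u 1001 -g 1001 -d /usr/src/app -r scality \
        && chown -R scality:scality /usr/src/app

    # Drop privileges before the entry point
    USER scality
    ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]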
.. _continuous-integration-with-docker-hosted-cloudserver:

Continuous Integration with a Docker-Hosted CloudServer
-------------------------------------------------------

When you start the Docker CloudServer image, you can adjust the
configuration of the CloudServer instance by passing one or more
environment variables on the ``docker run`` command line.

To run CloudServer for CI with custom locations (one in-memory,
one hosted on AWS) and custom credentials mounted:

.. code-block:: shell

    $ docker run --name CloudServer -p 8000:8000 \
    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
    -v ~/.aws/credentials:/root/.aws/credentials \
    -e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver

To run CloudServer for CI with custom locations (one in-memory, one
hosted on AWS, and one file) and custom credentials `set as environment
variables <GETTING_STARTED.html#scality-access-key-id-and-scality-secret-access-key>`__:

.. code-block:: shell

    $ docker run --name CloudServer -p 8000:8000 \
    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v ~/.aws/credentials:/root/.aws/credentials \
    -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
    -e SCALITY_ACCESS_KEY_ID=accessKey1 \
    -e SCALITY_SECRET_ACCESS_KEY=verySecretKey1 \
    -e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver
.. _in-production-w-a-Docker-hosted-cloudserver:

In Production with a Docker-Hosted CloudServer
----------------------------------------------

Because data must persist in production settings, CloudServer offers
multiple-backend capabilities. This requires a custom endpoint
and custom credentials for local storage.

Customize these with:

.. code-block:: shell

    $ docker run -d --name CloudServer \
    -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
    -v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple \
    -e ENDPOINT=custom.endpoint.com \
    -p 8000:8000 zenko/cloudserver
Getting Started
===============

.. figure:: ../res/scality-cloudserver-logo.png
   :alt: Zenko CloudServer logo

|CircleCI| |Scality CI|

Dependencies
------------

Building and running the Scality Zenko CloudServer requires node.js 10.x and
yarn v1.17.x. Up-to-date versions can be found at
`Nodesource <https://github.com/nodesource/distributions>`__.

Installation
------------

1. Clone the source code

   .. code-block:: shell

       $ git clone https://github.com/scality/cloudserver.git

2. Go to the cloudserver directory and use yarn to install the js dependencies.

   .. code-block:: shell

       $ cd cloudserver
       $ yarn install

Running CloudServer with a File Backend
---------------------------------------

.. code-block:: shell

    $ yarn start

This starts a Zenko CloudServer on port 8000. Two additional ports, 9990
and 9991, are also open locally for internal transfer of metadata and
data, respectively.

The default access key is accessKey1. The secret key is verySecretKey1.

By default, metadata files are saved in the localMetadata directory and
data files are saved in the localData directory in the local ./cloudserver
directory. These directories are pre-created within the repository. To
save data or metadata in different locations, you must specify them using
absolute paths. Thus, when starting the server:

.. code-block:: shell

    $ mkdir -m 700 $(pwd)/myFavoriteDataPath
    $ mkdir -m 700 $(pwd)/myFavoriteMetadataPath
    $ export S3DATAPATH="$(pwd)/myFavoriteDataPath"
    $ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
    $ yarn start

Running CloudServer with Multiple Data Backends
-----------------------------------------------

.. code-block:: shell

    $ export S3DATA='multiple'
    $ yarn start

This starts a Zenko CloudServer on port 8000.

The default access key is accessKey1. The secret key is verySecretKey1.

With multiple backends, you can choose where each object is saved by setting
the following header with a location constraint in a PUT request:

.. code-block:: shell

    'x-amz-meta-scal-location-constraint':'myLocationConstraint'

If no header is sent with a PUT object request, the bucket's location
constraint determines where the data is saved. If the bucket has no
location constraint, the endpoint of the PUT request determines location.

See the Configuration_ section to set location constraints.
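For illustration, here is one way to send the
``x-amz-meta-scal-location-constraint`` header with the AWS SDK for
JavaScript (v2), which turns entries of its ``Metadata`` map into
``x-amz-meta-*`` headers. The endpoint, bucket, and key names below are
placeholders:

.. code:: js

    const AWS = require('aws-sdk');

    const s3 = new AWS.S3({
        endpoint: 'http://localhost:8000',
        accessKeyId: 'accessKey1',
        secretAccessKey: 'verySecretKey1',
        s3ForcePathStyle: true,
    });

    // The SDK sends this entry as the x-amz-meta-scal-location-constraint header.
    s3.putObject({
        Bucket: 'mybucket',
        Key: 'mykey',
        Body: 'some data',
        Metadata: { 'scal-location-constraint': 'myLocationConstraint' },
    }, (err, data) => {
        if (err) {
            return console.error(err);
        }
        console.log('stored with ETag', data.ETag);
    });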
Run CloudServer with an In-Memory Backend
-----------------------------------------

.. code-block:: shell

    $ yarn run mem_backend

This starts a Zenko CloudServer on port 8000.

The default access key is accessKey1. The secret key is verySecretKey1.

Run CloudServer with Vault User Management
------------------------------------------

.. code:: shell

    $ export S3VAULT=vault
    $ yarn start

This starts a Zenko CloudServer using Vault for user management.

.. note:: Vault is proprietary and must be accessed separately.
Run CloudServer for Continuous Integration Testing or in Production with Docker
--------------------------------------------------------------------------------

Run CloudServer with `DOCKER <DOCKER.html>`__.

Testing
~~~~~~~

Run unit tests with the command:

.. code-block:: shell

    $ yarn test

Run multiple-backend unit tests with:

.. code-block:: shell

    $ CI=true S3DATA=multiple yarn start
    $ yarn run multiple_backend_test

Run the linter with:

.. code-block:: shell

    $ yarn run lint
Running Functional Tests Locally
--------------------------------

To pass AWS and Azure backend tests locally, modify
tests/locationConfig/locationConfigTests.json so that ``awsbackend``
specifies the bucketname of a bucket you have access to based on your
credentials, and modify ``azurebackend`` with details for your Azure account.

The test suite requires additional tools, **s3cmd** and **Redis**, installed
in the environment the tests are running in.

1. Install `s3cmd <http://s3tools.org/download>`__

2. Install `redis <https://redis.io/download>`__ and start Redis.

3. Add localCache section to ``config.json``:

   .. code:: json

       "localCache": {
           "host": REDIS_HOST,
           "port": REDIS_PORT
       }

   where ``REDIS_HOST`` is the Redis instance IP address (``"127.0.0.1"``
   if Redis is running locally) and ``REDIS_PORT`` is the Redis instance
   port (``6379`` by default).

4. Add the following to the local etc/hosts file:

   .. code-block:: shell

       127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com

5. Start Zenko CloudServer in memory and run the functional tests:

   .. code-block:: shell

       $ CI=true yarn run mem_backend
       $ CI=true yarn run ft_test

.. _Configuration:
Configuration
-------------

There are three configuration files for Zenko CloudServer:

* ``conf/authdata.json``, for authentication.

* ``locationConfig.json``, to configure where data is saved.

* ``config.json``, for general configuration options.

.. _location-configuration:
Location Configuration
~~~~~~~~~~~~~~~~~~~~~~

You must specify at least one locationConstraint in locationConfig.json
(or leave it as pre-configured).

You must also specify 'us-east-1' as a locationConstraint. If you put a
bucket to an unknown endpoint and do not specify a locationConstraint in
the PUT bucket call, us-east-1 is used.

For instance, the following locationConstraint saves data sent to
``myLocationConstraint`` to the file backend:

.. code:: json

    "myLocationConstraint": {
        "type": "file",
        "legacyAwsBehavior": false,
        "details": {}
    },

Each locationConstraint must include the ``type``, ``legacyAwsBehavior``,
and ``details`` keys. ``type`` indicates which backend is used for that
region. Supported backends are mem, file, and scality. ``legacyAwsBehavior``
indicates whether the region behaves the same as the AWS S3 'us-east-1'
region. If the locationConstraint type is ``scality``, ``details`` must
contain connector information for sproxyd. If the locationConstraint type
is ``mem`` or ``file``, ``details`` must be empty.
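For a ``scality``-type locationConstraint, ``details`` carries the sproxyd
connector information. A sketch of what such an entry can look like (the
constraint name and the bootstrap address are placeholders for your own
sproxyd endpoint):

.. code:: json

    "myScalityConstraint": {
        "type": "scality",
        "legacyAwsBehavior": false,
        "details": {
            "connector": {
                "sproxyd": {
                    "bootstrap": ["localhost:8181"]
                }
            }
        }
    },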
Once locationConstraints is set in locationConfig.json, specify a default
locationConstraint for each endpoint.

For instance, the following sets the ``localhost`` endpoint to the
``myLocationConstraint`` data backend defined above:

.. code:: json

    "restEndpoints": {
        "localhost": "myLocationConstraint"
    },
To use an endpoint other than localhost for Zenko CloudServer, the endpoint
must be listed in ``restEndpoints``. Otherwise, if the server is running
with a:

* **file backend**: The default location constraint is ``file``
* **memory backend**: The default location constraint is ``mem``
Endpoints
~~~~~~~~~

The Zenko CloudServer supports endpoints that are rendered in either:

* path style: http://myhostname.com/mybucket or
* hosted style: http://mybucket.myhostname.com

However, if an IP address is specified for the host, hosted-style requests
cannot reach the server. Use path-style requests in that case. For example,
if you are using the AWS SDK for JavaScript, instantiate your client like this:

.. code:: js

    const s3 = new AWS.S3({
        // The endpoint value here is illustrative; use your own host.
        endpoint: 'http://127.0.0.1:8000',
        s3ForcePathStyle: true,
    });
Setting Your Own Access and Secret Key Pairs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Credentials can be set for many accounts by editing ``conf/authdata.json``,
but use the ``SCALITY_ACCESS_KEY_ID`` and ``SCALITY_SECRET_ACCESS_KEY``
environment variables to specify your own credentials.

_`scality-access-key-id-and-scality-secret-access-key`

SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

These variables specify authentication credentials for an account named
"CustomAccount".

.. note:: Anything in the ``authdata.json`` file is ignored.

.. code-block:: shell

    $ SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey yarn start
.. _Using_SSL:

Using SSL
~~~~~~~~~

To use https with your local CloudServer, you must set up
SSL certificates.

1. Deploy CloudServer using `our DockerHub page
   <https://hub.docker.com/r/zenko/cloudserver/>`__ (run it with a file
   backend).

   .. Note:: If Docker is not installed locally, follow the
      `instructions to install it for your distribution
      <https://docs.docker.com/engine/installation/>`__

2. Update the CloudServer container's config.

   Add your certificates to your container. To do this, exec inside the
   CloudServer container:

   #. Run ``$> docker ps`` to find the container's ID (the corresponding
      image name is ``scality/cloudserver``).

   #. Copy the corresponding container ID (``894aee038c5e`` in the present
      example), and run:

      .. code-block:: shell

          $> docker exec -it 894aee038c5e bash

   This puts you inside your container, using an interactive terminal.
3. Generate the SSL key and certificates. The paths where the different
   files are stored are defined after the ``-out`` option in each of the
   following commands.

   #. Generate a private key for your certificate signing request (CSR):

      .. code-block:: shell

          $> openssl genrsa -out ca.key 2048

   #. Generate a self-signed certificate for your local certificate
      authority (CA):

      .. code:: shell

          $> openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=scality.test"

   #. Generate a key for the CloudServer:

      .. code:: shell

          $> openssl genrsa -out test.key 2048

   #. Generate a CSR for CloudServer:

      .. code:: shell

          $> openssl req -new -key test.key -out test.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.scality.test"

   #. Generate a certificate for CloudServer signed by the local CA:

      .. code:: shell

          $> openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 99999 -sha256

4. Update Zenko CloudServer ``config.json``. Add a ``certFilePaths``
   section to ``./config.json`` with appropriate paths:

   .. code:: json

       "certFilePaths": {
           "key": "./test.key",
           "cert": "./test.crt",
           "ca": "./ca.crt"
       }
5. Run your container with the new config.

   #. Exit the container by running ``$> exit``.

   #. Restart the container with ``$> docker restart cloudserver``.

6. Update the host configuration by adding s3.scality.test
   to /etc/hosts:

   .. code:: bash

       127.0.0.1 localhost s3.scality.test

7. Copy the local certificate authority (ca.crt in step 3) from your
   container. Choose the path to save this file to (in the present
   example, ``/root/ca.crt``), and run:

   .. code:: shell

       $> docker cp 894aee038c5e:/usr/src/app/ca.crt /root/ca.crt

   .. note:: Your container ID will be different, and your path to
      ca.crt may be different.
Test the Config
^^^^^^^^^^^^^^^

If aws-sdk is not installed, run ``$> yarn add aws-sdk``.

Next, paste a short test script into a file named ``test.js``.
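A minimal sketch of such a script (assuming the default accessKey1 and
verySecretKey1 credentials, the s3.scality.test endpoint set up above, and
the CA copied to ``/root/ca.crt``; adjust these to your own setup):

.. code:: js

    const fs = require('fs');
    const https = require('https');
    const AWS = require('aws-sdk');

    const s3 = new AWS.S3({
        endpoint: 'https://s3.scality.test:8000',
        accessKeyId: 'accessKey1',
        secretAccessKey: 'verySecretKey1',
        s3ForcePathStyle: true,
        httpOptions: {
            // Trust the local certificate authority generated earlier.
            agent: new https.Agent({ ca: [fs.readFileSync('/root/ca.crt')] }),
        },
    });

    // Any request that completes proves the TLS handshake worked.
    s3.createBucket({ Bucket: 'ssl-test-bucket' }, err => {
        if (err) {
            return console.error(err);
        }
        console.log('SSL is cool!');
    });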
Now run this script with:

.. code::

    $> nodejs test.js

On success, the script outputs ``SSL is cool!``.
.. |CircleCI| image:: https://circleci.com/gh/scality/S3.svg?style=svg
# Get Bucket Version 2 Documentation

## Description

This feature implements version 2 of the GET Bucket (List Objects)
operation, following AWS specifications
(see https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html).

## Requirements

The user must have READ access to the bucket.

## Design

### Request

The `delimiter`, `encoding-type`, `max-keys`, and `prefix` request parameters
from GET Bucket v1 remain unchanged. To specify v2, the parameter `list-type`
must be included and set to `2`.

The `marker` v1 parameter's functionality has been split in two and replaced
by `start-after` and `continuation-token` in v2. The `start-after` parameter
is a specific object key after which the API returns key names. It is only
valid in the first GET request. If both the `start-after` and
`continuation-token` parameters are included in a request, the API ignores
the `start-after` parameter in favor of the `continuation-token`.

If the GET Bucket v2 response is truncated, a `NextContinuationToken` is also
included. To list the next set of objects, the `NextContinuationToken` can be
used as the `continuation-token` in the next request. The continuation token
is an obfuscated string of 57 characters that CloudServer understands and
interprets.

By default, the v2 response does not include object owner information. To
include owner information like the default v1 response, set the `fetch-owner`
request parameter to `true`.

### Response

The GET Bucket v1 and v2 responses are largely the same, with only a few
changes. The `NextMarker` v1 parameter has been replaced by
`NextContinuationToken`, which is included with any truncated response, even
if no delimiter is sent in the request. Its value is an obfuscated string
that can be passed as the `continuation-token` in the next request, where
CloudServer will interpret it.

The `KeyCount` parameter is returned in every response. Its value is the
number of keys included in the response, and it is always less than or equal
to the `MaxKeys` value.

If the `start-after` or `continuation-token` parameter is used in the
request, it is also included in the response.

Unlike the v1 response, the v2 response does not include object owner
information by default. See the `Request` section for how to include it.
### Continuation Token

An example continuation token:

```
NextContinuationToken: '1bunC4s+crlZNAAbKUGBLyajJUQKp22TOdUR6/01snxD2cZtjJD0ugA=='
```

To generate a comparable token, CloudServer uses base64 encoding to
obfuscate the key name of the next object to be listed. Encoded continuation
tokens are similarly decoded so that listing can continue from the correct
object.
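As an illustrative sketch of the obfuscation principle (the real token also
carries additional framing, so this is not a byte-for-byte reproduction of
CloudServer's internal format):

```js
// Encode the key of the next object to list into an opaque token...
const nextKey = 'photos/2017/january/sample.jpg';
const token = Buffer.from(nextKey, 'utf8').toString('base64');

// ...and decode it on the next request to resume the listing.
const resumeKey = Buffer.from(token, 'base64').toString('utf8');
console.log(token, resumeKey);
```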
## Performing Get Bucket V2 Operation

When performing the GET Bucket V2 operation, if the request is built manually,
the parameter `list-type` must be included and set to `2`.
Using the AWS cli client, the command becomes `list-objects-v2`.
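For example, a paginated listing with the AWS SDK for JavaScript (v2), which
sets `list-type=2` automatically; the endpoint, credentials, and bucket name
are placeholders:

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

// List a bucket 100 keys at a time, following NextContinuationToken.
function listAll(token) {
    s3.listObjectsV2({
        Bucket: 'mybucket',
        MaxKeys: 100,
        ContinuationToken: token,
    }, (err, data) => {
        if (err) {
            return console.error(err);
        }
        data.Contents.forEach(obj => console.log(obj.Key));
        if (data.IsTruncated) {
            listAll(data.NextContinuationToken);
        }
    });
}

listAll();
```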
High Availability
=================

`Docker Swarm <https://docs.docker.com/engine/swarm/>`__ is a clustering tool
developed by Docker for use with its containers. It can be used to start
services, which we define to ensure CloudServer's continuous availability to
end users. A swarm defines a manager and *n* workers among *n* + 1 servers.

This tutorial shows how to perform a basic setup with three servers, which
provides strong service resiliency, while remaining easy to use and
maintain. We will use NFS through Docker to share data and
metadata between the different servers.

Sections are labeled **On Server**, **On Clients**, or
**On All Machines**, referring respectively to the NFS server, the NFS
clients, or the NFS server and clients. In the present example, the server's
IP address is **10.200.15.113** and the client IP addresses are
**10.200.15.96** and **10.200.15.97**.
1. Install Docker (on All Machines)

   Docker 17.03.0-ce is used for this tutorial. Docker 1.12.6 and later will
   likely work, but is not tested.

   * On Ubuntu 14.04

     Install Docker CE for Ubuntu as `documented at Docker
     <https://docs.docker.com/install/linux/docker-ce/ubuntu/>`__.
     Install the aufs dependency as recommended by Docker. The required
     commands are:

     .. code:: sh

         $> sudo apt-get update
         $> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
         $> sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
         $> curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
         $> sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
         $> sudo apt-get update
         $> sudo apt-get install docker-ce

   * On CentOS 7

     Install Docker CE as `documented at Docker
     <https://docs.docker.com/install/linux/docker-ce/centos/>`__.
     The required commands are:

     .. code:: sh

         $> sudo yum install -y yum-utils
         $> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
         $> sudo yum makecache fast
         $> sudo yum install docker-ce
         $> sudo systemctl start docker
2. Install NFS on Client(s)

   NFS clients mount Docker volumes over the NFS server's shared folders.
   If the NFS commons are installed, manual mounts are no longer needed.

   * On Ubuntu 14.04

     Install the NFS commons with apt-get:

     .. code:: sh

         $> sudo apt-get install nfs-common

   * On CentOS 7

     Install the NFS utils; then start the required services:

     .. code:: sh

         $> yum install nfs-utils
         $> sudo systemctl enable rpcbind
         $> sudo systemctl enable nfs-server
         $> sudo systemctl enable nfs-lock
         $> sudo systemctl enable nfs-idmap
         $> sudo systemctl start rpcbind
         $> sudo systemctl start nfs-server
         $> sudo systemctl start nfs-lock
         $> sudo systemctl start nfs-idmap
3. Install NFS (on Server)

   The NFS server hosts the data and metadata. The package(s) to install on
   it differ from the package installed on the clients.

   * On Ubuntu 14.04

     Install the NFS server-specific package and the NFS commons:

     .. code:: sh

         $> sudo apt-get install nfs-kernel-server nfs-common

   * On CentOS 7

     Install the NFS utils and start the required services:

     .. code:: sh

         $> yum install nfs-utils
         $> sudo systemctl enable rpcbind
         $> sudo systemctl enable nfs-server
         $> sudo systemctl enable nfs-lock
         $> sudo systemctl enable nfs-idmap
         $> sudo systemctl start rpcbind
         $> sudo systemctl start nfs-server
         $> sudo systemctl start nfs-lock
         $> sudo systemctl start nfs-idmap
   For both distributions:

   #. Choose where shared data and metadata from the local
      `CloudServer <http://www.zenko.io/cloudserver/>`__ shall be stored
      (the present example uses /var/nfs/data and /var/nfs/metadata). Set
      permissions for these folders for sharing over NFS:

      .. code:: sh

          $> mkdir -p /var/nfs/data /var/nfs/metadata
          $> chmod -R 777 /var/nfs/

   #. The /etc/exports file configures network permissions and r-w-x
      permissions for NFS access. Edit /etc/exports, adding the following
      lines:

      .. code:: sh

          /var/nfs/data 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
          /var/nfs/metadata 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)

      Ubuntu applies the no\_subtree\_check option by default, so both
      folders are declared with the same permissions, even though they're
      in the same tree.

   #. Export this new NFS table:

      .. code:: sh

          $> sudo exportfs -a

   #. Edit the ``MountFlags`` option in the Docker config in
      /lib/systemd/system/docker.service to enable NFS mount from Docker
      volumes on other machines:

      .. code:: sh

          MountFlags=shared

   #. Restart the NFS server and Docker daemons to apply these changes.

      * On Ubuntu 14.04

        .. code:: sh

            $> sudo service nfs-kernel-server restart
            $> sudo service docker restart

      * On CentOS 7

        .. code:: sh

            $> sudo systemctl restart nfs-server
            $> sudo systemctl daemon-reload
            $> sudo systemctl restart docker
4. Set Up a Docker Swarm
|
||||
|
||||
* On all machines and distributions:
|
||||
|
||||
Set up the Docker volumes to be mounted to the NFS server for CloudServer’s
|
||||
data and metadata storage. The following commands must be replicated on all
|
||||
machines:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
|
||||
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata
|
||||
|
||||
There is no need to ``docker exec`` these volumes to mount them: the
|
||||
Docker Swarm manager does this when the Docker service is started.
|
||||
|
||||
* On a server:
|
||||
|
||||
To start a Docker service on a Docker Swarm cluster, initialize the cluster
|
||||
(that is, define a manager), prompt workers/nodes to join in, and then start
|
||||
the service.
|
||||
|
||||
Initialize the swarm cluster, and review its response:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> docker swarm init --advertise-addr 10.200.15.113
|
||||
|
||||
Swarm initialized: current node (db2aqfu3bzfzzs9b1kfeaglmq) is now a manager.
|
||||
|
||||
To add a worker to this swarm, run the following command:
|
||||
|
||||
docker swarm join \
|
||||
--token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka \
|
||||
10.200.15.113:2377
|
||||
|
||||
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
|
||||
|
||||
* On clients:
|
||||
|
||||
Copy and paste the command provided by your Docker Swarm init. A successful
|
||||
request/response will resemble:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377
|
||||
|
||||
This node joined a swarm as a worker.
|
||||
|
||||
Set Up Docker Swarm on Clients on a Server
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Start the service on the Swarm cluster.
|
||||
The docker website has `solid
|
||||
documentation <https://docs.docker.com/engine/installation/linux/ubuntu/>`__.
|
||||
We have chosen to install the aufs dependency, as recommended by Docker.
|
||||
Here are the required commands:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/cloudserver
|
||||
$> sudo apt-get update
|
||||
$> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
|
||||
$> sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
|
||||
$> curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
$> sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
$> sudo apt-get update
|
||||
$> sudo apt-get install docker-ce
|
||||
|
||||
On a successful installation, ``docker service ls`` returns the following
|
||||
output:
|
||||
On CentOS 7
|
||||
^^^^^^^^^^^
|
||||
|
||||
The docker website has `solid
|
||||
documentation <https://docs.docker.com/engine/installation/linux/centos/>`__.
|
||||
Here are the required commands:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> sudo yum install -y yum-utils
|
||||
$> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||
$> sudo yum makecache fast
|
||||
$> sudo yum install docker-ce
|
||||
$> sudo systemctl start docker
|
||||
|
||||
Configure NFS
|
||||
-------------
|
||||
|
||||
On Clients
|
||||
~~~~~~~~~~
|
||||
|
||||
Your NFS Clients will mount Docker volumes over your NFS Server's shared
|
||||
folders. Hence, you don't have to mount anything manually, you just have
|
||||
to install the NFS commons:
|
||||
|
||||
On Ubuntu 14.04
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Simply install the NFS commons:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> sudo apt-get install nfs-common
|
||||
|
||||
On CentOS 7
|
||||
^^^^^^^^^^^
|
||||
|
||||
Install the NFS utils, and then start the required services:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> yum install nfs-utils
|
||||
$> sudo systemctl enable rpcbind
|
||||
$> sudo systemctl enable nfs-server
|
||||
$> sudo systemctl enable nfs-lock
|
||||
$> sudo systemctl enable nfs-idmap
|
||||
$> sudo systemctl start rpcbind
|
||||
$> sudo systemctl start nfs-server
|
||||
$> sudo systemctl start nfs-lock
|
||||
$> sudo systemctl start nfs-idmap
|
||||
|
||||
On Server
|
||||
~~~~~~~~~
|
||||
|
||||
Your NFS Server will be the machine to physically host the data and
|
||||
metadata. The package(s) we will install on it is slightly different
|
||||
from the one we installed on the clients.
|
||||
|
||||
On Ubuntu 14.04
|
||||
^^^^^^^^^^^^^^^
|
||||
|
||||
Install the NFS server specific package and the NFS commons:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> sudo apt-get install nfs-kernel-server nfs-common
|
||||
|
||||
On CentOS 7
|
||||
^^^^^^^^^^^
|
||||
|
||||
Same steps as with the client: install the NFS utils and start the
|
||||
required services:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
$> yum install nfs-utils
|
||||
$> sudo systemctl enable rpcbind
|
||||
$> sudo systemctl enable nfs-server
|
||||
$> sudo systemctl enable nfs-lock
|
||||
$> sudo systemctl enable nfs-idmap
|
||||
$> sudo systemctl start rpcbind
|
||||
$> sudo systemctl start nfs-server
|
||||
$> sudo systemctl start nfs-lock
|
||||
$> sudo systemctl start nfs-idmap
|
||||
|
||||
On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Choose where the shared data and metadata from your local `Zenko CloudServer
<http://www.zenko.io/cloudserver/>`__ will be stored. We chose
``/var/nfs/data`` and ``/var/nfs/metadata``. You also need to set proper
sharing permissions for these folders, as they'll be shared over NFS:

.. code:: sh

    $> mkdir -p /var/nfs/data /var/nfs/metadata
    $> chmod -R 777 /var/nfs/

Now you need to update your **/etc/exports** file. This is the file that
configures network permissions and rwx permissions for NFS access. By
default, Ubuntu applies the no\_subtree\_check option, so we declared
both folders with the same permissions, even though they're in the same
tree:

.. code:: sh

    $> sudo vim /etc/exports

In this file, add the following lines:

.. code:: sh

    /var/nfs/data 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
    /var/nfs/metadata 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)

Export this new NFS table:

.. code:: sh

    $> sudo exportfs -a

Finally, you need to allow NFS mounts from Docker volumes on other
machines. To do so, change the Docker config in
**/lib/systemd/system/docker.service**:

.. code:: sh

    $> sudo vim /lib/systemd/system/docker.service

In this file, change the **MountFlags** option:

.. code:: sh

    MountFlags=shared

Now you just need to restart the NFS server and docker daemons so your
changes apply.

On Ubuntu 14.04
^^^^^^^^^^^^^^^

Restart your NFS server and docker services:

.. code:: sh

    $> sudo service nfs-kernel-server restart
    $> sudo service docker restart

On CentOS 7
^^^^^^^^^^^

Restart your NFS server and docker daemons:

.. code:: sh

    $> sudo systemctl restart nfs-server
    $> sudo systemctl daemon-reload
    $> sudo systemctl restart docker

Set up your Docker Swarm service
--------------------------------

On All Machines
~~~~~~~~~~~~~~~

On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We will now set up the Docker volumes that will be mounted to the NFS
server and serve as data and metadata storage for Zenko CloudServer.
These two commands have to be replicated on all machines:

.. code:: sh

    $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
    $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata

There is no need to "docker exec" these volumes to mount them: the
Docker Swarm manager will do it when the Docker service is started.

On Server
^^^^^^^^^

To start a Docker service on a Docker Swarm cluster, you first have to
initialize that cluster (i.e., define a manager), then have the
workers/nodes join in, and then start the service. Initialize the swarm
cluster, and look at the response:

.. code:: sh

    $> docker swarm init --advertise-addr 10.200.15.113

    Swarm initialized: current node (db2aqfu3bzfzzs9b1kfeaglmq) is now a manager.

    To add a worker to this swarm, run the following command:

        docker swarm join \
        --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka \
        10.200.15.113:2377

    To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

On Clients
^^^^^^^^^^

Simply copy/paste the command provided by your docker swarm init. When
all goes well, you'll get something like this:

.. code:: sh

    $> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377

    This node joined a swarm as a worker.

On Server
^^^^^^^^^

Start the service on your swarm cluster!

.. code:: sh

    $> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/cloudserver

On a successful installation, ``docker service ls`` returns the following
output:

.. code:: sh

    $> docker service ls
    ID            NAME  MODE        REPLICAS  IMAGE
    ocmggza412ft  s3    replicated  1/1       scality/cloudserver:latest

If the service does not start, consider disabling apparmor/SELinux.

Testing the High-Availability CloudServer
-----------------------------------------

On all machines (client/server) and distributions (Ubuntu and CentOS),
determine where CloudServer is running using ``docker ps``. CloudServer can
operate on any node of the Swarm cluster, manager or worker. When you find
it, you can kill it with ``docker stop <container id>``. It will respawn
on a different node. Now, if one server falls, or if Docker stops
unexpectedly, the end user will still be able to access the local
CloudServer.

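A quick way to watch this failover from the client side is to poll the
service endpoint while you stop the container. The sketch below is
illustrative only: it assumes the swarm publishes CloudServer on
``127.0.0.1:8000`` as in the examples above, and uses the third-party
Python ``requests`` package.

.. code:: python

    # Poll the CloudServer endpoint once per second; any HTTP response
    # (even an S3 error document) means the service is reachable.
    # Assumption: the service is published on 127.0.0.1:8000.
    import time

    import requests

    ENDPOINT = "http://127.0.0.1:8000"

    while True:
        try:
            r = requests.get(ENDPOINT, timeout=2)
            print(f"up (HTTP {r.status_code})")
        except requests.exceptions.RequestException as exc:
            print(f"down ({exc.__class__.__name__})")
        time.sleep(1)
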
Troubleshooting
---------------

To troubleshoot the service, run:

.. code:: sh

    $> docker service ps s3
    ID                         NAME     IMAGE                NODE                               DESIRED STATE  CURRENT STATE       ERROR
    0ar81cw4lvv8chafm8pw48wbc  s3.1     scality/cloudserver  localhost.localdomain.localdomain  Running        Running 7 days ago
    cvmf3j3bz8w6r4h0lf3pxo6eu  \_ s3.1  scality/cloudserver  localhost.localdomain.localdomain  Shutdown       Failed 7 days ago   "task: non-zero exit (137)"

If the error is truncated, view the error in detail by inspecting the
Docker task ID:

.. code:: sh

    $> docker inspect cvmf3j3bz8w6r4h0lf3pxo6eu

Off you go!
-----------

Let us know what you use this functionality for, and if you'd like any
specific developments around it. Or, even better: come and contribute to
our `Github repository <https://github.com/scality/s3/>`__! We look
forward to meeting you!

S3FS
====

You can export buckets as a filesystem with s3fs on CloudServer.

`s3fs <https://github.com/s3fs-fuse/s3fs-fuse>`__ is an open source
tool, available on both Debian and RedHat distributions, that enables
you to mount an S3 bucket on a filesystem-like backend. This tutorial uses
an Ubuntu 14.04 host to deploy and use s3fs over CloudServer.

Deploying Zenko CloudServer with SSL
------------------------------------

First, deploy CloudServer with a file backend using `our DockerHub page
<https://hub.docker.com/r/zenko/cloudserver>`__.

.. note::

    If Docker is not installed on your machine, follow
    `these instructions <https://docs.docker.com/engine/installation/>`__
    to install it for your distribution.

You must also set up SSL with CloudServer to use s3fs. See `Using SSL
<./GETTING_STARTED#Using_SSL>`__ for instructions.

s3fs Setup
----------

Installing s3fs
~~~~~~~~~~~~~~~

Follow the instructions in the s3fs `README
<https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation-from-pre-built-packages>`__.

Check that s3fs is properly installed. A version check should return
a response resembling:

.. code:: sh

    $> s3fs --version

    Amazon Simple Storage Service File System V1.80(commit:d40da2c) with OpenSSL
    Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
    License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>
    This is free software: you are free to change and redistribute it.
    There is NO WARRANTY, to the extent permitted by law.

Configuring s3fs
~~~~~~~~~~~~~~~~

s3fs expects you to provide it with a password file. Our file is
``/etc/passwd-s3fs``. The structure for this file is
``ACCESSKEYID:SECRETKEYID``, so, for CloudServer, you can run:

.. code:: sh

    $> echo 'accessKey1:verySecretKey1' > /etc/passwd-s3fs
    $> chmod 600 /etc/passwd-s3fs

Using CloudServer with s3fs
---------------------------

1. Use ``/mnt/tests3fs`` as a mount point:

   .. code:: sh

       $> mkdir /mnt/tests3fs

2. Create a bucket on your local CloudServer. In the present example it
   is named “tests3fs”:

   .. code:: sh

       $> s3cmd mb s3://tests3fs

   .. note:: If you've never used s3cmd with Zenko CloudServer, our
      README provides a `recommended config
      <https://github.com/scality/S3/blob/master/README.md#s3cmd>`__.

3. Mount the bucket to your mount point with s3fs:

   .. code:: sh

       $> s3fs tests3fs /mnt/tests3fs -o passwd_file=/etc/passwd-s3fs -o url="https://s3.scality.test:8000/" -o use_path_request_style

The structure of this command is
``s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS``. The options are
mandatory and serve the following purposes:

* ``passwd_file`` specifies the path to the password file.
* ``url`` specifies the host name used by your SSL provider.
* ``use_path_request_style`` forces the path style (by default,
  s3fs uses DNS-style subdomains).

Once the bucket is mounted, files added to the mount point or objects
added to the bucket will appear in both locations.

Example
-------

Create two files, and then a directory with a file, in our mount point:

.. code:: sh

    $> touch /mnt/tests3fs/file1 /mnt/tests3fs/file2
    $> mkdir /mnt/tests3fs/dir1
    $> touch /mnt/tests3fs/dir1/file3

Now, use s3cmd to show what is in CloudServer:

.. code:: sh

    $> s3cmd ls -r s3://tests3fs

    2017-02-28 17:28         0   s3://tests3fs/dir1/
    2017-02-28 17:29         0   s3://tests3fs/dir1/file3
    2017-02-28 17:28         0   s3://tests3fs/file1
    2017-02-28 17:28         0   s3://tests3fs/file2

Now you can enjoy a filesystem view on your local CloudServer.

Duplicity
=========

How to back up your files with CloudServer.

Installing Duplicity and its Dependencies
-----------------------------------------

To install `Duplicity <http://duplicity.nongnu.org/>`__,
go to `this site <https://code.launchpad.net/duplicity/0.7-series>`__.
Download the latest tarball. Decompress it and follow the instructions
in the README.

.. code:: sh

    $> tar zxvf duplicity-0.7.11.tar.gz
    $> cd duplicity-0.7.11
    $> python setup.py install

You may receive error messages indicating the need to install some or all
of the following dependencies:

.. code:: sh

@@ -420,20 +484,30 @@ of the following dependencies:

    $> apt-get install python-dev python-pip python-lockfile
    $> pip install -U boto

Testing the Installation
------------------------

1. Check that CloudServer is running. Run ``$> docker ps``. You should
   see one container named ``scality/cloudserver``. If you do not, run
   ``$> docker start cloudserver`` and check again.

2. Duplicity uses a module called “Boto” to send requests to S3. Boto
   requires a configuration file located in ``/etc/boto.cfg`` to store
   your credentials and preferences. A minimal configuration, which you
   can fine-tune `following these instructions
   <http://boto.cloudhackers.com/en/latest/getting_started.html>`__, is
   shown here:

::

@@ -447,51 +521,54 @@ Testing the Installation

    # If using SSL, unmute and provide absolute path to local CA certificate
    # ca_certificates_file = /absolute/path/to/ca.crt

.. note:: To set up SSL with CloudServer, see `Using SSL
   <./GETTING_STARTED#Using_SSL>`__ in GETTING STARTED.

3. At this point, all requirements to run CloudServer as a backend to
   Duplicity have been met. A local folder/file should back up to the
   local S3. Try it with the decompressed Duplicity folder:

   .. code:: sh

       $> duplicity duplicity-0.7.11 "s3://127.0.0.1:8000/testbucket/"

.. note:: Duplicity will prompt for a symmetric encryption passphrase.
   Save it carefully, as you will need it to recover your data.
   Alternatively, you can add the ``--no-encryption`` flag and the data
   will be stored plain.

If this command is successful, you will receive an output resembling:

.. code:: sh

    --------------[ Backup Statistics ]--------------
    StartTime 1486486547.13 (Tue Feb 7 16:55:47 2017)
    EndTime 1486486547.40 (Tue Feb 7 16:55:47 2017)
    ElapsedTime 0.27 (0.27 seconds)
    SourceFiles 388
    SourceFileSize 6634529 (6.33 MB)
    NewFiles 388
    NewFileSize 6634529 (6.33 MB)
    DeletedFiles 0
    ChangedFiles 0
    ChangedFileSize 0 (0 bytes)
    ChangedDeltaSize 0 (0 bytes)
    DeltaEntries 388
    RawDeltaSize 6392865 (6.10 MB)
    TotalDestinationSizeChange 2003677 (1.91 MB)
    Errors 0
    -------------------------------------------------

Congratulations! You can now back up to your local S3 through Duplicity.

Automating Backups
------------------

The easiest way to back up files periodically is to write a bash script
and add it to your crontab. A suggested script follows.

.. code:: sh

@@ -500,33 +577,33 @@ and add it to your crontab. A suggested script follows.

    # Export your passphrase so you don't have to type anything
    export PASSPHRASE="mypassphrase"

    # To use a GPG key, put it here and uncomment the line below
    #GPG_KEY=

    # Define your backup bucket, with localhost specified
    DEST="s3://127.0.0.1:8000/testbucketcloudserver/"

    # Define the absolute path to the folder to back up
    SOURCE=/root/testfolder

    # Set to "full" for full backups, and "incremental" for incremental backups
    # Warning: you must perform one full backup before you can perform
    # incremental ones on top of it
    FULL=incremental

    # How long to keep backups. If you don't want to delete old backups, keep
    # this value empty; otherwise, the syntax is "1Y" for one year, "1M" for
    # one month, "1D" for one day.
    OLDER_THAN="1Y"

    # is_running checks whether Duplicity is currently completing a task
    is_running=$(ps -ef | grep duplicity | grep python | wc -l)

    # If Duplicity is already completing a task, this will not run
    if [ $is_running -eq 0 ]; then
        echo "Backup for ${SOURCE} started"

        # To delete backups older than a certain time, do it here
        if [ "$OLDER_THAN" != "" ]; then
            echo "Removing backups older than ${OLDER_THAN}"
            duplicity remove-older-than ${OLDER_THAN} ${DEST}
@@ -549,17 +626,17 @@ and add it to your crontab. A suggested script follows.

    # Forget the passphrase...
    unset PASSPHRASE

Put this file in ``/usr/local/sbin/backup.sh``. Run ``crontab -e`` and
paste your configuration into the file that opens. If you're unfamiliar
with Cron, here is a good `HowTo
<https://help.ubuntu.com/community/CronHowto>`__. If the folder being
backed up is modified permanently during the work day, you can set
incremental backups every 5 minutes from 8 AM to 9 PM, Monday through
Friday, by pasting the following line into crontab:

.. code:: sh

    */5 8-20 * * 1-5 /usr/local/sbin/backup.sh

Adding or removing files from the folder being backed up will result in
incremental backups in the bucket.

@@ -0,0 +1,229 @@

# Metadata Search Documentation

## Description

This feature enables metadata search to be performed on the metadata of objects
stored in Zenko.

## Requirements

+ MongoDB

## Design

The MD Search feature expands on the existing `GET Bucket` S3 API. It allows
users to conduct metadata searches by adding the custom Zenko querystring
parameter, `search`. The `search` parameter has the structure of a pseudo
SQL WHERE clause and supports basic SQL operators, e.g.
`"A=1 AND B=2 OR C=3"` (more complex queries can also be achieved with the
use of the nesting operators `(` and `)`).

The search process is as follows:

+ Zenko receives a `GET` request.

```
# regular getBucket request
GET /bucketname HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string

# getBucket versions request
GET /bucketname?versions HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string

# search getBucket request
GET /bucketname?search=key%3Dsearch-item HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string
```

+ If the request does not contain the query parameter `search`, a normal
bucket listing is performed and an XML result containing the list of objects
is returned as the response.
+ If the request does contain the query parameter `search`, the search string
is parsed and validated.

  + If the search string is invalid, an `InvalidArgument` error is
  returned as the response.
  + If the search string is valid, it is parsed and an abstract syntax
  tree (AST) is generated.

+ The AST is then passed to the MongoDB backend to be used as the query filter
for retrieving objects in a bucket that satisfy the requested search
conditions (see the illustrative sketch after this list).
+ The filtered results are then parsed and returned as the response.

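As an illustration of the hand-off to MongoDB, here is a hypothetical example
of what an AST for a simple query could compile down to. The field layout
(`value.…`) is an assumption made for this illustration; the actual document
schema used by Zenko's MongoDB backend is an internal detail and may differ.

```python
# Hypothetical translation of the search string
#   x-amz-meta-color="blue" AND tags.type="color"
# into a MongoDB filter document. Field names are illustrative only.
sample_filter = {
    "$and": [
        {"value.x-amz-meta-color": "blue"},
        {"value.tags.type": "color"},
    ]
}

# With pymongo, a filter like this would be applied along these lines:
#   db.bucketname.find(sample_filter)
print(sample_filter)
```
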
The results from MD Search have the same structure as `GET Bucket`
results:

```xml
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    <Name>bucketname</Name>
    <Prefix/>
    <Marker/>
    <MaxKeys>1000</MaxKeys>
    <IsTruncated>false</IsTruncated>
    <Contents>
        <Key>objectKey</Key>
        <LastModified>2018-04-19T18:31:49.426Z</LastModified>
        <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
        <Size>0</Size>
        <Owner>
            <ID>79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be</ID>
            <DisplayName>Bart</DisplayName>
        </Owner>
        <StorageClass>STANDARD</StorageClass>
    </Contents>
    <Contents>
        ...
    </Contents>
</ListBucketResult>
```

## Performing MD Search with Zenko

To make a successful request to Zenko, you need:

+ Zenko credentials
+ a request signed with AWS Signature Version 4

With these requirements met, you can perform metadata searches by:

+ using the `search_bucket` tool in the
[Scality/S3](https://github.com/scality/S3) GitHub repository.
+ creating an AuthV4-signed HTTP request to Zenko in the programming language
of your choice

### Using the S3 Tool

After cloning the [Scality/S3](https://github.com/scality/S3) GitHub repository
and installing the necessary dependencies, you can run the following command
in the S3 project root directory to access the search tool.

```
node bin/search_bucket
```

This will generate the following output:

```
Usage: search_bucket [options]

Options:

-V, --version                 output the version number
-a, --access-key <accessKey>  Access key id
-k, --secret-key <secretKey>  Secret access key
-b, --bucket <bucket>         Name of the bucket
-q, --query <query>           Search query
-h, --host <host>             Host of the server
-p, --port <port>             Port of the server
-s --ssl
-v, --verbose
-h, --help                    output usage information
```

In the following examples, our Zenko Server is accessible on endpoint
`http://127.0.0.1:8000` and contains the bucket `zenkobucket`.

```
# search for objects with metadata "blue"
node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
    -q "x-amz-meta-color=blue" -h 127.0.0.1 -p 8000

# search for objects tagged with "type=color"
node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
    -q "tags.type=color" -h 127.0.0.1 -p 8000
```

### Coding Examples

Search requests can also be performed by making HTTP requests authenticated
with the `AWS Signature Version 4` scheme.
See the following URLs for more information about the V4 authentication scheme:

+ http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
+ http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html

You can also view examples for making requests with Auth V4 in various
languages [here](../examples).

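As a concrete starting point, here is a minimal Python sketch using the
third-party `requests` and `requests-aws4auth` packages (a choice made for
this example, not a requirement) against the local endpoint and example
credentials used above:

```python
# Send a metadata search request signed with AWS Signature V4.
# Assumptions: local Zenko endpoint, the example credentials from these
# docs, and an existing bucket named "zenkobucket".
import requests
from requests_aws4auth import AWS4Auth

auth = AWS4Auth("accessKey1", "verySecretKey1", "us-east-1", "s3")

resp = requests.get(
    "http://127.0.0.1:8000/zenkobucket",
    params={"search": "x-amz-meta-color=blue"},  # URL-encoded automatically
    auth=auth,
)
print(resp.status_code)
print(resp.text)  # ListBucketResult XML on success
```
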
### Specifying Metadata Fields

To search common metadata headers:

```
{metadata-key}{supported SQL op}{search value}

# example
key = blueObject
size > 0
key LIKE "blue.*"
```

To search custom user metadata:

```
# metadata must be prefixed with "x-amz-meta-"
x-amz-meta-{usermetadata-key}{supported SQL op}{search value}

# example
x-amz-meta-color = blue
x-amz-meta-color != red
x-amz-meta-color LIKE "b.*"
```

To search tags:

```
# tag searches must be prefixed with "tags."
tags.{tag-key}{supported SQL op}{search value}

# example
tags.type = color
```

### Differences from SQL

The MD Search queries are similar to the `WHERE` clauses of SQL queries, but
they differ in that:

+ MD Search queries follow the `PCRE` format
+ Search queries do not require values with hyphens to be enclosed in
backticks

```
# SQL query
`x-amz-meta-search-item` = `ice-cream-cone`

# MD Search query
x-amz-meta-search-item = ice-cream-cone
```

+ The search queries do not support all of the SQL operators.

  + Supported SQL operators: `=`, `<`, `>`, `<=`, `>=`, `!=`,
  `AND`, `OR`, `LIKE`, `<>`
  + Unsupported SQL operators: `NOT`, `BETWEEN`, `IN`, `IS`, `+`,
  `-`, `%`, `^`, `/`, `*`, `!`

#### Using Regular Expressions in MD Search

+ Regular expressions used in MD Search differ from SQL in that wildcards are
represented with `.*` instead of `%`.
+ Regex patterns must be wrapped in quotes, as not doing so can lead to
misinterpretation of patterns.
+ Regex patterns can be written in the `/pattern/` syntax or as
just the pattern itself if regex options are not required, similar to `PCRE`.

Example regular expressions:

```
# search for strings containing the substring "helloworld"
".*helloworld.*"
"/.*helloworld.*/"
"/.*helloworld.*/i"
```

@@ -1,263 +0,0 @@

Metadata Search Documentation
=============================

Description
-----------

This feature enables metadata search to be performed on the metadata of
objects stored in Zenko.

Requirements
------------

* MongoDB

Design
------

The Metadata Search feature expands on the existing :code:`GET Bucket` S3 API by
enabling users to conduct metadata searches by adding the custom Zenko query
string parameter, :code:`search`. The :code:`search` parameter is structured as a pseudo
SQL WHERE clause, and supports basic SQL operators. For example:
:code:`"A=1 AND B=2 OR C=3"` (complex queries can be built using nesting
operators, :code:`(` and :code:`)`).

The search process is as follows:

* Zenko receives a :code:`GET` request.

  .. code::

      # regular getBucket request
      GET /bucketname HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

      # getBucket versions request
      GET /bucketname?versions HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

      # search getBucket request
      GET /bucketname?search=key%3Dsearch-item HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

* If the request does *not* contain the :code:`search` query parameter, Zenko performs
  a normal bucket listing and returns an XML result containing the list of
  objects.
* If the request *does* contain the :code:`search` query parameter, Zenko parses and
  validates the search string.

  - If the search string is invalid, Zenko returns an :code:`InvalidArgument` error.

    .. code::

        <?xml version="1.0" encoding="UTF-8"?>
        <Error>
            <Code>InvalidArgument</Code>
            <Message>Invalid sql where clause sent as search query</Message>
            <Resource></Resource>
            <RequestId>d1d6afc64345a8e1198e</RequestId>
        </Error>

  - If the search string is valid, Zenko parses it and generates an abstract
    syntax tree (AST). The AST is then passed to the MongoDB backend to be
    used as the query filter for retrieving objects from a bucket that
    satisfy the requested search conditions. Zenko parses the filtered
    results and returns them as the response.

Metadata search results have the same structure as a :code:`GET Bucket` response:

.. code:: xml

    <?xml version="1.0" encoding="UTF-8"?>
    <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <Name>bucketname</Name>
        <Prefix/>
        <Marker/>
        <MaxKeys>1000</MaxKeys>
        <IsTruncated>false</IsTruncated>
        <Contents>
            <Key>objectKey</Key>
            <LastModified>2018-04-19T18:31:49.426Z</LastModified>
            <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
            <Size>0</Size>
            <Owner>
                <ID>79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be</ID>
                <DisplayName>Bart</DisplayName>
            </Owner>
            <StorageClass>STANDARD</StorageClass>
        </Contents>
        <Contents>
            ...
        </Contents>
    </ListBucketResult>

Performing Metadata Searches with Zenko
---------------------------------------

You can perform metadata searches by:

+ Using the :code:`search_bucket` tool in the
  `Scality/S3 <https://github.com/scality/S3>`_ GitHub repository.
+ Creating a signed HTTP request to Zenko in your preferred programming
  language.

Using the S3 Tool
+++++++++++++++++

After cloning the `Scality/S3 <https://github.com/scality/S3>`_ GitHub
repository and installing the necessary dependencies, run the following
command in the S3 project's root directory to access the search tool:

.. code::

    node bin/search_bucket

This generates the following output:

.. code::

    Usage: search_bucket [options]

    Options:

      -V, --version                 output the version number
      -a, --access-key <accessKey>  Access key id
      -k, --secret-key <secretKey>  Secret access key
      -b, --bucket <bucket>         Name of the bucket
      -q, --query <query>           Search query
      -h, --host <host>             Host of the server
      -p, --port <port>             Port of the server
      -s --ssl
      -v, --verbose
      -h, --help                    output usage information

In the following examples, Zenko Server is accessible on endpoint
:code:`http://127.0.0.1:8000` and contains the bucket :code:`zenkobucket`.

.. code::

    # search for objects with metadata "blue"
    node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
        -q "x-amz-meta-color=blue" -h 127.0.0.1 -p 8000

    # search for objects tagged with "type=color"
    node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
        -q "tags.type=color" -h 127.0.0.1 -p 8000

Coding Examples
+++++++++++++++

Search requests can also be performed by making HTTP requests authenticated
with one of the AWS Signature schemes: version 2 or version 4.
For more about the authentication schemes, see:

* https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html

You can also view examples for making requests with Auth V4 in various
languages `here <../../../examples>`__.

Specifying Metadata Fields
~~~~~~~~~~~~~~~~~~~~~~~~~~

To search system metadata headers:

.. code::

    {system-metadata-key}{supported SQL op}{search value}
    # example
    key = blueObject
    size > 0
    key LIKE "blue.*"

To search custom user metadata:

.. code::

    # metadata must be prefixed with "x-amz-meta-"
    x-amz-meta-{user-metadata-key}{supported SQL op}{search value}

    # example
    x-amz-meta-color = blue
    x-amz-meta-color != red
    x-amz-meta-color LIKE "b.*"

To search tags:

.. code::

    # tag searches must be prefixed with "tags."
    tags.{tag-key}{supported SQL op}{search value}
    # example
    tags.type = color

Example queries:

.. code::

    # searching for objects with custom metadata "color"="blue" and tagged
    # "type"="color"

    tags.type="color" AND x-amz-meta-color="blue"

    # searching for objects with the object key containing the substring "blue"
    # or (custom metadata "color"="blue" and tagged "type"="color")

    key LIKE '.*blue.*' OR (x-amz-meta-color="blue" AND tags.type="color")

Differences from SQL
++++++++++++++++++++

Zenko metadata search queries are similar to SQL-query :code:`WHERE` clauses, but
differ in that:

* They follow the :code:`PCRE` format.
* They do not require values with hyphens to be enclosed in backticks.

  .. code::

      # SQL query
      `x-amz-meta-search-item` = `ice-cream-cone`

      # MD Search query
      x-amz-meta-search-item = ice-cream-cone

* Search queries do not support all SQL operators.

  .. code::

      # Supported SQL operators:
      =, <, >, <=, >=, !=, AND, OR, LIKE, <>

      # Unsupported SQL operators:
      NOT, BETWEEN, IN, IS, +, -, %, ^, /, *, !

Using Regular Expressions in Metadata Search
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Regular expressions in Zenko metadata search differ from SQL in the following
ways:

+ Wildcards are represented with :code:`.*` instead of :code:`%`.
+ Regex patterns must be wrapped in quotes. Failure to do this can lead to
  misinterpretation of patterns.
+ As with :code:`PCRE`, regular expressions can be entered in either the
  :code:`/pattern/` syntax or as the pattern itself if regex options are
  not required.

Example regular expressions:

.. code::

    # search for strings containing the substring "helloworld"
    ".*helloworld.*"
    "/.*helloworld.*/"
    "/.*helloworld.*/i"

@@ -1,161 +0,0 @@

# Object Lock Feature Test Plan

## Feature Component Description

Implementing Object Lock will introduce six new APIs:

- putObjectLockConfiguration
- getObjectLockConfiguration
- putObjectRetention
- getObjectRetention
- putObjectLegalHold
- getObjectLegalHold

Along with these APIs, putBucket, putObject, deleteObject, and multiObjectDelete
will be affected. In Arsenal, both the BucketInfo and ObjectMD models will be
updated. Bucket policy and IAM policy permissions will be updated to include
the new API actions.

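For orientation, the sketch below shows how several of the new APIs could be
exercised end to end with boto3. The endpoint and credentials are assumptions
(the defaults used elsewhere in the CloudServer docs), not part of the test
plan itself.

```python
# Exercise bucket-level object lock plus the retention round trip.
from datetime import datetime, timedelta, timezone

import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://127.0.0.1:8000",  # assumed local CloudServer
    aws_access_key_id="accessKey1",        # assumed example credentials
    aws_secret_access_key="verySecretKey1",
)

# Object lock can only be enabled at bucket creation; versioning is
# enabled implicitly.
s3.create_bucket(Bucket="locked-bucket", ObjectLockEnabledForBucket=True)
s3.put_object(Bucket="locked-bucket", Key="obj", Body=b"data")

# putObjectRetention / getObjectRetention round trip.
s3.put_object_retention(
    Bucket="locked-bucket",
    Key="obj",
    Retention={
        "Mode": "GOVERNANCE",
        "RetainUntilDate": datetime.now(timezone.utc) + timedelta(days=1),
    },
)
print(s3.get_object_retention(Bucket="locked-bucket", Key="obj"))
```
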
## Functional Tests

### putBucket tests

- passing option to enable object lock updates bucket metadata and enables
  bucket versioning

### putBucketVersioning tests

- suspending versioning on bucket with object lock enabled returns error

### putObject tests

- putting retention configuration on object should be allowed
- putting invalid retention configuration returns error

### getObject tests

- getting object with retention information should include retention information

### copyObject tests

- copying object with retention information should include retention information

### initiateMultipartUpload tests

- mpu object initiated with retention information should include retention
  information

### putObjectLockConfiguration tests

- putting configuration as non-bucket-owner user returns AccessDenied error
- disabling object lock on bucket created with object lock returns error
- enabling object lock on bucket created without object lock returns
  InvalidBucketState error
- enabling object lock with token on bucket created without object lock succeeds
- putting valid object lock configuration when bucket does not have object
  lock enabled returns error (InvalidRequest?)
- putting valid object lock configuration updates bucket metadata
- putting invalid object lock configuration returns error
  - ObjectLockEnabled !== "Enabled"
  - Rule object doesn't contain DefaultRetention key
  - Mode !== "GOVERNANCE" or "COMPLIANCE"
  - Days are not an integer
  - Years are not an integer

### getObjectLockConfiguration tests

- getting configuration as non-bucket-owner user returns AccessDenied error
- getting configuration when none is set returns
  ObjectLockConfigurationNotFoundError error
- getting configuration returns correct object lock configuration for bucket

### putObjectRetention

- putting retention as non-bucket-owner user returns AccessDenied error
- putting retention on object in bucket without object lock enabled returns
  InvalidRequest error
- putting valid retention period updates object metadata

### getObjectRetention

- getting retention as non-bucket-owner user returns AccessDenied error
- getting retention when none is set returns NoSuchObjectLockConfiguration
  error
- getting retention returns correct object retention period

### putObjectLegalHold

- putting legal hold as non-bucket-owner user returns AccessDenied error
- putting legal hold on object in bucket without object lock enabled returns
  InvalidRequest error
- putting valid legal hold updates object metadata

### getObjectLegalHold

- getting legal hold as non-bucket-owner user returns AccessDenied error
- getting legal hold when none is set returns NoSuchObjectLockConfiguration
  error
- getting legal hold returns correct object legal hold

## End to End Tests

### Scenarios

- Create bucket with object lock enabled. Put object. Put object lock
  configuration. Put another object.
  - Ensure object put before configuration does not have retention period set
  - Ensure object put after configuration does have retention period set

- Create bucket without object lock. Put object. Enable object lock with token
  and put object lock configuration. Put another object.
  - Ensure object put before configuration does not have retention period set
  - Ensure object put after configuration does have retention period set

- Create bucket with object lock enabled and put configuration with COMPLIANCE
  mode. Put object.
  - Ensure object cannot be deleted (returns AccessDenied error).
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled and put configuration with GOVERNANCE
  mode. Put object.
  - Ensure user without permission cannot delete object
  - Ensure user without permission cannot overwrite object
  - Ensure user with permission can delete object
  - Ensure user with permission can overwrite object
  - Ensure user with permission can lengthen retention period
  - Ensure user with permission cannot shorten retention period

- Create bucket with object lock enabled and put configuration. Edit bucket
  metadata so retention period is expired. Put object.
  - Ensure object can be deleted.
  - Ensure object can be overwritten.

- Create bucket with object lock enabled and put configuration. Edit bucket
  metadata so retention period is expired. Put object. Put new retention
  period on object.
  - Ensure object cannot be deleted.
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled and put configuration. Put object.
  Edit object metadata so retention period is past expiration.
  - Ensure object can be deleted.
  - Ensure object can be overwritten.

- Create bucket with object lock enabled and put configuration. Edit bucket
  metadata so retention period is expired. Put object. Put legal hold
  on object.
  - Ensure object cannot be deleted.
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled and put configuration. Put object.
  Check object retention. Change bucket object lock configuration.
  - Ensure object retention period has not changed with bucket configuration.

- Create bucket with object lock enabled. Put object with legal hold.
  - Ensure object cannot be deleted.
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled. Put object with legal hold. Remove
  legal hold.
  - Ensure object can be deleted.
  - Ensure object can be overwritten.

@@ -1,73 +0,0 @@

# Cloudserver Release Plan

## Docker Image Generation

Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:

* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard

With every CI build, the CI pushes images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
the build chain, and so on.

Tagged versions of Cloudserver are stored in the production namespace.

## How to Pull Docker Images

```sh
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```

## Release Process

To release a production image:

* Create a PR to bump the package version.
  Update Cloudserver's `package.json` by bumping it to the relevant next
  version in a new PR. For example, if the last released version was
  `8.4.7`, the next version would be `8.4.8`.

```js
{
    "name": "cloudserver",
    "version": "8.4.8", <--- Here
    [...]
}
```

* Review & merge the PR.

* Create the release on GitHub:

  * Go to the Release tab (https://github.com/scality/cloudserver/releases);
  * Click the `Draft new release` button;
  * In the `tag` field, type the name of the release (`8.4.8`), and confirm
    that the tag will be created on publish;
  * Click the `Generate release notes` button to fill in the fields;
  * Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case);
  * Click `Publish the release` to create the GitHub release and git tag.

  Notes:

  * The Git tag will be created automatically.
  * This should be done as soon as the PR is merged, so that the tag
    is put on the "version bump" commit.

* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force):

  * Branch Name: the one used for the tag earlier, in this example `development/8.4`
  * Override Stage: 'release'
  * Extra properties:
    * name: `'tag'`, value: `[release version]`, in this example `'8.4.8'`

* Release the version on Jira:

  * Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
  * Create the next version
    * Name: `[next version]`, in this example `8.4.9`
  * Click `...` and select `Release` on the recently released version (`8.4.8`)
  * Fill in the field to move incomplete issues to the next version

@@ -6,7 +6,7 @@ Using Public Clouds as data backends

Introduction
------------

As stated in our `GETTING STARTED guide <GETTING_STARTED.html#location-configuration>`__,
new data backends can be added by creating a region (also called location
constraint) with the right endpoint and credentials.
This section of the documentation shows you how to set up our currently

@@ -139,7 +139,7 @@ to start the server and start writing data to AWS S3 through CloudServer.

.. code:: shell

    # Start the server locally
    $> S3DATA=multiple yarn start

Run the server as a docker container with the ability to write to AWS S3
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -162,8 +162,8 @@ CloudServer.

        -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
        -v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
        -v ~/.aws/credentials:/root/.aws/credentials \
        -e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
        -d scality/cloudserver

Testing: put an object to AWS S3 using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -306,7 +306,7 @@ to start the server and start writing data to MS Azure through CloudServer.

.. code:: shell

    # Start the server locally
    $> S3DATA=multiple yarn start

Run the server as a docker container with the ability to write to MS Azure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -324,7 +324,7 @@ CloudServer.

        -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
        -v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
        -e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
        -d scality/cloudserver

Testing: put an object to MS Azure using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -1,8 +1,8 @@

============================================
Add New Backend Storage To Zenko CloudServer
============================================

This set of documents aims at bootstrapping developers with Zenko's CloudServer
module, so they can then go on and contribute features.

.. toctree::

@@ -43,7 +43,7 @@ Openstack Swift x

and filling out the "Feature Request" section of our
template.

To add support for a new backend to the official CloudServer
repository, please follow these steps:

- familiarize yourself with our `Contributing Guidelines`_

@@ -2,28 +2,28 @@
Add A New Backend
=================

Supporting all possible public cloud storage APIs is CloudServer's
Supporting all possible public cloud storage APIs is Cloudserver's
ultimate goal. As an open source project, contributions are welcome.

The first step is to get familiar with building a custom Docker image
for CloudServer.
for Cloudserver.

Build a Custom Docker Image
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Clone Zenko's CloudServer, install all dependencies and start the
Clone Zenko's Cloudserver, install all dependencies and start the
service:

.. code-block:: shell

    $ git clone https://github.com/scality/cloudserver
    $ cd cloudserver
    $ yarn install
    $ yarn start
    $ npm install
    $ npm start

.. tip::

    Some optional dependencies may fail, resulting in you seeing `yarn
    Some optional dependencies may fail, resulting in you seeing `NPM
    WARN` messages; these can safely be ignored. Refer to the User
    documentation for all available options.

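The actual image build falls outside this hunk; a sketch of that step (the tag
is a made-up example) would be:

.. code-block:: shell

    # Hypothetical build of a custom CloudServer image from the cloned repo;
    # "my-cloudserver:dev" is an illustrative tag, not from the diff.
    $ docker build -t my-cloudserver:dev .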
@@ -6,7 +6,7 @@ These backends abstract the complexity of multiple APIs to let users
work on a single common namespace across multiple clouds.

This documents aims at introducing you to the right files in
CloudServer (the Zenko stack's subcomponent in charge of API
Cloudserver (the Zenko stack's subcomponent in charge of API
translation, among other things) to add support to your own backend of
choice.

@@ -1,10 +1,10 @@
======================
S3-Compatible Backends
S3 compatible backends
======================


Adding Support in CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adding support in Zenko’s Cloudserver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is the easiest case for backend support integration: there is nothing to do
but configuration! Follow the steps described in our

@@ -33,7 +33,7 @@ definition for that backend will look something like:

    }
    },

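Only the tail of that JSON definition survives in this hunk; a hedged sketch of
a full entry for an S3-compatible backend (every value below is invented for
illustration) might be:

.. code:: shell

    # Hypothetical locationConfig.json fragment for a generic S3-compatible
    # backend; endpoint, bucket and profile names are illustrative only.
    $> cat <<'EOF'
    "s3-compat-location": {
        "type": "aws_s3",
        "legacyAwsBehavior": false,
        "details": {
            "awsEndpoint": "s3.example.com",
            "bucketName": "cloudserver-data",
            "bucketMatch": true,
            "credentialsProfile": "s3compat"
        }
    }
    EOF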
Adding Support in Zenko Orbit
Adding support in Zenko Orbit
#############################

This can only be done by our core developpers' team. If that’s what you’re

Binary file not shown (before: image, 23 KiB).
@@ -4,17 +4,17 @@
#
# tox -e pip-compile
#
alabaster==0.7.12 \
    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 \
alabaster==0.7.11 \
    --hash=sha256:674bb3bab080f598371f4443c5008cbfeb1a5e622dd312395d2d82af2c54c456 \
    --hash=sha256:b63b1f4dc77c074d386752ec4a8a7517600f6c0db8cd42980cae17ab7b3275d7 \
    # via sphinx
babel==2.6.0 \
    --hash=sha256:6778d85147d5d85345c14a26aada5e478ab04e39b078b0745ee6870c2b5cf669 \
    --hash=sha256:8cba50f48c529ca3fa18cf81fa9403be176d374ac4d60738b839122dfaaa3d23 \
    # via sphinx
certifi==2018.10.15 \
    --hash=sha256:339dc09518b07e2fa7eda5450740925974815557727d6bd35d319c1524a04a4c \
    --hash=sha256:6d58c986d22b038c8c0df30d639f23a3e6d172a05c3583e766f4c0b785c0986a \
certifi==2018.4.16 \
    --hash=sha256:13e698f54293db9f89122b0581843a782ad0934a4fe0172d2a980ba77fc61bb7 \
    --hash=sha256:9fa520c1bacfb634fa7af20a76bcbd3d5fb390481724c597da32c719a7dca4b0 \
    # via requests
chardet==3.0.4 \
    --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
@@ -32,66 +32,39 @@ idna==2.7 \
    --hash=sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e \
    --hash=sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16 \
    # via requests
imagesize==1.1.0 \
    --hash=sha256:3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8 \
    --hash=sha256:f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5 \
imagesize==1.0.0 \
    --hash=sha256:3620cc0cadba3f7475f9940d22431fc4d407269f1be59ec9b8edcca26440cf18 \
    --hash=sha256:5b326e4678b6925158ccc66a9fa3122b6106d7c876ee32d7de6ce59385b96315 \
    # via sphinx
jinja2==2.10 \
    --hash=sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd \
    --hash=sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4 \
    # via sphinx
markupsafe==1.1.0 \
    --hash=sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432 \
    --hash=sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b \
    --hash=sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9 \
    --hash=sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af \
    --hash=sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834 \
    --hash=sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd \
    --hash=sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d \
    --hash=sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7 \
    --hash=sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b \
    --hash=sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3 \
    --hash=sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c \
    --hash=sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2 \
    --hash=sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7 \
    --hash=sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36 \
    --hash=sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1 \
    --hash=sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e \
    --hash=sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1 \
    --hash=sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c \
    --hash=sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856 \
    --hash=sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550 \
    --hash=sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492 \
    --hash=sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672 \
    --hash=sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401 \
    --hash=sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6 \
    --hash=sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6 \
    --hash=sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c \
    --hash=sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd \
    --hash=sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1 \
markupsafe==1.0 \
    --hash=sha256:a6be69091dac236ea9c6bc7d012beab42010fa914c459791d627dad4910eb665 \
    # via jinja2
packaging==18.0 \
    --hash=sha256:0886227f54515e592aaa2e5a553332c73962917f2831f1b0f9b9f4380a4b9807 \
    --hash=sha256:f95a1e147590f204328170981833854229bb2912ac3d5f89e2a8ccd2834800c9 \
packaging==17.1 \
    --hash=sha256:e9215d2d2535d3ae866c3d6efc77d5b24a0192cce0ff20e42896cc0664f889c0 \
    --hash=sha256:f019b770dd64e585a99714f1fd5e01c7a8f11b45635aa953fd41c689a657375b \
    # via sphinx
pygments==2.2.0 \
    --hash=sha256:78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d \
    --hash=sha256:dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc \
    # via sphinx
pyparsing==2.3.0 \
    --hash=sha256:40856e74d4987de5d01761a22d1621ae1c7f8774585acae358aa5c5936c6c90b \
    --hash=sha256:f353aab21fd474459d97b709e527b5571314ee5f067441dc9f88e33eecd96592 \
pyparsing==2.2.0 \
    --hash=sha256:0832bcf47acd283788593e7a0f542407bd9550a55a8a8435214a1960e04bcb04 \
    --hash=sha256:fee43f17a9c4087e7ed1605bd6df994c6173c1e977d7ade7b651292fab2bd010 \
    # via packaging
pytz==2018.7 \
    --hash=sha256:31cb35c89bd7d333cd32c5f278fca91b523b0834369e757f4c5641ea252236ca \
    --hash=sha256:8e0f8568c118d3077b46be7d654cc8167fa916092e28320cde048e54bfc9f1e6 \
pytz==2018.5 \
    --hash=sha256:a061aa0a9e06881eb8b3b2b43f05b9439d6583c206d0a6c340ff72a7b6669053 \
    --hash=sha256:ffb9ef1de172603304d9d2819af6f5ece76f2e85ec10692a524dd876e72bf277 \
    # via babel
recommonmark==0.4.0 \
    --hash=sha256:6e29c723abcf5533842376d87c4589e62923ecb6002a8e059eb608345ddaff9d \
    --hash=sha256:cd8bf902e469dae94d00367a8197fb7b81fcabc9cfb79d520e0d22d0fbeaa8b7
requests==2.20.1 \
    --hash=sha256:65b3a120e4329e33c9889db89c80976c5272f56ea92d3e74da8a463992e3ff54 \
    --hash=sha256:ea881206e59f41dbd0bd445437d792e43906703fff75ca8ff43ccdb11f33f263 \
requests==2.19.1 \
    --hash=sha256:63b52e3c866428a224f97cab011de738c36aec0185aa91cfacd418b5d58911d1 \
    --hash=sha256:ec22d826a36ed72a7358ff3fe56cbd4ba69dd7a6718ffd450ff0e9df7a47ce6a \
    # via sphinx
six==1.11.0 \
    --hash=sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9 \
@@ -101,19 +74,19 @@ snowballstemmer==1.2.1 \
    --hash=sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128 \
    --hash=sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89 \
    # via sphinx
sphinx==1.8.2 \
    --hash=sha256:120732cbddb1b2364471c3d9f8bfd4b0c5b550862f99a65736c77f970b142aea \
    --hash=sha256:b348790776490894e0424101af9c8413f2a86831524bd55c5f379d3e3e12ca64
sphinx==1.7.5 \
    --hash=sha256:85f7e32c8ef07f4ba5aeca728e0f7717bef0789fba8458b8d9c5c294cad134f3 \
    --hash=sha256:d45480a229edf70d84ca9fae3784162b1bc75ee47e480ffe04a4b7f21a95d76d
sphinxcontrib-websupport==1.1.0 \
    --hash=sha256:68ca7ff70785cbe1e7bccc71a48b5b6d965d79ca50629606c7861a21b206d9dd \
    --hash=sha256:9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9 \
    # via sphinx
typing==3.6.6 \
    --hash=sha256:4027c5f6127a6267a435201981ba156de91ad0d1d98e9ddc2aa173453453492d \
    --hash=sha256:57dcf675a99b74d64dacf6fba08fb17cf7e3d5fdff53d4a30ea2a5e7e52543d4 \
    --hash=sha256:a4c8473ce11a65999c8f59cb093e70686b6c84c98df58c1dae9b3b196089858a \
typing==3.6.4 \
    --hash=sha256:3a887b021a77b292e151afb75323dea88a7bc1b3dfa92176cff8e44c8b68bddf \
    --hash=sha256:b2c689d54e1144bbcfd191b0832980a21c2dbcf7b5ff7a66248a60c90e951eb8 \
    --hash=sha256:d400a9344254803a2368533e4533a4200d21eb7b6b729c173bc38201a74db3f2 \
    # via sphinx
urllib3==1.24.1 \
    --hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
    --hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
urllib3==1.23 \
    --hash=sha256:a68ac5e15e76e7e5dd2b8f94007233e01effe3e50e8daddf69acfd81cb686baf \
    --hash=sha256:b5725a0bd4ba422ab0e66e89e030c806576753ea3ee08554382c14e685d117b5 \
    # via requests

@@ -0,0 +1,283 @@
---
version: 0.2

branches:
  feature/*, improvement/*, bugfix/*, w/*, q/*, hotfix/*:
    stage: pre-merge
  development/*:
    stage: post-merge

models:
  - env: &global-env
      azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key)s
      azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name)s
      azurebackend_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint)s
      azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key_2)s
      azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name_2)s
      azurebackend2_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint_2)s
      azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key)s
      azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name)s
      azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint)s
      azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
        %(secret:azure_storage_access_key)s
      azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
        %(secret:azure_storage_account_name)s
      azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
        %(secret:azure_storage_endpoint)s
      azuretest_AZURE_BLOB_ENDPOINT: "%(secret:azure_storage_endpoint)s"
      b2backend_B2_ACCOUNT_ID: "%(secret:b2backend_b2_account_id)s"
      b2backend_B2_STORAGE_ACCESS_KEY: >-
        %(secret:b2backend_b2_storage_access_key)s
      GOOGLE_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
      GOOGLE_SERVICE_KEY: "%(secret:gcp_service_key)s"
      AWS_S3_BACKEND_ACCESS_KEY: "%(secret:aws_s3_backend_access_key)s"
      AWS_S3_BACKEND_SECRET_KEY: "%(secret:aws_s3_backend_secret_key)s"
      AWS_S3_BACKEND_ACCESS_KEY_2: "%(secret:aws_s3_backend_access_key_2)s"
      AWS_S3_BACKEND_SECRET_KEY_2: "%(secret:aws_s3_backend_secret_key_2)s"
      AWS_GCP_BACKEND_ACCESS_KEY: "%(secret:aws_gcp_backend_access_key)s"
      AWS_GCP_BACKEND_SECRET_KEY: "%(secret:aws_gcp_backend_secret_key)s"
      AWS_GCP_BACKEND_ACCESS_KEY_2: "%(secret:aws_gcp_backend_access_key_2)s"
      AWS_GCP_BACKEND_SECRET_KEY_2: "%(secret:aws_gcp_backend_secret_key_2)s"
      b2backend_B2_STORAGE_ENDPOINT: "%(secret:b2backend_b2_storage_endpoint)s"
      gcpbackend2_GCP_SERVICE_EMAIL: "%(secret:gcp2_service_email)s"
      gcpbackend2_GCP_SERVICE_KEY: "%(secret:gcp2_service_key)s"
      gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackend_GCP_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
      gcpbackend_GCP_SERVICE_KEY: "%(secret:gcp_service_key)s"
      gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
        %(secret:gcpbackendmismatch_gcp_service_email)s
      gcpbackendmismatch_GCP_SERVICE_KEY: >-
        %(secret:gcpbackendmismatch_gcp_service_key)s
      gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
      gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  - env: &mongo-vars
      S3BACKEND: "mem"
      MPU_TESTING: "yes"
      S3METADATA: mongodb
  - env: &multiple-backend-vars
      S3BACKEND: "mem"
      S3DATA: "multiple"
  - env: &file-mem-mpu
      S3BACKEND: "file"
      S3VAULT: "mem"
      MPU_TESTING: "yes"
  - Git: &clone
      name: Pull repo
      repourl: '%(prop:git_reference)s'
      shallow: True
      retryFetch: True
      haltOnFailure: True
  - ShellCommand: &credentials
      name: Setup Credentials
      command: bash eve/workers/build/credentials.bash
      haltOnFailure: True
      env: *global-env
  - ShellCommand: &npm-install
      name: install modules
      command: npm install
      haltOnFailure: True
  - Upload: &upload-artifacts
      source: /artifacts
      urls:
        - "*"
  - ShellCommand: &follow-s3-log
      logfiles:
        s3:
          filename: /artifacts/s3.log
          follow: true
  - ShellCommand: &add-hostname
      name: add hostname
      command: |
        echo "127.0.0.1 testrequestbucket.localhost" >> /etc/hosts
        echo \
        "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" \
        >> /etc/hosts
      haltOnFailure: True

stages:
  pre-merge:
    worker:
      type: local
    steps:
      - SetProperty:
          property: artifacts_name
          value: "scality-s3-%(prop:buildnumber)s"
          haltOnFailure: True

      - TriggerStages:
          name: Launch all workers
          stage_names:
            - linting-coverage
            - file-ft-tests
            - multiple-backend-test
            - mongo-ft-tests
          waitForFinish: True
          haltOnFailure: True

  linting-coverage:
    worker:
      type: docker
      path: eve/workers/build
      volumes: &default_volumes
        - '/home/eve/workspace'
    steps:
      - Git: *clone
      - ShellCommand: *npm-install
      - ShellCommand: *add-hostname
      - ShellCommand:
          name: Linting
          command: |
            npm run --silent lint -- --max-warnings 0
            npm run --silent lint_md
            flake8 $(git ls-files "*.py")
            yamllint $(git ls-files "*.yml")
      - ShellCommand:
          name: Unit Coverage
          command: |
            mkdir -p $CIRCLE_TEST_REPORTS/unit
            npm run unit_coverage
            npm run unit_coverage_legacy_location
          env: &shared-vars
            CIRCLE_TEST_REPORTS: /tmp
            CIRCLE_ARTIFACTS: /tmp
            CI_REPORTS: /tmp
      - ShellCommand:
          name: Unit Coverage logs
          command: find /tmp/unit -exec cat {} \;

  multiple-backend-test:
    worker:
      type: kube_pod
      path: eve/workers/pod.yaml
      images:
        aggressor: eve/workers/build
        s3: "."
      vars:
        aggressorMemLimit: "2Gi"
        s3MemLimit: "2Gi"
        env:
          <<: *multiple-backend-vars
          <<: *global-env
    steps:
      - Git: *clone
      - ShellCommand: *credentials
      - ShellCommand: *npm-install
      - ShellCommand:
          command: |
            bash -c "
            source /root/.aws/exports &> /dev/null
            set -ex
            bash wait_for_local_port.bash 8000 40
            npm run multiple_backend_test"
          <<: *follow-s3-log
          env:
            <<: *multiple-backend-vars
            <<: *global-env
            S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - ShellCommand:
          command: mvn test
          workdir: build/tests/functional/jaws
          <<: *follow-s3-log
          env:
            <<: *multiple-backend-vars
      - ShellCommand:
          command: rspec tests.rb
          workdir: build/tests/functional/fog
          <<: *follow-s3-log
          env:
            <<: *multiple-backend-vars
      - Upload: *upload-artifacts

  mongo-ft-tests:
    worker: &s3-pod
      type: kube_pod
      path: eve/workers/pod.yaml
      images:
        aggressor: eve/workers/build
        s3: "."
      vars:
        aggressorMemLimit: "2Gi"
        s3MemLimit: "1664Mi"
        redis: enabled
        env:
          <<: *mongo-vars
          <<: *global-env
    steps:
      - Git: *clone
      - ShellCommand: *credentials
      - ShellCommand: *npm-install
      - ShellCommand:
          command: |
            set -ex
            bash wait_for_local_port.bash 8000 40
            npm run ft_test
          <<: *follow-s3-log
          env:
            <<: *mongo-vars
            <<: *global-env
      - Upload: *upload-artifacts

  file-ft-tests:
    worker:
      type: kube_pod
      path: eve/workers/pod.yaml
      images:
        aggressor: eve/workers/build
        s3: "."
      vars:
        aggressorMemLimit: "1920Mi"
        s3MemLimit: "2Gi"
        redis: enabled
        env:
          <<: *file-mem-mpu
          <<: *global-env
    steps:
      - Git: *clone
      - ShellCommand: *credentials
      - ShellCommand: *npm-install
      - ShellCommand:
          command: |
            set -ex
            bash wait_for_local_port.bash 8000 40
            npm run ft_test
          <<: *follow-s3-log
          env:
            <<: *file-mem-mpu
            <<: *global-env
      - Upload: *upload-artifacts

  post-merge:
    worker:
      type: local
    steps:
      - Git: *clone
      - ShellCommand: &docker_login
          name: Private Registry Login
          command: >
            docker login
            -u '%(secret:private_registry_username)s'
            -p '%(secret:private_registry_password)s'
            '%(secret:private_registry_url)s'
      - SetProperty: &docker_image_name
          name: Set docker image name property
          property: docker_image_name
          value:
            "%(secret:private_registry_url)s/zenko/cloudserver:\
            %(prop:commit_short_revision)s"
      - ShellCommand:
          name: Build docker image
          command: >-
            docker build --no-cache -t %(prop:docker_image_name)s .
      - ShellCommand:
          name: Push image
          command: docker push %(prop:docker_image_name)s

@@ -0,0 +1,57 @@
FROM buildpack-deps:stretch-curl

#
# Install packages needed by the buildchain
#
ENV LANG C.UTF-8
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
    && echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
    && apt-get update \
    && cat /tmp/*packages.list | xargs apt-get install -y \
    && git clone https://github.com/tj/n.git \
    && make -C ./n \
    && n 6 latest \
    && pip install pip==9.0.1 \
    && rm -rf ./n \
    && rm -rf /var/lib/apt/lists/* \
    && rm -f /tmp/packages.list

#
# Add user eve
#

RUN adduser -u 1042 --home /home/eve --disabled-password --gecos "" eve \
    && adduser eve sudo \
    && sed -ri 's/(%sudo.*)ALL$/\1NOPASSWD:ALL/' /etc/sudoers
#
# Install Dependencies
#

# Install RVM and gems
ENV RUBY_VERSION="2.4.1"
COPY ./gems.list /tmp/
RUN cat /tmp/gems.list | xargs gem install
#RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 \
#    && curl -sSL https://get.rvm.io | bash -s stable --ruby=$RUBY_VERSION \
#    && usermod -a -G rvm eve
#RUN /bin/bash -l -c "\
#    source /usr/local/rvm/scripts/rvm \
#    && cat /tmp/gems.list | xargs gem install \
#    && rm /tmp/gems.list"

# Install Pip packages
COPY ./pip_packages.list /tmp/
RUN cat /tmp/pip_packages.list | xargs pip install \
    && rm -f /tmp/pip_packages.list \
    && mkdir /home/eve/.aws \
    && chown eve /home/eve/.aws

#
# Run buildbot-worker on startup
#

ARG BUILDBOT_VERSION
RUN pip install buildbot-worker==$BUILDBOT_VERSION

CMD ["/bin/bash", "-l", "-c", "buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS && buildbot-worker start --nodaemon"]

@@ -0,0 +1,13 @@
ca-certificates
git
gnupg
libffi-dev
libssl-dev
python-pip
python2.7
python2.7-dev
software-properties-common
sudo
tcl
wget
procps

@@ -2,9 +2,9 @@
set -x #echo on
set -e #exit at the first error

mkdir -p $HOME/.aws
mkdir -p ~/.aws

cat >>$HOME/.aws/credentials <<EOF
cat >>/root/.aws/credentials <<EOF
[default]
aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY

@@ -0,0 +1,4 @@
fog-aws:1.3.0
json
mime-types:3.1
rspec:3.5

@@ -0,0 +1,3 @@
flake8
s3cmd==1.6.1
yamllint

@@ -0,0 +1,11 @@
build-essential
curl
default-jdk
libdigest-hmac-perl
lsof
maven
netcat
redis-server
ruby-full
yarn=1.7.0-1
zlib1g-dev

@@ -0,0 +1,162 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: "proxy-ci-test-pod"
spec:
  activeDeadlineSeconds: 3600
  restartPolicy: Never
  terminationGracePeriodSeconds: 10
  hostAliases:
    - ip: "127.0.0.1"
      hostnames:
        - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
        - "testrequestbucket.localhost"
  containers:
    {% if vars.env.S3METADATA is defined and vars.env.S3METADATA == "mongodb" -%}
    - name: mongo
      image: scality/ci-mongo:3.4
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 100m
          memory: 1Gi
        limits:
          cpu: 500m
          memory: 1Gi
    {%- endif %}
    - name: aggressor
      image: {{ images.aggressor }}
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 100m
          memory: 1Gi
        limits:
          cpu: "1"
          memory: {{ vars.aggressorMemLimit }}
      volumeMounts:
        - name: creds
          readOnly: false
          mountPath: /root/.aws
        - name: artifacts
          readOnly: true
          mountPath: /artifacts
      command:
        - bash
        - -lc
        - |
          buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS
          buildbot-worker start --nodaemon
      env:
        - name: CI
          value: "true"
        - name: ENABLE_LOCAL_CACHE
          value: "true"
        - name: REPORT_TOKEN
          value: "report-token-1"
        - name: REMOTE_MANAGEMENT_DISABLE
          value: "1"
        {% for key, value in vars.env.items() %}
        - name: {{ key }}
          value: "{{ value }}"
        {% endfor %}
    - name: s3
      image: {{ images.s3 }}
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 200m
          memory: 1Gi
        limits:
          cpu: "2"
          memory: {{ vars.s3MemLimit }}
      volumeMounts:
        - name: creds
          readOnly: false
          mountPath: /root/.aws
        - name: certs
          readOnly: true
          mountPath: /tmp
        - name: artifacts
          readOnly: false
          mountPath: /artifacts
      command:
        - bash
        - -ec
        - |
          sleep 10 # wait for mongo
          /usr/src/app/docker-entrypoint.sh npm start | tee -a /artifacts/s3.log
      env:
        {% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" -%}
        - name: S3_LOCATION_FILE
          value: "/usr/src/app/tests/locationConfig/locationConfigTests.json"
        {%- endif %}
        - name: CI
          value: "true"
        - name: ENABLE_LOCAL_CACHE
          value: "true"
        - name: MONGODB_HOSTS
          value: "localhost:27018"
        - name: MONGODB_RS
          value: "rs0"
        - name: REDIS_HOST
          value: "localhost"
        - name: REDIS_PORT
          value: "6379"
        - name: REPORT_TOKEN
          value: "report-token-1"
        - name: REMOTE_MANAGEMENT_DISABLE
          value: "1"
        - name: HEALTHCHECKS_ALLOWFROM
          value: "0.0.0.0/0"
        {% for key, value in vars.env.items() %}
        - name: {{ key }}
          value: "{{ value }}"
        {% endfor %}
    {% if vars.redis is defined and vars.redis == "enabled" -%}
    - name: redis
      image: redis:alpine
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits:
          cpu: 200m
          memory: 128Mi
    {%- endif %}
    {% if vars.env.CI_PROXY is defined and vars.env.CI_PROXY == "true" -%}
    - name: squid
      image: scality/ci-squid
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits:
          cpu: 250m
          memory: 128Mi
      volumeMounts:
        - name: certs
          readOnly: false
          mountPath: /ssl
      command:
        - sh
        - -exc
        - |
          mkdir -p /ssl
          openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
            -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
            -keyout /ssl/myca.pem -out /ssl/myca.pem
          cp /ssl/myca.pem /ssl/CA.pem
          squid -f /etc/squid/squid.conf -N -z
          squid -f /etc/squid/squid.conf -NYCd 1
    {%- endif %}
  volumes:
    - name: creds
      emptyDir: {}
    - name: certs
      emptyDir: {}
    - name: artifacts
      emptyDir: {}

@@ -48,7 +48,7 @@ signed_headers = 'host;x-amz-content-sha256;x-amz-date'
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
    .format(method, canonical_uri, canonical_querystring, canonical_headers,
            signed_headers, payload_hash)
print(canonical_request)
print canonical_request

credential_scope = '{0}/{1}/{2}/aws4_request' \
    .format(date_stamp, region, service)

@@ -76,4 +76,4 @@ headers = {
endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring

r = requests.get(endpoint, headers=headers)
print(r.text)
print (r.text)

@@ -1,28 +0,0 @@
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0

ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json

COPY . ${HOME_DIR}/s3
RUN chown -R ${USER} ${HOME_DIR}
RUN pip3 install redis===3.5.3 requests==2.27.1 && \
    apt-get install -y git-lfs

USER ${USER}
WORKDIR ${HOME_DIR}/s3
RUN rm -f ~/.gitconfig && \
    git config --global --add safe.directory . && \
    git lfs install && \
    GIT_LFS_SKIP_SMUDGE=1 && \
    yarn global add typescript && \
    yarn install --frozen-lockfile --production --network-concurrency 1 && \
    yarn cache clean --all && \
    yarn global remove typescript

# run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed!
RUN ln -sf /scality-kms node_modules

EXPOSE 8000

CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/supervisord.conf"
index.js (7 changes)
@@ -1,10 +1,3 @@
'use strict'; // eslint-disable-line strict

require('werelogs').stderrUtils.catchAndTimestampStderr(
    undefined,
    // Do not exit as workers have their own listener that will exit
    // But primary don't have another listener
    require('cluster').isPrimary ? 1 : null,
);

require('./lib/server.js')();

lib/Config.js (1314 changes): file diff suppressed because it is too large.
lib/api/api.js (321 changes)
@@ -1,41 +1,24 @@
const { auth, errors, policies } = require('arsenal');
const async = require('async');
const { auth, errors } = require('arsenal');

const bucketDelete = require('./bucketDelete');
const bucketDeleteCors = require('./bucketDeleteCors');
const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet');
const bucketGet = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors');
const bucketGetVersioning = require('./bucketGetVersioning');
const bucketGetWebsite = require('./bucketGetWebsite');
const bucketGetLocation = require('./bucketGetLocation');
const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut');
const bucketPutACL = require('./bucketPutACL');
const bucketPutCors = require('./bucketPutCors');
const bucketPutVersioning = require('./bucketPutVersioning');
const bucketPutTagging = require('./bucketPutTagging');
const bucketDeleteTagging = require('./bucketDeleteTagging');
const bucketGetTagging = require('./bucketGetTagging');
const bucketPutWebsite = require('./bucketPutWebsite');
const bucketPutReplication = require('./bucketPutReplication');
const bucketPutLifecycle = require('./bucketPutLifecycle');
const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight');
@@ -43,75 +26,40 @@ const completeMultipartUpload = require('./completeMultipartUpload');
const initiateMultipartUpload = require('./initiateMultipartUpload');
const listMultipartUploads = require('./listMultipartUploads');
const listParts = require('./listParts');
const metadataSearch = require('./metadataSearch');
const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy');
const { objectDelete } = require('./objectDelete');
const objectDelete = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL');
const objectGetLegalHold = require('./objectGetLegalHold');
const objectGetRetention = require('./objectGetRetention');
const objectGetTagging = require('./objectGetTagging');
const objectHead = require('./objectHead');
const objectPut = require('./objectPut');
const objectPutACL = require('./objectPutACL');
const objectPutLegalHold = require('./objectPutLegalHold');
const objectPutTagging = require('./objectPutTagging');
const objectPutPart = require('./objectPutPart');
const objectPutCopyPart = require('./objectPutCopyPart');
const objectPutRetention = require('./objectPutRetention');
const objectRestore = require('./objectRestore');
const prepareRequestContexts
    = require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet');
const vault = require('../auth/vault');
const website = require('./website');
const websiteGet = require('./websiteGet');
const websiteHead = require('./websiteHead');
const writeContinue = require('../utilities/writeContinue');
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
const parseCopySource = require('./apiUtils/object/parseCopySource');
const { tagConditionKeyAuth } = require('./apiUtils/authorization/tagConditionKeys');
const { isRequesterASessionUser } = require('./apiUtils/authorization/permissionChecks');
const checkHttpHeadersSize = require('./apiUtils/object/checkHttpHeadersSize');

const monitoringMap = policies.actionMaps.actionMonitoringMapS3;

auth.setHandler(vault);

/* eslint-disable no-param-reassign */
const api = {
    callApiMethod(apiMethod, request, response, log, callback) {
        // Attach the apiMethod method to the request, so it can used by monitoring in the server
        // eslint-disable-next-line no-param-reassign
        request.apiMethod = apiMethod;
        // Array of end of API callbacks, used to perform some logic
        // at the end of an API.
        // eslint-disable-next-line no-param-reassign
        request.finalizerHooks = [];

        const actionLog = monitoringMap[apiMethod];
        if (!actionLog &&
            apiMethod !== 'websiteGet' &&
            apiMethod !== 'websiteHead' &&
            apiMethod !== 'corsPreflight') {
            log.error('callApiMethod(): No actionLog for this api method', {
                apiMethod,
            });
        }
        log.addDefaultFields({
            service: 's3',
            action: actionLog,
            bucketName: request.bucketName,
        });
        if (request.objectKey) {
            log.addDefaultFields({
                objectKey: request.objectKey,
            });
        }
        let returnTagCount = true;

        const validationRes = validateQueryAndHeaders(request, log);
        const validationRes =
            validateQueryAndHeaders(request.method, request.query,
                request.headers, log);
        if (validationRes.error) {
            log.debug('request query / header validation failed', {
                error: validationRes.error,
@@ -123,7 +71,6 @@ const api = {
        // no need to check auth on website or cors preflight requests
        if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
            apiMethod === 'corsPreflight') {
            request.actionImplicitDenies = false;
            return this[apiMethod](request, log, callback);
        }

@@ -136,242 +83,128 @@
            return process.nextTick(callback, parsingError);
        }

        const { httpHeadersSizeError } = checkHttpHeadersSize(request.headers);
        if (httpHeadersSizeError) {
            log.debug('http header size limit exceeded', {
                error: httpHeadersSizeError,
            });
            return process.nextTick(callback, httpHeadersSizeError);
        }

        const requestContexts = prepareRequestContexts(apiMethod, request,
            sourceBucket, sourceObject, sourceVersionId);
        // Extract all the _apiMethods and store them in an array
        const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : [];
        // Attach the names to the current request
        // eslint-disable-next-line no-param-reassign
        request.apiMethods = apiMethods;

        function checkAuthResults(authResults) {
            let returnTagCount = true;
            const isImplicitDeny = {};
            let isOnlyImplicitDeny = true;
            if (apiMethod === 'objectGet') {
                // first item checks s3:GetObject(Version) action
                if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
                    log.trace('get object authorization denial from Vault');
                    return errors.AccessDenied;
                }
                // TODO add support for returnTagCount in the bucket policy
                // checks
                isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
                // second item checks s3:GetObject(Version)Tagging action
                if (!authResults[1].isAllowed) {
                    log.trace('get tagging authorization denial ' +
                        'from Vault');
                    returnTagCount = false;
                }
            } else {
                for (let i = 0; i < authResults.length; i++) {
                    isImplicitDeny[authResults[i].action] = true;
                    if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
                        // Any explicit deny rejects the current API call
                        log.trace('authorization denial from Vault');
                        return errors.AccessDenied;
                    }
                    if (authResults[i].isAllowed) {
                        // If the action is allowed, the result is not implicit
                        // Deny.
                        isImplicitDeny[authResults[i].action] = false;
                        isOnlyImplicitDeny = false;
                    }
                }
            }
            // These two APIs cannot use ACLs or Bucket Policies, hence, any
            // implicit deny from vault must be treated as an explicit deny.
            if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
                return errors.AccessDenied;
            }
            return { returnTagCount, isImplicitDeny };
        }

        return async.waterfall([
            next => auth.server.doAuth(
                request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
                    if (err) {
                        // VaultClient returns standard errors, but the route requires
                        // Arsenal errors
                        const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
                        log.trace('authentication error', { error: err });
                        return next(arsenalError);
                    }
                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
                }, 's3', requestContexts),
            (userInfo, authorizationResults, streamingV4Params, infos, next) => {
                const authNames = { accountName: userInfo.getAccountDisplayName() };
                if (userInfo.isRequesterAnIAMUser()) {
                    authNames.userName = userInfo.getIAMdisplayName();
                }
                if (isRequesterASessionUser(userInfo)) {
                    authNames.sessionName = userInfo.getShortid().split(':')[1];
                }
                log.addDefaultFields(authNames);
                if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
                }
                // issue 100 Continue to the client
                writeContinue(request, response);
                const MAX_POST_LENGTH = request.method === 'POST' ?
                    1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
                const post = [];
                let postLength = 0;
                request.on('data', chunk => {
                    postLength += chunk.length;
                    // Sanity check on post length
                    if (postLength <= MAX_POST_LENGTH) {
                        post.push(chunk);
                    }
                });

                request.on('error', err => {
                    log.trace('error receiving request', {
                        error: err,
                    });
                    return next(errors.InternalError);
                });

                request.on('end', () => {
                    if (postLength > MAX_POST_LENGTH) {
                        log.error('body length is too long for request type',
                            { postLength });
                        return next(errors.InvalidRequest);
                    }
                    // Convert array of post buffers into one string
                    request.post = Buffer.concat(post, postLength).toString();
                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
                });
                return undefined;
            },
            // Tag condition keys require information from CloudServer for evaluation
            (userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
                authorizationResults,
                request,
                requestContexts,
                apiMethod,
                log,
                (err, authResultsWithTags) => {
                    if (err) {
                        log.trace('tag authentication error', { error: err });
                        return next(err);
                    }
                    return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
                },
            ),
        ], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
        return auth.server.doAuth(request, log, (err, userInfo,
            authorizationResults, streamingV4Params) => {
            if (err) {
                log.trace('authentication error', { error: err });
                return callback(err);
            }
            request.accountQuotas = infos?.accountQuota;
            if (authorizationResults) {
                const checkedResults = checkAuthResults(authorizationResults);
                if (checkedResults instanceof Error) {
                    return callback(checkedResults);
                if (apiMethod === 'objectGet') {
                    // first item checks s3:GetObject(Version) action
                    if (!authorizationResults[0].isAllowed) {
                        log.trace('get object authorization denial from Vault');
                        return callback(errors.AccessDenied);
                    }
                    // second item checks s3:GetObject(Version)Tagging action
                    if (!authorizationResults[1].isAllowed) {
                        log.trace('get tagging authorization denial ' +
                            'from Vault');
                        returnTagCount = false;
                    }
                } else {
                    for (let i = 0; i < authorizationResults.length; i++) {
                        if (!authorizationResults[i].isAllowed) {
                            log.trace('authorization denial from Vault');
                            return callback(errors.AccessDenied);
                        }
                    }
                }
                returnTagCount = checkedResults.returnTagCount;
                request.actionImplicitDenies = checkedResults.isImplicitDeny;
            } else {
                // create an object of keys apiMethods with all values to false:
                // for backward compatibility, all apiMethods are allowed by default
                // thus it is explicitly allowed, so implicit deny is false
                request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
                    acc[curr] = false;
                    return acc;
                }, {});
            }
            const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
                (hook, done) => hook(err, done),
                () => callback(err, ...results));

            // issue 100 Continue to the client
            writeContinue(request, response);
            if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
                request._response = response;
                return this[apiMethod](userInfo, request, streamingV4Params,
                    log, methodCallback, authorizationResults);
                    log, callback);
            }
            if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
                return this[apiMethod](userInfo, request, sourceBucket,
                    sourceObject, sourceVersionId, log, methodCallback);
            }
            if (apiMethod === 'objectGet') {
                return this[apiMethod](userInfo, request, returnTagCount, log, callback);
            }
            return this[apiMethod](userInfo, request, log, methodCallback);
        });
            const MAX_POST_LENGTH = request.method.toUpperCase() === 'POST' ?
                1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
            const post = [];
            let postLength = 0;
            request.on('data', chunk => {
                postLength += chunk.length;
                // Sanity check on post length
                if (postLength <= MAX_POST_LENGTH) {
                    post.push(chunk);
                }
                return undefined;
            });

            request.on('error', err => {
                log.trace('error receiving request', {
                    error: err,
                });
                return callback(errors.InternalError);
            });

            request.on('end', () => {
                if (postLength > MAX_POST_LENGTH) {
                    log.error('body length is too long for request type',
                        { postLength });
                    return callback(errors.InvalidRequest);
                }
                // Convert array of post buffers into one string
                request.post = Buffer.concat(post, postLength).toString();

                if (apiMethod === 'objectCopy' ||
                    apiMethod === 'objectPutCopyPart') {
                    return this[apiMethod](userInfo, request, sourceBucket,
                        sourceObject, sourceVersionId, log, callback);
                }
                if (apiMethod === 'objectGet') {
                    return this[apiMethod](userInfo, request,
                        returnTagCount, log, callback);
                }
                return this[apiMethod](userInfo, request, log, callback);
            });
            return undefined;
        }, 's3', requestContexts);
    },
    bucketDelete,
    bucketDeleteCors,
    bucketDeleteEncryption,
    bucketDeleteWebsite,
    bucketGet,
    bucketGetACL,
    bucketGetCors,
    bucketGetObjectLock,
    bucketGetVersioning,
    bucketGetWebsite,
    bucketGetLocation,
    bucketGetEncryption,
    bucketHead,
    bucketPut,
    bucketPutACL,
    bucketPutCors,
    bucketPutVersioning,
    bucketPutTagging,
    bucketDeleteTagging,
    bucketGetTagging,
    bucketPutWebsite,
    bucketPutReplication,
    bucketGetReplication,
    bucketDeleteReplication,
    bucketDeleteQuota,
    bucketPutLifecycle,
    bucketUpdateQuota,
    bucketGetLifecycle,
    bucketDeleteLifecycle,
    bucketPutPolicy,
    bucketGetPolicy,
    bucketGetQuota,
    bucketDeletePolicy,
    bucketPutObjectLock,
    bucketPutNotification,
    bucketGetNotification,
    bucketPutEncryption,
    corsPreflight,
    completeMultipartUpload,
    initiateMultipartUpload,
    listMultipartUploads,
    listParts,
    metadataSearch,
    multiObjectDelete,
    multipartDelete,
    objectDelete,
    objectDeleteTagging,
    objectGet,
    objectGetACL,
    objectGetLegalHold,
    objectGetRetention,
    objectGetTagging,
    objectCopy,
    objectHead,
    objectPut,
    objectPutACL,
    objectPutLegalHold,
    objectPutTagging,
    objectPutPart,
    objectPutCopyPart,
    objectPutRetention,
    objectRestore,
    serviceGet,
    websiteGet: website,
    websiteHead: website,
    websiteGet,
    websiteHead,
};

module.exports = api;

@@ -0,0 +1,135 @@
const constants = require('../../../../constants');

function isBackbeatUser(canonicalID) {
    const canonicalIDArray = canonicalID.split('/');
    const serviceName = canonicalIDArray[canonicalIDArray.length - 1];
    return ['replication', 'lifecycle', 'gc'].includes(serviceName);
}

function isBucketAuthorized(bucket, requestType, canonicalID) {
    // Check to see if user is authorized to perform a
    // particular action on bucket based on ACLs.
    // TODO: Add IAM checks and bucket policy checks.
    if (bucket.getOwner() === canonicalID || isBackbeatUser(canonicalID)) {
        return true;
    } else if (requestType === 'bucketOwnerAction') {
        // only bucket owner can modify or retrieve this property of a bucket
        return false;
    }
    const bucketAcl = bucket.getAcl();
    if (requestType === 'bucketGet' || requestType === 'bucketHead') {
        if (bucketAcl.Canned === 'public-read'
            || bucketAcl.Canned === 'public-read-write'
            || (bucketAcl.Canned === 'authenticated-read'
                && canonicalID !== constants.publicId)) {
            return true;
        } else if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.READ.indexOf(canonicalID) > -1) {
            return true;
        }
    }

    if (requestType === 'bucketGetACL') {
        if ((bucketAcl.Canned === 'log-delivery-write'
                && canonicalID === constants.logId)
            || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.READ_ACP.indexOf(canonicalID) > -1) {
            return true;
        }
    }

    if (requestType === 'bucketPutACL') {
        if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
            return true;
        }
    }

    if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) {
        return true;
    }

    if (requestType === 'objectDelete' || requestType === 'objectPut') {
        if (bucketAcl.Canned === 'public-read-write'
            || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.WRITE.indexOf(canonicalID) > -1) {
            return true;
        }
    }
    // Note that an account can have the ability to do objectPutACL,
    // objectGetACL, objectHead or objectGet even if the account has no rights
    // to the bucket holding the object. So, if the request type is
    // objectPutACL, objectGetACL, objectHead or objectGet, the bucket
    // authorization check should just return true so can move on to check
    // rights at the object level.

    return (requestType === 'objectPutACL' || requestType === 'objectGetACL' ||
        requestType === 'objectGet' || requestType === 'objectHead');
}

function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
    const bucketOwner = bucket.getOwner();
    if (!objectMD) {
        return false;
    }
    if (objectMD['owner-id'] === canonicalID) {
        return true;
    }

    if (isBackbeatUser(canonicalID)) {
        return true;
    }
    // account is authorized if:
    // - requesttype is "bucketOwnerAction" (example: for objectTagging) and
    // - account is the bucket owner
    if (requestType === 'bucketOwnerAction' && bucketOwner === canonicalID) {
        return true;
    }
    if (requestType === 'objectGet' || requestType === 'objectHead') {
        if (objectMD.acl.Canned === 'public-read'
            || objectMD.acl.Canned === 'public-read-write'
            || (objectMD.acl.Canned === 'authenticated-read'
                && canonicalID !== constants.publicId)) {
            return true;
        } else if (objectMD.acl.Canned === 'bucket-owner-read'
                && bucketOwner === canonicalID) {
            return true;
        } else if ((objectMD.acl.Canned === 'bucket-owner-full-control'
                && bucketOwner === canonicalID)
            || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
            || objectMD.acl.READ.indexOf(canonicalID) > -1) {
            return true;
        }
    }

    // User is already authorized on the bucket for FULL_CONTROL or WRITE or
    // bucket has canned ACL public-read-write
    if (requestType === 'objectPut' || requestType === 'objectDelete') {
        return true;
    }

    if (requestType === 'objectPutACL') {
        if ((objectMD.acl.Canned === 'bucket-owner-full-control'
                && bucketOwner === canonicalID)
            || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
            || objectMD.acl.WRITE_ACP.indexOf(canonicalID) > -1) {
            return true;
        }
    }

    if (requestType === 'objectGetACL') {
        if ((objectMD.acl.Canned === 'bucket-owner-full-control'
                && bucketOwner === canonicalID)
            || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
            || objectMD.acl.READ_ACP.indexOf(canonicalID) > -1) {
            return true;
        }
    }
    return false;
}

module.exports = {
    isBucketAuthorized,
    isObjAuthorized,
    isBackbeatUser,
};

@@ -1,29 +0,0 @@
const { errors } = require('arsenal');
const vault = require('../../../auth/vault');

function checkExpectedBucketOwner(headers, bucket, log, cb) {
    const expectedOwner = headers['x-amz-expected-bucket-owner'];
    if (expectedOwner === undefined) {
        return cb();
    }

    const bucketOwner = bucket.getOwner();
    return vault.getAccountIds([bucketOwner], log, (error, res) => {
        if (error) {
            log.error('error fetch accountId from vault', {
                method: 'checkExpectedBucketOwner',
                error,
            });
        }

        if (error || res[bucketOwner] !== expectedOwner) {
            return cb(errors.AccessDenied);
        }

        return cb();
    });
}

module.exports = {
    checkExpectedBucketOwner,
};

@ -1,641 +0,0 @@
|
|||
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
|
||||
const { errors } = require('arsenal');
|
||||
const { parseCIDR, isValid } = require('ipaddr.js');
|
||||
const constants = require('../../../../constants');
|
||||
const { config } = require('../../../Config');
|
||||
|
||||
const {
|
||||
allAuthedUsersId,
|
||||
bucketOwnerActions,
|
||||
logId,
|
||||
publicId,
|
||||
arrayOfAllowed,
|
||||
assumedRoleArnResourceType,
|
||||
backbeatLifecycleSessionName,
|
||||
actionsToConsiderAsObjectPut,
|
||||
} = constants;
|
||||
|
||||
// whitelist buckets to allow public read on objects
|
||||
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
|
||||
? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
|
||||
|
||||
function getServiceAccountProperties(canonicalID) {
|
||||
const canonicalIDArray = canonicalID.split('/');
|
||||
const serviceName = canonicalIDArray[canonicalIDArray.length - 1];
|
||||
return constants.serviceAccountProperties[serviceName];
|
||||
}
|
||||
|
||||
function isServiceAccount(canonicalID) {
|
||||
return getServiceAccountProperties(canonicalID) !== undefined;
|
||||
}
|
||||
|
||||
function isRequesterASessionUser(authInfo) {
|
||||
const regexpAssumedRoleArn = /^arn:aws:sts::[0-9]{12}:assumed-role\/.*$/;
|
||||
return regexpAssumedRoleArn.test(authInfo.getArn());
|
||||
}
|
||||
|
||||
function isRequesterNonAccountUser(authInfo) {
|
||||
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
|
||||
}
|
||||
|
||||
/**
 * Checks the access control for a given bucket based on the request type and user's canonical ID.
 *
 * @param {Bucket} bucket - The bucket to check access control for.
 * @param {string} requestType - The list of s3 actions to check within the API call.
 * @param {string} canonicalID - The canonical ID of the user making the request.
 * @param {string} mainApiCall - The main API call (first item of the requestType).
 *
 * @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
 */
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
    // Same logic applies on the Versioned APIs, so let's simplify it.
    let requestTypeParsed = requestType.endsWith('Version') ?
        requestType.slice(0, 'Version'.length * -1) : requestType;
    requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
        'objectPut' : requestTypeParsed;
    const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
        'objectPut' : mainApiCall;
    if (bucket.getOwner() === canonicalID) {
        return true;
    }
    if (parsedMainApiCall === 'objectGet') {
        if (requestTypeParsed === 'objectGetTagging') {
            return true;
        }
    }
    if (parsedMainApiCall === 'objectPut') {
        if (arrayOfAllowed.includes(requestTypeParsed)) {
            return true;
        }
    }

    const bucketAcl = bucket.getAcl();
    if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') {
        if (bucketAcl.Canned === 'public-read'
            || bucketAcl.Canned === 'public-read-write'
            || (bucketAcl.Canned === 'authenticated-read'
                && canonicalID !== publicId)) {
            return true;
        } else if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.READ.indexOf(canonicalID) > -1) {
            return true;
        } else if (bucketAcl.READ.indexOf(publicId) > -1
            || (bucketAcl.READ.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
            return true;
        }
    }
    if (requestTypeParsed === 'bucketGetACL') {
        if ((bucketAcl.Canned === 'log-delivery-write'
                && canonicalID === logId)
            || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.READ_ACP.indexOf(canonicalID) > -1) {
            return true;
        } else if (bucketAcl.READ_ACP.indexOf(publicId) > -1
            || (bucketAcl.READ_ACP.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
            return true;
        }
    }

    if (requestTypeParsed === 'bucketPutACL') {
        if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
            return true;
        } else if (bucketAcl.WRITE_ACP.indexOf(publicId) > -1
            || (bucketAcl.WRITE_ACP.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
            return true;
        }
    }

    if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') {
        if (bucketAcl.Canned === 'public-read-write'
            || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
            || bucketAcl.WRITE.indexOf(canonicalID) > -1) {
            return true;
        } else if (bucketAcl.WRITE.indexOf(publicId) > -1
            || (bucketAcl.WRITE.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || (bucketAcl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || bucketAcl.FULL_CONTROL.indexOf(publicId) > -1) {
            return true;
        }
    }
    // Note that an account can have the ability to do objectPutACL,
    // objectGetACL, objectHead or objectGet even if the account has no rights
    // to the bucket holding the object. So, if the request type is
    // objectPutACL, objectGetACL, objectHead or objectGet, the bucket
    // authorization check should just return true so we can move on to check
    // rights at the object level.
    return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
        || requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
}
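// Illustrative expectation for checkBucketAcls above, with a minimal
// hypothetical bucket stub: a 'public-read' canned ACL grants bucketGet
// to a requester that is not the owner.
const fakeBucket = {
    getOwner: () => 'ownerCanonicalId',
    getAcl: () => ({ Canned: 'public-read', FULL_CONTROL: [], READ: [],
        READ_ACP: [], WRITE: [], WRITE_ACP: [] }),
};
// checkBucketAcls(fakeBucket, 'bucketGet', 'otherCanonicalId', 'bucketGet')
// => true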
function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
    isUserUnauthenticated, mainApiCall) {
    const bucketOwner = bucket.getOwner();
    const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
        'objectPut' : requestType;
    const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
        'objectPut' : mainApiCall;
    // acls don't distinguish between users and accounts, so both should be allowed
    if (bucketOwnerActions.includes(requestTypeParsed)
        && (bucketOwner === canonicalID)) {
        return true;
    }
    if (objectMD['owner-id'] === canonicalID) {
        return true;
    }

    // Backward compatibility
    if (parsedMainApiCall === 'objectGet') {
        if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
            && requestTypeParsed === 'objectGetTagging') {
            return true;
        }
    }

    if (!objectMD.acl) {
        return false;
    }

    if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
        if (objectMD.acl.Canned === 'public-read'
            || objectMD.acl.Canned === 'public-read-write'
            || (objectMD.acl.Canned === 'authenticated-read'
                && canonicalID !== publicId)) {
            return true;
        } else if (objectMD.acl.Canned === 'bucket-owner-read'
            && bucketOwner === canonicalID) {
            return true;
        } else if ((objectMD.acl.Canned === 'bucket-owner-full-control'
                && bucketOwner === canonicalID)
            || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
            || objectMD.acl.READ.indexOf(canonicalID) > -1) {
            return true;
        } else if (objectMD.acl.READ.indexOf(publicId) > -1
            || (objectMD.acl.READ.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
            return true;
        }
    }

    // User is already authorized on the bucket for FULL_CONTROL or WRITE or
    // bucket has canned ACL public-read-write
    if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
        return true;
    }

    if (requestTypeParsed === 'objectPutACL') {
        if ((objectMD.acl.Canned === 'bucket-owner-full-control'
                && bucketOwner === canonicalID)
            || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
            || objectMD.acl.WRITE_ACP.indexOf(canonicalID) > -1) {
            return true;
        } else if (objectMD.acl.WRITE_ACP.indexOf(publicId) > -1
            || (objectMD.acl.WRITE_ACP.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
            return true;
        }
    }

    if (requestTypeParsed === 'objectGetACL') {
        if ((objectMD.acl.Canned === 'bucket-owner-full-control'
                && bucketOwner === canonicalID)
            || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
            || objectMD.acl.READ_ACP.indexOf(canonicalID) > -1) {
            return true;
        } else if (objectMD.acl.READ_ACP.indexOf(publicId) > -1
            || (objectMD.acl.READ_ACP.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || (objectMD.acl.FULL_CONTROL.indexOf(allAuthedUsersId) > -1
                && canonicalID !== publicId)
            || objectMD.acl.FULL_CONTROL.indexOf(publicId) > -1) {
            return true;
        }
    }

    // allow public reads on buckets that are whitelisted for anonymous reads
    // TODO: remove this after bucket policies are implemented
    const bucketAcl = bucket.getAcl();
    const allowPublicReads = publicReadBuckets.includes(bucket.getName())
        && bucketAcl.Canned === 'public-read'
        && (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
    if (allowPublicReads) {
        return true;
    }
    return false;
}
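// Illustrative expectation for checkObjectAcls above (hypothetical IDs):
// with canned ACL 'bucket-owner-read', the bucket owner may read an
// object owned by another account.
// checkObjectAcls(bucket, {
//     'owner-id': 'writerCanonicalId',
//     acl: { Canned: 'bucket-owner-read', FULL_CONTROL: [], READ: [],
//         READ_ACP: [], WRITE_ACP: [] },
// }, 'objectGet', 'bucketOwnerCanonicalId', true, false, 'objectGet')
// => true when bucket.getOwner() === 'bucketOwnerCanonicalId'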
function _checkBucketPolicyActions(requestType, actions, log) {
    const mappedAction = actionMaps.actionMapBP[requestType];
    // Deny any action that isn't in list of controlled actions
    if (!mappedAction) {
        return false;
    }
    return evaluators.isActionApplicable(mappedAction, actions, log);
}

function _checkBucketPolicyResources(request, resource, log) {
    if (!request || (Array.isArray(resource) && resource.length === 0)) {
        return true;
    }
    // build request context from the request!
    const requestContext = new RequestContext(request.headers, request.query,
        request.bucketName, request.objectKey, null,
        request.connection.encrypted, request.resourceType, 's3');
    return evaluators.isResourceApplicable(requestContext, resource, log);
}

function _checkBucketPolicyConditions(request, conditions, log) {
    const ip = request ? requestUtils.getClientIp(request, config) : undefined;
    if (!conditions) {
        return true;
    }
    // build request context from the request!
    const requestContext = new RequestContext(request.headers, request.query,
        request.bucketName, request.objectKey, ip,
        request.connection.encrypted, request.resourceType, 's3', null, null,
        null, null, null, null, null, null, null, null, null,
        request.objectLockRetentionDays);
    return evaluators.meetConditions(requestContext, conditions, log);
}

function _getAccountId(arn) {
    // account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
    return arn.substr(13, 12);
}

function _isAccountId(principal) {
    return (principal.length === 12 && /^\d+$/.test(principal));
}
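// Illustrative run of the fixed-offset extraction above: 'arn:aws:iam::'
// is 13 characters long, and the account id is the next 12 (hypothetical ARN).
const sampleArn = 'arn:aws:iam::123456789012:user/alice';
console.log(sampleArn.substr(13, 12)); // '123456789012'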
function _checkPrincipal(requester, principal) {
    if (principal === '*') {
        return true;
    }
    if (principal === requester) {
        return true;
    }
    if (_isAccountId(principal)) {
        return _getAccountId(requester) === principal;
    }
    if (principal.endsWith('root')) {
        return _getAccountId(requester) === _getAccountId(principal);
    }
    return false;
}

function _checkPrincipals(canonicalID, arn, principal) {
    if (principal === '*') {
        return true;
    }
    if (principal.CanonicalUser) {
        if (Array.isArray(principal.CanonicalUser)) {
            return principal.CanonicalUser.some(p => _checkPrincipal(canonicalID, p));
        }
        return _checkPrincipal(canonicalID, principal.CanonicalUser);
    }
    if (principal.AWS) {
        if (Array.isArray(principal.AWS)) {
            return principal.AWS.some(p => _checkPrincipal(arn, p));
        }
        return _checkPrincipal(arn, principal.AWS);
    }
    return false;
}
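// Illustrative expectations for _checkPrincipals above (hypothetical
// identifiers):
// _checkPrincipals(canonicalID, arn, '*') => true
// _checkPrincipals('abcd1234', arn, { CanonicalUser: 'abcd1234' }) => true
// _checkPrincipals(canonicalID, 'arn:aws:iam::123456789012:user/alice',
//     { AWS: '123456789012' }) => true (12-digit account-id match)
// _checkPrincipals(canonicalID, arn, { Federated: 'x' }) => false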
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
    let permission = 'defaultDeny';
    // if requester is user within bucket owner account, actions should be
    // allowed unless explicitly denied (assumes allowed by IAM policy)
    if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
        permission = 'allow';
    }
    let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
    while (copiedStatement.length > 0) {
        const s = copiedStatement[0];
        const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
        const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
        const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
        const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);

        if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
            // explicit deny trumps any allows, so return immediately
            return 'explicitDeny';
        }
        if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
            permission = 'allow';
        }
        copiedStatement = copiedStatement.splice(1);
    }
    return permission;
}
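// Illustrative policy (hypothetical) for the statement loop above: a
// matching Deny statement short-circuits to 'explicitDeny' even when a
// matching Allow also exists; a matching Allow alone yields 'allow';
// no match leaves 'defaultDeny'.
const examplePolicy = {
    Statement: [
        { Effect: 'Allow', Principal: '*', Action: 's3:GetObject',
            Resource: 'arn:aws:s3:::mybucket/*' },
        { Effect: 'Deny', Principal: '*', Action: 's3:GetObject',
            Resource: 'arn:aws:s3:::mybucket/secret/*' },
    ],
};
// checkBucketPolicy(examplePolicy, 'objectGet', canonicalID, arn,
//     bucketOwner, log, request, { objectGet: false })
// => 'explicitDeny' for keys under secret/, 'allow' otherwise.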
function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log,
    request, aclPermission, results, actionImplicitDenies) {
    const bucketPolicy = bucket.getBucketPolicy();
    let processedResult = results[requestType];
    if (!bucketPolicy) {
        processedResult = actionImplicitDenies[requestType] === false && aclPermission;
    } else {
        const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
            bucketOwner, log, request, actionImplicitDenies);

        if (bucketPolicyPermission === 'explicitDeny') {
            processedResult = false;
        } else if (bucketPolicyPermission === 'allow') {
            processedResult = true;
        } else {
            processedResult = actionImplicitDenies[requestType] === false && aclPermission;
        }
    }
    return processedResult;
}
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
    actionImplicitDeniesInput = {}, isWebsite = false) {
    const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
    const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
    const mainApiCall = requestTypes[0];
    const results = {};
    return requestTypes.every(_requestType => {
        // By default, all missing actions are defined as allowed from IAM, to be
        // backward compatible
        actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
        // Check to see if user is authorized to perform a
        // particular action on bucket based on ACLs.
        // TODO: Add IAM checks
        let requesterIsNotUser = true;
        let arn = null;
        if (authInfo) {
            requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
            arn = authInfo.getArn();
        }
        // if the bucket owner is an account, users should not have default access
        if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
            results[_requestType] = actionImplicitDenies[_requestType] === false;
            return results[_requestType];
        }
        const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
        // In case of error, bucket access is checked with bucketGet.
        // For website, bucket policy only uses objectGet and ignores bucketGet
        // https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
        // bucketGet should be used to check the acl, but switched to objectGet for bucket policy
        if (isWebsite && _requestType === 'bucketGet') {
            // eslint-disable-next-line no-param-reassign
            _requestType = 'objectGet';
            actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
        }
        return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
            request, aclPermission, results, actionImplicitDenies);
    });
}
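// Illustrative call shape for isBucketAuthorized above (hypothetical
// request objects): several actions evaluated at once; the result is
// true only if every requested action is granted.
// const ok = isBucketAuthorized(bucket, ['bucketGet', 'bucketGetACL'],
//     canonicalID, authInfo, log, request,
//     { bucketGet: false, bucketGetACL: false });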
function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {},
    log, request) {
    const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
    const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
    const results = {};
    return requestTypes.every(_requestType => {
        // By default, all missing actions are defined as allowed from IAM, to be
        // backward compatible
        actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
        let arn = null;
        if (authInfo) {
            arn = authInfo.getArn();
        }
        return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
            request, true, results, actionImplicitDenies);
    });
}
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
    actionImplicitDeniesInput = {}, isWebsite = false) {
    const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
    const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
    const results = {};
    const mainApiCall = requestTypes[0];
    return requestTypes.every(_requestType => {
        // By default, all missing actions are defined as allowed from IAM, to be
        // backward compatible
        actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
        const parsedMethodName = _requestType.endsWith('Version')
            ? _requestType.slice(0, -7) : _requestType;
        const bucketOwner = bucket.getOwner();
        if (!objectMD) {
            // check bucket has read access
            // 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
            let permission = 'bucketGet';
            if (actionsToConsiderAsObjectPut.includes(_requestType)) {
                permission = 'objectPut';
            }
            results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
                actionImplicitDenies, isWebsite);
            // User is already authorized on the bucket for FULL_CONTROL or WRITE or
            // bucket has canned ACL public-read-write
            if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
                && results[_requestType] === false) {
                results[_requestType] = actionImplicitDenies[_requestType] === false;
            }
            return results[_requestType];
        }
        let requesterIsNotUser = true;
        let arn = null;
        let isUserUnauthenticated = false;
        if (authInfo) {
            requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
            arn = authInfo.getArn();
            isUserUnauthenticated = arn === undefined;
        }
        if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
            results[_requestType] = actionImplicitDenies[_requestType] === false;
            return results[_requestType];
        }
        // account is authorized if:
        // - requestType is included in bucketOwnerActions and
        // - account is the bucket owner
        // - requester is account, not user
        if (bucketOwnerActions.includes(parsedMethodName)
            && (bucketOwner === canonicalID)
            && requesterIsNotUser) {
            results[_requestType] = actionImplicitDenies[_requestType] === false;
            return results[_requestType];
        }
        const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName,
            canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall);
        return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner,
            log, request, aclPermission, results, actionImplicitDenies);
    });
}
function _checkResource(resource, bucketArn) {
    if (resource === bucketArn) {
        return true;
    }
    if (resource.includes('/')) {
        const rSubs = resource.split('/');
        return rSubs[0] === bucketArn;
    }
    return false;
}

// the resources specified in the bucket policy should contain the bucket name
function validatePolicyResource(bucketName, policy) {
    const bucketArn = `arn:aws:s3:::${bucketName}`;

    return policy.Statement.every(s => {
        if (Array.isArray(s.Resource)) {
            return s.Resource.every(r => _checkResource(r, bucketArn));
        }
        if (typeof s.Resource === 'string') {
            return _checkResource(s.Resource, bucketArn);
        }
        return false;
    });
}
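// Illustrative expectations for validatePolicyResource above (hypothetical
// bucket name):
// validatePolicyResource('mybucket', { Statement: [
//     { Resource: 'arn:aws:s3:::mybucket/*' }] })  => true
// validatePolicyResource('mybucket', { Statement: [
//     { Resource: 'arn:aws:s3:::otherbucket' }] }) => false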
function checkIp(value) {
    const errString = 'Invalid IP address in Conditions';

    const values = Array.isArray(value) ? value : [value];

    for (let i = 0; i < values.length; i++) {
        // these preliminary checks are validating the provided
        // ip address against ipaddr.js, the library we use when
        // evaluating IP condition keys. It ensures compatibility,
        // but additional checks are required to enforce the right
        // notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
        // we would accept different ip formats, which is not
        // standard in an AWS use case.
        try {
            try {
                parseCIDR(values[i]);
            } catch (err) {
                isValid(values[i]);
            }
        } catch (err) {
            return errString;
        }

        // Apply the existing IP validation logic to each element
        const validateIpRegex = ip => {
            if (constants.ipv4Regex.test(ip)) {
                return ip.split('.').every(part => parseInt(part, 10) <= 255);
            }
            if (constants.ipv6Regex.test(ip)) {
                return ip.split(':').every(part => part.length <= 4);
            }
            return false;
        };

        if (validateIpRegex(values[i]) !== true) {
            return errString;
        }
    }

    // If the function hasn't returned by now, all elements are valid
    return null;
}
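// Illustrative expectations for checkIp above (assuming constants.ipv4Regex
// matches dotted-quad notation):
// checkIp('10.0.0.1')                  => null (valid plain IPv4)
// checkIp('192.168.1.999')             => 'Invalid IP address in Conditions'
//                                         (an octet exceeds 255)
// checkIp(['10.0.0.1', '10.0.0.2'])    => null (arrays are checked per element)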
// This function checks, for all bucket policy conditions, whether the values
// provided are valid for the condition type. If not, it returns a relevant
// MalformedPolicy error string.
function validatePolicyConditions(policy) {
    const validConditions = [
        { conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
        { conditionKey: 's3:object-lock-remaining-retention-days' },
    ];
    // keys where value type does not seem to be checked by AWS:
    // - s3:object-lock-remaining-retention-days

    if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
        return null;
    }

    // there can be multiple statements in the policy, each with a Condition enclosure
    for (let i = 0; i < policy.Statement.length; i++) {
        const s = policy.Statement[i];
        if (s.Condition) {
            const conditionOperators = Object.keys(s.Condition);
            // there can be multiple condition operations in the Condition enclosure
            // eslint-disable-next-line no-restricted-syntax
            for (const conditionOperator of conditionOperators) {
                const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
                const conditionValue = s.Condition[conditionOperator][conditionKey];
                const validCondition = validConditions.find(validCondition =>
                    validCondition.conditionKey === conditionKey
                );
                // AWS does not return an error if the condition starts with 'aws:',
                // so we reproduce this behaviour
                if (!validCondition && !conditionKey.startsWith('aws:')) {
                    return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
                }
                if (validCondition && validCondition.conditionValueTypeChecker) {
                    const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
                    if (conditionValueTypeError) {
                        return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
                    }
                }
            }
        }
    }
    return null;
}
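// Illustrative expectations for validatePolicyConditions above
// (hypothetical statements):
// - Condition: { IpAddress: { 'aws:SourceIp': '10.0.0.1' } }    => null
// - Condition: { IpAddress: { 'aws:SourceIp': '10.0.0.999' } }  => MalformedPolicy
//   (the checkIp value checker rejects it)
// - Condition: { StringEquals: { 's3:ExistingObjectTag/foo': 'bar' } }
//   => MalformedPolicy ('Policy has an invalid condition key'), since only
//      listed keys and 'aws:'-prefixed keys are accepted here.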
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
 * @param {string} arn - Amazon resource name - example:
 * arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
 * @return {boolean} true if Lifecycle assumed role session arn, false if not.
 */
function isLifecycleSession(arn) {
    if (!arn) {
        return false;
    }

    const arnSplits = arn.split(':');
    const service = arnSplits[2];

    const resourceNames = arnSplits[arnSplits.length - 1].split('/');

    const resourceType = resourceNames[0];
    const sessionName = resourceNames[resourceNames.length - 1];

    return (service === 'sts'
        && resourceType === assumedRoleArnResourceType
        && sessionName === backbeatLifecycleSessionName);
}
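// Illustrative expectations, following the JSDoc example above:
// isLifecycleSession(
//     'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle')
// => true
// isLifecycleSession(
//     'arn:aws:sts::257038443293:assumed-role/rolename/other-session')
// => false (session name does not match)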
module.exports = {
    isBucketAuthorized,
    isObjAuthorized,
    getServiceAccountProperties,
    isServiceAccount,
    isRequesterASessionUser,
    isRequesterNonAccountUser,
    checkBucketAcls,
    checkObjectAcls,
    validatePolicyResource,
    validatePolicyConditions,
    isLifecycleSession,
    evaluateBucketPolicyWithIAM,
};
@@ -1,20 +1,10 @@
const { policies } = require('arsenal');
const { config } = require('../../../Config');

const { RequestContext, requestUtils } = policies;
const RequestContext = policies.RequestContext;
let apiMethodAfterVersionCheck;
const apiMethodWithVersion = {
    objectGetACL: true,
    objectPutACL: true,
    objectGet: true,
    objectDelete: true,
    objectPutTagging: true,
    objectGetTagging: true,
    objectDeleteTagging: true,
    objectGetLegalHold: true,
    objectPutLegalHold: true,
    objectPutRetention: true,
};
const apiMethodWithVersion = { objectGetACL: true, objectPutACL: true,
    objectGet: true, objectDelete: true, objectPutTagging: true,
    objectGetTagging: true, objectDeleteTagging: true };

function isHeaderAcl(headers) {
    return headers['x-amz-grant-read'] || headers['x-amz-grant-read-acp'] ||
@@ -43,7 +33,8 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
    // null as the requestContext to Vault so it will only do an authentication
    // check.

    const ip = requestUtils.getClientIp(request, config);
    const ip = request.headers['x-forwarded-for'] ||
        request.socket.remoteAddress;

    function generateRequestContext(apiMethod) {
        return new RequestContext(request.headers,
@@ -52,7 +43,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
            apiMethod, 's3');
    }

    if (apiMethod === 'bucketPut') {
    if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
        return null;
    }
@@ -65,17 +56,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,

    const requestContexts = [];

    if (apiMethod === 'multiObjectDelete') {
        // MultiObjectDelete does not require any authorization when evaluating
        // the API. Instead, we authorize each object passed.
        // But in order to get any relevant information from the authorization service,
        // for example the account quota, we must send a request context object
        // with no `specificResource`. We expect the result to be an implicit deny.
        // In the API, we then ignore these authorization results, and we can use
        // any information returned, e.g., the quota.
        const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
        requestContexts.push(requestContextMultiObjectDelete);
    } else if (apiMethodAfterVersionCheck === 'objectCopy'
    if (apiMethodAfterVersionCheck === 'objectCopy'
        || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
        const objectGetAction = sourceVersionId ? 'objectGetVersion' :
            'objectGet';
@@ -107,126 +88,27 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
        const objectGetTaggingAction = (request.query &&
            request.query.versionId) ? 'objectGetTaggingVersion' :
            'objectGetTagging';
        if (request.headers['x-amz-version-id']) {
            const objectGetVersionAction = 'objectGetVersion';
            const getVersionResourceVersion =
                generateRequestContext(objectGetVersionAction);
            requestContexts.push(getVersionResourceVersion);
        }
        const getRequestContext =
            generateRequestContext(apiMethodAfterVersionCheck);
        const getTaggingRequestContext =
            generateRequestContext(objectGetTaggingAction);
        requestContexts.push(getRequestContext, getTaggingRequestContext);
    } else if (apiMethodAfterVersionCheck === 'objectGetTagging') {
        const objectGetTaggingAction = 'objectGetTagging';
        const getTaggingResourceVersion =
            generateRequestContext(objectGetTaggingAction);
        requestContexts.push(getTaggingResourceVersion);
        if (request.headers['x-amz-version-id']) {
            const objectGetTaggingVersionAction = 'objectGetTaggingVersion';
            const getTaggingVersionResourceVersion =
                generateRequestContext(objectGetTaggingVersionAction);
            requestContexts.push(getTaggingVersionResourceVersion);
        }
    } else if (apiMethodAfterVersionCheck === 'objectHead') {
        const objectHeadAction = 'objectHead';
        const headObjectAction =
            generateRequestContext(objectHeadAction);
        requestContexts.push(headObjectAction);
        if (request.headers['x-amz-version-id']) {
            const objectHeadVersionAction = 'objectGetVersion';
            const headObjectVersion =
                generateRequestContext(objectHeadVersionAction);
            requestContexts.push(headObjectVersion);
        }
    } else if (apiMethodAfterVersionCheck === 'objectPutTagging') {
        const putObjectTaggingRequestContext =
            generateRequestContext('objectPutTagging');
        requestContexts.push(putObjectTaggingRequestContext);
        if (request.headers['x-amz-version-id']) {
            const putObjectVersionRequestContext =
                generateRequestContext('objectPutTaggingVersion');
            requestContexts.push(putObjectVersionRequestContext);
        }
    } else if (apiMethodAfterVersionCheck === 'objectPutCopyPart') {
        const putObjectRequestContext =
            generateRequestContext('objectPut');
        requestContexts.push(putObjectRequestContext);
        const getObjectRequestContext =
            generateRequestContext('objectGet');
        requestContexts.push(getObjectRequestContext);
    } else if (apiMethodAfterVersionCheck === 'objectPut') {
        // if put object with version
        if (request.headers['x-scal-s3-version-id'] ||
            request.headers['x-scal-s3-version-id'] === '') {
            const putVersionRequestContext =
                generateRequestContext('objectPutVersion');
            requestContexts.push(putVersionRequestContext);
        } else {
            const putRequestContext =
                generateRequestContext(apiMethodAfterVersionCheck);
            requestContexts.push(putRequestContext);
            // if put object (versioning) with tag set
            if (request.headers['x-amz-tagging']) {
                const putTaggingRequestContext =
                    generateRequestContext('objectPutTagging');
                requestContexts.push(putTaggingRequestContext);
            }
            if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) {
                const putLegalHoldStatusAction =
                    generateRequestContext('objectPutLegalHold');
                requestContexts.push(putLegalHoldStatusAction);
            }
            // if put object (versioning) with ACL
            if (isHeaderAcl(request.headers)) {
                const putAclRequestContext =
                    generateRequestContext('objectPutACL');
                requestContexts.push(putAclRequestContext);
            }
            if (request.headers['x-amz-object-lock-mode']) {
                const putObjectLockRequestContext =
                    generateRequestContext('objectPutRetention');
                requestContexts.push(putObjectLockRequestContext);
            }
            if (request.headers['x-amz-version-id']) {
                const putObjectVersionRequestContext =
                    generateRequestContext('objectPutTaggingVersion');
                requestContexts.push(putObjectVersionRequestContext);
            }
        }
        const putRequestContext =
            generateRequestContext(apiMethodAfterVersionCheck);
        requestContexts.push(putRequestContext);
        // if put object (versioning) with tag set
        if (request.headers['x-amz-tagging']) {
            const putTaggingRequestContext =
                generateRequestContext('objectPutTagging');
            requestContexts.push(putTaggingRequestContext);
        }
    } else if (apiMethodAfterVersionCheck === 'initiateMultipartUpload' ||
        apiMethodAfterVersionCheck === 'objectPutPart' ||
        apiMethodAfterVersionCheck === 'completeMultipartUpload'
    ) {
        if (request.headers['x-scal-s3-version-id'] ||
            request.headers['x-scal-s3-version-id'] === '') {
            const putVersionRequestContext =
                generateRequestContext('objectPutVersion');
            requestContexts.push(putVersionRequestContext);
        } else {
            const putRequestContext =
                generateRequestContext(apiMethodAfterVersionCheck);
            requestContexts.push(putRequestContext);
        }

        // if put object (versioning) with ACL
        if (isHeaderAcl(request.headers)) {
            const putAclRequestContext =
                generateRequestContext('objectPutACL');
            requestContexts.push(putAclRequestContext);
        }

        if (request.headers['x-amz-object-lock-mode']) {
            const putObjectLockRequestContext =
                generateRequestContext('objectPutRetention');
            requestContexts.push(putObjectLockRequestContext);
        }
        if (request.headers['x-amz-version-id']) {
            const putObjectVersionRequestContext =
                generateRequestContext('objectPutTaggingVersion');
            requestContexts.push(putObjectVersionRequestContext);
        }
    } else {
        const requestContext =
            generateRequestContext(apiMethodAfterVersionCheck);
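// Illustrative sketch of the branching above (hypothetical headers): an
// objectPut request that also carries tagging and a legal-hold status
// yields one request context per implied action.
// prepareRequestContexts('objectPut', { headers: {
//     'x-amz-tagging': 'k=v',
//     'x-amz-object-lock-legal-hold-status': 'ON',
// }, /* ... */ })
// => contexts for objectPut, objectPutTagging and objectPutLegalHold.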
@@ -1,99 +0,0 @@
const async = require('async');

const { auth, s3middleware } = require('arsenal');
const metadata = require('../../../metadata/wrapper');
const { decodeVersionId } = require('../object/versioning');

const { parseTagXml } = s3middleware.tagging;

function makeTagQuery(tags) {
    return Object.entries(tags)
        .map(i => i.join('='))
        .join('&');
}
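// Illustrative run of makeTagQuery above, with hypothetical tags:
// makeTagQuery({ project: 'zenko', env: 'dev' }) => 'project=zenko&env=dev'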
function updateRequestContextsWithTags(request, requestContexts, apiMethod, log, cb) {
    async.waterfall([
        next => {
            if (request.headers['x-amz-tagging']) {
                return next(null, request.headers['x-amz-tagging']);
            }
            if (request.post && apiMethod === 'objectPutTagging') {
                return parseTagXml(request.post, log, (err, tags) => {
                    if (err) {
                        log.trace('error parsing request tags');
                        return next(err);
                    }
                    return next(null, makeTagQuery(tags));
                });
            }
            return next(null, null);
        },
        (requestTagsQuery, next) => {
            const objectKey = request.objectKey;
            const bucketName = request.bucketName;
            const decodedVidResult = decodeVersionId(request.query);
            if (decodedVidResult instanceof Error) {
                log.trace('invalid versionId query', {
                    versionId: request.query.versionId,
                    error: decodedVidResult,
                });
                return next(decodedVidResult);
            }
            const reqVersionId = decodedVidResult;
            return metadata.getObjectMD(
                bucketName, objectKey, { versionId: reqVersionId }, log, (err, objMD) => {
                    if (err) {
                        // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
                        if (err.NoSuchKey) {
                            return next(null, requestTagsQuery, null);
                        }
                        log.trace('error getting request object tags');
                        return next(err);
                    }
                    const existingTagsQuery = objMD.tags && makeTagQuery(objMD.tags);
                    return next(null, requestTagsQuery, existingTagsQuery);
                });
        },
    ], (err, requestTagsQuery, existingTagsQuery) => {
        if (err) {
            log.trace('error processing tag condition key evaluation');
            return cb(err);
        }
        // FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter
        // eslint-disable-next-line no-restricted-syntax
        for (const rc of requestContexts) {
            rc.setNeedTagEval(true);
            if (requestTagsQuery) {
                rc.setRequestObjTags(requestTagsQuery);
            }
            if (existingTagsQuery) {
                rc.setExistingObjTag(existingTagsQuery);
            }
        }
        return cb();
    });
}

function tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log, cb) {
    if (!authorizationResults) {
        return cb();
    }
    if (!authorizationResults.some(authRes => authRes.checkTagConditions)) {
        return cb(null, authorizationResults);
    }

    return updateRequestContextsWithTags(request, requestContexts, apiMethod, log, err => {
        if (err) {
            return cb(err);
        }
        return auth.server.doAuth(request, log,
            (err, userInfo, authResults) => cb(err, authResults), 's3', requestContexts);
    });
}

module.exports = {
    tagConditionKeyAuth,
    updateRequestContextsWithTags,
    makeTagQuery,
};
@@ -6,34 +6,29 @@ const acl = require('../../../metadata/acl');
const BucketInfo = require('arsenal').models.BucketInfo;
const constants = require('../../../../constants');
const createKeyForUserBucket = require('./createKeyForUserBucket');
const { parseBucketEncryptionHeaders } = require('./bucketEncryption');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
const { isServiceAccount } = require('../authorization/permissionChecks');
const { isBackbeatUser } = require('../authorization/aclChecks');

const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket;
const zenkoSeparator = constants.zenkoSeparator;
const userBucketOwner = 'admin';


function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
function addToUsersBucket(canonicalID, bucketName, log, cb) {
    // BACKWARD: Simplify once we no longer have to deal with the old
    // usersbucket name and old splitter

    // Get new format usersBucket to see if it exists
    return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
        if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) {
        if (err && !err.NoSuchBucket && !err.BucketAlreadyExists) {
            return cb(err);
        }
        const splitter = usersBucketAttrs ?
            constants.splitter : constants.oldSplitter;
        let key = createKeyForUserBucket(canonicalID, splitter, bucketName);
        const omVal = {
            creationDate: new Date().toJSON(),
            ingestion: bucketMD.getIngestion(),
        };
        const omVal = { creationDate: new Date().toJSON() };
        // If the new format usersbucket does not exist, try to put the
        // key in the old usersBucket using the old splitter.
        // Otherwise put the key in the new format usersBucket
@@ -41,7 +36,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
            usersBucket : oldUsersBucket;
        return metadata.putObjectMD(usersBucketBeingCalled, key,
            omVal, {}, log, err => {
                if (err?.is?.NoSuchBucket) {
                if (err && err.NoSuchBucket) {
                    // There must be no usersBucket, so create
                    // one using the new format
                    log.trace('users bucket does not exist, ' +
@@ -61,8 +56,9 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
                        // from getting a BucketAlreadyExists
                        // error with respect
                        // to the usersBucket.
                        // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
                        if (err && !err.BucketAlreadyExists) {
                        if (err &&
                            err !==
                            errors.BucketAlreadyExists) {
                            log.error('error from metadata', {
                                error: err,
                            });
@@ -96,7 +92,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
            return callback(err);
        }
        log.trace('created bucket in metadata');
        return addToUsersBucket(canonicalID, bucketName, bucket, log, err => {
        return addToUsersBucket(canonicalID, bucketName, log, err => {
            if (err) {
                return callback(err);
            }
@@ -119,7 +115,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
 */
function cleanUpBucket(bucketMD, canonicalID, log, callback) {
    const bucketName = bucketMD.getName();
    return addToUsersBucket(canonicalID, bucketName, bucketMD, log, err => {
    return addToUsersBucket(canonicalID, bucketName, log, err => {
        if (err) {
            return callback(err);
        }
@@ -171,36 +167,14 @@ function createBucket(authInfo, bucketName, headers,
        authInfo.getAccountDisplayName();
    const creationDate = new Date().toJSON();
    const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true';
    const headerObjectLock = headers['x-amz-bucket-object-lock-enabled'];
    const objectLockEnabled
        = headerObjectLock && headerObjectLock.toLowerCase() === 'true';
    const bucket = new BucketInfo(bucketName, canonicalID, ownerDisplayName,
        creationDate, BucketInfo.currentModelVersion(), null, null, null, null,
        null, null, null, null, null, null, null, null, null, isNFSEnabled,
        null, null, objectLockEnabled);
    let locationConstraintVal = null;
    const bucket = new BucketInfo(bucketName,
        canonicalID, ownerDisplayName, creationDate,
        BucketInfo.currentModelVersion(), null, null, null,
        null, null, null, null, null, null, null, null,
        null, isNFSEnabled);

    if (locationConstraint) {
        const [locationConstraintStr, ingestion] =
            locationConstraint.split(zenkoSeparator);
        if (locationConstraintStr) {
            locationConstraintVal = locationConstraintStr;
            bucket.setLocationConstraint(locationConstraintStr);
        }
        if (ingestion === 'ingest') {
            bucket.enableIngestion();
            // automatically enable versioning for ingestion buckets
            bucket.setVersioningConfiguration({ Status: 'Enabled' });
        }
    }
    if (objectLockEnabled) {
        // default versioning configuration AWS sets
        // when a bucket is created with object lock
        const versioningConfiguration = {
            Status: 'Enabled',
            MfaDelete: 'Disabled',
        };
        bucket.setVersioningConfiguration(versioningConfiguration);
    if (locationConstraint !== undefined) {
        bucket.setLocationConstraint(locationConstraint);
    }
    const parseAclParams = {
        headers,
@@ -223,7 +197,6 @@ function createBucket(authInfo, bucketName, headers,
        },
        getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
            metadata.getBucket(bucketName, log, (err, data) => {
                // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
                if (err && err.NoSuchBucket) {
                    return callback(null, 'NoBucketYet');
                }
@@ -242,15 +215,14 @@ function createBucket(authInfo, bucketName, headers,
        const existingBucketMD = results.getAnyExistingBucketInfo;
        if (existingBucketMD instanceof BucketInfo &&
            existingBucketMD.getOwner() !== canonicalID &&
            !isServiceAccount(canonicalID)) {
            !isBackbeatUser(canonicalID)) {
            // return existingBucketMD to collect cors headers
            return cb(errors.BucketAlreadyExists, existingBucketMD);
        }
        const newBucketMD = results.prepareNewBucketMD;
        if (existingBucketMD === 'NoBucketYet') {
            const sseConfig = parseBucketEncryptionHeaders(headers);
            return bucketLevelServerSideEncryption(
                bucketName, sseConfig, log,
                bucketName, headers, log,
                (err, sseInfo) => {
                    if (err) {
                        return cb(err);
@@ -273,7 +245,7 @@ function createBucket(authInfo, bucketName, headers,
            // error unless old AWS behavior (us-east-1)
            // Existing locationConstraint must have legacyAwsBehavior === true
            // New locationConstraint should have legacyAwsBehavior === true
            if (isLegacyAWSBehavior(locationConstraintVal) &&
            if (isLegacyAWSBehavior(locationConstraint) &&
                isLegacyAWSBehavior(existingBucketMD.getLocationConstraint())) {
                log.trace('returning 200 instead of 409 to mirror us-east-1');
                return cb(null, existingBucketMD);
@@ -2,21 +2,60 @@ const assert = require('assert');
const async = require('async');
const { errors } = require('arsenal');

const abortMultipartUpload = require('../object/abortMultipartUpload');
const { pushMetric } = require('../../../utapi/utilities');
const logger = require('../../../utilities/logger');

const { splitter, oldSplitter, mpuBucketPrefix } =
    require('../../../../constants');
const constants = require('../../../../constants');
const createKeyForUserBucket = require('./createKeyForUserBucket');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const deleteUserBucketEntry = require('./deleteUserBucketEntry');

const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket;


function _deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
    log.trace('deleting bucket name from users bucket', { method:
        '_deleteUserBucketEntry' });
    const keyForUserBucket = createKeyForUserBucket(canonicalID,
        constants.splitter, bucketName);
    metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
        // If the object representing the bucket is not in the
        // users bucket, just continue
        if (error && error.NoSuchKey) {
            return cb(null);
        // BACKWARDS COMPATIBILITY: Remove this once we no longer
        // have the old user bucket format
        } else if (error && error.NoSuchBucket) {
            const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
                constants.oldSplitter, bucketName);
            return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
                {}, log, error => {
                    if (error && !error.NoSuchKey) {
                        log.error('from metadata while deleting user bucket',
                            { error });
                        return cb(error);
                    }
                    log.trace('deleted bucket from user bucket',
                        { method: '_deleteUserBucketEntry' });
                    return cb(null);
                });
        } else if (error) {
            log.error('from metadata while deleting user bucket', { error,
                method: '_deleteUserBucketEntry' });
            return cb(error);
        }
        log.trace('deleted bucket from user bucket', {
            method: '_deleteUserBucketEntry' });
        return cb(null);
    });
}
function _deleteMPUbucket(destinationBucketName, log, cb) {
    const mpuBucketName =
        `${mpuBucketPrefix}${destinationBucketName}`;
        `${constants.mpuBucketPrefix}${destinationBucketName}`;
    return metadata.deleteBucket(mpuBucketName, log, err => {
        // If the mpu bucket does not exist, just move on
        // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
        if (err && err.NoSuchBucket) {
            return cb();
        }
@@ -24,38 +63,48 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
    });
}

function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) {
    async.mapLimit(mpus, 1, (mpu, next) => {
        const splitterChar = mpu.key.includes(oldSplitter) ?
            oldSplitter : splitter;
        // `overview${splitter}${objectKey}${splitter}${uploadId}
        const [, objectKey, uploadId] = mpu.key.split(splitterChar);
        abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
            (err, destBucket, partSizeSum) => {
                pushMetric('abortMultipartUpload', log, {
                    authInfo,
                    canonicalID: bucketMD.getOwner(),
                    bucket: bucketName,
                    keys: [objectKey],
                    byteLength: partSizeSum,
                });
                next(err);
            }, request);
    }, cb);
/**
 * Invisibly finishes deleting a bucket that already has a deleted flag
 * by deleting the object in the users bucket representing the created bucket
 * and then deleting the bucket in metadata
 * @param {string} bucketName - name of bucket
 * @param {string} canonicalID - bucket owner's canonicalID
 * @return {undefined}
 */
function invisiblyDelete(bucketName, canonicalID) {
    const log = logger.newRequestLogger();
    log.trace('deleting bucket with deleted flag invisibly', { bucketName });
    return _deleteUserBucketEntry(bucketName, canonicalID, log, err => {
        if (err) {
            log.error('error invisibly deleting bucket name from user bucket',
                { error: err });
            return log.end();
        }
        log.trace('deleted bucket name from user bucket');
        return metadata.deleteBucket(bucketName, log, error => {
            log.trace('deleting bucket from metadata',
                { method: 'invisiblyDelete' });
            if (error) {
                log.error('error deleting bucket from metadata', { error });
                return log.end();
            }
            log.trace('invisible deletion of bucket succeeded',
                { method: 'invisiblyDelete' });
            return log.end();
        });
    });
}

/**
 * deleteBucket - Delete bucket from namespace
 * @param {object} authInfo - authentication info
 * @param {object} bucketMD - bucket attributes/metadata
 * @param {string} bucketName - bucket in which objectMetadata is stored
 * @param {string} canonicalID - account canonicalID of requester
 * @param {object} request - request object given by router
 * including normalized headers
 * @param {object} log - Werelogs logger
 * @param {function} cb - callback from async.waterfall in bucketDelete
 * @return {undefined}
 */
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) {
function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
    log.trace('deleting bucket from metadata');
    assert.strictEqual(typeof bucketName, 'string');
    assert.strictEqual(typeof canonicalID, 'string');
@@ -84,16 +133,18 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
                return next();
            });
        },

        // Note: This does not mirror AWS behavior. AWS will allow a user to
        // delete a bucket even if there are ongoing multipart uploads.
        function deleteMPUbucketStep(next) {
            const MPUBucketName = `${mpuBucketPrefix}${bucketName}`;
            const MPUBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
            // check to see if there are any mpu overview objects (so ignore
            // any orphaned part objects)
            return metadata.listObject(MPUBucketName, { prefix: 'overview' },
                log, (err, objectsListRes) => {
            return metadata.listObject(MPUBucketName,
                { maxKeys: 1, prefix: 'overview' }, log,
                (err, objectsListRes) => {
                    // If no shadow bucket was ever created, there are no
                    // ongoing MPUs, so continue with deletion
                    if (err?.is.NoSuchBucket) {
                    if (err && err.NoSuchBucket) {
                        return next();
                    }
                    if (err) {
@@ -101,14 +152,12 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
                        return next(err);
                    }
                    if (objectsListRes.Contents.length) {
                        return _deleteOngoingMPUs(authInfo, bucketName,
                            bucketMD, objectsListRes.Contents, request, log, err => {
                                if (err) {
                                    return next(err);
                                }
                                log.trace('deleting shadow MPU bucket');
                                return _deleteMPUbucket(bucketName, log, next);
                            });
                        log.debug('bucket delete failed',
                            { error: errors.MPUinProgress });
                        // Return non-AWS standard error
                        // regarding ongoing MPUs so user
                        // understands what is occurring
                        return next(errors.MPUinProgress);
                    }
                    log.trace('deleting shadow MPU bucket');
                    return _deleteMPUbucket(bucketName, log, next);
@@ -124,7 +173,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
        },
        function deleteUserBucketEntryStep(next) {
            log.trace('deleting bucket name from user bucket');
            return deleteUserBucketEntry(bucketName, canonicalID, log, next);
            return _deleteUserBucketEntry(bucketName, canonicalID, log, next);
        },
    ],
    // eslint-disable-next-line prefer-arrow-callback
@@ -148,4 +197,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
    });
}

module.exports = deleteBucket;
module.exports = {
    invisiblyDelete,
    deleteBucket,
};
@@ -1,255 +0,0 @@
const { errors } = require('arsenal');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const { parseString } = require('xml2js');

/**
 * ServerSideEncryptionInfo - user configuration for server side encryption
 * @typedef {Object} ServerSideEncryptionInfo
 * @property {string} algorithm - Algorithm to use for encryption. Either AES256 or aws:kms.
 * @property {string} masterKeyId - Key id for the kms key used to encrypt data keys.
 * @property {string} configuredMasterKeyId - User configured master key id.
 * @property {boolean} mandatory - Whether a default encryption policy has been enabled.
 */

/**
 * @callback ServerSideEncryptionInfo~callback
 * @param {Object} error - Instance of Arsenal error
 * @param {ServerSideEncryptionInfo} - SSE configuration
 */

/**
 * parseEncryptionXml - Parses and validates a ServerSideEncryptionConfiguration xml document
 * @param {object} xml - ServerSideEncryptionConfiguration doc
 * @param {object} log - logger
 * @param {ServerSideEncryptionInfo~callback} cb - callback
 * @returns {undefined}
 */
function parseEncryptionXml(xml, log, cb) {
    return parseString(xml, (err, parsed) => {
        if (err) {
            log.trace('xml parsing failed', {
                error: err,
                method: 'parseEncryptionXml',
            });
            log.debug('invalid xml', { xml });
            return cb(errors.MalformedXML);
        }

        if (!parsed
            || !parsed.ServerSideEncryptionConfiguration
            || !parsed.ServerSideEncryptionConfiguration.Rule) {
            log.trace('error in sse config, invalid ServerSideEncryptionConfiguration section', {
                method: 'parseEncryptionXml',
            });
            return cb(errors.MalformedXML);
        }

        const { Rule } = parsed.ServerSideEncryptionConfiguration;

        if (!Array.isArray(Rule)
            || Rule.length > 1
            || !Rule[0]
            || !Rule[0].ApplyServerSideEncryptionByDefault
            || !Rule[0].ApplyServerSideEncryptionByDefault[0]) {
            log.trace('error in sse config, invalid ApplyServerSideEncryptionByDefault section', {
                method: 'parseEncryptionXml',
            });
            return cb(errors.MalformedXML);
        }

        const [encConfig] = Rule[0].ApplyServerSideEncryptionByDefault;

        if (!encConfig.SSEAlgorithm || !encConfig.SSEAlgorithm[0]) {
            log.trace('error in sse config, no SSEAlgorithm provided', {
                method: 'parseEncryptionXml',
            });
            return cb(errors.MalformedXML);
        }

        const [algorithm] = encConfig.SSEAlgorithm;

        if (algorithm !== 'AES256' && algorithm !== 'aws:kms') {
            log.trace('error in sse config, unknown SSEAlgorithm', {
                method: 'parseEncryptionXml',
            });
            return cb(errors.MalformedXML);
        }

        const result = { algorithm, mandatory: true };

        if (encConfig.KMSMasterKeyID) {
            if (algorithm === 'AES256') {
                log.trace('error in sse config, can not specify KMSMasterKeyID when using AES256', {
                    method: 'parseEncryptionXml',
                });
                return cb(errors.InvalidArgument.customizeDescription(
                    'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms'));
            }

            if (!encConfig.KMSMasterKeyID[0] || typeof encConfig.KMSMasterKeyID[0] !== 'string') {
                log.trace('error in sse config, invalid KMSMasterKeyID', {
                    method: 'parseEncryptionXml',
                });
                return cb(errors.MalformedXML);
            }

            result.configuredMasterKeyId = encConfig.KMSMasterKeyID[0];
        }
        return cb(null, result);
    });
}
/**
|
||||
* hydrateEncryptionConfig - Constructs a ServerSideEncryptionInfo object from arguments
|
||||
* ensuring no invalid or undefined keys are added
|
||||
*
|
||||
* @param {string} algorithm - Algorithm to use for encryption. Either AES256 or aws:kms.
|
||||
* @param {string} configuredMasterKeyId - User configured master key id.
|
||||
* @param {boolean} [mandatory] - Whether a default encryption policy has been enabled.
|
||||
* @returns {ServerSideEncryptionInfo} - SSE configuration
|
||||
*/
|
||||
function hydrateEncryptionConfig(algorithm, configuredMasterKeyId, mandatory = null) {
|
||||
if (algorithm !== 'AES256' && algorithm !== 'aws:kms') {
|
||||
return {
|
||||
algorithm: null,
|
||||
};
|
||||
}
|
||||
|
||||
const sseConfig = { algorithm, mandatory };
|
||||
|
||||
if (algorithm === 'aws:kms' && configuredMasterKeyId) {
|
||||
sseConfig.configuredMasterKeyId = configuredMasterKeyId;
|
||||
}
|
||||
|
||||
if (mandatory !== null) {
|
||||
sseConfig.mandatory = mandatory;
|
||||
}
|
||||
|
||||
return sseConfig;
|
||||
}
|
||||
|
||||
/**
|
||||
* parseBucketEncryptionHeaders - retrieves bucket level sse configuration from request headers
|
||||
* @param {object} headers - Request headers
|
||||
* @returns {ServerSideEncryptionInfo} - SSE configuration
|
||||
*/
|
||||
function parseBucketEncryptionHeaders(headers) {
|
||||
const sseAlgorithm = headers['x-amz-scal-server-side-encryption'];
|
||||
const configuredMasterKeyId = headers['x-amz-scal-server-side-encryption-aws-kms-key-id'] || null;
|
||||
return hydrateEncryptionConfig(sseAlgorithm, configuredMasterKeyId, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* parseObjectEncryptionHeaders - retrieves bucket level sse configuration from request headers
|
||||
* @param {object} headers - Request headers
|
||||
* @returns {ServerSideEncryptionInfo} - SSE configuration
|
||||
*/
|
||||
function parseObjectEncryptionHeaders(headers) {
|
||||
const sseAlgorithm = headers['x-amz-server-side-encryption'];
|
||||
const configuredMasterKeyId = headers['x-amz-server-side-encryption-aws-kms-key-id'] || null;
|
||||
|
||||
if (sseAlgorithm && sseAlgorithm !== 'AES256' && sseAlgorithm !== 'aws:kms') {
|
||||
return {
|
||||
error: errors.InvalidArgument.customizeDescription('The encryption method specified is not supported'),
|
||||
};
|
||||
}
|
||||
|
||||
if (sseAlgorithm !== 'aws:kms' && configuredMasterKeyId) {
|
||||
return {
|
||||
error: errors.InvalidArgument.customizeDescription(
|
||||
'a KMSMasterKeyID is not applicable if the default sse algorithm is not aws:kms'),
|
||||
};
|
||||
}
|
||||
return { objectSSE: hydrateEncryptionConfig(sseAlgorithm, configuredMasterKeyId) };
|
||||
}
|
||||
|
||||
/**
|
||||
* createDefaultBucketEncryptionMetadata - Creates master key and sets up default server side encryption configuration
|
||||
* @param {BucketInfo} bucket - bucket metadata
|
||||
* @param {object} log - werelogs logger
|
||||
* @param {ServerSideEncryptionInfo~callback} cb - callback
|
||||
* @returns {undefined}
|
||||
*/
|
||||
function createDefaultBucketEncryptionMetadata(bucket, log, cb) {
|
||||
return kms.bucketLevelEncryption(
|
||||
bucket.getName(),
|
||||
{ algorithm: 'AES256', mandatory: false },
|
||||
log,
|
||||
(error, sseConfig) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
bucket.setServerSideEncryption(sseConfig);
|
||||
return metadata.updateBucket(bucket.getName(), bucket, log, err => cb(err, sseConfig));
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {object} headers - request headers
|
||||
* @param {BucketInfo} bucket - BucketInfo model
|
||||
* @param {*} log - werelogs logger
|
||||
* @param {ServerSideEncryptionInfo~callback} cb - callback
|
||||
* @returns {undefined}
|
||||
*/
|
||||
function getObjectSSEConfiguration(headers, bucket, log, cb) {
|
||||
const bucketSSE = bucket.getServerSideEncryption();
|
||||
const { error, objectSSE } = parseObjectEncryptionHeaders(headers);
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
|
||||
// If a per object sse algo has been passed through
|
||||
// x-amz-server-side-encryption
|
||||
if (objectSSE.algorithm) {
|
||||
// If aws:kms and a custom key id
|
||||
// pass it through without updating the bucket md
|
||||
if (objectSSE.algorithm === 'aws:kms' && objectSSE.configuredMasterKeyId) {
|
||||
return cb(null, objectSSE);
|
||||
}
|
||||
|
||||
// If the client has not specified a key id,
|
||||
// and we have a default config, then we reuse
|
||||
// it and pass it through
|
||||
if (!objectSSE.configuredMasterKeyId && bucketSSE) {
|
||||
// The default configs algo is overridden with the one passed in the
|
||||
// request headers. Our implementations of AES256 and aws:kms are the
|
||||
// same underneath so this is only cosmetic change.
|
||||
const sseConfig = Object.assign({}, bucketSSE, { algorithm: objectSSE.algorithm });
|
||||
return cb(null, sseConfig);
|
||||
}
|
||||
|
||||
// If the client has not specified a key id, and we
|
||||
// don't have a default config, generate it
|
||||
if (!objectSSE.configuredMasterKeyId && !bucketSSE) {
|
||||
return createDefaultBucketEncryptionMetadata(bucket, log, (error, sseConfig) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
// Override the algorithm, for the same reasons as above.
|
||||
Object.assign(sseConfig, { algorithm: objectSSE.algorithm });
|
||||
return cb(null, sseConfig);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// If the bucket has a default encryption config, and it is mandatory
|
||||
// (created with putBucketEncryption or legacy headers)
|
||||
// pass it through
|
||||
if (bucketSSE && bucketSSE.mandatory) {
|
||||
return cb(null, bucketSSE);
|
||||
}
|
||||
|
||||
// No encryption config
|
||||
return cb(null, null);
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
createDefaultBucketEncryptionMetadata,
|
||||
getObjectSSEConfiguration,
|
||||
hydrateEncryptionConfig,
|
||||
parseEncryptionXml,
|
||||
parseBucketEncryptionHeaders,
|
||||
parseObjectEncryptionHeaders,
|
||||
};
|
|
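A minimal usage sketch of the object-level parser above; the require path and the key id are assumptions for illustration only:

const { parseObjectEncryptionHeaders } = require('./bucketEncryption'); // assumed path

const { error, objectSSE } = parseObjectEncryptionHeaders({
    'x-amz-server-side-encryption': 'aws:kms',
    'x-amz-server-side-encryption-aws-kms-key-id': 'my-key-id', // illustrative key id
});
// error is undefined here, and objectSSE is:
// { algorithm: 'aws:kms', mandatory: null, configuredMasterKeyId: 'my-key-id' }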
@ -1,5 +1,4 @@
const invisiblyDelete = require('./invisiblyDelete');
const constants = require('../../../../constants');
const { invisiblyDelete } = require('./bucketDeletion');

/**
 * Checks whether to proceed with a request based on the bucket flags
@ -9,17 +8,9 @@ const constants = require('../../../../constants');
 * @return {boolean} true if the bucket should be shielded, false otherwise
 */
function bucketShield(bucket, requestType) {
    const invisiblyDeleteRequests = constants.bucketOwnerActions.concat(
        [
            'bucketGet',
            'bucketHead',
            'bucketGetACL',
            'objectGet',
            'objectGetACL',
            'objectHead',
            'objectPutACL',
            'objectDelete',
        ]);
    const invisiblyDeleteRequests = ['bucketGet', 'bucketHead',
        'bucketGetACL', 'bucketOwnerAction', 'objectGet', 'objectGetACL',
        'objectHead', 'objectPutACL', 'objectDelete'];
    if (invisiblyDeleteRequests.indexOf(requestType) > -1 &&
        bucket.hasDeletedFlag()) {
        invisiblyDelete(bucket.getName(), bucket.getOwner());
@ -30,9 +21,6 @@ function bucketShield(bucket, requestType) {
    // Otherwise return an error to the client
    if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
        (requestType !== 'objectPut' &&
         requestType !== 'initiateMultipartUpload' &&
         requestType !== 'objectPutPart' &&
         requestType !== 'completeMultipartUpload' &&
         requestType !== 'bucketPutACL' &&
         requestType !== 'bucketDelete')) {
        return true;
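A small illustration of the shield logic, using a stub with only the accessors bucketShield reads (the stub flags are invented):

const stubBucket = {
    hasDeletedFlag: () => false,   // stub BucketInfo accessor
    hasTransientFlag: () => true,  // stub BucketInfo accessor
};
bucketShield(stubBucket, 'objectGet'); // => true: a transient bucket shields reads
bucketShield(stubBucket, 'objectPut'); // => not shielded: writes fall through to
                                       //    the rest of the function (not shown in this hunk)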
@ -1,44 +0,0 @@
const createKeyForUserBucket = require('./createKeyForUserBucket');
const { usersBucket, oldUsersBucket, splitter, oldSplitter } =
    require('../../../../constants');
const metadata = require('../../../metadata/wrapper');

function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
    log.trace('deleting bucket name from users bucket', { method:
        '_deleteUserBucketEntry' });
    const keyForUserBucket = createKeyForUserBucket(canonicalID, splitter,
        bucketName);
    metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
        // If the object representing the bucket is not in the
        // users bucket just continue
        if (error?.is.NoSuchKey) {
            return cb(null);
        // BACKWARDS COMPATIBILITY: Remove this once no longer
        // have old user bucket format
        } else if (error?.is.NoSuchBucket) {
            const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
                oldSplitter, bucketName);
            return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
                {}, log, error => {
                    // TODO: move to `.is` once BKTCLT-9 is done and bumped in Cloudserver
                    if (error && !error.NoSuchKey) {
                        log.error('from metadata while deleting user bucket',
                            { error });
                        return cb(error);
                    }
                    log.trace('deleted bucket from user bucket',
                        { method: '_deleteUserBucketEntry' });
                    return cb(null);
                });
        } else if (error) {
            log.error('from metadata while deleting user bucket', { error,
                method: '_deleteUserBucketEntry' });
            return cb(error);
        }
        log.trace('deleted bucket from user bucket', {
            method: '_deleteUserBucketEntry' });
        return cb(null);
    });
}

module.exports = deleteUserBucketEntry;
@ -1,37 +0,0 @@
const { errors, models } = require('arsenal');
const { NotificationConfiguration } = models;

const { config } = require('../../../Config');

function getNotificationConfiguration(parsedXml) {
    const notifConfig = new NotificationConfiguration(parsedXml).getValidatedNotificationConfiguration();
    // if notifConfig is an empty object, effectively delete the notification configuration
    if (notifConfig.error || Object.keys(notifConfig).length === 0) {
        return notifConfig;
    }
    if (!config.bucketNotificationDestinations) {
        return { error: errors.InvalidArgument.customizeDescription(
            'Unable to validate the following destination configurations') };
    }
    const targets = new Set(config.bucketNotificationDestinations.map(t => t.resource));
    const notifConfigTargets = notifConfig.queueConfig.map(t => t.queueArn.split(':')[5]);
    // collect invalid targets
    const invalidTargets = [];
    notifConfigTargets.forEach((t, i) => {
        if (!targets.has(t)) {
            invalidTargets.push({
                ArgumentName: notifConfig.queueConfig[i].queueArn,
                ArgumentValue: 'The destination queue does not exist',
            });
        }
    });
    if (invalidTargets.length > 0) {
        const errDesc = 'Unable to validate the following destination configurations';
        let error = errors.InvalidArgument.customizeDescription(errDesc);
        error = error.addMetadataEntry('invalidArguments', invalidTargets);
        return { error };
    }
    return notifConfig;
}

module.exports = getNotificationConfiguration;
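The destination check above keys on the sixth colon-separated ARN field; for illustration (the ARN is made up):

'arn:scality:bucketnotif:::destination1'.split(':')[5]; // => 'destination1'
// so config.bucketNotificationDestinations must contain an entry whose
// `resource` is 'destination1' for this queueArn to validate.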
@ -1,37 +0,0 @@
const logger = require('../../../utilities/logger');
const deleteUserBucketEntry = require('./deleteUserBucketEntry');
const metadata = require('../../../metadata/wrapper');

/**
 * Invisibly finishes deleting a bucket that already has a deleted flag
 * by deleting the object in the users bucket representing the created bucket
 * and then deleting the bucket in metadata
 * @param {string} bucketName - name of bucket
 * @param {string} canonicalID - bucket owner's canonicalID
 * @return {undefined}
 */
function invisiblyDelete(bucketName, canonicalID) {
    const log = logger.newRequestLogger();
    log.trace('deleting bucket with deleted flag invisibly', { bucketName });
    return deleteUserBucketEntry(bucketName, canonicalID, log, err => {
        if (err) {
            log.error('error invisibly deleting bucket name from user bucket',
                { error: err });
            return log.end();
        }
        log.trace('deleted bucket name from user bucket');
        return metadata.deleteBucket(bucketName, log, error => {
            log.trace('deleting bucket from metadata',
                { method: 'invisiblyDelete' });
            if (error) {
                log.error('error deleting bucket from metadata', { error });
                return log.end();
            }
            log.trace('invisible deletion of bucket succeeded',
                { method: 'invisiblyDelete' });
            return log.end();
        });
    });
}

module.exports = invisiblyDelete;
@ -0,0 +1,233 @@
const { config } = require('../../../Config');
const { legacyLocations } = require('../../../../constants.js');
const { locationConstraints } = config;
const escapeForXml = require('arsenal').s3middleware.escapeForXml;

class BackendInfo {
    /**
     * Represents the info necessary to evaluate which data backend to use
     * on a data put call.
     * @constructor
     * @param {string | undefined} objectLocationConstraint - location constraint
     * for object based on user meta header
     * @param {string | undefined} bucketLocationConstraint - location
     * constraint for bucket based on bucket metadata
     * @param {string} requestEndpoint - endpoint to which request was made
     * @param {string | undefined} legacyLocationConstraint - legacy location
     * constraint
     */
    constructor(objectLocationConstraint, bucketLocationConstraint,
        requestEndpoint, legacyLocationConstraint) {
        this._objectLocationConstraint = objectLocationConstraint;
        this._bucketLocationConstraint = bucketLocationConstraint;
        this._requestEndpoint = requestEndpoint;
        this._legacyLocationConstraint = legacyLocationConstraint;
        return this;
    }

    /**
     * validate proposed location constraint against config
     * @param {string | undefined} locationConstraint - value of user
     * metadata location constraint header or bucket location constraint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if valid, false if not
     */
    static isValidLocationConstraint(locationConstraint, log) {
        if (Object.keys(config.locationConstraints).
            indexOf(locationConstraint) < 0) {
            log.trace('proposed locationConstraint is invalid',
                { locationConstraint });
            return false;
        }
        return true;
    }

    /**
     * validate that request endpoint is listed in the restEndpoint config
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if present, false if not
     */
    static isRequestEndpointPresent(requestEndpoint, log) {
        if (Object.keys(config.restEndpoints).indexOf(requestEndpoint) < 0) {
            log.trace('requestEndpoint does not match config restEndpoints',
                { requestEndpoint });
            return false;
        }
        return true;
    }

    /**
     * validate that locationConstraint for request Endpoint matches
     * one config locationConstraint
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if matches, false if not
     */
    static isRequestEndpointValueValid(requestEndpoint, log) {
        if (Object.keys(config.locationConstraints).indexOf(config
            .restEndpoints[requestEndpoint]) < 0) {
            log.trace('the default locationConstraint for request' +
                'Endpoint does not match any config locationConstraint',
                { requestEndpoint });
            return false;
        }
        return true;
    }

    /**
     * validate that s3 server is running with a file or memory backend
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if running with file/mem backend, false if not
     */
    static isMemOrFileBackend(requestEndpoint, log) {
        if (config.backends.data === 'mem' ||
            config.backends.data === 'file') {
            log.trace('use data backend for the location', {
                dataBackend: config.backends.data,
                method: 'isMemOrFileBackend',
            });
            return true;
        }
        return false;
    }

    /**
     * validate requestEndpoint against config or mem/file data backend
     * - if there is no match for the request endpoint in the config
     * restEndpoints and data backend is set to mem or file we will use this
     * data backend for the location.
     * - if locationConstraint for request Endpoint does not match
     * any config locationConstraint, we will return an error
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if valid, false if not
     */
    static isValidRequestEndpointOrBackend(requestEndpoint, log) {
        if (!BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
            return BackendInfo.isMemOrFileBackend(requestEndpoint, log);
        }
        return BackendInfo.isRequestEndpointValueValid(requestEndpoint, log);
    }

    /**
     * validate controlling BackendInfo Parameter
     * @param {string | undefined} objectLocationConstraint - value of user
     * metadata location constraint header
     * @param {string | null} bucketLocationConstraint - location
     * constraint from bucket metadata
     * @param {string} requestEndpoint - endpoint of request
     * @param {object} log - werelogs logger
     * @return {object} - location constraint validity
     */
    static controllingBackendParam(objectLocationConstraint,
        bucketLocationConstraint, requestEndpoint, log) {
        if (objectLocationConstraint) {
            if (BackendInfo.isValidLocationConstraint(objectLocationConstraint,
                log)) {
                log.trace('objectLocationConstraint is valid');
                return { isValid: true };
            }
            log.trace('objectLocationConstraint is invalid');
            return { isValid: false, description: 'Object Location Error - ' +
                `Your object location "${escapeForXml(objectLocationConstraint)}"` +
                ' is not in your location config - Please update.' };
        }
        if (bucketLocationConstraint) {
            if (BackendInfo.isValidLocationConstraint(bucketLocationConstraint,
                log)) {
                log.trace('bucketLocationConstraint is valid');
                return { isValid: true };
            }
            log.trace('bucketLocationConstraint is invalid');
            return { isValid: false, description: 'Bucket Location Error - ' +
                `Your bucket location "${escapeForXml(bucketLocationConstraint)}"` +
                ' is not in your location config - Please update.' };
        }
        const legacyLocationConstraint =
            BackendInfo.getLegacyLocationConstraint();
        if (legacyLocationConstraint) {
            log.trace('legacy location is valid');
            return { isValid: true, legacyLocationConstraint };
        }
        if (!BackendInfo.isValidRequestEndpointOrBackend(requestEndpoint,
            log)) {
            return { isValid: false, description: 'Endpoint Location Error - ' +
                `Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
                'in your config OR the default location constraint for request ' +
                `endpoint "${escapeForXml(requestEndpoint)}" does not ` +
                'match any config locationConstraint - Please update.' };
        }
        if (BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
            return { isValid: true };
        }
        return { isValid: true, defaultedToDataBackend: true };
    }

    /**
     * Return legacyLocationConstraint
     * @return {string | undefined} legacyLocationConstraint;
     */
    static getLegacyLocationConstraint() {
        return legacyLocations.find(ll => locationConstraints[ll]);
    }

    /**
     * Return objectLocationConstraint
     * @return {string | undefined} objectLocationConstraint;
     */
    getObjectLocationConstraint() {
        return this._objectLocationConstraint;
    }

    /**
     * Return bucketLocationConstraint
     * @return {string | undefined} bucketLocationConstraint;
     */
    getBucketLocationConstraint() {
        return this._bucketLocationConstraint;
    }

    /**
     * Return requestEndpoint
     * @return {string} requestEndpoint;
     */
    getRequestEndpoint() {
        return this._requestEndpoint;
    }

    /**
     * Return locationConstraint that should be used with put request
     * Order of priority is:
     * (1) objectLocationConstraint,
     * (2) bucketLocationConstraint,
     * (3) legacyLocationConstraint,
     * (4) default locationConstraint for requestEndpoint if requestEndpoint
     * is listed in restEndpoints in config.json
     * (5) default data backend
     * @return {string} locationConstraint;
     */
    getControllingLocationConstraint() {
        const objectLC = this.getObjectLocationConstraint();
        const bucketLC = this.getBucketLocationConstraint();
        const reqEndpoint = this.getRequestEndpoint();
        if (objectLC) {
            return objectLC;
        }
        if (bucketLC) {
            return bucketLC;
        }
        if (this._legacyLocationConstraint) {
            return this._legacyLocationConstraint;
        }
        if (config.restEndpoints[reqEndpoint]) {
            return config.restEndpoints[reqEndpoint];
        }
        return config.backends.data;
    }
}

module.exports = {
    BackendInfo,
};
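A quick sketch of the priority order implemented by getControllingLocationConstraint(); the constructor arguments are illustrative:

const info = new BackendInfo('us-east-2', 'us-west-1', 's3.example.com', undefined);
info.getControllingLocationConstraint(); // => 'us-east-2': the object header wins
// With no object-level constraint the bucket constraint would win, then the
// legacy constraint, then config.restEndpoints[requestEndpoint], and finally
// config.backends.data.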
@ -1,131 +0,0 @@
const async = require('async');

const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const locationConstraintCheck = require('../object/locationConstraintCheck');
const { standardMetadataValidateBucketAndObj } =
    require('../../../metadata/metadataUtils');
const services = require('../../../services');

function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
    callback, request) {
    const metadataValMPUparams = {
        authInfo,
        bucketName,
        objectKey,
        uploadId,
        preciseRequestType: request.apiMethods || 'multipartDelete',
        request,
    };
    // For validating the request at the destinationBucket level,
    // params are the same as validating at the MPU level
    // but the requestType is the more general 'objectDelete'
    const metadataValParams = Object.assign({}, metadataValMPUparams);
    metadataValParams.requestType = 'objectPut';
    const authzIdentityResult = request ? request.actionImplicitDenies : false;

    async.waterfall([
        function checkDestBucketVal(next) {
            standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
                (err, destinationBucket) => {
                    if (err) {
                        return next(err, destinationBucket);
                    }
                    if (destinationBucket.policies) {
                        // TODO: Check bucket policies to see if user is granted
                        // permission or forbidden permission to take
                        // given action.
                        // If permitted, add 'bucketPolicyGoAhead'
                        // attribute to params for validating at MPU level.
                        // This is GH Issue#76
                        metadataValMPUparams.requestType =
                            'bucketPolicyGoAhead';
                    }
                    return next(null, destinationBucket);
                });
        },
        function checkMPUval(destBucket, next) {
            metadataValParams.log = log;
            services.metadataValidateMultipart(metadataValParams,
                (err, mpuBucket, mpuOverviewObj) => {
                    if (err) {
                        return next(err, destBucket);
                    }
                    return next(err, mpuBucket, mpuOverviewObj, destBucket);
                });
        },
        function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
            next) {
            const location = mpuOverviewObj.controllingLocationConstraint;
            const originalIdentityAuthzResults = request.actionImplicitDenies;
            // eslint-disable-next-line no-param-reassign
            delete request.actionImplicitDenies;
            return data.abortMPU(objectKey, uploadId, location, bucketName,
                request, destBucket, locationConstraintCheck, log,
                (err, skipDataDelete) => {
                    // eslint-disable-next-line no-param-reassign
                    request.actionImplicitDenies = originalIdentityAuthzResults;
                    if (err) {
                        return next(err, destBucket);
                    }
                    // for Azure and GCP we do not need to delete data;
                    // for all other backends, skipDataDelete will be set to false
                    return next(null, mpuBucket, destBucket, skipDataDelete);
                });
        },
        function getPartLocations(mpuBucket, destBucket, skipDataDelete,
            next) {
            services.getMPUparts(mpuBucket.getName(), uploadId, log,
                (err, result) => {
                    if (err) {
                        return next(err, destBucket);
                    }
                    const storedParts = result.Contents;
                    return next(null, mpuBucket, storedParts, destBucket,
                        skipDataDelete);
                });
        },
        function deleteData(mpuBucket, storedParts, destBucket,
            skipDataDelete, next) {
            if (skipDataDelete) {
                return next(null, mpuBucket, storedParts, destBucket);
            }
            // The locations were sent to metadata as an array
            // under partLocations. Pull the partLocations.
            let locations = storedParts.map(item => item.value.partLocations);
            if (locations.length === 0) {
                return next(null, mpuBucket, storedParts, destBucket);
            }
            // flatten the array
            locations = [].concat(...locations);
            return async.eachLimit(locations, 5, (loc, cb) => {
                data.delete(loc, log, err => {
                    if (err) {
                        log.fatal('delete ObjectPart failed', { err });
                    }
                    cb();
                });
            }, () => next(null, mpuBucket, storedParts, destBucket));
        },
        function deleteMetadata(mpuBucket, storedParts, destBucket, next) {
            let splitter = constants.splitter;
            // BACKWARD COMPATIBILITY: remove this once the old splitter
            // is no longer in use
            if (mpuBucket.getMdBucketModelVersion() < 2) {
                splitter = constants.oldSplitter;
            }
            // Reconstruct mpuOverviewKey
            const mpuOverviewKey =
                `overview${splitter}${objectKey}${splitter}${uploadId}`;

            // Get the sum of all part sizes to include in pushMetric object
            const partSizeSum = storedParts.map(item => item.value.Size)
                .reduce((currPart, nextPart) => currPart + nextPart, 0);
            const keysToDelete = storedParts.map(item => item.key);
            keysToDelete.push(mpuOverviewKey);
            services.batchDeleteObjectMetadata(mpuBucket.getName(),
                keysToDelete, log, err => next(err, destBucket, partSizeSum));
        },
    ], callback);
}

module.exports = abortMultipartUpload;
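The metadata cleanup step rebuilds the MPU "overview" key from the object key and upload id. A sketch with an assumed splitter value (the real one comes from constants.splitter):

const splitter = '..|..'; // assumption for illustration
const mpuOverviewKey = `overview${splitter}photos/cat.jpg${splitter}8a5e9d`;
// => 'overview..|..photos/cat.jpg..|..8a5e9d'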
@ -1,19 +0,0 @@
const { zenkoIDHeader } = require('arsenal').constants;

const _config = require('../../../Config').config;

/**
 * applyZenkoUserMD - if request is within a Zenko deployment, apply user
 * metadata called "zenko-source" to the object
 * @param {Object} metaHeaders - user metadata object
 * @return {undefined}
 */
function applyZenkoUserMD(metaHeaders) {
    if (process.env.REMOTE_MANAGEMENT_DISABLE === '0' &&
        !metaHeaders[zenkoIDHeader]) {
        // eslint-disable-next-line no-param-reassign
        metaHeaders[zenkoIDHeader] = _config.getPublicInstanceId();
    }
}

module.exports = applyZenkoUserMD;
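Rough illustration of the gate above; the header name comes from arsenal's zenkoIDHeader constant and the value shown is invented:

// assuming process.env.REMOTE_MANAGEMENT_DISABLE === '0'
const metaHeaders = {};
applyZenkoUserMD(metaHeaders);
// metaHeaders now carries the Zenko id header, something like:
// { 'x-amz-meta-zenko-instance-id': '0123-abcd' } (illustrative value)
// If the caller already set that header, it is left untouched.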
@ -1,25 +0,0 @@
const { errors } = require('arsenal');
const { maxHttpHeadersSize } = require('../../../../constants');

/**
 * Checks the size of the HTTP headers
 * @param {object} requestHeaders - HTTP request headers
 * @return {object} object with error or null
 */
function checkHttpHeadersSize(requestHeaders) {
    let httpHeadersSize = 0;

    Object.keys(requestHeaders).forEach(header => {
        httpHeadersSize += Buffer.byteLength(header, 'utf8') +
            Buffer.byteLength(requestHeaders[header], 'utf8');
    });

    if (httpHeadersSize > maxHttpHeadersSize) {
        return {
            httpHeadersSizeError: errors.HttpHeadersTooLarge,
        };
    }
    return {};
}

module.exports = checkHttpHeadersSize;
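Usage sketch; the threshold actually comes from constants.maxHttpHeadersSize and the headers here are invented:

const { httpHeadersSizeError } =
    checkHttpHeadersSize({ host: 's3.example.com', 'x-amz-meta-note': 'hi' });
if (httpHeadersSizeError) {
    // caller responds with HttpHeadersTooLarge
}
// Sizes are counted in UTF-8 bytes of each header name plus value, so
// multi-byte characters weigh more than their string length.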
@ -1,38 +0,0 @@
const { maximumMetaHeadersSize,
    invalidObjectUserMetadataHeader } = require('../../../../constants');

/**
 * Checks the size of the user metadata in the object metadata and removes
 * it from the response if the size of the user metadata is larger than
 * the maximum size allowed. A custom metadata key is added to the response
 * with the number of user metadata keys not returned as its value
 * @param {object} responseMetadata - response metadata
 * @return {object} responseMetaHeaders headers with object metadata to include
 * in response to client
 */
function checkUserMetadataSize(responseMetadata) {
    let userMetadataSize = 0;
    // collect the user metadata keys from the object metadata
    const userMetadataHeaders = Object.keys(responseMetadata)
        .filter(key => key.startsWith('x-amz-meta-'));
    // compute the size of each user metadata key and its value
    userMetadataHeaders.forEach(header => {
        userMetadataSize += header.length + responseMetadata[header].length;
    });
    // check the size computed against the maximum allowed;
    // if the computed size is greater, then remove all the
    // user metadata from the response object
    if (userMetadataSize > maximumMetaHeadersSize) {
        const md = Object.assign({}, responseMetadata);
        userMetadataHeaders.forEach(header => {
            delete md[header];
        });
        // add the prescribed/custom metadata with the number of user
        // metadata keys as its value
        md[invalidObjectUserMetadataHeader] = userMetadataHeaders.length;
        return md;
    }
    return responseMetadata;
}

module.exports = checkUserMetadataSize;
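Sketch of the over-limit behaviour; the header names are invented and the real limit is constants.maximumMetaHeadersSize:

const trimmed = checkUserMetadataSize({
    'content-type': 'image/png',
    'x-amz-meta-note': 'x'.repeat(3000), // assumed to push past the limit
});
// All x-amz-meta-* headers are dropped and replaced by a count, i.e.
// trimmed = { 'content-type': 'image/png', [invalidObjectUserMetadataHeader]: 1 }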
@ -1,247 +0,0 @@
/*
 * Code based on Yutaka Oishi (Fujifilm) contributions
 * Date: 11 Sep 2020
 */
const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;

const { scaledMsPerDay } = config.getTimeOptions();

/**
 * Get response header "x-amz-restore"
 * Called by objectHead.js
 * @param {object} objMD - object's metadata
 * @returns {string|undefined} x-amz-restore
 */
function getAmzRestoreResHeader(objMD) {
    if (objMD.archive &&
        objMD.archive.restoreRequestedAt &&
        !objMD.archive.restoreCompletedAt) {
        // Avoid race condition by relying on the `archive` MD of the object
        // and return the right header after a RESTORE request.
        // eslint-disable-next-line
        return `ongoing-request="true"`;
    }
    if (objMD['x-amz-restore']) {
        if (objMD['x-amz-restore']['expiry-date']) {
            const utcDateTime = new Date(objMD['x-amz-restore']['expiry-date']).toUTCString();
            // eslint-disable-next-line
            return `ongoing-request="${objMD['x-amz-restore']['ongoing-request']}", expiry-date="${utcDateTime}"`;
        }
    }
    return undefined;
}

/**
 * Check if restore can be done.
 *
 * @param {ObjectMD} objectMD - object metadata
 * @param {object} log - werelogs logger
 * @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
 */
function _validateStartRestore(objectMD, log) {
    if (objectMD.archive?.restoreCompletedAt) {
        if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
            // return InvalidObjectState error if the restored object is expired
            // but restore info md of this object has not yet been cleared
            log.debug('The restored object already expired.',
                {
                    archive: objectMD.archive,
                    method: '_validateStartRestore',
                });
            return errors.InvalidObjectState;
        }

        // If the object is already restored, no further check is needed.
        // Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
        // been reset.
        return undefined;
    }
    const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
    if (!isLocationCold) {
        // return InvalidObjectState error if the object is not in cold storage;
        // not in cold storage means the location's cold flag is missing or explicitly false
        log.debug('The bucket of the object is not in a cold storage location.',
            {
                isLocationCold,
                method: '_validateStartRestore',
            });
        return errors.InvalidObjectState;
    }
    if (objectMD.archive?.restoreRequestedAt) {
        // return RestoreAlreadyInProgress error if the object is currently being restored,
        // i.e. archive.restoreRequestedAt exists and archive.restoreCompletedAt does not exist yet
        log.debug('The object is currently being restored.',
            {
                archive: objectMD.archive,
                method: '_validateStartRestore',
            });
        return errors.RestoreAlreadyInProgress;
    }
    return undefined;
}

/**
 * Check if "put version id" is allowed
 *
 * @param {ObjectMD} objMD - object metadata
 * @param {string} versionId - object's version id
 * @param {object} log - werelogs logger
 * @return {ArsenalError|undefined} - undefined if "put version id" is allowed
 */
function validatePutVersionId(objMD, versionId, log) {
    if (!objMD) {
        const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
        log.error('error no object metadata found', { method: 'validatePutVersionId', versionId });
        return err;
    }

    if (objMD.isDeleteMarker) {
        log.error('version is a delete marker', { method: 'validatePutVersionId', versionId });
        return errors.MethodNotAllowed;
    }

    const isLocationCold = locationConstraints[objMD.dataStoreName]?.isCold;
    if (!isLocationCold) {
        log.error('The object data is not stored in a cold storage location.',
            {
                isLocationCold,
                dataStoreName: objMD.dataStoreName,
                method: 'validatePutVersionId',
            });
        return errors.InvalidObjectState;
    }

    // make sure object archive restoration is in progress
    // NOTE: we do not use putObjectVersion to update the restoration period.
    if (!objMD.archive || !objMD.archive.restoreRequestedAt || !objMD.archive.restoreRequestedDays
        || objMD.archive.restoreCompletedAt || objMD.archive.restoreWillExpireAt) {
        log.error('object archive restoration is not in progress',
            { method: 'validatePutVersionId', versionId });
        return errors.InvalidObjectState;
    }

    return undefined;
}

/**
 * Check if the object is already restored, and update the expiration date accordingly:
 * > After restoring an archived object, you can update the restoration period by reissuing the
 * > request with a new period. Amazon S3 updates the restoration period relative to the current
 * > time.
 *
 * @param {ObjectMD} objectMD - object metadata
 * @param {object} log - werelogs logger
 * @return {boolean} - true if the object is already restored
 */
function _updateObjectExpirationDate(objectMD, log) {
    // Check if the restoreCompletedAt field exists.
    // Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
    // checked earlier in the process, so checking again here would create weird states
    const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
    log.debug('The restore status of the object.', {
        isObjectAlreadyRestored,
        method: 'isObjectAlreadyRestored'
    });
    if (isObjectAlreadyRestored) {
        const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
        expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));

        /* eslint-disable no-param-reassign */
        objectMD.archive.restoreWillExpireAt = expiryDate;
        objectMD['x-amz-restore'] = {
            'ongoing-request': false,
            'expiry-date': expiryDate,
        };
        /* eslint-enable no-param-reassign */
    }
    return isObjectAlreadyRestored;
}

/**
 * update restore expiration date.
 *
 * @param {ObjectMD} objectMD - objectMD instance
 * @param {object} restoreParam - restore param
 * @param {object} log - werelogs logger
 * @return {ArsenalError|undefined} internal error if object MD is not valid
 *
 */
function _updateRestoreInfo(objectMD, restoreParam, log) {
    if (!objectMD.archive) {
        log.debug('objectMD.archive doesn\'t exist', {
            objectMD,
            method: '_updateRestoreInfo'
        });
        return errors.InternalError.customizeDescription('Archive metadata is missing.');
    }
    /* eslint-disable no-param-reassign */
    objectMD.archive.restoreRequestedAt = new Date();
    objectMD.archive.restoreRequestedDays = restoreParam.days;
    objectMD.originOp = 's3:ObjectRestore:Post';
    /* eslint-enable no-param-reassign */
    if (!ObjectMDArchive.isValid(objectMD.archive)) {
        log.debug('archive is not valid', {
            archive: objectMD.archive,
            method: '_updateRestoreInfo'
        });
        return errors.InternalError.customizeDescription('Invalid archive metadata.');
    }
    return undefined;
}

/**
 * start to restore object.
 * If x-amz-restore does not exist, add it to objectMD (x-amz-restore = false),
 * calculate the restore expiry-date and add it to objectMD.
 * Called by objectRestore.js
 *
 * @param {ObjectMD} objectMD - objectMD instance
 * @param {object} restoreParam - restore request parameters
 * @param {object} log - werelogs logger
 * @param {function} cb - callback
 * @return {undefined}
 *
 */
function startRestore(objectMD, restoreParam, log, cb) {
    log.info('Validating if restore can be done or not.');
    const checkResultError = _validateStartRestore(objectMD, log);
    if (checkResultError) {
        return cb(checkResultError);
    }
    log.info('Updating restore information.');
    const updateResultError = _updateRestoreInfo(objectMD, restoreParam, log);
    if (updateResultError) {
        return cb(updateResultError);
    }
    const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
    return cb(null, isObjectAlreadyRestored);
}

/**
 * checks if object data is available or if it's in cold storage
 * @param {ObjectMD} objMD - Object metadata
 * @returns {ArsenalError|null} error if object data is not available
 */
function verifyColdObjectAvailable(objMD) {
    // return error when object is cold
    if (objMD.archive &&
        // Object is in cold backend
        (!objMD.archive.restoreRequestedAt ||
            // Object is being restored
            (objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
        const err = errors.InvalidObjectState
            .customizeDescription('The operation is not valid for the object\'s storage class');
        return err;
    }
    return null;
}

module.exports = {
    startRestore,
    getAmzRestoreResHeader,
    validatePutVersionId,
    verifyColdObjectAvailable,
};
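Quick illustration of the x-amz-restore header builder above (the timestamps are made up):

getAmzRestoreResHeader({
    archive: { restoreRequestedAt: '2023-01-10T00:00:00Z' },
});
// => 'ongoing-request="true"' (restore requested, not yet completed)

getAmzRestoreResHeader({
    'x-amz-restore': { 'ongoing-request': false, 'expiry-date': '2023-01-20T00:00:00Z' },
});
// => 'ongoing-request="false", expiry-date="Fri, 20 Jan 2023 00:00:00 GMT"'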
@ -1,28 +0,0 @@
/**
 * generateToken - generates obfuscated continue token from object keyName
 * @param {String} keyName - name of key to obfuscate
 * @return {String} - obfuscated continue token
 */
function generateToken(keyName) {
    if (keyName === '' || keyName === undefined) {
        return undefined;
    }
    return Buffer.from(keyName).toString('base64');
}

/**
 * decryptToken - decrypts object keyName from obfuscated continue token
 * @param {String} token - obfuscated continue token
 * @return {String} - object keyName
 */
function decryptToken(token) {
    if (token === '' || token === undefined) {
        return undefined;
    }
    return Buffer.from(token, 'base64').toString('utf8');
}

module.exports = {
    generateToken,
    decryptToken,
};
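Round-trip sketch: the continuation "token" is plain base64 obfuscation, not encryption (the key name is illustrative):

const token = generateToken('photos/cat.jpg'); // => 'cGhvdG9zL2NhdC5qcGc='
decryptToken(token); // => 'photos/cat.jpg'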
@ -3,24 +3,25 @@ const { errors, s3middleware } = require('arsenal');
const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;

const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const data = require('../../../data/wrapper');
const services = require('../../../services');
const logger = require('../../../utilities/logger');
const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
const { versioningPreprocessing } = require('./versioning');
const removeAWSChunked = require('./removeAWSChunked');
const getReplicationInfo = require('./getReplicationInfo');
const { config } = require('../../../Config');
const validateWebsiteHeader = require('./websiteServing')
    .validateWebsiteHeader;
const applyZenkoUserMD = require('./applyZenkoUserMD');
const { externalBackends, versioningNotImplBackends } = constants;

const externalVersioningErrorMessage = 'We do not currently support putting ' +
    'a versioned object to a location-constraint of type Azure or GCP.';

function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
    metadataStoreParams, dataToDelete, log, requestMethod, callback) {
    metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
    services.metadataStoreObject(bucketName, dataGetInfo,
        cipherBundle, metadataStoreParams, (err, result) => {
            if (err) {
@ -29,8 +30,8 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
            if (dataToDelete) {
                const newDataStoreName = Array.isArray(dataGetInfo) ?
                    dataGetInfo[0].dataStoreName : null;
                return data.batchDelete(dataToDelete, requestMethod,
                    newDataStoreName, log, err => callback(err, result));
                data.batchDelete(dataToDelete, requestMethod,
                    newDataStoreName, deleteLog);
            }
            return callback(null, result);
        });
@ -50,9 +51,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
 * @param {(object|null)} streamingV4Params - if v4 auth, object containing
 * accessKey, signatureFromRequest, region, scopeDate, timestamp, and
 * credentialScope (to be used for streaming v4 auth if applicable)
 * @param {(object|null)} overheadField - fields to be included in metadata overhead
 * @param {RequestLogger} log - logger instance
 * @param {string} originOp - Origin operation
 * @param {function} callback - callback function
 * @return {undefined} and call callback with (err, result) -
 * result.contentMD5 - content md5 of new object or version
@ -60,10 +59,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
 */
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
    canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
    overheadField, log, originOp, callback) {
    const putVersionId = request.headers['x-scal-s3-version-id'];
    const isPutVersion = putVersionId || putVersionId === '';

    log, callback) {
    const size = isDeleteMarker ? 0 : request.parsedContentLength;
    // although the request method may actually be 'DELETE' if creating a
    // delete marker, for our purposes we consider this to be a 'PUT'
@ -86,9 +82,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
        });
        return process.nextTick(() => callback(metaHeaders));
    }
    // if the request occurs within a Zenko deployment, we place a user-metadata
    // field on the object
    applyZenkoUserMD(metaHeaders);

    log.trace('meta headers', { metaHeaders, method: 'objectPut' });
    const objectKeyContext = {
@ -116,24 +109,8 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
        isDeleteMarker,
        replicationInfo: getReplicationInfo(
            objectKey, bucketMD, false, size, null, null, authInfo),
        overheadField,
        log,
    };

    // For Azure BlobStorage API compatibility:
    // if an object already exists, copy/repair creation-time;
    // creation-time must remain static after an object is created
    // --> EVEN FOR VERSIONS <--
    if (objMD) {
        if (objMD['creation-time']) {
            metadataStoreParams.creationTime = objMD['creation-time'];
        } else {
            // If creation-time is not set (for old objects),
            // fall back to the last modified and store it back to the db
            metadataStoreParams.creationTime = objMD['last-modified'];
        }
    }

    if (!isDeleteMarker) {
        metadataStoreParams.contentType = request.headers['content-type'];
        metadataStoreParams.cacheControl = request.headers['cache-control'];
@ -143,13 +120,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
            removeAWSChunked(request.headers['content-encoding']);
        metadataStoreParams.expires = request.headers.expires;
        metadataStoreParams.tagging = request.headers['x-amz-tagging'];
        metadataStoreParams.originOp = originOp;
        const defaultObjectLockConfiguration
            = bucketMD.getObjectLockConfiguration();
        if (defaultObjectLockConfiguration) {
            metadataStoreParams.defaultRetention
                = defaultObjectLockConfiguration;
        }
    }

    // if creating new delete marker and there is an existing object, copy
@ -158,7 +128,6 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
        // eslint-disable-next-line no-param-reassign
        request.headers[constants.objectLocationConstraintHeader] =
            objMD[constants.objectLocationConstraintHeader];
        metadataStoreParams.originOp = originOp;
    }

    const backendInfoObj =
@ -189,49 +158,17 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
        }
    }

    if (objMD && objMD.uploadId) {
        metadataStoreParams.oldReplayId = objMD.uploadId;
    }

    /* eslint-disable camelcase */
    const dontSkipBackend = externalBackends;
    /* eslint-enable camelcase */

    const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
    const mdOnlySize = request.headers['x-amz-meta-size'];

    const requestLogger =
        logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
    return async.waterfall([
        function storeData(next) {
            if (size === 0) {
                if (!dontSkipBackend[locationType]) {
                    metadataStoreParams.contentMD5 = constants.emptyFileMd5;
                    return next(null, null, null);
                }
                // Handle mdOnlyHeader as a metadata only operation. If
                // the object in question is actually 0 byte or has a body size
                // then handle normally.
                if (mdOnlyHeader === 'true' && mdOnlySize > 0) {
                    log.debug('metadata only operation x-amz-meta-mdonly');
                    const md5 = request.headers['x-amz-meta-md5chksum']
                        ? new Buffer(request.headers['x-amz-meta-md5chksum'],
                            'base64').toString('hex') : null;
                    const numParts = request.headers['x-amz-meta-md5numparts'];
                    let _md5;
                    if (numParts === undefined) {
                        _md5 = md5;
                    } else {
                        _md5 = `${md5}-${numParts}`;
                    }
                    const versionId = request.headers['x-amz-meta-version-id'];
                    const dataGetInfo = {
                        key: objectKey,
                        dataStoreName: location,
                        dataStoreType: locationType,
                        dataStoreVersionId: versionId,
                        dataStoreMD5: _md5,
                    };
                    return next(null, dataGetInfo, _md5);
                }
            if (size === 0 && !dontSkipBackend[locationType]) {
                metadataStoreParams.contentMD5 = constants.emptyFileMd5;
                return next(null, null, null);
            }
            return dataStore(objectKeyContext, cipherBundle, request, size,
                streamingV4Params, backendInfo, log, next);
@ -256,25 +193,16 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
                dataGetInfoArr[0].cipheredDataKey =
                    cipherBundle.cipheredDataKey;
            }
            if (mdOnlyHeader === 'true') {
                metadataStoreParams.size = mdOnlySize;
                dataGetInfoArr[0].size = mdOnlySize;
            }
            metadataStoreParams.contentMD5 = calculatedHash;
            return next(null, dataGetInfoArr);
        },
        function getVersioningInfo(infoArr, next) {
            // if the x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
            if (isPutVersion) {
                const options = overwritingVersioning(objMD, metadataStoreParams);
                return process.nextTick(() => next(null, options, infoArr));
            }
            return versioningPreprocessing(bucketName, bucketMD,
                metadataStoreParams.objectKey, objMD, log, (err, options) => {
                    if (err) {
                        // TODO: check AWS error when user requested a specific
                        // version before any versions have been put
                        const logLvl = err.is.BadRequest ?
                        const logLvl = err === errors.BadRequest ?
                            'debug' : 'error';
                        log[logLvl]('error getting versioning info', {
                            error: err,
@ -288,13 +216,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
            metadataStoreParams.versionId = options.versionId;
            metadataStoreParams.versioning = options.versioning;
            metadataStoreParams.isNull = options.isNull;
            metadataStoreParams.deleteNullKey = options.deleteNullKey;
            if (options.extraMD) {
                Object.assign(metadataStoreParams, options.extraMD);
            }
            metadataStoreParams.nullVersionId = options.nullVersionId;
            return _storeInMDandDeleteData(bucketName, infoArr,
                cipherBundle, metadataStoreParams,
                options.dataToDelete, log, requestMethod, next);
                options.dataToDelete, requestLogger, requestMethod, next);
        },
    ], callback);
}
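The mdonly path above derives the stored MD5 from user-supplied headers; a small sketch of that derivation (the header values are invented — the base64 string happens to be the MD5 of an empty body):

const md5 = Buffer.from('1B2M2Y8AsgTpgAmY7PhCfg==', 'base64').toString('hex');
// => 'd41d8cd98f00b204e9800998ecf8427e'
const numParts = '3'; // from x-amz-meta-md5numparts
const _md5 = numParts === undefined ? md5 : `${md5}-${numParts}`;
// => 'd41d8cd98f00b204e9800998ecf8427e-3', the multipart-style ETag form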