Compare commits

12 Commits
developmen ... w/7.70/imp

| Author | SHA1 |
|---|---|
| Will Toozs | 7b5ba98a57 |
| Will Toozs | 329dbcc4dc |
| Will Toozs | be51322c79 |
| Will Toozs | 718468f53b |
| Will Toozs | 452878deec |
| Will Toozs | 7ce2e6fc44 |
| Will Toozs | 40e4019ac3 |
| Will Toozs | b11f3cfc5d |
| Will Toozs | 4ea06f22ed |
| Will Toozs | 133c6f05f0 |
| Will Toozs | f8fe00e114 |
| Will Toozs | a1839e36ea |
@@ -1,9 +1,3 @@
-node_modules
-localData/*
-localMetadata/*
-# Keep the .git/HEAD file in order to properly report version
-.git/objects
+.git
 .github
-.tox
-coverage
-.DS_Store
+node_modules
.eslintrc (54 changed lines)
@@ -1,54 +1,10 @@
 {
     "extends": "scality",
-    "plugins": [
-        "mocha"
-    ],
-    "rules": {
-        "import/extensions": "off",
-        "lines-around-directive": "off",
-        "no-underscore-dangle": "off",
-        "indent": "off",
-        "object-curly-newline": "off",
-        "operator-linebreak": "off",
-        "function-paren-newline": "off",
-        "import/newline-after-import": "off",
-        "prefer-destructuring": "off",
-        "implicit-arrow-linebreak": "off",
-        "no-bitwise": "off",
-        "dot-location": "off",
-        "comma-dangle": "off",
-        "no-undef-init": "off",
-        "global-require": "off",
-        "import/no-dynamic-require": "off",
-        "class-methods-use-this": "off",
-        "no-plusplus": "off",
-        "no-else-return": "off",
-        "object-property-newline": "off",
-        "import/order": "off",
-        "no-continue": "off",
-        "no-tabs": "off",
-        "lines-between-class-members": "off",
-        "prefer-spread": "off",
-        "no-lonely-if": "off",
-        "no-useless-escape": "off",
-        "no-restricted-globals": "off",
-        "no-buffer-constructor": "off",
-        "import/no-extraneous-dependencies": "off",
-        "space-unary-ops": "off",
-        "no-useless-return": "off",
-        "no-unexpected-multiline": "off",
-        "no-mixed-operators": "off",
-        "newline-per-chained-call": "off",
-        "operator-assignment": "off",
-        "spaced-comment": "off",
-        "comma-style": "off",
-        "no-restricted-properties": "off",
-        "new-parens": "off",
-        "no-multi-spaces": "off",
-        "quote-props": "off",
-        "mocha/no-exclusive-tests": "error",
-    },
     "parserOptions": {
-        "ecmaVersion": 2020
+        "ecmaVersion": 2018,
+        "sourceType": "module",
+        "ecmaFeatures": {
+            "jsx": true
+        }
     }
 }
@@ -1,32 +1,19 @@
-# General support information
+# Issue template
 
-GitHub Issues are **reserved** for actionable bug reports (including
-documentation inaccuracies), and feature requests.
-**All questions** (regarding configuration, use cases, performance, community,
-events, setup and usage recommendations, among other things) should be asked on
-the **[Zenko Forum](http://forum.zenko.io/)**.
+If you are reporting a new issue, make sure that we do not have any
+duplicates already open. You can ensure this by searching the issue list for
+this repository. If there is a duplicate, please close your issue and add a
+comment to the existing issue instead.
 
-> Questions opened as GitHub issues will systematically be closed, and moved to
-> the [Zenko Forum](http://forum.zenko.io/).
-
---------------------------------------------------------------------------------
-
-## Avoiding duplicates
+## General support information
 
-When reporting a new issue/requesting a feature, make sure that we do not have
-any duplicates already open:
-
-- search the issue list for this repository (use the search bar, select
-  "Issues" on the left pane after searching);
-- if there is a duplicate, please do not open your issue, and add a comment
-  to the existing issue instead.
-
---------------------------------------------------------------------------------
-
+GitHub Issues are reserved for actionable bug reports and feature requests.
+General questions should be sent to the
+[S3 scality server Forum](http://forum.scality.com/).
+
 ## Bug report information
 
-(delete this section (everything between the lines) if you're not reporting a bug
-but requesting a feature)
+(delete this section if not applicable)
 
 ### Description
 
@@ -42,22 +29,13 @@ Describe the results you received
 
 ### Expected result
 
-Describe the results you expected
+Describe the results you expecteds
 
-### Additional information
+### Additional information: (Node.js version, Docker version, etc)
 
-- Node.js version,
-- Docker version,
-- yarn version,
-- distribution/OS,
-- optional: anything else you deem helpful to us.
-
---------------------------------------------------------------------------------
-
 ## Feature Request
 
-(delete this section (everything between the lines) if you're not requesting
-a feature but reporting a bug)
+(delete this section if not applicable)
 
 ### Proposal
 
@@ -74,14 +52,3 @@ What you would like to happen
 ### Use case
 
 Please provide use cases for changing the current behavior
-
-### Additional information
-
-- Is this request for your company? Y/N
-- If Y: Company name:
-- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
-- Are you willing to contribute this feature yourself?
-- Position/Title:
-- How did you hear about us?
-
---------------------------------------------------------------------------------
-
@@ -15,29 +15,11 @@ runs:
       shell: bash
       run: |-
         set -exu;
-        mkdir -p /tmp/artifacts/${JOB_NAME}/;
-    - uses: actions/setup-node@v4
+        mkdir -p /tmp/artifacts/${{ github.job }}/;
+    - uses: actions/setup-node@v2
       with:
         node-version: '16'
         cache: 'yarn'
     - name: install dependencies
       shell: bash
       run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
-    - uses: actions/cache@v3
-      with:
-        path: ~/.cache/pip
-        key: ${{ runner.os }}-pip
-    - uses: actions/setup-python@v4
-      with:
-        python-version: 3.9
-    - name: Setup python2 test environment
-      shell: bash
-      run: |
-        sudo apt-get install -y libdigest-hmac-perl
-        pip install 's3cmd==2.3.0'
-    - name: fix sproxyd.conf permissions
-      shell: bash
-      run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
-    - name: ensure fuse kernel module is loaded (for sproxyd)
-      shell: bash
-      run: sudo modprobe fuse
@@ -1,25 +0,0 @@
-FROM ceph/daemon:v3.2.1-stable-3.2-mimic-centos-7
-
-ENV CEPH_DAEMON demo
-ENV CEPH_DEMO_DAEMONS mon,mgr,osd,rgw
-
-ENV CEPH_DEMO_UID zenko
-ENV CEPH_DEMO_ACCESS_KEY accessKey1
-ENV CEPH_DEMO_SECRET_KEY verySecretKey1
-ENV CEPH_DEMO_BUCKET zenkobucket
-
-ENV CEPH_PUBLIC_NETWORK 0.0.0.0/0
-ENV MON_IP 0.0.0.0
-ENV NETWORK_AUTO_DETECT 4
-ENV RGW_CIVETWEB_PORT 8001
-
-RUN rm /etc/yum.repos.d/tcmu-runner.repo
-
-ADD ./entrypoint-wrapper.sh /
-RUN chmod +x /entrypoint-wrapper.sh && \
-    yum install -y python-pip && \
-    yum clean all && \
-    pip install awscli && \
-    rm -rf /root/.cache/pip
-
-ENTRYPOINT [ "/entrypoint-wrapper.sh" ]
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-touch /artifacts/ceph.log
-mkfifo /tmp/entrypoint_output
-# We run this in the background so that we can tail the RGW log after init,
-# because entrypoint.sh never returns
-
-# The next line will be needed when ceph builds 3.2.2 so I'll leave it here
-# bash /opt/ceph-container/bin/entrypoint.sh > /tmp/entrypoint_output &
-
-bash /entrypoint.sh > /tmp/entrypoint_output &
-entrypoint_pid="$!"
-while read -r line; do
-  echo $line
-  # When we find this line server has started
-  if [ -n "$(echo $line | grep 'Creating bucket')" ]; then
-    break
-  fi
-done < /tmp/entrypoint_output
-
-# Make our buckets - CEPH_DEMO_BUCKET is set to force the "Creating bucket" message, but unused
-s3cmd mb s3://cephbucket s3://cephbucket2
-
-mkdir /root/.aws
-cat > /root/.aws/credentials <<EOF
-[default]
-aws_access_key_id = accessKey1
-aws_secret_access_key = verySecretKey1
-EOF
-
-# Enable versioning on them
-for bucket in cephbucket cephbucket2; do
-  echo "Enabling versiong for $bucket"
-  aws --endpoint http://127.0.0.1:8001 s3api put-bucket-versioning --bucket $bucket --versioning Status=Enabled
-done
-tail -f /var/log/ceph/client.rgw.*.log | tee -a /artifacts/ceph.log
-wait $entrypoint_pid
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-# This script is needed because RADOS Gateway
-# will open the port before beginning to serve traffic
-# causing wait_for_local_port.bash to exit immediately
-
-echo 'Waiting for ceph'
-while [ -z "$(curl 127.0.0.1:8001 2>/dev/null)" ]; do
-  sleep 1
-  echo -n "."
-done
@@ -34,3 +34,4 @@ gcpbackendmismatch_GCP_SERVICE_KEY
 gcpbackend_GCP_SERVICE_KEYFILE
 gcpbackendmismatch_GCP_SERVICE_KEYFILE
 gcpbackendnoproxy_GCP_SERVICE_KEYFILE
+gcpbackendproxy_GCP_SERVICE_KEYFILE
@@ -20,7 +20,6 @@ services:
       - METADATA_HOST=0.0.0.0
       - S3BACKEND
       - S3DATA
-      - S3METADATA
       - MPU_TESTING
       - S3VAULT
      - S3_LOCATION_FILE
@@ -35,16 +34,7 @@ services:
       - S3KMIP_KEY
       - S3KMIP_CERT
       - S3KMIP_CA
-      - MONGODB_HOSTS=0.0.0.0:27018
-      - MONGODB_RS=rs0
-      - DEFAULT_BUCKET_KEY_FORMAT
-      - METADATA_MAX_CACHED_BUCKETS
       - ENABLE_NULL_VERSION_COMPAT_MODE
-      - SCUBA_HOST
-      - SCUBA_PORT
-      - SCUBA_HEALTHCHECK_FREQUENCY
-      - S3QUOTA
-      - QUOTA_ENABLE_INFLIGHTS
     env_file:
       - creds.env
     depends_on:
@@ -72,21 +62,6 @@ services:
   pykmip:
     network_mode: "host"
     profiles: ['pykmip']
-    image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
+    image: registry.scality.com/cloudserver-dev/pykmip
     volumes:
       - /tmp/artifacts/${JOB_NAME}:/artifacts
-  mongo:
-    network_mode: "host"
-    profiles: ['mongo', 'ceph']
-    image: ${MONGODB_IMAGE}
-  ceph:
-    network_mode: "host"
-    profiles: ['ceph']
-    image: ghcr.io/scality/cloudserver/ci-ceph
-  sproxyd:
-    network_mode: "host"
-    profiles: ['sproxyd']
-    image: sproxyd-standalone
-    build: ./sproxyd
-    user: 0:0
-    privileged: yes
@@ -1,28 +0,0 @@
-FROM mongo:5.0.21
-
-ENV USER=scality \
-    HOME_DIR=/home/scality \
-    CONF_DIR=/conf \
-    DATA_DIR=/data
-
-# Set up directories and permissions
-RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
-    mkdir /logs; \
-    adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
-
-# Set up environment variables and directories for scality user
-RUN mkdir ${CONF_DIR} && \
-    chown -R ${USER} ${CONF_DIR} && \
-    chown -R ${USER} ${DATA_DIR}
-
-# copy the mongo config file
-COPY /conf/mongod.conf /conf/mongod.conf
-COPY /conf/mongo-run.sh /conf/mongo-run.sh
-COPY /conf/initReplicaSet /conf/initReplicaSet.js
-
-EXPOSE 27017/tcp
-EXPOSE 27018
-
-# Set up CMD
-ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
-CMD ["bash", "/conf/mongo-run.sh"]
@@ -1,4 +0,0 @@
-rs.initiate({
-    _id: "rs0",
-    members: [{ _id: 0, host: "127.0.0.1:27018" }]
-});
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -exo pipefail
-
-init_RS() {
-  sleep 5
-  mongo --port 27018 /conf/initReplicaSet.js
-}
-init_RS &
-
-mongod --bind_ip_all --config=/conf/mongod.conf
@@ -1,15 +0,0 @@
-storage:
-    journal:
-        enabled: true
-    engine: wiredTiger
-    dbPath: "/data/db"
-processManagement:
-    fork: false
-net:
-    port: 27018
-    bindIp: 0.0.0.0
-replication:
-    replSetName: "rs0"
-    enableMajorityReadConcern: true
-security:
-    authorization: disabled
@@ -1,3 +0,0 @@
-FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
-ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
-RUN chown root:root /conf/sproxyd0.conf
@@ -1,26 +0,0 @@
-fastcgi_param QUERY_STRING $query_string;
-fastcgi_param REQUEST_METHOD $request_method;
-fastcgi_param CONTENT_TYPE $content_type;
-fastcgi_param CONTENT_LENGTH $content_length;
-
-#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
-fastcgi_param SCRIPT_NAME /var/www;
-fastcgi_param PATH_INFO $document_uri;
-
-fastcgi_param REQUEST_URI $request_uri;
-fastcgi_param DOCUMENT_URI $document_uri;
-fastcgi_param DOCUMENT_ROOT $document_root;
-fastcgi_param SERVER_PROTOCOL $server_protocol;
-fastcgi_param HTTPS $https if_not_empty;
-
-fastcgi_param GATEWAY_INTERFACE CGI/1.1;
-fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
-
-fastcgi_param REMOTE_ADDR $remote_addr;
-fastcgi_param REMOTE_PORT $remote_port;
-fastcgi_param SERVER_ADDR $server_addr;
-fastcgi_param SERVER_PORT $server_port;
-fastcgi_param SERVER_NAME $server_name;
-
-# PHP only, required if PHP was built with --enable-force-cgi-redirect
-fastcgi_param REDIRECT_STATUS 200;
@@ -1,88 +0,0 @@
-worker_processes 1;
-error_log /logs/error.log;
-user root root;
-events {
-    worker_connections 1000;
-    reuse_port on;
-    multi_accept on;
-}
-worker_rlimit_nofile 20000;
-http {
-    root /var/www/;
-    upstream sproxyds {
-        least_conn;
-        keepalive 40;
-        server 127.0.0.1:20000;
-    }
-    server {
-        client_max_body_size 0;
-        client_body_timeout 150;
-        client_header_timeout 150;
-        postpone_output 0;
-        client_body_postpone_size 0;
-        keepalive_requests 1100;
-        keepalive_timeout 300s;
-        server_tokens off;
-        default_type application/octet-stream;
-        gzip off;
-        tcp_nodelay on;
-        tcp_nopush on;
-        sendfile on;
-        listen 81;
-        server_name localhost;
-        rewrite ^/arc/(.*)$ /dc1/$1 permanent;
-        location ~* ^/proxy/(.*)$ {
-            rewrite ^/proxy/(.*)$ /$1 last;
-        }
-        allow 127.0.0.1;
-
-        deny all;
-        set $usermd '-';
-        set $sentusermd '-';
-        set $elapsed_ms '-';
-        set $now '-';
-        log_by_lua '
-            if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
-                ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
-            end
-            if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
-                ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
-            end
-            local elapsed_ms = tonumber(ngx.var.request_time)
-            if not ( elapsed_ms == nil) then
-                elapsed_ms = elapsed_ms * 1000
-                ngx.var.elapsed_ms = tostring(elapsed_ms)
-            end
-            local time = tonumber(ngx.var.msec) * 1000
-            ngx.var.now = time
-        ';
-        log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
-            '"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
-            '"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
-            '"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
-            '"contentType":"$content_type","s3Address":"$remote_addr",'
-            '"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
-            '"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
-            '"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
-            '"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
-            '"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
-            '"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
-        access_log /dev/stdout irm;
-        error_log /dev/stdout error;
-        location / {
-            proxy_request_buffering off;
-            fastcgi_request_buffering off;
-            fastcgi_no_cache 1;
-            fastcgi_cache_bypass 1;
-            fastcgi_buffering off;
-            fastcgi_ignore_client_abort on;
-            fastcgi_keep_conn on;
-            include fastcgi_params;
-            fastcgi_pass sproxyds;
-            fastcgi_next_upstream error timeout;
-            fastcgi_send_timeout 285s;
-            fastcgi_read_timeout 285s;
-        }
-    }
-}
@@ -1,12 +0,0 @@
-{
-    "general": {
-        "ring": "DATA",
-        "port": 20000,
-        "syslog_facility": "local0"
-    },
-    "ring_driver:0": {
-        "alias": "dc1",
-        "type": "local",
-        "queue_path": "/tmp/ring-objs"
-    },
-}
@@ -1,43 +0,0 @@
-[supervisord]
-nodaemon = true
-loglevel = info
-logfile = %(ENV_LOG_DIR)s/supervisord.log
-pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
-logfile_maxbytes = 20MB
-logfile_backups = 2
-
-[unix_http_server]
-file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
-
-[program:nginx]
-directory=%(ENV_SUP_RUN_DIR)s
-command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
-stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
-stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
-stdout_logfile_maxbytes=100MB
-stdout_logfile_backups=7
-stderr_logfile_maxbytes=100MB
-stderr_logfile_backups=7
-autorestart=true
-autostart=true
-user=root
-
-[program:sproxyd]
-directory=%(ENV_SUP_RUN_DIR)s
-process_name=%(program_name)s-%(process_num)s
-numprocs=1
-numprocs_start=0
-command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
-stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
-stdout_logfile_maxbytes=100MB
-stdout_logfile_backups=7
-redirect_stderr=true
-autorestart=true
-autostart=true
-user=root
@@ -1,35 +0,0 @@
-name: Test alerts
-
-on:
-  push:
-    branches-ignore:
-      - 'development/**'
-      - 'q/*/**'
-
-jobs:
-  run-alert-tests:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        tests:
-          - name: 1 minute interval tests
-            file: monitoring/alerts.test.yaml
-
-          - name: 10 seconds interval tests
-            file: monitoring/alerts.10s.test.yaml
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-
-      - name: Render and test ${{ matrix.tests.name }}
-        uses: scality/action-prom-render-test@1.0.3
-        with:
-          alert_file_path: monitoring/alerts.yaml
-          test_file_path: ${{ matrix.tests.file }}
-          alert_inputs: |
-            namespace=zenko
-            service=artesca-data-connector-s3api-metrics
-            reportJob=artesca-data-ops-report-handler
-            replicas=3
-          github_token: ${{ secrets.GITHUB_TOKEN }}
@@ -3,7 +3,7 @@ name: codeQL
 
 on:
   push:
-    branches: [w/**, q/*]
+    branches: [development/*, stabilization/*, hotfix/*]
   pull_request:
     branches: [development/*, stabilization/*, hotfix/*]
   workflow_dispatch:
@@ -14,12 +14,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
 
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
+        uses: github/codeql-action/init@v2
         with:
           languages: javascript, python, ruby
 
       - name: Build and analyze
-        uses: github/codeql-action/analyze@v3
+        uses: github/codeql-action/analyze@v2
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
 
       - name: 'Dependency Review'
-        uses: actions/dependency-review-action@v4
+        uses: actions/dependency-review-action@v3
@@ -1,6 +1,5 @@
 ---
 name: release
-run-name: release ${{ inputs.tag }}
 
 on:
   workflow_dispatch:
@@ -9,70 +8,38 @@ on:
         description: 'Tag to be released'
         required: true
 
-env:
-  PROJECT_NAME: ${{ github.event.repository.name }}
-
 jobs:
   build-federation-image:
-    runs-on: ubuntu-20.04
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to GitHub Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
-      - name: Build and push image for federation
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          context: .
-          file: images/svc-base/Dockerfile
-          tags: |
-            ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
-          cache-from: type=gha,scope=federation
-          cache-to: type=gha,mode=max,scope=federation
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+    secrets: inherit
+    with:
+      push: true
+      registry: registry.scality.com
+      namespace: ${{ github.event.repository.name }}
+      name: ${{ github.event.repository.name }}
+      context: .
+      file: images/svc-base/Dockerfile
+      tag: ${{ github.event.inputs.tag }}-svc-base
 
-  release:
+  build-image:
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+    secrets: inherit
+    with:
+      push: true
+      registry: registry.scality.com
+      namespace: ${{ github.event.repository.name }}
+      name: ${{ github.event.repository.name }}
+      context: .
+      file: Dockerfile
+      tag: ${{ github.event.inputs.tag }}
+
+  github-release:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-
-      - name: Set up Docker Buildk
-        uses: docker/setup-buildx-action@v3
-
-      - name: Login to Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
-
-      - name: Push dashboards into the production namespace
-        run: |
-          oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
-            dashboard.json:application/grafana-dashboard+json \
-            alerts.yaml:application/prometheus-alerts+yaml
-        working-directory: monitoring
-
-      - name: Build and push
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          push: true
-          tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
-
       - name: Create Release
-        uses: softprops/action-gh-release@v2
+        uses: softprops/action-gh-release@v1
         env:
-          GITHUB_TOKEN: ${{ github.token }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
           name: Release ${{ github.event.inputs.tag }}
           tag_name: ${{ github.event.inputs.tag }}
@@ -2,8 +2,6 @@
 name: tests
 
 on:
-  workflow_dispatch:
-
   push:
     branches-ignore:
       - 'development/**'
@@ -67,24 +65,23 @@ env:
   ENABLE_LOCAL_CACHE: "true"
   REPORT_TOKEN: "report-token-1"
   REMOTE_MANAGEMENT_DISABLE: "1"
-  # https://github.com/git-lfs/git-lfs/issues/5749
-  GIT_CLONE_PROTECTION_ACTIVE: 'false'
 jobs:
   linting-coverage:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
+        uses: actions/checkout@v2
+      - uses: actions/setup-node@v2
         with:
           node-version: '16'
           cache: yarn
       - name: install dependencies
         run: yarn install --frozen-lockfile --network-concurrency 1
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v4
         with:
           python-version: '3.9'
-      - uses: actions/cache@v4
+      - uses: actions/cache@v2
         with:
           path: ~/.cache/pip
           key: ${{ runner.os }}-pip
@@ -117,7 +114,7 @@ jobs:
           find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
         if: always()
       - name: Upload files to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v2
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -128,166 +125,76 @@ jobs:
 
   build:
     runs-on: ubuntu-20.04
-    permissions:
-      contents: read
-      packages: write
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1.6.0
       - name: Login to GitHub Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1.10.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Login to Registry
+        uses: docker/login-action@v1
+        with:
+          registry: registry.scality.com
+          username: ${{ secrets.REGISTRY_LOGIN }}
+          password: ${{ secrets.REGISTRY_PASSWORD }}
       - name: Build and push cloudserver image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v3
         with:
           push: true
           context: .
           provenance: false
           tags: |
-            ghcr.io/${{ github.repository }}:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
+            ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
+            registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
           cache-from: type=gha,scope=cloudserver
           cache-to: type=gha,mode=max,scope=cloudserver
-      - name: Build and push pykmip image
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          context: .github/pykmip
-          tags: |
-            ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
-          cache-from: type=gha,scope=pykmip
-          cache-to: type=gha,mode=max,scope=pykmip
-      - name: Build and push MongoDB
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          context: .github/docker/mongodb
-          tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-          cache-from: type=gha,scope=mongodb
-          cache-to: type=gha,mode=max,scope=mongodb
+
+  build-federation-image:
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+    secrets: inherit
+    with:
+      push: true
+      registry: registry.scality.com
+      namespace: cloudserver-dev
+      name: cloudserver
+      context: .
+      file: images/svc-base/Dockerfile
+      tag: ${{ github.sha }}-svc-base
 
   multiple-backend:
     runs-on: ubuntu-latest
     needs: build
     env:
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       S3BACKEND: mem
       S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
       S3DATA: multiple
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
-      - name: Login to Registry
-        uses: docker/login-action@v3
+        uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
         with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          python-version: 3.9
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup CI services
-        run: docker compose --profile sproxyd up -d
+        run: docker compose up -d
         working-directory: .github/docker
       - name: Run multiple backend test
         run: |-
           set -o pipefail;
           bash wait_for_local_port.bash 8000 40
-          bash wait_for_local_port.bash 81 40
           yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
         env:
           S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
-        with:
-          method: upload
-          url: https://artifacts.scality.net
-          user: ${{ secrets.ARTIFACTS_USER }}
-          password: ${{ secrets.ARTIFACTS_PASSWORD }}
-          source: /tmp/artifacts
-        if: always()
-
-  mongo-v0-ft-tests:
-    runs-on: ubuntu-latest
-    needs: build
-    env:
-      S3BACKEND: mem
-      MPU_TESTING: "yes"
-      S3METADATA: mongodb
-      S3KMS: file
-      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
-      DEFAULT_BUCKET_KEY_FORMAT: v0
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      JOB_NAME: ${{ github.job }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Setup CI environment
-        uses: ./.github/actions/setup-ci
-      - name: Setup CI services
-        run: docker compose --profile mongo up -d
-        working-directory: .github/docker
-      - name: Run functional tests
-        run: |-
-          set -o pipefail;
-          bash wait_for_local_port.bash 8000 40
-          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
-        env:
-          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
-      - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
-        with:
-          method: upload
-          url: https://artifacts.scality.net
-          user: ${{ secrets.ARTIFACTS_USER }}
-          password: ${{ secrets.ARTIFACTS_PASSWORD }}
-          source: /tmp/artifacts
-        if: always()
-
-  mongo-v1-ft-tests:
-    runs-on: ubuntu-latest
-    needs: build
-    env:
-      S3BACKEND: mem
-      MPU_TESTING: "yes"
-      S3METADATA: mongodb
-      S3KMS: file
-      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
-      DEFAULT_BUCKET_KEY_FORMAT: v1
-      METADATA_MAX_CACHED_BUCKETS: 1
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      JOB_NAME: ${{ github.job }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Setup CI environment
-        uses: ./.github/actions/setup-ci
-      - name: Setup CI services
-        run: docker compose --profile mongo up -d
-        working-directory: .github/docker
-      - name: Run functional tests
-        run: |-
-          set -o pipefail;
-          bash wait_for_local_port.bash 8000 40
-          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
-          yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
-        env:
-          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
-      - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -300,20 +207,26 @@ jobs:
     strategy:
       matrix:
         include:
-          - job-name: file-ft-tests
+          - enable-null-compat: ''
+            job-name: file-ft-tests
+          - enable-null-compat: 'true'
+            job-name: file-ft-tests-null-compat
     name: ${{ matrix.job-name }}
     runs-on: ubuntu-latest
     needs: build
     env:
       S3BACKEND: file
       S3VAULT: mem
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       MPU_TESTING: "yes"
+      ENABLE_NULL_VERSION_COMPAT_MODE: "${{ matrix.enable-null-compat }}"
       JOB_NAME: ${{ matrix.job-name }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup matrix job artifacts directory
@@ -321,6 +234,10 @@ jobs:
         run: |
           set -exu
           mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
+      - name: Setup python test environment
+        run: |
+          sudo apt-get install -y libdigest-hmac-perl
+          pip install 's3cmd==2.3.0'
       - name: Setup CI services
         run: docker compose up -d
         working-directory: .github/docker
@@ -330,7 +247,7 @@ jobs:
           bash wait_for_local_port.bash 8000 40
           yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -346,12 +263,14 @@ jobs:
       ENABLE_UTAPI_V2: t
       S3BACKEND: mem
       BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Setup CI services
@@ -363,51 +282,7 @@ jobs:
           bash wait_for_local_port.bash 8000 40
           yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
-        with:
-          method: upload
-          url: https://artifacts.scality.net
-          user: ${{ secrets.ARTIFACTS_USER }}
-          password: ${{ secrets.ARTIFACTS_PASSWORD }}
-          source: /tmp/artifacts
-        if: always()
-
-  quota-tests:
-    runs-on: ubuntu-latest
-    needs: build
-    strategy:
-      matrix:
-        inflights:
-          - name: "With Inflights"
-            value: "true"
-          - name: "Without Inflights"
-            value: "false"
-    env:
-      S3METADATA: mongodb
-      S3BACKEND: mem
-      S3QUOTA: scuba
-      QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
-      SCUBA_HOST: localhost
-      SCUBA_PORT: 8100
-      SCUBA_HEALTHCHECK_FREQUENCY: 100
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      JOB_NAME: ${{ github.job }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Setup CI environment
-        uses: ./.github/actions/setup-ci
-      - name: Setup CI services
-        run: docker compose --profile mongo up -d
-        working-directory: .github/docker
-      - name: Run quota tests
-        run: |-
-          set -ex -o pipefail;
-          bash wait_for_local_port.bash 8000 40
-          yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
-      - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -422,14 +297,15 @@ jobs:
     env:
       S3BACKEND: file
       S3VAULT: mem
-      MPU_TESTING: "yes"
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
+      MPU_TESTING: true
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
       JOB_NAME: ${{ github.job }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
       - name: Setup CI environment
         uses: ./.github/actions/setup-ci
       - name: Copy KMIP certs
@@ -445,85 +321,7 @@ jobs:
           bash wait_for_local_port.bash 5696 40
           yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
       - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
-        with:
-          method: upload
-          url: https://artifacts.scality.net
-          user: ${{ secrets.ARTIFACTS_USER }}
-          password: ${{ secrets.ARTIFACTS_PASSWORD }}
-          source: /tmp/artifacts
-        if: always()
-
-  ceph-backend-test:
-    runs-on: ubuntu-latest
-    needs: build
-    env:
-      S3BACKEND: mem
-      S3DATA: multiple
-      S3KMS: file
-      CI_CEPH: 'true'
-      MPU_TESTING: "yes"
-      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
-      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
-      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
-      JOB_NAME: ${{ github.job }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Login to GitHub Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
-      - name: Setup CI environment
-        uses: ./.github/actions/setup-ci
-      - uses: ruby/setup-ruby@v1
-        with:
-          ruby-version: '2.5.9'
-      - name: Install Ruby dependencies
-        run: |
-          gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
-      - name: Install Java dependencies
-        run: |
-          sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
-      - name: Setup CI services
-        run: docker compose --profile ceph up -d
-        working-directory: .github/docker
-        env:
-          S3METADATA: mongodb
-      - name: Run Ceph multiple backend tests
-        run: |-
-          set -ex -o pipefail;
-          bash .github/ceph/wait_for_ceph.sh
-          bash wait_for_local_port.bash 27018 40
-          bash wait_for_local_port.bash 8000 40
-          yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
-        env:
-          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
-          S3METADATA: mem
-      - name: Run Java tests
-        run: |-
-          set -ex -o pipefail;
-          mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
-        working-directory: tests/functional/jaws
-      - name: Run Ruby tests
-        run: |-
-          set -ex -o pipefail;
-          rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
-        working-directory: tests/functional/fog
-      - name: Run Javascript AWS SDK tests
-        run: |-
-          set -ex -o pipefail;
-          yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
-          yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
-        env:
-          S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
-          S3BACKEND: file
-          S3VAULT: mem
-          S3METADATA: mongodb
-      - name: Upload logs to artifacts
-        uses: scality/action-artifacts@v4
+        uses: scality/action-artifacts@v3
         with:
           method: upload
           url: https://artifacts.scality.net
@@ -22,14 +22,9 @@ coverage
 # Compiled binary addons (http://nodejs.org/api/addons.html)
 build/Release
 
-# Sphinx build dir
-_build
-
 # Dependency directory
 # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git
 node_modules
-yarn.lock
-.tox
 
 # Junit directory
 junit
Dockerfile (93 changed lines)
@@ -1,60 +1,51 @@
-ARG NODE_VERSION=16.20-bullseye-slim
-
-FROM node:${NODE_VERSION} as builder
-
-WORKDIR /usr/src/app
-
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends \
-    build-essential \
-    ca-certificates \
-    curl \
-    git \
-    gnupg2 \
-    jq \
-    python3 \
-    ssh \
-    wget \
-    libffi-dev \
-    zlib1g-dev \
-    && apt-get clean \
-    && mkdir -p /root/ssh \
-    && ssh-keyscan -H github.com > /root/ssh/known_hosts
-
-ENV PYTHON=python3
-COPY package.json yarn.lock /usr/src/app/
-RUN npm install typescript -g
-RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
-
-################################################################################
-FROM node:${NODE_VERSION}
-
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    jq \
-    && rm -rf /var/lib/apt/lists/*
-
-ENV NO_PROXY localhost,127.0.0.1
-ENV no_proxy localhost,127.0.0.1
-
-EXPOSE 8000
-EXPOSE 8002
-
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    jq \
-    tini \
-    && rm -rf /var/lib/apt/lists/*
+FROM node:16.13.2-slim
+MAINTAINER Giorgio Regni <gr@scality.com>
 
 WORKDIR /usr/src/app
 
 # Keep the .git directory in order to properly report version
-COPY . /usr/src/app
-COPY --from=builder /usr/src/app/node_modules ./node_modules/
+COPY ./package.json yarn.lock ./
 
+ENV PYTHON=python3.9
+ENV PY_VERSION=3.9.7
+
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+    jq \
+    python \
+    git \
+    build-essential \
+    ssh \
+    ca-certificates \
+    wget \
+    libffi-dev \
+    zlib1g-dev \
+    && apt-get clean \
+    && mkdir -p /root/ssh \
+    && ssh-keyscan -H github.com > /root/ssh/known_hosts
+
+RUN cd /tmp \
+    && wget https://www.python.org/ftp/python/$PY_VERSION/Python-$PY_VERSION.tgz \
+    && tar -C /usr/local/bin -xzvf Python-$PY_VERSION.tgz \
+    && cd /usr/local/bin/Python-$PY_VERSION \
+    && ./configure --enable-optimizations \
+    && make \
+    && make altinstall \
+    && rm -rf /tmp/Python-$PY_VERSION.tgz
+
+RUN yarn cache clean \
+    && yarn install --production --ignore-optional --ignore-engines --network-concurrency 1 \
+    && apt-get autoremove --purge -y python git build-essential \
+    && rm -rf /var/lib/apt/lists/* \
+    && yarn cache clean \
+    && rm -rf ~/.node-gyp \
+    && rm -rf /tmp/yarn-*
+
+COPY ./ ./
+
 VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
 
-ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
+ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
 
 CMD [ "yarn", "start" ]
+
+EXPOSE 8000 8002
@@ -1,7 +1,6 @@
 # S3 Healthcheck
 
-Scality S3 exposes a healthcheck route `/live` on the port used
-for the metrics (defaults to port 8002) which returns a
+Scality S3 exposes a healthcheck route `/_/healthcheck` which returns a
 response with HTTP code
 
 - 200 OK
README.md (165 changed lines)
@@ -1,7 +1,12 @@
-# Zenko CloudServer with Vitastor Backend
+# Zenko CloudServer
 
 ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
 
+[![CircleCI][badgepub]](https://circleci.com/gh/scality/S3)
+[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/S3)
+[![Docker Pulls][badgedocker]](https://hub.docker.com/r/scality/s3server/)
+[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
+
 ## Overview
 
 CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -11,71 +16,125 @@ Scality’s Open Source Multi-Cloud Data Controller.
|
||||||
CloudServer provides a single AWS S3 API interface to access multiple
|
CloudServer provides a single AWS S3 API interface to access multiple
|
||||||
backend data storage both on-premise or public in the cloud.
|
backend data storage both on-premise or public in the cloud.
|
||||||
|
|
||||||
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
|
CloudServer is useful for Developers, either to run as part of a
|
||||||
backend support.
|
continous integration test environment to emulate the AWS S3 service locally
|
||||||
|
or as an abstraction layer to develop object storage enabled
|
||||||
|
application on the go.
|
||||||
|
|
||||||
## Quick Start with Vitastor
|
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
|
||||||
|
|
||||||
Vitastor Backend is in experimental status, however you can already try to
|
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
|
||||||
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
|
|
||||||
it works too 😊.
|
|
||||||
|
|
||||||
Installation instructions:
|
## Docker
|
||||||
|
|
||||||
### Install Vitastor
|
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/scality/s3server/)
|
||||||
|
|
||||||
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
|
## Contributing
|
||||||
|
|
||||||
### Install Zenko with Vitastor Backend
|
In order to contribute, please follow the
|
||||||
|
[Contributing Guidelines](
|
||||||
|
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
|
||||||
|
|
||||||
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
|
## Installation
|
||||||
- Install dependencies: `npm install --omit dev` or just `npm install`
|
|
||||||
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
|
|
||||||
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
|
|
||||||
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
|
|
||||||
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
|
|
||||||
|
|
||||||
### Install and Configure MongoDB
|
### Dependencies
|
||||||
|
|
||||||
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
|
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
|
||||||
|
. Up-to-date versions can be found at
|
||||||
|
[Nodesource](https://github.com/nodesource/distributions).
|
||||||
|
|
||||||
### Setup Zenko
|
### Clone source code
|
||||||
|
|
||||||
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
|
```shell
|
||||||
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
|
git clone https://github.com/scality/S3.git
|
||||||
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
|
|
||||||
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
|
|
||||||
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
|
|
||||||
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
|
|
||||||
access keys, but it's not published, so let's use a file for now.
|
|
||||||
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
|
|
||||||
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
|
|
||||||
in this file.
|
|
||||||
|
|
||||||
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
|
|
||||||
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
|
|
||||||
|
|
||||||
### Start Zenko
|
|
||||||
|
|
||||||
Start the S3 server with: `node index.js`
|
|
||||||
|
|
||||||
If you use default settings, Zenko CloudServer starts on port 8000.
|
|
||||||
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
|
|
||||||
|
|
||||||
Now you can access your S3 with `s3cmd` or `geesefs`:
|
|
||||||
|
|
||||||
```
|
|
||||||
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
|
|
||||||
```
|
```
|
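Building on the removed quick-start above, a follow-up upload with the same credentials might look like this (the object and file names are illustrative, and depending on your s3cmd version the `--host-bucket` flag may also be needed for path-style access):

```shell
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 \
    --host=http://localhost:8000 --host-bucket=localhost:8000 \
    put ./hello.txt s3://testbucket/hello.txt
```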
-```
-AWS_ACCESS_KEY_ID=accessKey1 \
-AWS_SECRET_ACCESS_KEY=verySecretKey1 \
-geesefs --endpoint http://localhost:8000 testbucket mountdir
-```
+### Install js dependencies

+Go to the ./S3 folder,

+```shell
+yarn install --frozen-lockfile
+```

-# Author & License
+If you get an error regarding installation of the diskUsage module,
+please install g++.

-- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
-- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
-  (a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
+If you get an error regarding level-down bindings, try clearing your yarn cache:

+```shell
+yarn cache clean
+```

+## Run it with a file backend

+```shell
+yarn start
+```

+This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
+9991 are also open locally for internal transfer of metadata and data,
+respectively.

+The default access key is accessKey1 with
+a secret key of verySecretKey1.

+By default the metadata files will be saved in the
+localMetadata directory and the data files will be saved
+in the localData directory within the ./S3 directory on your
+machine. These directories have been pre-created within the
+repository. If you would like to save the data or metadata in
+different locations of your choice, you must specify them with absolute paths.
+So, when starting the server:

+```shell
+mkdir -m 700 $(pwd)/myFavoriteDataPath
+mkdir -m 700 $(pwd)/myFavoriteMetadataPath
+export S3DATAPATH="$(pwd)/myFavoriteDataPath"
+export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
+yarn start
+```

+## Run it with multiple data backends

+```shell
+export S3DATA='multiple'
+yarn start
+```

+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.

+With multiple backends, you have the ability to
+choose where each object will be saved by setting
+the following header with a locationConstraint on
+a PUT request:

+```shell
+'x-amz-meta-scal-location-constraint':'myLocationConstraint'
+```

+If no header is sent with a PUT object request, the
+location constraint of the bucket will determine
+where the data is saved. If the bucket has no location
+constraint, the endpoint of the PUT request will be
+used to determine location.

+See the Configuration section below to learn how to set
+location constraints.

+## Run it with an in-memory backend

+```shell
+yarn run mem_backend
+```

+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.

+[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
+[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
+[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
+[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
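As a sketch of the location-constraint header shown above (bucket, key, and constraint name are illustrative; the AWS CLI prefixes `--metadata` keys with `x-amz-meta-`, so this sends the `x-amz-meta-scal-location-constraint` header):

```shell
aws --endpoint-url http://localhost:8000 s3api put-object \
    --bucket testbucket --key myobject --body ./hello.txt \
    --metadata scal-location-constraint=myLocationConstraint
```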
@@ -1,2 +1,2 @@
 ---
-theme: jekyll-theme-modernist
+theme: jekyll-theme-minimal
@@ -1,56 +0,0 @@
-{
-    "accounts": [{
-        "name": "Bart",
-        "email": "sampleaccount1@sampling.com",
-        "arn": "arn:aws:iam::123456789012:root",
-        "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
-        "shortid": "123456789012",
-        "keys": [{
-            "access": "accessKey1",
-            "secret": "verySecretKey1"
-        }]
-    }, {
-        "name": "Lisa",
-        "email": "sampleaccount2@sampling.com",
-        "arn": "arn:aws:iam::123456789013:root",
-        "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf",
-        "shortid": "123456789013",
-        "keys": [{
-            "access": "accessKey2",
-            "secret": "verySecretKey2"
-        }]
-    },
-    {
-        "name": "Clueso",
-        "email": "inspector@clueso.info",
-        "arn": "arn:aws:iam::123456789014:root",
-        "canonicalID": "http://acs.zenko.io/accounts/service/clueso",
-        "shortid": "123456789014",
-        "keys": [{
-            "access": "cluesoKey1",
-            "secret": "cluesoSecretKey1"
-        }]
-    },
-    {
-        "name": "Replication",
-        "email": "inspector@replication.info",
-        "arn": "arn:aws:iam::123456789015:root",
-        "canonicalID": "http://acs.zenko.io/accounts/service/replication",
-        "shortid": "123456789015",
-        "keys": [{
-            "access": "replicationKey1",
-            "secret": "replicationSecretKey1"
-        }]
-    },
-    {
-        "name": "Lifecycle",
-        "email": "inspector@lifecycle.info",
-        "arn": "arn:aws:iam::123456789016:root",
-        "canonicalID": "http://acs.zenko.io/accounts/service/lifecycle",
-        "shortid": "123456789016",
-        "keys": [{
-            "access": "lifecycleKey1",
-            "secret": "lifecycleSecretKey1"
-        }]
-    }]
-}
@@ -1,4 +0,0 @@
-#!/usr/bin/env node
-'use strict'; // eslint-disable-line strict
-
-require('../lib/nfs/utilities.js').createBucketWithNFSEnabled();
@@ -1,108 +0,0 @@
-#!/bin/sh
-// 2>/dev/null ; exec "$(which nodejs 2>/dev/null || which node)" "$0" "$@"
-'use strict'; // eslint-disable-line strict
-
-const { auth } = require('arsenal');
-const commander = require('commander');
-
-const http = require('http');
-const https = require('https');
-const logger = require('../lib/utilities/logger');
-
-function _performSearch(host,
-    port,
-    bucketName,
-    query,
-    listVersions,
-    accessKey,
-    secretKey,
-    sessionToken,
-    verbose, ssl) {
-    const escapedSearch = encodeURIComponent(query);
-    const options = {
-        host,
-        port,
-        method: 'GET',
-        path: `/${bucketName}/?search=${escapedSearch}${listVersions ? '&&versions' : ''}`,
-        headers: {
-            'Content-Length': 0,
-        },
-        rejectUnauthorized: false,
-        versions: '',
-    };
-    if (sessionToken) {
-        options.headers['x-amz-security-token'] = sessionToken;
-    }
-    const transport = ssl ? https : http;
-    const request = transport.request(options, response => {
-        if (verbose) {
-            logger.info('response status code', {
-                statusCode: response.statusCode,
-            });
-            logger.info('response headers', { headers: response.headers });
-        }
-        const body = [];
-        response.setEncoding('utf8');
-        response.on('data', chunk => body.push(chunk));
-        response.on('end', () => {
-            if (response.statusCode >= 200 && response.statusCode < 300) {
-                logger.info('Success');
-                process.stdout.write(body.join(''));
-                process.exit(0);
-            } else {
-                logger.error('request failed with HTTP Status ', {
-                    statusCode: response.statusCode,
-                    body: body.join(''),
-                });
-                process.exit(1);
-            }
-        });
-    });
-    // generateV4Headers exepects request object with path that does not
-    // include query
-    request.path = `/${bucketName}`;
-    const requestData = listVersions ? { search: query, versions: '' } : { search: query };
-    auth.client.generateV4Headers(request, requestData, accessKey, secretKey, 's3');
-    request.path = `/${bucketName}?search=${escapedSearch}${listVersions ? '&&versions' : ''}`;
-    if (verbose) {
-        logger.info('request headers', { headers: request._headers });
-    }
-    request.end();
-}
-
-/**
- * This function is used as a binary to send a request to S3 to perform a
- * search on the objects in a bucket
- *
- * @return {undefined}
- */
-function searchBucket() {
-    // TODO: Include other bucket listing possible query params?
-    commander
-        .version('0.0.1')
-        .option('-a, --access-key <accessKey>', 'Access key id')
-        .option('-k, --secret-key <secretKey>', 'Secret access key')
-        .option('-t, --session-token <sessionToken>', 'Session token')
-        .option('-b, --bucket <bucket>', 'Name of the bucket')
-        .option('-q, --query <query>', 'Search query')
-        .option('-h, --host <host>', 'Host of the server')
-        .option('-p, --port <port>', 'Port of the server')
-        .option('-s', '--ssl', 'Enable ssl')
-        .option('-l, --list-versions', 'List all versions of the objects that meet the search query, ' +
-            'otherwise only list the latest version')
-        .option('-v, --verbose')
-        .parse(process.argv);
-    const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } =
-        commander;
-
-    if (!host || !port || !accessKey || !secretKey || !bucket || !query) {
-        logger.error('missing parameter');
-        commander.outputHelp();
-        process.exit(1);
-    }
-
-    _performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose,
-        ssl);
-}
-
-searchBucket();
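For context, the deleted search tool was invoked roughly as follows, per its commander options above (the script path and the query syntax are assumptions, not shown in the diff):

```shell
# script path and metadata-search query syntax are illustrative
node bin/search_bucket -a accessKey1 -k verySecretKey1 \
    -b testbucket -q "x-amz-meta-color=blue" -h localhost -p 8000
```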
@@ -0,0 +1,45 @@
+---
+general:
+  branches:
+    ignore:
+      - /^ultron\/.*/    # Ignore ultron/* branches
+  artifacts:
+    - coverage/
+
+machine:
+  node:
+    version: 6.13.1
+  services:
+    - redis
+    - docker
+  ruby:
+    version: "2.4.1"
+  environment:
+    CXX: g++-4.9
+    ENABLE_LOCAL_CACHE: true
+    REPORT_TOKEN: report-token-1
+  hosts:
+    bucketwebsitetester.s3-website-us-east-1.amazonaws.com: 127.0.0.1
+
+dependencies:
+  override:
+    - rm -rf node_modules
+    - yarn install --frozen-lockfile
+  post:
+    - sudo pip install flake8 yamllint
+    - sudo pip install s3cmd==1.6.1
+    # fog and ruby testing dependencies
+    - gem install fog-aws -v 1.3.0
+    - gem install mime-types -v 3.1
+    - gem install rspec -v 3.5
+    - gem install json
+    # java sdk dependencies
+    - sudo apt-get install -y -q default-jdk
+
+test:
+  override:
+    - docker run --name squid-proxy -d --net=host
+        --publish 3128:3128 sameersbn/squid:3.3.8-23
+    - bash tests.bash:
+        parallel: true
@@ -0,0 +1,23 @@
+{
+    "accounts": [{
+        "name": "Bart",
+        "email": "sampleaccount1@sampling.com",
+        "arn": "arn:aws:iam::123456789012:root",
+        "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
+        "shortid": "123456789012",
+        "keys": [{
+            "access": "accessKey1",
+            "secret": "verySecretKey1"
+        }]
+    }, {
+        "name": "Lisa",
+        "email": "sampleaccount2@sampling.com",
+        "arn": "arn:aws:iam::123456789013:root",
+        "canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf",
+        "shortid": "123456789013",
+        "keys": [{
+            "access": "accessKey2",
+            "secret": "verySecretKey2"
+        }]
+    }]
+}
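With the sample account restored above, an AWS CLI profile could be wired up along these lines (the profile name is illustrative):

```shell
aws configure set aws_access_key_id accessKey1 --profile cloudserver
aws configure set aws_secret_access_key verySecretKey1 --profile cloudserver
aws --profile cloudserver --endpoint-url http://localhost:8000 s3 ls
```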
@@ -4,16 +4,13 @@
     "metricsPort": 8002,
     "metricsListenOn": [],
     "replicationGroupId": "RG001",
-    "workers": 4,
     "restEndpoints": {
         "localhost": "us-east-1",
         "127.0.0.1": "us-east-1",
         "cloudserver-front": "us-east-1",
         "s3.docker.test": "us-east-1",
         "127.0.0.2": "us-east-1",
-        "s3.amazonaws.com": "us-east-1",
-        "zenko-cloudserver-replicator": "us-east-1",
-        "lb": "us-east-1"
+        "s3.amazonaws.com": "us-east-1"
     },
     "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
         "s3-website.us-east-2.amazonaws.com",

@@ -28,8 +25,7 @@
         "s3-website-eu-west-1.amazonaws.com",
         "s3-website-sa-east-1.amazonaws.com",
         "s3-website.localhost",
-        "s3-website.scality.test",
-        "zenkoazuretest.blob.core.windows.net"],
+        "s3-website.scality.test"],
     "replicationEndpoints": [{
         "site": "zenko",
         "servers": ["127.0.0.1:8000"],

@@ -38,14 +34,6 @@
         "site": "us-east-2",
         "type": "aws_s3"
     }],
-    "backbeat": {
-        "host": "localhost",
-        "port": 8900
-    },
-    "workflowEngineOperator": {
-        "host": "localhost",
-        "port": 3001
-    },
     "cdmi": {
         "host": "localhost",
         "port": 81,

@@ -59,7 +47,7 @@
         "host": "localhost",
         "port": 8500
     },
-    "clusters": 1,
+    "clusters": 10,
     "log": {
         "logLevel": "info",
         "dumpLevel": "error"

@@ -75,10 +63,6 @@
         "host": "localhost",
         "port": 9991
     },
-    "pfsClient": {
-        "host": "localhost",
-        "port": 9992
-    },
     "metadataDaemon": {
         "bindAddress": "localhost",
         "port": 9990

@@ -87,47 +71,10 @@
         "bindAddress": "localhost",
         "port": 9991
     },
-    "pfsDaemon": {
-        "bindAddress": "localhost",
-        "port": 9992
-    },
     "recordLog": {
-        "enabled": true,
+        "enabled": false,
         "recordLogName": "s3-recordlog"
     },
-    "mongodb": {
-        "replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020",
-        "writeConcern": "majority",
-        "replicaSet": "rs0",
-        "readPreference": "primary",
-        "database": "metadata"
-    },
-    "authdata": "authdata.json",
-    "backends": {
-        "auth": "file",
-        "data": "file",
-        "metadata": "mongodb",
-        "kms": "file",
-        "quota": "none"
-    },
-    "externalBackends": {
-        "aws_s3": {
-            "httpAgent": {
-                "keepAlive": false,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        },
-        "gcp": {
-            "httpAgent": {
-                "keepAlive": true,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        }
-    },
     "requests": {
         "viaProxy": false,
         "trustedProxyCIDRs": [],
@@ -1,71 +0,0 @@
-{
-    "port": 8000,
-    "listenOn": [],
-    "metricsPort": 8002,
-    "metricsListenOn": [],
-    "replicationGroupId": "RG001",
-    "restEndpoints": {
-        "localhost": "STANDARD",
-        "127.0.0.1": "STANDARD",
-        "yourhostname.ru": "STANDARD"
-    },
-    "websiteEndpoints": [
-        "static.yourhostname.ru"
-    ],
-    "replicationEndpoints": [ {
-        "site": "zenko",
-        "servers": ["127.0.0.1:8000"],
-        "default": true
-    } ],
-    "log": {
-        "logLevel": "info",
-        "dumpLevel": "error"
-    },
-    "healthChecks": {
-        "allowFrom": ["127.0.0.1/8", "::1"]
-    },
-    "backends": {
-        "metadata": "mongodb"
-    },
-    "mongodb": {
-        "replicaSetHosts": "127.0.0.1:27017",
-        "writeConcern": "majority",
-        "replicaSet": "rs0",
-        "readPreference": "primary",
-        "database": "s3",
-        "authCredentials": {
-            "username": "s3",
-            "password": ""
-        }
-    },
-    "externalBackends": {
-        "aws_s3": {
-            "httpAgent": {
-                "keepAlive": false,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        },
-        "gcp": {
-            "httpAgent": {
-                "keepAlive": true,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        }
-    },
-    "requests": {
-        "viaProxy": false,
-        "trustedProxyCIDRs": [],
-        "extractClientIPFromHeader": ""
-    },
-    "bucketNotificationDestinations": [
-        {
-            "resource": "target1",
-            "type": "dummy",
-            "host": "localhost:6000"
-        }
-    ]
-}
61 constants.js

@@ -39,8 +39,6 @@ const constants = {
     // once the multipart upload is complete.
     mpuBucketPrefix: 'mpuShadowBucket',
     blacklistedPrefixes: { bucket: [], object: [] },
-    // GCP Object Tagging Prefix
-    gcpTaggingPrefix: 'aws-tag-',
     // PublicId is used as the canonicalID for a request that contains
     // no authentication information. Requestor can access
    // only public resources

@@ -66,20 +64,14 @@ const constants = {
     // http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
     minimumAllowedPartSize: 5242880,

-    // AWS sets a maximum total parts limit
-    // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
-    maximumAllowedPartCount: 10000,
-
-    gcpMaximumAllowedPartCount: 1024,
-
     // Max size on put part or copy part is 5GB. For functional
     // testing use 110 MB as max
     maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 110100480 :
         5368709120,

-    // Max size allowed in a single put object request is 5GB
-    // https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
-    maximumAllowedUploadSize: 5368709120,
+    // AWS sets a maximum total parts limit
+    // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
+    maximumAllowedPartCount: 10000,

     // AWS states max size for user-defined metadata (x-amz-meta- headers) is
     // 2 KB: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
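As a quick sanity check of the multipart limits above (values copied from the diff; the product is just the theoretical ceiling these two constants imply, not an AWS-documented cap):

```shell
echo $(( 5242880 ))              # minimum part size: 5 MiB
echo $(( 5368709120 ))           # maximum part size: 5 GiB
echo $(( 10000 * 5368709120 ))   # implied ceiling: ~48.8 TiB across 10000 parts
```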
@@ -104,6 +96,7 @@ const constants = {
     'policyStatus',
     'publicAccessBlock',
     'requestPayment',
+    'restore',
     'torrent',
     ],

@@ -116,21 +109,11 @@ const constants = {
     ],

     // user metadata header to set object locationConstraint
-    objectLocationConstraintHeader: 'x-amz-storage-class',
+    objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
     lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
     legacyLocations: ['sproxyd', 'legacy'],
-    // declare here all existing service accounts and their properties
-    // (if any, otherwise an empty object)
-    serviceAccountProperties: {
-        replication: {},
-        lifecycle: {},
-        gc: {},
-        'md-ingestion': {
-            canReplicate: true,
-        },
-    },
     /* eslint-disable camelcase */
-    externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
+    externalBackends: { aws_s3: true, azure: true, gcp: true },
     // some of the available data backends (if called directly rather
     // than through the multiple backend gateway) need a key provided
     // as a string as first parameter of the get/delete methods.

@@ -139,19 +122,13 @@ const constants = {
     // for external backends, don't call unless at least 1 minute
     // (60,000 milliseconds) since last call
     externalBackendHealthCheckInterval: 60000,
-    versioningNotImplBackends: { azure: true, gcp: true },
-    mpuMDStoredExternallyBackend: { aws_s3: true, gcp: true },
-    skipBatchDeleteBackends: { azure: true, gcp: true },
-    s3HandledBackends: { azure: true, gcp: true },
-    hasCopyPartBackends: { aws_s3: true, gcp: true },
+    versioningNotImplBackends: { azure: true },
+    mpuMDStoredExternallyBackend: { aws_s3: true },
     /* eslint-enable camelcase */
     mpuMDStoredOnS3Backend: { azure: true },
     azureAccountNameRegex: /^[a-z0-9]{3,24}$/,
     base64Regex: new RegExp('^(?:[A-Za-z0-9+/]{4})*' +
         '(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$'),
-    productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
-    // location constraint delimiter
-    zenkoSeparator: ':',
     // user metadata applied on zenko objects
     zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
     bucketOwnerActions: [

@@ -204,20 +181,22 @@ const constants = {
     'objectPutRetention',
     ],
     allowedUtapiEventFilterStates: ['allow', 'deny'],
-    allowedRestoreObjectRequestTierValues: ['Standard'],
+    // The AWS assumed Role resource type
+    assumedRoleArnResourceType: 'assumed-role',
+    // Session name of the backbeat lifecycle assumed role session.
+    backbeatLifecycleSessionName: 'backbeat-lifecycle',
+    multiObjectDeleteConcurrency: 50,
     lifecycleListing: {
         CURRENT_TYPE: 'current',
         NON_CURRENT_TYPE: 'noncurrent',
         ORPHAN_DM_TYPE: 'orphan',
     },
-    multiObjectDeleteConcurrency: 50,
     maxScannedLifecycleListingEntries: 10000,
     overheadField: [
         'content-length',
         'owner-id',
         'versionId',
         'isNull',
-        'isDeleteMarker',
     ],
     unsupportedSignatureChecksums: new Set([
         'STREAMING-UNSIGNED-PAYLOAD-TRAILER',

@@ -229,20 +208,6 @@ const constants = {
     'UNSIGNED-PAYLOAD',
     'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
     ]),
-    ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
-    ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
-    // The AWS assumed Role resource type
-    assumedRoleArnResourceType: 'assumed-role',
-    // Session name of the backbeat lifecycle assumed role session.
-    backbeatLifecycleSessionName: 'backbeat-lifecycle',
-    actionsToConsiderAsObjectPut: [
-        'initiateMultipartUpload',
-        'objectPutPart',
-        'completeMultipartUpload',
-    ],
-    // if requester is not bucket owner, bucket policy actions should be denied with
-    // MethodNotAllowed error
-    onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
 };

 module.exports = constants;
@@ -17,17 +17,13 @@ process.on('uncaughtException', err => {
 if (config.backends.data === 'file' ||
     (config.backends.data === 'multiple' &&
         config.backends.metadata !== 'scality')) {
-    const dataServer = new arsenal.network.rest.RESTServer({
-        bindAddress: config.dataDaemon.bindAddress,
-        port: config.dataDaemon.port,
-        dataStore: new arsenal.storage.data.file.DataFileStore({
-            dataPath: config.dataDaemon.dataPath,
-            log: config.log,
-            noSync: config.dataDaemon.noSync,
-            noCache: config.dataDaemon.noCache,
-        }),
-        log: config.log,
-    });
+    const dataServer = new arsenal.network.rest.RESTServer(
+        { bindAddress: config.dataDaemon.bindAddress,
+          port: config.dataDaemon.port,
+          dataStore: new arsenal.storage.data.file.DataFileStore(
+              { dataPath: config.dataDaemon.dataPath,
+                log: config.log }),
+          log: config.log });
     dataServer.setup(err => {
         if (err) {
             logger.error('Error initializing REST data server',
@@ -6,15 +6,14 @@ set -e
 # modifying config.json
 JQ_FILTERS_CONFIG="."

-# ENDPOINT var can accept comma separated values
-# for multiple endpoint locations
 if [[ "$ENDPOINT" ]]; then
-    IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
-    for host in "${HOST_NAMES[@]}"; do
-        JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
-    done
-    echo "Host name has been modified to ${HOST_NAMES[@]}"
-    echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with ${HOST_NAMES[@]}"
+    HOST_NAME="$ENDPOINT"
+fi
+
+if [[ "$HOST_NAME" ]]; then
+    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$HOST_NAME\"]=\"us-east-1\""
+    echo "Host name has been modified to $HOST_NAME"
+    echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with $HOST_NAME"
 fi

 if [[ "$LOG_LEVEL" ]]; then
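On the removed side, `ENDPOINT` accepted comma-separated values; a standalone sketch of that expansion (hostnames are illustrative):

```shell
ENDPOINT="s3.example.com,s3.internal.test"
IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
for host in "${HOST_NAMES[@]}"; do
    # one jq restEndpoints filter is appended per hostname
    echo ".restEndpoints[\"$host\"]=\"us-east-1\""
done
```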
@@ -26,7 +25,7 @@ if [[ "$LOG_LEVEL" ]]; then
     fi
 fi

-if [[ "$SSL" && "$HOST_NAMES" ]]; then
+if [[ "$SSL" && "$HOST_NAME" ]]; then
     # This condition makes sure that the certificates are not generated twice. (for docker restart)
     if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
         # Compute config for utapi tests

@@ -37,15 +36,15 @@ prompt = no
 req_extensions = s3_req

 [req_distinguished_name]
-CN = ${HOST_NAMES[0]}
+CN = ${HOST_NAME}

 [s3_req]
 subjectAltName = @alt_names
 extendedKeyUsage = serverAuth, clientAuth

 [alt_names]
-DNS.1 = *.${HOST_NAMES[0]}
-DNS.2 = ${HOST_NAMES[0]}
+DNS.1 = *.${HOST_NAME}
+DNS.2 = ${HOST_NAME}

 EOF

@@ -71,14 +70,9 @@ fi
 if [[ "$LISTEN_ADDR" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsDaemon.bindAddress=\"$LISTEN_ADDR\""
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .listenOn=[\"$LISTEN_ADDR:8000\"]"
 fi

-if [[ "$REPLICATION_GROUP_ID" ]] ; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .replicationGroupId=\"$REPLICATION_GROUP_ID\""
-fi
-
 if [[ "$DATA_HOST" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataClient.host=\"$DATA_HOST\""
 fi

@@ -87,22 +81,6 @@ if [[ "$METADATA_HOST" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
 fi

-if [[ "$PFSD_HOST" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsClient.host=\"$PFSD_HOST\""
-fi
-
-if [[ "$MONGODB_HOSTS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
-fi
-
-if [[ "$MONGODB_RS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSet=\"$MONGODB_RS\""
-fi
-
-if [[ "$MONGODB_DATABASE" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.database=\"$MONGODB_DATABASE\""
-fi
-
 if [ -z "$REDIS_HA_NAME" ]; then
     REDIS_HA_NAME='mymaster'
 fi

@@ -135,70 +113,14 @@ if [[ "$RECORDLOG_ENABLED" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .recordLog.enabled=true"
 fi

-if [[ "$STORAGE_LIMIT_ENABLED" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.metrics[.utapi.metrics | length]=\"location\""
-fi
-
-if [[ "$CRR_METRICS_HOST" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.host=\"$CRR_METRICS_HOST\""
-fi
-
-if [[ "$CRR_METRICS_PORT" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
-fi
-
-if [[ "$WE_OPERATOR_HOST" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.host=\"$WE_OPERATOR_HOST\""
-fi
-
-if [[ "$WE_OPERATOR_PORT" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.port=$WE_OPERATOR_PORT"
-fi
-
-if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
-fi
-
-# external backends http(s) agent config
-
-# AWS
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAlive=$AWS_S3_HTTPAGENT_KEEPALIVE"
-fi
-
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAliveMsecs=$AWS_S3_HTTPAGENT_KEEPALIVE_MS"
-fi
-
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
-fi
-
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxFreeSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
-fi
-
-#GCP
-if [[ "$GCP_HTTPAGENT_KEEPALIVE" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAlive=$GCP_HTTPAGENT_KEEPALIVE"
-fi
-
-if [[ "$GCP_HTTPAGENT_KEEPALIVE_MS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAliveMsecs=$GCP_HTTPAGENT_KEEPALIVE_MS"
-fi
-
-if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
-fi
-
-if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
-fi
-
 if [[ -n "$BUCKET_DENY_FILTER" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
 fi

+if [[ "$BUCKETD_BOOTSTRAP" ]]; then
+    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .bucketd.bootstrap=[\"$BUCKETD_BOOTSTRAP\""]
+fi
+
 if [[ "$TESTING_MODE" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
 fi

@@ -208,10 +130,6 @@ if [[ $JQ_FILTERS_CONFIG != "." ]]; then
     mv config.json.tmp config.json
 fi

-if test -v INITIAL_INSTANCE_ID && test -v S3METADATAPATH && ! test -f ${S3METADATAPATH}/uuid ; then
-    echo -n ${INITIAL_INSTANCE_ID} > ${S3METADATAPATH}/uuid
-fi
-
 # s3 secret credentials for Zenko
 if [ -r /run/secrets/s3-credentials ] ; then
     . /run/secrets/s3-credentials
@@ -27,7 +27,7 @@ including null versions and delete markers, described in the above
 links.

 Implementation of Bucket Versioning in Zenko CloudServer
---------------------------------------------------------
+-----------------------------------------

 Overview of Metadata and API Component Roles
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -66,7 +66,7 @@ The second section, `"Implementation of Bucket Versioning in
 API" <#implementation-of-bucket-versioning-in-api>`__, describes the way
 the metadata options are used in the API within S3 actions to create new
 versions, update their metadata, and delete them. The management of null
-versions and creation of delete markers is also described in this
+versions and creation of delete markers are also described in this
 section.

 Implementation of Bucket Versioning in Metadata

@@ -179,13 +179,12 @@ PUT
   the master version with this version.
 - ``versionId: <versionId>`` create or update a specific version (for updating
   version's ACL or tags, or remote updates in geo-replication)
-
-  * if the version identified by ``versionId`` happens to be the latest
+  - if the version identified by ``versionId`` happens to be the latest
     version, the master version will be updated as well
-  * if the master version is not as recent as the version identified by
+  - if the master version is not as recent as the version identified by
    ``versionId``, as may happen with cross-region replication, the master
    will be updated as well
-  * note that with ``versionId`` set to an empty string ``''``, it will
+  - note that with ``versionId`` set to an empty string ``''``, it will
    overwrite the master version only (same as no options, but the master
    version will have a ``versionId`` property set in its metadata like
    any other version). The ``versionId`` will never be exposed to an

@@ -209,13 +208,10 @@ A deletion targeting the latest version of an object has to:
 - delete the specified version identified by ``versionId``
 - replace the master version with a version that is a placeholder for
   deletion
-
   - this version contains a special keyword, 'isPHD', to indicate the
     master version was deleted and needs to be updated
-
 - initiate a repair operation to update the value of the master
   version:
-
   - involves listing the versions of the object and get the latest
     version to replace the placeholder delete version
   - if no more versions exist, metadata deletes the master version,

@@ -759,16 +755,16 @@ command in the Zenko CloudServer directory:
 This will open two ports:

 - one is based on socket.io and is used for metadata transfers (9990 by
   default)
 - the other is a REST interface used for data transfers (9991 by
   default)

 Then, one or more instances of Zenko CloudServer without the dmd can be started
 elsewhere with:

-.. code:: sh
+::

     yarn run start_s3server

@@ -796,10 +792,10 @@ access:
 To run a remote dmd, you have to do the following:

 - change both ``"host"`` attributes to the IP or host name where the
   dmd is run.
 - Modify the ``"bindAddress"`` attributes in ``"metadataDaemon"`` and
   ``"dataDaemon"`` sections where the dmd is run to accept remote
   connections (e.g. ``"::"``)

@@ -835,13 +831,13 @@ and ``createReadStream``. They more or less map the parameters accepted
 by the corresponding calls in the LevelUp implementation of LevelDB.
 They differ in the following:

 - The ``sync`` option is ignored (under the hood, puts are gathered
   into batches which have their ``sync`` property enforced when they
   are committed to the storage)
 - Some additional versioning-specific options are supported
 - ``createReadStream`` becomes asynchronous, takes an additional
   callback argument and returns the stream in the second callback
   parameter

@@ -851,10 +847,10 @@ with ``DEBUG='socket.io*'`` environment variable set.
 One parameter controls the timeout value after which RPC commands sent
 end with a timeout error, it can be changed either:

 - via the ``DEFAULT_CALL_TIMEOUT_MS`` option in
   ``lib/network/rpc/rpc.js``
 - or in the constructor call of the ``MetadataFileClient`` object (in
   ``lib/metadata/bucketfile/backend.js`` as ``callTimeoutMs``.

 Default value is 30000.
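Both sides of this hunk mention socket.io-level debugging; assuming the yarn script named in this document, enabling it looks like:

.. code:: sh

    DEBUG='socket.io*' yarn run start_s3server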
@@ -868,10 +864,10 @@ can tune the behavior (for better throughput or getting it more robust
 on weak networks), they have to be set in ``mdserver.js`` file directly,
 as there is no support in ``config.json`` for now for those options:

 - ``streamMaxPendingAck``: max number of pending ack events not yet
   received (default is 5)
 - ``streamAckTimeoutMs``: timeout for receiving an ack after an output
   stream packet is sent to the client (default is 5000)

 Data exchange through the REST data port

@@ -922,17 +918,17 @@ Listing Types
 We use three different types of metadata listing for various operations.
 Here are the scenarios we use each for:

 - 'Delimiter' - when no versions are possible in the bucket since it is
   an internally-used only bucket which is not exposed to a user.
   Namely,

   1. to list objects in the "user's bucket" to respond to a GET SERVICE
      request and
   2. to do internal listings on an MPU shadow bucket to complete multipart
      upload operations.

 - 'DelimiterVersion' - to list all versions in a bucket
 - 'DelimiterMaster' - to list just the master versions of objects in a
   bucket

 Algorithms
@@ -178,7 +178,7 @@ Ruby
 ~~~~

 `AWS SDK for Ruby - Version 2 <http://docs.aws.amazon.com/sdkforruby/api/>`__
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 .. code:: ruby

@@ -239,7 +239,6 @@ Python
 Client integration

 .. code:: python
-
     import boto3

     client = boto3.client(

@@ -254,7 +253,6 @@ Client integration
 Full integration (with object mapping)

 .. code:: python
-
     import os

     from botocore.utils import fix_s3_host

@@ -295,51 +293,3 @@ Should force path-style requests even though v3 advertises it does by default.
     $client->createBucket(array(
         'Bucket' => 'bucketphp',
     ));
-
-Go
-~~
-
-`AWS Go SDK <https://github.com/aws/aws-sdk-go>`__
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. code:: go
-
-    package main
-
-    import (
-        "context"
-        "fmt"
-        "log"
-        "os"
-        "time"
-
-        "github.com/aws/aws-sdk-go/aws"
-        "github.com/aws/aws-sdk-go/aws/endpoints"
-        "github.com/aws/aws-sdk-go/aws/session"
-        "github.com/aws/aws-sdk-go/service/s3"
-    )
-
-    func main() {
-        os.Setenv("AWS_ACCESS_KEY_ID", "accessKey1")
-        os.Setenv("AWS_SECRET_ACCESS_KEY", "verySecretKey1")
-        endpoint := "http://localhost:8000"
-        timeout := time.Duration(10) * time.Second
-        sess := session.Must(session.NewSession())
-
-        // Create a context with a timeout that will abort the upload if it takes
-        // more than the passed in timeout.
-        ctx, cancel := context.WithTimeout(context.Background(), timeout)
-        defer cancel()
-
-        svc := s3.New(sess, &aws.Config{
-            Region:   aws.String(endpoints.UsEast1RegionID),
-            Endpoint: &endpoint,
-        })
-
-        out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
-        if err != nil {
-            log.Fatal(err)
-        } else {
-            fmt.Println(out)
-        }
-    }
@@ -14,7 +14,7 @@ Got an idea? Get started!
 In order to contribute, please follow the `Contributing
 Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__.
 If anything is unclear to you, reach out to us on
-`forum <https://forum.zenko.io/>`__ or via a GitHub issue.
+`slack <https://zenko-io.slack.com/>`__ or via a GitHub issue.

 Don't write code? There are other ways to help!
 -----------------------------------------------
376
docs/DOCKER.rst
376
docs/DOCKER.rst
|
@ -1,7 +1,11 @@
|
||||||
Docker
|
Docker
|
||||||
======
|
======
|
||||||
|
|
||||||
.. _environment-variables:
|
- `Environment Variables <#environment-variables>`__
|
||||||
|
- `Tunables and setup tips <#tunables-and-setup-tips>`__
|
||||||
|
- `Examples for continuous integration with
|
||||||
|
Docker <#continuous-integration-with-docker-hosted CloudServer>`__
|
||||||
|
- `Examples for going in production with Docker <#in-production-with-docker-hosted CloudServer>`__
|
||||||
|
|
||||||
Environment Variables
|
Environment Variables
|
||||||
---------------------
|
---------------------
|
||||||
|
@ -11,27 +15,25 @@ S3DATA

S3DATA=multiple
^^^^^^^^^^^^^^^

This variable enables running CloudServer with multiple data backends, defined
as regions.

For multiple data backends, a custom locationConfig.json file is required.
This file enables you to set custom regions. You must provide associated
rest_endpoints for each custom region in config.json.

`Learn more about multiple-backend configurations <GETTING_STARTED.html#location-configuration>`__

If you are using Scality RING endpoints, refer to your customer documentation.

Running CloudServer with an AWS S3-Hosted Backend
"""""""""""""""""""""""""""""""""""""""""""""""""

To run CloudServer with an S3 AWS backend, add a new section to the
``locationConfig.json`` file with the ``aws_s3`` location type:

.. code:: json

    (...)
    "awsbackend": {
        "type": "aws_s3",
        "details": {
@ -41,134 +43,121 @@ To run CloudServer with an S3 AWS backend, add a new section to the

            "credentialsProfile": "aws_hosted_profile"
        }
    }
    (...)

Edit your AWS credentials file to enable your preferred command-line tool.
This file must mention credentials for all backends in use. You can use
several profiles if multiple profiles are configured.

.. code:: json

    [default]
    aws_access_key_id=accessKey1
    aws_secret_access_key=verySecretKey1
    [aws_hosted_profile]
    aws_access_key_id={{YOUR_ACCESS_KEY}}
    aws_secret_access_key={{YOUR_SECRET_KEY}}

As with locationConfig.json, the AWS credentials file must be mounted at
run time: ``-v ~/.aws/credentials:/root/.aws/credentials`` on Unix-like
systems (Linux, OS X, etc.), or
``-v C:\Users\USERNAME\.aws\credentials:/root/.aws/credentials`` on Windows

.. note:: One account cannot copy to another account with a source and
   destination on real AWS unless the account associated with the
   accessKey/secretKey pairs used for the destination bucket has source
   bucket access privileges. To enable this, update ACLs directly on AWS.
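Putting these pieces together, a plausible way to start CloudServer with the
AWS backend configured above is the following sketch, which mirrors the CI
examples later in this document (host paths are placeholders):

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 \
      -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
      -v ~/.aws/credentials:/root/.aws/credentials \
      -e S3DATA=multiple zenko/cloudserver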
S3BACKEND
~~~~~~~~~

S3BACKEND=file
^^^^^^^^^^^^^^

For stored file data to persist, you must mount Docker volumes
for both data and metadata. See :ref:`In Production with a Docker-Hosted CloudServer <in-production-w-a-Docker-hosted-cloudserver>`

S3BACKEND=mem
^^^^^^^^^^^^^

This is ideal for testing: no data remains after the container is shut down.
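For example, a throwaway test instance might be started like this (a sketch
following the run commands used throughout this section):

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e S3BACKEND=mem zenko/cloudserver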
ENDPOINT
~~~~~~~~

This variable specifies the endpoint. To direct CloudServer requests to
new.host.com, for example, specify the endpoint with:

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e ENDPOINT=new.host.com zenko/cloudserver

.. note:: On Unix-like systems (Linux, OS X, etc.) edit /etc/hosts
   to associate 127.0.0.1 with new.host.com.
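A quick way to apply and check that association (a sketch; an anonymous
request is expected to return an XML error document rather than a listing,
which still proves the endpoint resolves):

.. code-block:: shell

    $ echo "127.0.0.1 new.host.com" | sudo tee -a /etc/hosts
    $ curl http://new.host.com:8000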
REMOTE_MANAGEMENT_DISABLE
~~~~~~~~~~~~~~~~~~~~~~~~~

CloudServer is a part of `Zenko <https://www.zenko.io/>`__. When run standalone, CloudServer still tries to connect to Orbit (the browser-based graphical user interface for Zenko) by default.

Setting this variable to true(1) disables the automatic Orbit management and defaults the credentials to accessKey1 and verySecretKey1:

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e REMOTE_MANAGEMENT_DISABLE=1 zenko/cloudserver
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

These variables specify authentication credentials for an account named
“CustomAccount”.

Set account credentials for multiple accounts by editing conf/authdata.json
(see below for further details). To specify one set for personal use, set these
environment variables:

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey \
    -e SCALITY_SECRET_ACCESS_KEY=newSecretKey zenko/cloudserver

.. note:: These variables take precedence over the contents of the
   authdata.json file, which is ignored.

.. note:: The ACCESS_KEY and SECRET_KEY environment variables are
   deprecated.
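The new credentials can then be exercised with any S3 client; for instance,
with the AWS CLI (a sketch assuming the container above is reachable on
localhost:8000):

.. code-block:: shell

    $ AWS_ACCESS_KEY_ID=newAccessKey AWS_SECRET_ACCESS_KEY=newSecretKey \
      aws s3 ls --endpoint-url http://localhost:8000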
LOG\_LEVEL
~~~~~~~~~~

This variable changes the log level. There are three levels: info, debug,
and trace. The default is info. Debug provides more detailed logs, and trace
provides the most detailed.

.. code-block:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e LOG_LEVEL=trace zenko/cloudserver
SSL
~~~

When set to true, this variable runs CloudServer with SSL.

If SSL is set true:

* The ENDPOINT environment variable must also be specified.

* On Unix-like systems (Linux, OS X, etc.), 127.0.0.1 must be associated with
  <YOUR_ENDPOINT> in /etc/hosts.

.. Warning:: Self-signed certs with a CA generated within the container are
   suitable for testing purposes only. Clients cannot trust them, and they may
   disappear altogether on a container upgrade. The best security practice for
   production environments is to use an extra container, such as
   haproxy/nginx/stunnel, for SSL/TLS termination and to pull certificates
   from a mounted volume, limiting what an exploit on either component
   can expose.

.. code:: shell

    $ docker run -d --name cloudserver -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT> \
    zenko/cloudserver

For more information about using CloudServer with SSL, see `Using SSL <GETTING_STARTED.html#Using SSL>`__
LISTEN\_ADDR
~~~~~~~~~~~~

This variable causes CloudServer and its data and metadata components to
listen on the specified address. This allows starting the data or metadata
servers as standalone services, for example.

.. code:: shell
@ -185,8 +174,8 @@ Zenko CloudServer.

.. code:: shell

    $ docker run -d --name cloudserver -e DATA_HOST=cloudserver-data \
    -e METADATA_HOST=cloudserver-metadata zenko/cloudserver yarn run start_s3server
REDIS\_HOST
~~~~~~~~~~~

@ -196,21 +185,19 @@ localhost.

.. code:: shell

    $ docker run -d --name cloudserver -p 8000:8000 \
    -e REDIS_HOST=my-redis-server.example.com zenko/cloudserver
REDIS\_PORT
~~~~~~~~~~~

Use this variable to connect to the Redis cache server on a port other
than the default 6379.

.. code:: shell

    $ docker run -d --name cloudserver -p 8000:8000 \
    -e REDIS_PORT=6379 zenko/cloudserver

.. _tunables-and-setup-tips:

Tunables and Setup Tips
-----------------------
@ -218,57 +205,60 @@ Tunables and Setup Tips

Using Docker Volumes
~~~~~~~~~~~~~~~~~~~~

CloudServer runs with a file backend by default, meaning that data is
stored inside the CloudServer’s Docker container.

For data and metadata to persist, data and metadata must be hosted in Docker
volumes outside the CloudServer’s Docker container. Otherwise, the data
and metadata are destroyed when the container is erased.

.. code-block:: shell

    $ docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
    -p 8000:8000 -d zenko/cloudserver

This command mounts the ./data host directory to the container
at /usr/src/app/localData and the ./metadata host directory to
the container at /usr/src/app/localMetaData.

.. tip:: These host directories can be mounted to any accessible mount
   point, such as /mnt/data and /mnt/metadata, for example.
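One way to convince yourself the data actually persists is to recreate the
container against the same volumes (a sketch; the container name is
hypothetical):

.. code-block:: shell

    $ docker rm -f cloudserver   # remove the container, keeping host directories
    $ docker run --name cloudserver -v $(pwd)/data:/usr/src/app/localData \
      -v $(pwd)/metadata:/usr/src/app/localMetadata -p 8000:8000 -d zenko/cloudserver
    # previously stored objects should still be served by the new container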
Adding, Modifying, or Deleting Accounts or Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Create a customized authdata.json file locally based on /conf/authdata.json.

2. Use `Docker volumes <https://docs.docker.com/storage/volumes/>`__
   to override the default ``authdata.json`` through a Docker file mapping.
   For example:

.. code-block:: shell

    $ docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d \
    zenko/cloudserver
Specifying a Host Name
~~~~~~~~~~~~~~~~~~~~~~

To specify a host name (for example, s3.domain.name), provide your own
`config.json <https://github.com/scality/cloudserver/blob/master/config.json>`__
file using `Docker volumes <https://docs.docker.com/storage/volumes/>`__.

First, add a new key-value pair to the restEndpoints section of your
config.json. Make the key the host name you want, and the value the default
location\_constraint for this endpoint.

For example, ``cloudserver.example.com`` is mapped to ``us-east-1`` which is one
of the ``location_constraints`` listed in your locationConfig.json file
`here <https://github.com/scality/S3/blob/master/locationConfig.json>`__.

For more information about location configuration, see:
`GETTING STARTED <GETTING_STARTED.html#location-configuration>`__

.. code:: json
@ -276,33 +266,33 @@ For more information about location configuration, see:

    "localhost": "file",
    "127.0.0.1": "file",
    ...
    "cloudserver.example.com": "us-east-1"
    },

Next, run CloudServer using a `Docker volume
<https://docs.docker.com/engine/tutorials/dockervolumes/>`__:

.. code-block:: shell

    $ docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d zenko/cloudserver

The local ``config.json`` file overrides the default one through a Docker
file mapping.

Running as an Unprivileged User
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

CloudServer runs as root by default.

To change this, modify the dockerfile and specify a user before the
entry point.

The user must exist within the container, and must own the
/usr/src/app directory for CloudServer to run.

For example, the following dockerfile lines can be modified:

.. code-block:: shell

    ...
    && groupadd -r -g 1001 scality \
@ -314,58 +304,54 @@ For example, the following dockerfile lines can be modified:

    USER scality
    ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]

.. _continuous-integration-with-docker-hosted-cloudserver:

Continuous Integration with a Docker-Hosted CloudServer
-------------------------------------------------------

When you start the Docker CloudServer image, you can adjust the
configuration of the CloudServer instance by passing one or more
environment variables on the ``docker run`` command line.

To run CloudServer for CI with custom locations (one in-memory,
one hosted on AWS), and custom credentials mounted:

.. code-block:: shell

    $ docker run --name CloudServer -p 8000:8000 \
    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
    -v ~/.aws/credentials:/root/.aws/credentials \
    -e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver

To run CloudServer for CI with custom locations (one in-memory, one
hosted on AWS, and one file) and custom credentials `set as environment
variables <GETTING_STARTED.html#scality-access-key-id-and-scality-secret-access-key>`__:

.. code-block:: shell

    $ docker run --name CloudServer -p 8000:8000 \
    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v ~/.aws/credentials:/root/.aws/credentials \
    -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
    -e SCALITY_ACCESS_KEY_ID=accessKey1 \
    -e SCALITY_SECRET_ACCESS_KEY=verySecretKey1 \
    -e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver

.. _in-production-w-a-Docker-hosted-cloudserver:

In Production with a Docker-Hosted CloudServer
----------------------------------------------

Because data must persist in production settings, CloudServer offers
multiple-backend capabilities. This requires a custom endpoint
and custom credentials for local storage.

Customize these with:

.. code-block:: shell

    $ docker run -d --name CloudServer \
    -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
    -v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple \
    -e ENDPOINT=custom.endpoint.com \
    -p 8000:8000 zenko/cloudserver
@ -1,221 +1,214 @@

Getting Started
===============

.. figure:: ../res/scality-cloudserver-logo.png
   :alt: Zenko CloudServer logo

Dependencies
------------

Building and running the Scality Zenko CloudServer requires node.js 10.x and
yarn v1.17.x. Up-to-date versions can be found at
`Nodesource <https://github.com/nodesource/distributions>`__.

Installation
------------

1. Clone the source code

   .. code-block:: shell

       $ git clone https://github.com/scality/cloudserver.git

2. Go to the cloudserver directory and use yarn to install the js dependencies.

   .. code-block:: shell

       $ cd cloudserver
       $ yarn install

Running CloudServer with a File Backend
---------------------------------------

.. code-block:: shell

    $ yarn start

This starts a Zenko CloudServer on port 8000. Two additional ports, 9990
and 9991, are also open locally for internal transfer of metadata and
data, respectively.

The default access key is accessKey1. The secret key is verySecretKey1.

By default, metadata files are saved in the localMetadata directory and
data files are saved in the localData directory in the local ./cloudserver
directory. These directories are pre-created within the repository. To
save data or metadata in different locations, you must specify them using
absolute paths. Thus, when starting the server:

.. code-block:: shell

    $ mkdir -m 700 $(pwd)/myFavoriteDataPath
    $ mkdir -m 700 $(pwd)/myFavoriteMetadataPath
    $ export S3DATAPATH="$(pwd)/myFavoriteDataPath"
    $ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
    $ yarn start

Running CloudServer with Multiple Data Backends
-----------------------------------------------

.. code-block:: shell

    $ export S3DATA='multiple'
    $ yarn start

This starts a Zenko CloudServer on port 8000.

The default access key is accessKey1. The secret key is verySecretKey1.

With multiple backends, you can choose where each object is saved by setting
the following header with a location constraint in a PUT request:

.. code-block:: shell

    'x-amz-meta-scal-location-constraint':'myLocationConstraint'

If no header is sent with a PUT object request, the bucket’s location
constraint determines where the data is saved. If the bucket has no
location constraint, the endpoint of the PUT request determines location.

See the Configuration_ section to set location constraints.
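For instance, with the AWS CLI the header above can be sent through the
``--metadata`` option, which prefixes each key with ``x-amz-meta-`` (a
sketch; the bucket, key, and file names are placeholders):

.. code-block:: shell

    $ aws s3api put-object --endpoint-url http://localhost:8000 \
      --bucket mybucket --key myobject --body ./myfile \
      --metadata scal-location-constraint=myLocationConstraint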
Run CloudServer with an In-Memory Backend
-----------------------------------------

.. code-block:: shell

    $ yarn run mem_backend

This starts a Zenko CloudServer on port 8000.

The default access key is accessKey1. The secret key is verySecretKey1.

Run CloudServer with Vault User Management
------------------------------------------

.. code:: shell

    export S3VAULT=vault
    yarn start

Note: Vault is proprietary and must be accessed separately.
This starts a Zenko CloudServer using Vault for user management.

Run CloudServer for Continuous Integration Testing or in Production with Docker
-------------------------------------------------------------------------------

Run CloudServer with `DOCKER <DOCKER.html>`__

Testing
~~~~~~~

Run unit tests with the command:

.. code-block:: shell

    $ yarn test

Run multiple-backend unit tests with:

.. code-block:: shell

    $ CI=true S3DATA=multiple yarn start
    $ yarn run multiple_backend_test

Run the linter with:

.. code-block:: shell

    $ yarn run lint

Running Functional Tests Locally
--------------------------------

To pass AWS and Azure backend tests locally, modify
tests/locationConfig/locationConfigTests.json so that ``awsbackend``
specifies the bucketname of a bucket you have access to based on your
credentials, and modify ``azurebackend`` with details for your Azure account.
The test suite requires additional tools, **s3cmd** and **Redis**
installed in the environment the tests are running in.

1. Install `s3cmd <http://s3tools.org/download>`__

2. Install `redis <https://redis.io/download>`__ and start Redis.

3. Add localCache section to ``config.json``:

   .. code:: json

       "localCache": {
           "host": REDIS_HOST,
           "port": REDIS_PORT
       }

   where ``REDIS_HOST`` is the Redis instance IP address (``"127.0.0.1"``
   if Redis is running locally) and ``REDIS_PORT`` is the Redis instance
   port (``6379`` by default)

4. Add the following to the local etc/hosts file:

   .. code-block:: shell

       127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com

5. Start Zenko CloudServer in memory and run the functional tests:

   .. code-block:: shell

       $ CI=true yarn run mem_backend
       $ CI=true yarn run ft_test
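If the functional tests fail to start, it can be worth checking that Redis is
actually reachable with the settings from the localCache section above (a
quick probe; a ``PONG`` reply means the cache is up):

.. code-block:: shell

    $ redis-cli -h 127.0.0.1 -p 6379 ping
    PONG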
.. _Configuration:

Configuration
-------------

There are three configuration files for Zenko CloudServer:

* ``conf/authdata.json``, for authentication.

* ``locationConfig.json``, to configure where data is saved.

* ``config.json``, for general configuration options.

.. _location-configuration:

Location Configuration
~~~~~~~~~~~~~~~~~~~~~~

You must specify at least one locationConstraint in locationConfig.json
(or leave it as pre-configured).

You must also specify 'us-east-1' as a locationConstraint. If you put a
bucket to an unknown endpoint and do not specify a locationConstraint in
the PUT bucket call, us-east-1 is used.

For instance, the following locationConstraint saves data sent to
``myLocationConstraint`` to the file backend:

.. code:: json

    "myLocationConstraint": {
        "type": "file",
        "legacyAwsBehavior": false,
        "details": {}
    },

Each locationConstraint must include the ``type``, ``legacyAwsBehavior``,
and ``details`` keys. ``type`` indicates which backend is used for that
region. Supported backends are mem, file, and scality. ``legacyAwsBehavior``
indicates whether the region behaves the same as the AWS S3 'us-east-1'
region. If the locationConstraint type is ``scality``, ``details`` must
contain connector information for sproxyd. If the locationConstraint type
is ``mem`` or ``file``, ``details`` must be empty.

Once locationConstraints is set in locationConfig.json, specify a default
locationConstraint for each endpoint.

For instance, the following sets the ``localhost`` endpoint to the
``myLocationConstraint`` data backend defined above:
@ -226,24 +219,26 @@ For instance, the following sets the ``localhost`` endpoint to the

    "localhost": "myLocationConstraint"
    },

To use an endpoint other than localhost for Zenko CloudServer, the endpoint
must be listed in ``restEndpoints``. Otherwise, if the server is running
with a:

* **file backend**: The default location constraint is ``file``
* **memory backend**: The default location constraint is ``mem``

Endpoints
~~~~~~~~~

The Zenko CloudServer supports endpoints that are rendered in either:

* path style: http://myhostname.com/mybucket or
* hosted style: http://mybucket.myhostname.com

However, if an IP address is specified for the host, hosted-style requests
cannot reach the server. Use path-style requests in that case. For example,
if you are using the AWS SDK for JavaScript, instantiate your client like this:

.. code:: js
@ -252,99 +247,87 @@ if you are using the AWS SDK for JavaScript, instantiate your client like this:

    s3ForcePathStyle: true,
    });
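A quick path-style probe from the command line confirms the same behavior
outside the SDK (a sketch using the AWS CLI against the default local
endpoint and credentials used throughout this guide):

.. code-block:: shell

    $ aws s3api list-buckets --endpoint-url http://127.0.0.1:8000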
Setting Your Own Access and Secret Key Pairs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Credentials can be set for many accounts by editing ``conf/authdata.json``,
but use the ``SCALITY_ACCESS_KEY_ID`` and ``SCALITY_SECRET_ACCESS_KEY``
environment variables to specify your own credentials.

_`scality-access-key-id-and-scality-secret-access-key`

SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

These variables specify authentication credentials for an account named
“CustomAccount”.

.. note:: Anything in the ``authdata.json`` file is ignored.

.. code-block:: shell

    $ SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey yarn start
.. _Using_SSL:

Using SSL
~~~~~~~~~

To use https with your local CloudServer, you must set up
SSL certificates.

1. Deploy CloudServer using `our DockerHub page
   <https://hub.docker.com/r/zenko/cloudserver/>`__ (run it with a file
   backend).

   .. Note:: If Docker is not installed locally, follow the
      `instructions to install it for your distribution
      <https://docs.docker.com/engine/installation/>`__

2. Update the CloudServer container’s config

   Add your certificates to your container. To do this:

   #. Exec inside the CloudServer container.

   #. Run ``$> docker ps`` to find the container’s ID (the corresponding
      image name is ``scality/cloudserver``).

   #. Copy the corresponding container ID (``894aee038c5e`` in the present
      example), and run:

      .. code-block:: shell

          $> docker exec -it 894aee038c5e bash

      This puts you inside your container, using an interactive terminal.
3. Generate the SSL key and certificates. The paths where the different
   files are stored are defined after the ``-out`` option in each of the
   following commands.

   #. Generate a private key for your certificate signing request (CSR):

      .. code-block:: shell

          $> openssl genrsa -out ca.key 2048

   #. Generate a self-signed certificate for your local certificate
      authority (CA):

      .. code:: shell

          $> openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=scality.test"

   #. Generate a key for the CloudServer:

      .. code:: shell

          $> openssl genrsa -out test.key 2048

   #. Generate a CSR for CloudServer:

      .. code:: shell

          $> openssl req -new -key test.key -out test.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.scality.test"

   #. Generate a certificate for CloudServer signed by the local CA:

      .. code:: shell

          $> openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 99999 -sha256

4. Update Zenko CloudServer ``config.json``. Add a ``certFilePaths``
   section to ``./config.json`` with appropriate paths:

   .. code:: json

       "certFilePaths": {
           "key": "./test.key",
@ -352,36 +335,42 @@ SSL certificates.

           "ca": "./ca.crt"
       }

5. Run your container with the new config.

   #. Exit the container by running ``$> exit``.

   #. Restart the container with ``$> docker restart cloudserver``.

6. Update the host configuration by adding s3.scality.test
   to /etc/hosts:

   .. code:: bash

       127.0.0.1      localhost s3.scality.test

7. Copy the local certificate authority (ca.crt in step 3) from your
   container. Choose the path to save this file to (in the present
   example, ``/root/ca.crt``), and run:

   .. code:: shell

       $> docker cp 894aee038c5e:/usr/src/app/ca.crt /root/ca.crt

   .. note:: Your container ID will be different, and your path to
      ca.crt may be different.

Test the Config
^^^^^^^^^^^^^^^

If aws-sdk is not installed, run ``$> yarn add aws-sdk``.

Paste the following script into a file named "test.js":

.. code:: js
@ -421,13 +410,8 @@ Paste the following script into a file named "test.js":

        });
    });

Now run this script with:

.. code::

    $> nodejs test.js

On success, the script outputs ``SSL is cool!``.
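If node.js is not at hand, a similar smoke test can be run with curl,
assuming the CA copied in step 7 and the hosts entry from step 6 (the port
depends on your config; 8000 is the default used throughout this guide):

.. code:: shell

    $> curl --cacert /root/ca.crt https://s3.scality.test:8000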
.. |CircleCI| image:: https://circleci.com/gh/scality/S3.svg?style=svg
@ -4,415 +4,479 @@ Integrations

High Availability
=================

`Docker Swarm <https://docs.docker.com/engine/swarm/>`__ is a clustering tool
developed by Docker for use with its containers. It can be used to start
services, which we define to ensure CloudServer's continuous availability to
end users. A swarm defines a manager and *n* workers among *n* + 1 servers.

This tutorial shows how to perform a basic setup with three servers, which
provides strong service resiliency, while remaining easy to use and
maintain. We will use NFS through Docker to share data and
metadata between the different servers.

Sections are labeled **On Server**, **On Clients**, or
**On All Machines**, referring respectively to NFS server, NFS clients, or
NFS server and clients. In the present example, the server’s IP address is
**10.200.15.113** and the client IP addresses are **10.200.15.96** and
**10.200.15.97**

1. Install Docker (on All Machines)

   Docker 17.03.0-ce is used for this tutorial. Docker 1.12.6 and later will
   likely work, but is not tested.

   * On Ubuntu 14.04

     Install Docker CE for Ubuntu as `documented at Docker
     <https://docs.docker.com/install/linux/docker-ce/ubuntu/>`__.
     Install the aufs dependency as recommended by Docker. The required
     commands are:

     .. code:: sh

         $> sudo apt-get update
         $> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
         $> sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
         $> curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
         $> sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
         $> sudo apt-get update
         $> sudo apt-get install docker-ce

   * On CentOS 7

     Install Docker CE as `documented at Docker
     <https://docs.docker.com/install/linux/docker-ce/centos/>`__.
     The required commands are:

     .. code:: sh

         $> sudo yum install -y yum-utils
         $> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
         $> sudo yum makecache fast
         $> sudo yum install docker-ce
         $> sudo systemctl start docker
2. Install NFS on Client(s)

NFS clients mount Docker volumes over the NFS server’s shared folders.
If the NFS commons are installed, manual mounts are no longer needed.

* On Ubuntu 14.04

  Install the NFS commons with apt-get:

  .. code:: sh

      $> sudo apt-get install nfs-common

* On CentOS 7

  Install the NFS utils, then start the required services:

  .. code:: sh

      $> yum install nfs-utils
      $> sudo systemctl enable rpcbind
      $> sudo systemctl enable nfs-server
      $> sudo systemctl enable nfs-lock
      $> sudo systemctl enable nfs-idmap
      $> sudo systemctl start rpcbind
      $> sudo systemctl start nfs-server
      $> sudo systemctl start nfs-lock
      $> sudo systemctl start nfs-idmap

3. Install NFS (on Server)

The NFS server hosts the data and metadata. The packages to install on it
differ from those installed on the clients.

* On Ubuntu 14.04

  Install the NFS server-specific package and the NFS commons:

  .. code:: sh

      $> sudo apt-get install nfs-kernel-server nfs-common

* On CentOS 7

  Install the NFS utils and start the required services:

  .. code:: sh

      $> yum install nfs-utils
      $> sudo systemctl enable rpcbind
      $> sudo systemctl enable nfs-server
      $> sudo systemctl enable nfs-lock
      $> sudo systemctl enable nfs-idmap
      $> sudo systemctl start rpcbind
      $> sudo systemctl start nfs-server
      $> sudo systemctl start nfs-lock
      $> sudo systemctl start nfs-idmap

For both distributions:

#. Choose where shared data and metadata from the local
   `CloudServer <http://www.zenko.io/cloudserver/>`__ shall be stored (the
   present example uses /var/nfs/data and /var/nfs/metadata). Set
   permissions on these folders for sharing over NFS:

   .. code:: sh

       $> mkdir -p /var/nfs/data /var/nfs/metadata
       $> chmod -R 777 /var/nfs/

#. The /etc/exports file configures network permissions and r-w-x permissions
   for NFS access. Edit /etc/exports, adding the following lines:

   .. code:: sh

       /var/nfs/data        10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
       /var/nfs/metadata    10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)

   Ubuntu applies the no\_subtree\_check option by default, so both
   folders are declared with the same permissions, even though they’re in
   the same tree.

#. Export this new NFS table:

   .. code:: sh

       $> sudo exportfs -a

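To confirm the export took effect (an optional check, not part of the
original steps), list the export table on the server and query it from a
client:

.. code:: sh

    $> sudo exportfs -v               # on the server
    $> showmount -e 10.200.15.113     # on a client
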
#. Edit the ``MountFlags`` option in the Docker config in
   /lib/systemd/system/docker.service to enable NFS mount from Docker volumes
   on other machines:

   .. code:: sh

       MountFlags=shared

#. Restart the NFS server and Docker daemons to apply these changes.

   * On Ubuntu 14.04

     .. code:: sh

         $> sudo service nfs-kernel-server restart
         $> sudo service docker restart

   * On CentOS 7

     .. code:: sh

         $> sudo systemctl restart nfs-server
         $> sudo systemctl daemon-reload
         $> sudo systemctl restart docker

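Before wiring up Docker volumes, an optional end-to-end check (our
suggestion, using the example IPs of this guide) is a manual mount from
one client:

.. code:: sh

    $> sudo mount -t nfs 10.200.15.113:/var/nfs/data /mnt
    $> sudo umount /mnt
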
4. Set Up a Docker Swarm

* On all machines and distributions:

  Set up the Docker volumes to be mounted to the NFS server for CloudServer’s
  data and metadata storage. The following commands must be replicated on all
  machines:

  .. code:: sh

      $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
      $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata

  There is no need to ``docker exec`` these volumes to mount them: the
  Docker Swarm manager does this when the Docker service is started.

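The volume definitions can be checked on any machine (an optional
verification we add here) with:

.. code:: sh

    $> docker volume ls
    $> docker volume inspect data
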
* On a server:

  To start a Docker service on a Docker Swarm cluster, initialize the cluster
  (that is, define a manager), prompt workers/nodes to join in, and then start
  the service.

  Initialize the swarm cluster, and review its response:

  .. code:: sh

      $> docker swarm init --advertise-addr 10.200.15.113

      Swarm initialized: current node (db2aqfu3bzfzzs9b1kfeaglmq) is now a manager.

      To add a worker to this swarm, run the following command:

          docker swarm join \
          --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka \
          10.200.15.113:2377

      To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

* On clients:

  Copy and paste the command provided by your Docker Swarm init. A successful
  request/response will resemble:

  .. code:: sh

      $> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377

      This node joined a swarm as a worker.

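On the manager, ``docker node ls`` (an optional check we add here) should
now list the manager and both workers:

.. code:: sh

    $> docker node ls
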
* On a server:

  Start the service on the Swarm cluster:

  .. code:: sh

      $> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/cloudserver

On a successful installation, ``docker service ls`` returns the following
output:

.. code:: sh

    $> docker service ls
    ID            NAME  MODE        REPLICAS  IMAGE
    ocmggza412ft  s3    replicated  1/1       scality/cloudserver:latest

If the service does not start, consider disabling apparmor/SELinux.

Testing the High-Availability CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

On all machines (client/server) and distributions (Ubuntu and CentOS),
determine where CloudServer is running using ``docker ps``. CloudServer can
operate on any node of the Swarm cluster, manager or worker. When you find
it, you can kill it with ``docker stop <container id>``. It will respawn
on a different node. Now, if one server falls, or if Docker stops
unexpectedly, the end user will still be able to access the local CloudServer.

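A sketch of that failover check (the container ID below is a placeholder):

.. code:: sh

    $> docker ps --filter ancestor=scality/cloudserver
    $> docker stop <container id>
    $> docker service ps s3     # watch the task respawn on another node
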
Troubleshooting
~~~~~~~~~~~~~~~

To troubleshoot the service, run:

.. code:: sh

    $> docker service ps s3
    ID                         NAME     IMAGE                NODE                               DESIRED STATE  CURRENT STATE       ERROR
    0ar81cw4lvv8chafm8pw48wbc  s3.1     scality/cloudserver  localhost.localdomain.localdomain  Running        Running 7 days ago
    cvmf3j3bz8w6r4h0lf3pxo6eu  \_ s3.1  scality/cloudserver  localhost.localdomain.localdomain  Shutdown       Failed 7 days ago   "task: non-zero exit (137)"

If the error is truncated, view the error in detail by inspecting the
Docker task ID:

.. code:: sh

    $> docker inspect cvmf3j3bz8w6r4h0lf3pxo6eu

Off you go!
~~~~~~~~~~~

Let us know how you use this and if you'd like any specific developments
around it. Even better: come and contribute to our `Github repository
<https://github.com/scality/s3/>`__! We look forward to meeting you!

S3FS
====

You can export buckets as a filesystem with s3fs on CloudServer.

`s3fs <https://github.com/s3fs-fuse/s3fs-fuse>`__ is an open source
tool, available both on Debian and RedHat distributions, that enables
you to mount an S3 bucket on a filesystem-like backend. This tutorial uses
an Ubuntu 14.04 host to deploy and use s3fs over CloudServer.

Deploying Zenko CloudServer with SSL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

First, deploy CloudServer with a file backend using `our DockerHub page
<https://hub.docker.com/r/zenko/cloudserver>`__.

.. note::

    If Docker is not installed on your machine, follow
    `these instructions <https://docs.docker.com/engine/installation/>`__
    to install it for your distribution.

You must also set up SSL with CloudServer to use s3fs. See `Using SSL
<./GETTING_STARTED#Using_SSL>`__ for instructions.

s3fs Setup
~~~~~~~~~~

Installing s3fs
---------------

Follow the instructions in the s3fs `README
<https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation-from-pre-built-packages>`__.

Check that s3fs is properly installed. A version check should return
a response resembling:

.. code:: sh

    $> s3fs --version

    Amazon Simple Storage Service File System V1.80(commit:d40da2c) with OpenSSL
    Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
    License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>
    This is free software: you are free to change and redistribute it.
    There is NO WARRANTY, to the extent permitted by law.

Configuring s3fs
----------------

s3fs expects you to provide it with a password file. Our file is
``/etc/passwd-s3fs``. The structure for this file is
``ACCESSKEYID:SECRETKEYID``, so, for CloudServer, you can run:

.. code:: sh

    $> echo 'accessKey1:verySecretKey1' > /etc/passwd-s3fs
    $> chmod 600 /etc/passwd-s3fs

Using CloudServer with s3fs
---------------------------

1. Use /mnt/tests3fs as a mount point.

   .. code:: sh

       $> mkdir /mnt/tests3fs

2. Create a bucket on your local CloudServer. In the present example it is
   named “tests3fs”.

   .. code:: sh

       $> s3cmd mb s3://tests3fs

3. Mount the bucket to your mount point with s3fs:

   .. code:: sh

       $> s3fs tests3fs /mnt/tests3fs -o passwd_file=/etc/passwd-s3fs -o url="https://s3.scality.test:8000/" -o use_path_request_style

The structure of this command is
``s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS``. Of these mandatory
options:

* ``passwd_file`` specifies the path to the password file.
* ``url`` specifies the host name used by your SSL provider.
* ``use_path_request_style`` forces the path style (by default,
  s3fs uses DNS-style subdomains).

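To detach the bucket later (a step the original text leaves out), unmount
the FUSE mount point:

.. code:: sh

    $> fusermount -u /mnt/tests3fs
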
Once the bucket is mounted, files added to the mount point or
objects added to the bucket will appear in both locations.

Example
-------

Create two files, and then a directory with a file, in the mount point:

.. code:: sh

    $> touch /mnt/tests3fs/file1 /mnt/tests3fs/file2
    $> mkdir /mnt/tests3fs/dir1
    $> touch /mnt/tests3fs/dir1/file3

Now, use s3cmd to show what is in CloudServer:

.. code:: sh

    $> s3cmd ls -r s3://tests3fs

    2017-02-28 17:28         0   s3://tests3fs/dir1/
    2017-02-28 17:29         0   s3://tests3fs/dir1/file3
    2017-02-28 17:28         0   s3://tests3fs/file1
    2017-02-28 17:28         0   s3://tests3fs/file2

Now you can enjoy a filesystem view on your local CloudServer.

Duplicity
=========

How to back up your files with CloudServer.

Installing Duplicity and its Dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To install `Duplicity <http://duplicity.nongnu.org/>`__,
go to `this site <https://code.launchpad.net/duplicity/0.7-series>`__.
Download the latest tarball. Decompress it and follow the instructions
in the README.

.. code:: sh

    $> tar zxvf duplicity-0.7.11.tar.gz
    $> cd duplicity-0.7.11
    $> python setup.py install

You may receive error messages indicating the need to install some or all
of the following dependencies:

.. code:: sh

    $> apt-get install python-dev python-pip python-lockfile
    $> pip install -U boto

Testing the Installation
------------------------

1. Check that CloudServer is running. Run ``$> docker ps``. You should
   see one container named ``scality/cloudserver``. If you do not, run
   ``$> docker start cloudserver`` and check again.

2. Duplicity uses a module called “Boto” to send requests to S3. Boto
   requires a configuration file located in ``/etc/boto.cfg`` to store
   your credentials and preferences. A minimal configuration, which you
   can fine-tune `following these instructions
   <http://boto.cloudhackers.com/en/latest/getting_started.html>`__, is
   shown here:

   ::

       # If using SSL, unmute and provide absolute path to local CA certificate
       # ca_certificates_file = /absolute/path/to/ca.crt

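   Only the SSL lines of that file are shown above; as a rough sketch (our
   assumption, following the Boto instructions linked above), the
   credentials part of such a file looks like:

   ::

       # hypothetical minimal /etc/boto.cfg - adjust to your own keys
       [Credentials]
       aws_access_key_id = accessKey1
       aws_secret_access_key = verySecretKey1
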
.. note:: To set up SSL with CloudServer, see `Using SSL
   <./GETTING_STARTED#Using_SSL>`__ in GETTING STARTED.

3. At this point all requirements to run CloudServer as a backend to Duplicity
   have been met. A local folder/file should back up to the local S3.
   Try it with the decompressed Duplicity folder:

   .. code:: sh

       $> duplicity duplicity-0.7.11 "s3://127.0.0.1:8000/testbucket/"

.. note:: Duplicity will prompt for a symmetric encryption passphrase.
   Save it carefully, as you will need it to recover your data.
   Alternatively, you can add the ``--no-encryption`` flag
   and the data will be stored plain.

If this command is successful, you will receive an output resembling:

.. code:: sh

    --------------[ Backup Statistics ]--------------
    StartTime 1486486547.13 (Tue Feb  7 16:55:47 2017)
    EndTime 1486486547.40 (Tue Feb  7 16:55:47 2017)
    ElapsedTime 0.27 (0.27 seconds)
    SourceFiles 388
    SourceFileSize 6634529 (6.33 MB)
    NewFiles 388
    NewFileSize 6634529 (6.33 MB)
    DeletedFiles 0
    ChangedFiles 0
    ChangedFileSize 0 (0 bytes)
    ChangedDeltaSize 0 (0 bytes)
    DeltaEntries 388
    RawDeltaSize 6392865 (6.10 MB)
    TotalDestinationSizeChange 2003677 (1.91 MB)
    Errors 0
    -------------------------------------------------

Congratulations! You can now back up to your local S3 through Duplicity.

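Restoring works in the opposite direction (a sketch we add here; Duplicity
asks for the passphrase chosen during backup):

.. code:: sh

    $> duplicity "s3://127.0.0.1:8000/testbucket/" duplicity-restored
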
Automating Backups
------------------

The easiest way to back up files periodically is to write a bash script
and add it to your crontab. A suggested script follows.

.. code:: sh

    # Export your passphrase so you don't have to type anything
    export PASSPHRASE="mypassphrase"

    # To use a GPG key, put it here and uncomment the line below
    #GPG_KEY=

    # Define your backup bucket, with localhost specified
    DEST="s3://127.0.0.1:8000/testbucketcloudserver/"

    # Define the absolute path to the folder to back up
    SOURCE=/root/testfolder

    # Set to "full" for full backups, and "incremental" for incremental backups
    # Warning: you must perform one full backup before you can perform
    # incremental ones on top of it
    FULL=incremental

    # How long to keep backups. If you don't want to delete old backups, keep
    # this value empty; otherwise, the syntax is "1Y" for one year, "1M" for
    # one month, "1D" for one day.
    OLDER_THAN="1Y"

    # is_running checks whether Duplicity is currently completing a task
    is_running=$(ps -ef | grep duplicity | grep python | wc -l)

    # If Duplicity is already completing a task, this will not run
    if [ $is_running -eq 0 ]; then
        echo "Backup for ${SOURCE} started"

        # To delete backups older than a certain time, do it here
        if [ "$OLDER_THAN" != "" ]; then
            echo "Removing backups older than ${OLDER_THAN}"
            duplicity remove-older-than ${OLDER_THAN} ${DEST}
        fi

        # ... (the backup invocation itself is omitted in this excerpt) ...

        # Forget the passphrase...
        unset PASSPHRASE
    fi

Put this file in ``/usr/local/sbin/backup.sh``. Run ``crontab -e`` and
paste your configuration into the file that opens. If you're unfamiliar
with Cron, here is a good `HowTo
<https://help.ubuntu.com/community/CronHowto>`__. If the folder being
backed up is a folder to be modified permanently during the work day,
we can set incremental backups every 5 minutes from 8 AM to 9 PM Monday
through Friday by pasting the following line into crontab:

.. code:: sh

    */5 8-20 * * 1-5 /usr/local/sbin/backup.sh
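
To confirm the entry was registered (an optional check we add):

.. code:: sh

    $> crontab -l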

Adding or removing files from the folder being backed up will result in
incremental backups in the bucket.

Metadata Search Documentation
=============================

Description
-----------

This feature enables metadata search to be performed on the metadata of objects
stored in Zenko.

Requirements
------------

* MongoDB

Design
------

The Metadata Search feature expands on the existing :code:`GET Bucket` S3 API by
enabling users to conduct metadata searches by adding the custom Zenko query
string parameter, :code:`search`. The :code:`search` parameter is structured as a pseudo
SQL WHERE clause, and supports basic SQL operators. For example:
:code:`"A=1 AND B=2 OR C=3"` (complex queries can be built using nesting
operators, :code:`(` and :code:`)`).

The search process is as follows:

* Zenko receives a :code:`GET` request.

  .. code::

      # regular getBucket request
      GET /bucketname HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

      # getBucket versions request
      GET /bucketname?versions HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

      # search getBucket request
      GET /bucketname?search=key%3Dsearch-item HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

* If the request does *not* contain the :code:`search` query parameter, Zenko performs
  a normal bucket listing and returns an XML result containing the list of
  objects.
* If the request *does* contain the :code:`search` query parameter, Zenko parses and
  validates the search string.

  - If the search string is invalid, Zenko returns an :code:`InvalidArgument` error.

    .. code::

        <?xml version="1.0" encoding="UTF-8"?>
        <Error>
            <Code>InvalidArgument</Code>
            <Message>Invalid sql where clause sent as search query</Message>
            <Resource></Resource>
            <RequestId>d1d6afc64345a8e1198e</RequestId>
        </Error>

  - If the search string is valid, Zenko parses it and generates an abstract
    syntax tree (AST). The AST is then passed to the MongoDB backend to be
    used as the query filter for retrieving objects from a bucket that
    satisfy the requested search conditions. Zenko parses the filtered
    results and returns them as the response.

Metadata search results have the same structure as a :code:`GET Bucket` response:

.. code:: xml

    <?xml version="1.0" encoding="UTF-8"?>
    <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <Name>bucketname</Name>
        <Prefix/>
        <Marker/>
        <MaxKeys>1000</MaxKeys>
        <IsTruncated>false</IsTruncated>
        <Contents>
            <Key>objectKey</Key>
            <LastModified>2018-04-19T18:31:49.426Z</LastModified>
            <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
            <Size>0</Size>
            <Owner>
                <ID>79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be</ID>
                <DisplayName>Bart</DisplayName>
            </Owner>
            <StorageClass>STANDARD</StorageClass>
        </Contents>
        <Contents>
            ...
        </Contents>
    </ListBucketResult>

Performing Metadata Searches with Zenko
---------------------------------------

You can perform metadata searches by:

+ Using the :code:`search_bucket` tool in the
  `Scality/S3 <https://github.com/scality/S3>`_ GitHub repository.
+ Creating a signed HTTP request to Zenko in your preferred programming
  language.

Using the S3 Tool
+++++++++++++++++

After cloning the `Scality/S3 <https://github.com/scality/S3>`_ GitHub repository
and installing the necessary dependencies, run the following command in the S3
project’s root directory to access the search tool:

.. code::

    node bin/search_bucket

This generates the following output:

.. code::

    Usage: search_bucket [options]

    Options:

      -V, --version                 output the version number
      -a, --access-key <accessKey>  Access key id
      -k, --secret-key <secretKey>  Secret access key
      -b, --bucket <bucket>         Name of the bucket
      -q, --query <query>           Search query
      -h, --host <host>             Host of the server
      -p, --port <port>             Port of the server
      -s --ssl
      -v, --verbose
      -h, --help                    output usage information

In the following examples, Zenko Server is accessible on endpoint
:code:`http://127.0.0.1:8000` and contains the bucket :code:`zenkobucket`.

.. code::

    # search for objects with metadata "blue"
    node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
    -q "x-amz-meta-color=blue" -h 127.0.0.1 -p 8000

    # search for objects tagged with "type=color"
    node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
    -q "tags.type=color" -h 127.0.0.1 -p 8000

Coding Examples
+++++++++++++++

Search requests can also be performed by making HTTP requests authenticated
with one of the AWS Signature schemes: version 2 or version 4.
For more about the authentication schemes, see:

* https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html

You can also view examples for making requests with Auth V4 in various
languages `here <../../../examples>`__.

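As a quick sketch (our addition, not part of the original examples),
recent curl releases (7.75 and later) can compute the V4 signature
themselves, which is enough for a one-off search request against the
endpoint above:

.. code:: sh

    # sign with SigV4 using the default CloudServer credentials
    $> curl --aws-sigv4 "aws:amz:us-east-1:s3" \
       --user "accessKey1:verySecretKey1" \
       "http://127.0.0.1:8000/zenkobucket?search=x-amz-meta-color%3Dblue"
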
Specifying Metadata Fields
~~~~~~~~~~~~~~~~~~~~~~~~~~

To search system metadata headers:

.. code::

    {system-metadata-key}{supported SQL op}{search value}

    # example
    key = blueObject
    size > 0
    key LIKE "blue.*"

To search custom user metadata:

.. code::

    # metadata must be prefixed with "x-amz-meta-"
    x-amz-meta-{user-metadata-key}{supported SQL op}{search value}

    # example
    x-amz-meta-color = blue
    x-amz-meta-color != red
    x-amz-meta-color LIKE "b.*"

To search tags:

.. code::

    # tag searches must be prefixed with "tags."
    tags.{tag-key}{supported SQL op}{search value}

    # example
    tags.type = color

Example queries:

.. code::

    # searching for objects with custom metadata "color"="blue" that are tagged
    # "type"="color"

    tags.type="color" AND x-amz-meta-color="blue"

    # searching for objects with the object key containing the substring "blue"
    # or (custom metadata "color"="blue" and tagged "type"="color")

    key LIKE '.*blue.*' OR (x-amz-meta-color="blue" AND tags.type="color")

Differences from SQL
++++++++++++++++++++

Zenko metadata search queries are similar to SQL-query :code:`WHERE` clauses, but
differ in that:

* They follow the :code:`PCRE` format
* They do not require values with hyphens to be enclosed in
  backticks (:code:`` ` ``)

  .. code::

      # SQL query
      `x-amz-meta-search-item` = `ice-cream-cone`

      # MD Search query
      x-amz-meta-search-item = ice-cream-cone

* Search queries do not support all SQL operators.

  .. code::

      # Supported SQL operators:
      =, <, >, <=, >=, !=, AND, OR, LIKE, <>

      # Unsupported SQL operators:
      NOT, BETWEEN, IN, IS, +, -, %, ^, /, *, !

Using Regular Expressions in Metadata Search
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Regular expressions in Zenko metadata search differ from SQL in the following
ways:

+ Wildcards are represented with :code:`.*` instead of :code:`%`.
+ Regex patterns must be wrapped in quotes. Failure to do this can lead to
  misinterpretation of patterns.
+ As with :code:`PCRE`, regular expressions can be entered in either the
  :code:`/pattern/` syntax or as the pattern itself if regex options are
  not required.

Example regular expressions:

.. code::

    # search for strings containing word substring "helloworld"
    ".*helloworld.*"
    "/.*helloworld.*/"
    "/.*helloworld.*/i"

# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SPHINXPROJ    = Zenko
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

# Cloudserver Release Plan

## Docker Image Generation

Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:

* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudserver-policies

With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
build chain and so on.

Tagged versions of cloudserver will be stored in the production namespace.

## How to Pull Docker Images

```sh
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```

## Release Process

To release a production image:

* Create a PR to bump the package version

  Update Cloudserver's `package.json` by bumping it to the relevant next
  version in a new PR. For example, if the last released version was
  `8.4.7`, the next version would be `8.4.8`.

  ```js
  {
      "name": "cloudserver",
      "version": "8.4.8", <--- Here
      [...]
  }
  ```

* Review & merge the PR

* Create the release on GitHub

  * Go to the Release tab (https://github.com/scality/cloudserver/releases);
  * Click on the `Draft new release` button;
  * In the `tag` field, type the name of the release (`8.4.8`), and confirm
    to create the tag on publish;
  * Click on the `Generate release notes` button to fill the fields;
  * Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case);
  * Click `Publish the release` to create the GitHub release and git tag.

  Notes:

  * the Git tag will be created automatically.
  * this should be done as soon as the PR is merged, so that the tag
    is put on the "version bump" commit.

* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force)

  * Branch Name: The one used for the tag earlier. In this example `development/8.4`
  * Override Stage: 'release'
  * Extra properties:
    * name: `'tag'`, value: `[release version]`, in this example `'8.4.8'`

* Release the version on Jira

  * Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
  * Create a next version
    * Name: `[next version]`, in this example `8.4.9`
  * Click `...` and select `Release` on the recently released version (`8.4.8`)
  * Fill in the field to move incomplete versions to the next one

.. _use-public-cloud:

Using Public Clouds as data backends
====================================

Introduction
------------

As stated in our `GETTING STARTED guide <GETTING_STARTED.html#location-configuration>`__,
new data backends can be added by creating a region (also called location
constraint) with the right endpoint and credentials.
This section of the documentation shows you how to set up our currently
supported public cloud backends.

    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
    -v ~/.aws/credentials:/root/.aws/credentials \
    -e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
    -d scality/cloudserver

Testing: put an object to AWS S3 using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

this region should behave like any other AWS S3 region (in the case of MS Azure
hosted data, this is mostly relevant for the format of errors);

- :code:`azureStorageEndpoint` : set to your storage account's endpoint, usually
  :code:`https://{{storageAccountName}}.blob.core.windows.net`;
- :code:`azureContainerName` : set to an *existing container* in your MS Azure
  storage account; this is the container in which your data will be stored for
  this location constraint;

    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    -v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
    -e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
    -d scality/cloudserver

Testing: put an object to MS Azure using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@@ -1,79 +0,0 @@
============================================
Add New Backend Storage To Zenko CloudServer
============================================

This set of documents aims at bootstrapping developers with Zenko's CloudServer
module, so they can then go on and contribute features.

.. toctree::
   :maxdepth: 2

   non-s3-compatible-backend
   s3-compatible-backend

We always encourage our community to offer new extensions to Zenko,
and new backend support is paramount to meeting more community needs.
If that is something you want to contribute (or just do on your own
version of the cloudserver image), this is the guide to read. Please
make sure you follow our `Contributing Guidelines`_.

If you need help with anything, please search our `forum`_ for more
information.

Add support for a new backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Currently the main public cloud protocols are supported and more can
be added. There are two main types of backend: those compatible with
Amazon's S3 protocol and those not compatible.

================= ========== ============ ===========
Backend type      Supported  Active WIP   Not started
================= ========== ============ ===========
Private disk/fs   x
AWS S3            x
Microsoft Azure   x
Backblaze B2                 x
Google Cloud      x
Openstack Swift              x
================= ========== ============ ===========

.. important:: Should you want to request support for a new backend,
   please do so by opening a `Github issue`_, and filling out the
   "Feature Request" section of our template.

To add support for a new backend to the official CloudServer
repository, please follow these steps:

- familiarize yourself with our `Contributing Guidelines`_;
- open a `Github issue`_ and fill out the Feature Request form, and
  specify you would like to contribute it yourself;
- wait for our core team to get back to you with an answer on whether
  we are interested in taking that contribution in (and hence
  committing to maintaining it over time);
- once approved, fork the repository and start your development;
- use the `forum`_ with any question you may have during the
  development process;
- when you think it's ready, let us know so that we create a feature
  branch against which we'll compare and review your code;
- open a pull request with your changes against that dedicated feature
  branch;
- once that pull request gets merged, you're done.

.. tip::

   While we do take care of the final rebase (when we merge your feature
   branch on the latest default branch), we do ask that you keep up to date
   with our latest default branch until then.

.. important::

   If we do not approve your feature request, you may of course still
   work on supporting a new backend: all our "no" means is that we do not
   have the resources, as part of our core development team, to maintain
   this feature for the moment.

.. _GitHub issue: https://github.com/scality/S3/issues
.. _Contributing Guidelines: https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md
.. _forum: https://forum.zenko.io
@@ -1,53 +0,0 @@
=================
Add A New Backend
=================

Supporting all possible public cloud storage APIs is CloudServer's
ultimate goal. As an open source project, contributions are welcome.

The first step is to get familiar with building a custom Docker image
for CloudServer.

Build a Custom Docker Image
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Clone Zenko's CloudServer, install all dependencies and start the
service:

.. code-block:: shell

   $ git clone https://github.com/scality/cloudserver
   $ cd cloudserver
   $ yarn install
   $ yarn start

.. tip::

   Some optional dependencies may fail, resulting in you seeing `yarn
   WARN` messages; these can safely be ignored. Refer to the User
   documentation for all available options.

Build the Docker image:

.. code-block:: shell

   # docker build . -t {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}

Push the newly created Docker image to your own hub:

.. code-block:: shell

   # docker push {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}

.. note::

   To perform this last operation, you need to be authenticated with DockerHub.

There are two main types of backend you could want Zenko to support:

== link:S3_COMPATIBLE_BACKENDS.adoc[S3 compatible data backends]

== link:NON_S3_COMPATIBLE_BACKENDS.adoc[Data backends using another protocol than the S3 protocol]
@@ -1,530 +0,0 @@
==========================================================
Adding support for data backends not supporting the S3 API
==========================================================

These backends abstract the complexity of multiple APIs to let users
work on a single common namespace across multiple clouds.

This document aims at introducing you to the right files in
CloudServer (the Zenko stack's subcomponent in charge of API
translation, among other things) to add support to your own backend of
choice.

General configuration
~~~~~~~~~~~~~~~~~~~~~

There are a number of constants and environment variables to define to support a
new data backend; here is a list and where to find them:

:file:`/constants.js`
---------------------

* give your backend type a name, as part of the `externalBackends` object;
* specify whether versioning is implemented, as part of the
  `versioningNotImplemented` object;
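For instance, using the hypothetical `ztore` type name from the pseudocode
further down, the two constants could be extended as follows (a sketch, not
the exact shape of the file):

.. code-block:: js

    // /constants.js (sketch): register the new backend type...
    externalBackends: { aws_s3: true, azure: true, gcp: true, ztore: true },
    // ...and declare that it does not implement versioning (if that is the case).
    versioningNotImplemented: { azure: true, ztore: true },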
:file:`/lib/Config.js`
----------------------

* this is where you should put common utility functions, like the ones to parse
  the location object from `locationConfig.json`;
* make sure you define environment variables (like `GCP_SERVICE_EMAIL`), as we'll
  use those internally for the CI to test against the real remote backend;
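As a sketch of that pattern (the `ZTORE_*` variable names are made up for
illustration), a credentials getter could let the CI override the values
parsed from `locationConfig.json`:

.. code-block:: js

    // /lib/Config.js (sketch): a per-location credentials getter that lets
    // the CI inject real secrets through environment variables, falling
    // back to the values parsed from locationConfig.json.
    getZtoreCredentials(location) {
        const { details } = this.locationConstraints[location];
        return {
            accessKey: process.env.ZTORE_ACCESS_KEY || details.credentials.accessKey,
            secretKey: process.env.ZTORE_SECRET_KEY || details.credentials.secretKey,
        };
    }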
:file:`/lib/data/external/{{backendName}}Client.js`
---------------------------------------------------

* this file is where you'll instantiate your backend client; this should be a
  class with a constructor taking the config object built in `/lib/Config.js` as
  parameter;
* over time, you may need some utility functions which we've defined in the
  folder `/api/apiUtils`, and in the file `/lib/data/external/utils`;
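A minimal sketch of such a class, assuming a hypothetical `ztore-sdk` vendor
module and the constructor object built by `parseLC()` below:

.. code-block:: js

    // /lib/data/external/ZtoreClient.js (sketch)
    const ZtoreSDK = require('ztore-sdk'); // hypothetical vendor SDK

    class ZtoreClient {
        constructor(config) {
            this._ztoreEndpoint = config.ztoreEndpoint;
            this._ztoreBucketName = config.ztoreBucketname;
            this._bucketMatch = config.bucketMatch;
            this._dataStoreName = config.dataStoreName;
            // The SDK client that all data operations will go through.
            this._client = new ZtoreSDK({
                endpoint: config.ztoreEndpoint,
                credentials: config.ztoreCredentials,
            });
        }
    }

    module.exports = ZtoreClient;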
:file:`/lib/data/external/utils.js`
-----------------------------------

* make sure to add options for `sourceLocationConstraintType` to be equal to
  the name you gave your backend in :file:`/constants.js`;

:file:`/lib/data/external/{{BackendName}}_lib/`
-----------------------------------------------

* this folder is where you'll put the functions needed for supporting your
  backend; keep your files as atomic as possible;

:file:`/tests/locationConfig/locationConfigTests.json`
------------------------------------------------------

* this file is where you'll create location profiles to be used by your
  functional tests;
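For illustration, a test profile for the hypothetical `ztore` backend could
look like this (a sketch; the `details` fields simply mirror what the
pseudocode below reads):

::

    "ztoretest": {
        "type": "ztore",
        "legacyAwsBehavior": false,
        "details": {
            "ztoreEndpoint": "https://ztore.example.com",
            "ztoreBucketName": "zenko-test-bucket",
            "bucketMatch": true,
            "credentials": {
                "accessKey": "{YOUR_ZTORE_ACCESS_KEY}",
                "secretKey": "{YOUR_ZTORE_SECRET_KEY}"
            }
        }
    },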
:file:`/lib/data/locationConstraintParser.js`
---------------------------------------------

* this is where you'll instantiate your client if the operation the end user
  sent effectively writes to your backend; everything happens inside the
  function `parseLC()`; you should add a condition that executes if
  `locationObj.type` is the name of your backend (that you defined in
  `constants.js`), and instantiates a client of yours. See pseudocode below,
  assuming location type name is `ztore`:

.. code-block:: js
    :linenos:
    :emphasize-lines: 10

    (...) //<1>
    const ZtoreClient = require('./external/ZtoreClient');
    const { config } = require('../Config'); //<1>

    function parseLC() { //<1>
        (...) //<1>
        Object.keys(config.locationConstraints).forEach(location => { //<1>
            const locationObj = config.locationConstraints[location]; //<1>
            (...) //<1>
            if (locationObj.type === 'ztore') {
                const ztoreEndpoint = config.getZtoreEndpoint(location);
                const ztoreCredentials = config.getZtoreCredentials(location); //<2>
                clients[location] = new ZtoreClient({
                    ztoreEndpoint,
                    ztoreCredentials,
                    ztoreBucketname: locationObj.details.ztoreBucketName,
                    bucketMatch: locationObj.details.bucketMatch,
                    dataStoreName: location,
                }); //<3>
                clients[location].clientType = 'ztore';
            }
            (...) //<1>
        });
    }

1. Code that is already there
2. You may need more utility functions depending on your backend specs
3. You may have more fields required in your constructor object depending on
   your backend specs

Operation of type PUT
~~~~~~~~~~~~~~~~~~~~~

PUT routes are usually where people get started, as it's the easiest to check!
Simply go on your remote backend console and you'll be able to see whether your
object actually went up in the cloud...

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `put()` function is also called
  `put()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `put(stream, size, keyContext, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends:

  //TODO: generate this from jsdoc

- `stream`: the stream of data you want to put in the cloud; if you're
  unfamiliar with node.js streams, we suggest you start training, as we use
  them a lot!

- `size`: the size of the object you're trying to put;

- `keyContext`: an object with metadata about the operation; common entries are
  `namespace`, `bucketName`, `owner`, `cipherBundle`, and `tagging`; if these
  are not sufficient for your integration, contact us to get architecture
  validation before adding new entries;

- `reqUids`: the request unique ID used for logging;

- `callback`: your function's callback (should handle errors);
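To make that signature concrete, here is a minimal sketch of a `put()` for the
hypothetical `ztore` backend; the `putObject()` call and its options are
assumptions about your SDK, and `createLogger` and Arsenal's `errors` are
assumed to be imported at the top of the file as in the existing external
clients:

.. code-block:: js

    // Sketch only: ZtoreClient.put()
    put(stream, size, keyContext, reqUids, callback) {
        const log = createLogger(reqUids);
        // Derive the remote key; real implementations usually honour the
        // location's `bucketMatch` setting when building this name.
        const key = `${keyContext.bucketName}/${keyContext.objectKey}`;
        return this._client.putObject({
            bucket: this._ztoreBucketName,
            key,
            body: stream,
            contentLength: size,
        }, err => {
            if (err) {
                log.error('error from ztore PUT', { error: err });
                return callback(errors.ServiceUnavailable);
            }
            // Return the data-store key so object metadata can point at it.
            return callback(null, key);
        });
    }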
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your PUT operation, and
  then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/put/put{{BackendName}}.js`
-------------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made-up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests.

Operation of type GET
~~~~~~~~~~~~~~~~~~~~~

GET routes are easy to test after PUT routes are implemented, hence why we're
covering them second.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `get()` function is also called
  `get()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `get(objectGetInfo, range, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends:

  //TODO: generate this from jsdoc

- `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
  data store, and `client`, the data store name;

- `range`: the range of bytes you will get, for "get-by-range" operations (we
  recommend you do simple GETs first, and then look at this);

- `reqUids`: the request unique ID used for logging;

- `callback`: your function's callback (should handle errors);
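A matching sketch of `get()` (again, `getObjectStream()` is an assumed SDK
call, not a real API):

.. code-block:: js

    // Sketch only: ZtoreClient.get()
    get(objectGetInfo, range, reqUids, callback) {
        const log = createLogger(reqUids);
        const { key } = objectGetInfo;
        // `getObjectStream()` is assumed to return a readable stream; the
        // byte range is passed through for get-by-range requests.
        return this._client.getObjectStream({
            bucket: this._ztoreBucketName,
            key,
            range,
        }, (err, stream) => {
            if (err) {
                log.error('error from ztore GET', { error: err });
                return callback(errors.ServiceUnavailable);
            }
            // Hand the readable stream back so CloudServer can pipe it to
            // the S3 response.
            return callback(null, stream);
        });
    }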
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your GET operation, and
  then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}.js`
-------------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

.. note:: You should not need this section if you have followed the tutorial
   in order (that is, if you have covered the PUT operation already)

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made-up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests.

Operation of type DELETE
~~~~~~~~~~~~~~~~~~~~~~~~

DELETE routes are easy to test after PUT routes are implemented, and they are
similar to GET routes in our implementation, hence why we're covering them
third.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `delete()` function is also called
  `delete()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `delete(objectGetInfo, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends:

  //TODO: generate this from jsdoc

* `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
  data store, and `client`, the data store name;
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
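A sketch of `delete()`; the `deleteObject()` call and its `NoSuchKey` error
code are assumptions about your SDK:

.. code-block:: js

    // Sketch only: ZtoreClient.delete()
    delete(objectGetInfo, reqUids, callback) {
        const log = createLogger(reqUids);
        const { key } = objectGetInfo;
        return this._client.deleteObject({
            bucket: this._ztoreBucketName,
            key,
        }, err => {
            // Deletes are idempotent: a key that is already gone is not
            // treated as a hard failure.
            if (err && err.code !== 'NoSuchKey') {
                log.error('error from ztore DELETE', { error: err });
                return callback(errors.ServiceUnavailable);
            }
            return callback();
        });
    }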
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your DELETE operation,
  and then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/delete/delete{{BackendName}}.js`
-------------------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

.. note:: You should not need this section if you have followed the
   tutorial in order (that is, if you have covered the PUT operation
   already)

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made-up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests.

Operation of type HEAD
~~~~~~~~~~~~~~~~~~~~~~

HEAD routes are very similar to DELETE routes in our implementation, hence why
we're covering them fourth.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `head()` function is also called
  `head()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `head(objectGetInfo, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends:

  // TODO: generate this from jsdoc

* `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
  data store, and `client`, the data store name;
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
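And a sketch of `head()`, where `headObject()` is an assumed SDK method
returning metadata without the payload:

.. code-block:: js

    // Sketch only: ZtoreClient.head()
    head(objectGetInfo, reqUids, callback) {
        const log = createLogger(reqUids);
        const { key } = objectGetInfo;
        // Metadata-only lookup: enough to answer an S3 HEAD request.
        return this._client.headObject({
            bucket: this._ztoreBucketName,
            key,
        }, (err, metadata) => {
            if (err) {
                log.error('error from ztore HEAD', { error: err });
                return callback(errors.ServiceUnavailable);
            }
            return callback(null, metadata);
        });
    }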
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your HEAD operation,
  and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}.js`
-------------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

.. note:: You should not need this section if you have followed the tutorial
   in order (that is, if you have covered the PUT operation already)

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made-up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests.

Healthcheck
~~~~~~~~~~~

Healthchecks are used to make sure failure to write to a remote cloud is due to
a problem on that remote cloud, and not on Zenko's side.
This is usually done by trying to create a bucket that already exists, and
making sure you get the expected answer.
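As a sketch of that pattern, assuming a hypothetical `createBucket()` SDK call
and the `ztore` naming used throughout this guide:

.. code-block:: js

    // Sketch only: probe one `ztore` location by re-creating its bucket and
    // treating "bucket already owned by you" as the healthy answer.
    function ztoreHealthcheck(location, client, callback) {
        client._client.createBucket({ bucket: client._ztoreBucketName }, err => {
            const resp = {};
            if (!err || err.code === 'BucketAlreadyOwnedByYou') {
                // Expected answer: the remote cloud is reachable and writable.
                resp[location] = { code: 200, message: 'OK' };
            } else {
                resp[location] = { code: err.statusCode || 500, error: err.message };
            }
            return callback(null, resp);
        });
    }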
These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `healthcheck()` function is called
  `checkExternalBackend()` and it's defined in
  :file:`/lib/data/multipleBackendGateway.js`; you will need to add your own;
- your healthcheck function should get `location` as a parameter, which is an
  object comprising:

  * `reqUids`: the request unique ID used for logging;
  * `callback`: your function's callback (should handle errors);

:file:`/lib/data/external/{{backendName}}_lib/{{backendName}}_create_bucket.js`
-------------------------------------------------------------------------------

- this is where you should write the function performing the actual bucket
  creation;

:file:`/lib/data/external/{{backendName}}_lib/utils.js`
-------------------------------------------------------

- add an object named per your backend's name to the `backendHealth` dictionary,
  with proper `response` and `time` entries;

:file:`lib/data/multipleBackendGateway.js`
------------------------------------------

- edit the `healthcheck` function to add your location's array, and call your
  healthcheck; see pseudocode below for a sample implementation, provided your
  backend name is `ztore`:

.. code-block:: js
    :linenos:

    (...) //<1>

    healthcheck: (flightCheckOnStartUp, log, callback) => { //<1>
        (...) //<1>
        const ztoreArray = []; //<2>
        async.each(Object.keys(clients), (location, cb) => { //<1>
            (...) //<1>
            } else if (client.clientType === 'ztore') {
                ztoreArray.push(location); //<3>
                return cb();
            }
            (...) //<1>
            multBackendResp[location] = { code: 200, message: 'OK' }; //<1>
            return cb();
        }, () => { //<1>
            async.parallel([
                (...) //<1>
                next => checkExternalBackend( //<4>
                    clients, ztoreArray, 'ztore', flightCheckOnStartUp,
                    externalBackendHealthCheckInterval, next),
            ] (...) //<1>
            });
        (...) //<1>
    });
    }

1. Code that is already there
2. The array that will store all locations of type 'ztore'
3. Where you add locations of type 'ztore' to the array
4. Where you actually call the healthcheck function on all 'ztore' locations

Multipart upload (MPU)
~~~~~~~~~~~~~~~~~~~~~~

This is the final part to supporting a new backend! MPU is far from
the easiest subject, but you've come so far it shouldn't be a problem.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

You'll be creating four functions with template signatures (a sketch of one of
them follows this list):

- `createMPU(Key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
  cacheControl, contentDisposition, contentEncoding, log, callback)` will
  initiate the multipart upload process; now, here, all parameters are
  metadata headers except for:

  * `Key`, the key id for the final object (collection of all parts);
  * `bucketName`, the name of the bucket to which we will do an MPU;
  * `log`, the logger;

- `uploadPart(request, streamingV4Params, stream, size, key, uploadId, partNumber, bucketName, log, callback)`
  will be called for each part; the parameters can be explained as follows:

  * `request`, the request object for putting the part;
  * `streamingV4Params`, parameters for auth V4 parameters against S3;
  * `stream`, the node.js readable stream used to put the part;
  * `size`, the size of the part;
  * `key`, the key of the object;
  * `uploadId`, multipart upload id string;
  * `partNumber`, the number of the part in this MPU (ordered);
  * `bucketName`, the name of the bucket to which we will do an MPU;
  * `log`, the logger;

- `completeMPU(jsonList, mdInfo, key, uploadId, bucketName, log, callback)` will
  end the MPU process once all parts are uploaded; parameters can be explained
  as follows:

  * `jsonList`, user-sent list of parts to include in final MPU object;
  * `mdInfo`, object containing 3 keys: storedParts, mpuOverviewKey, and
    splitter;
  * `key`, the key of the object;
  * `uploadId`, multipart upload id string;
  * `bucketName`, name of bucket;
  * `log`, logger instance;

- `abortMPU(key, uploadId, bucketName, log, callback)` will handle errors, and
  make sure that all parts that may have been uploaded will be deleted if the
  MPU ultimately fails; the parameters are:

  * `key`, the key of the object;
  * `uploadId`, multipart upload id string;
  * `bucketName`, name of bucket;
  * `log`, logger instance.
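Here is a minimal sketch of `abortMPU()`, where `abortMultipartUpload()` is an
assumed SDK method:

.. code-block:: js

    // Sketch only: ZtoreClient.abortMPU()
    abortMPU(key, uploadId, bucketName, log, callback) {
        // The point of abortMPU is to guarantee that parts already uploaded
        // are cleaned up when the MPU fails, so no orphaned parts remain on
        // the remote cloud.
        return this._client.abortMultipartUpload({
            bucket: this._ztoreBucketName,
            key,
            uploadId,
        }, err => {
            if (err) {
                log.error('error from ztore abortMPU', { error: err });
                return callback(errors.ServiceUnavailable);
            }
            return callback();
        });
    }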
:file:`/lib/api/objectPutPart.js`
---------------------------------

- you'll need to add your backend type in appropriate sections (simply look for
  other backends already implemented).

:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your MPU operations,
  and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`lib/data/multipleBackendGateway.js`
------------------------------------------

- edit the `createMPU` function to add your location type, and call your
  `createMPU()`; see pseudocode below for a sample implementation, provided your
  backend name is `ztore`:

.. code-block:: javascript
    :linenos:

    (...) //<1>
    createMPU: (key, metaHeaders, bucketName, websiteRedirectHeader, //<1>
        location, contentType, cacheControl, contentDisposition,
        contentEncoding, log, cb) => {
        const client = clients[location]; //<1>
        if (client.clientType === 'aws_s3') { //<1>
            return client.createMPU(key, metaHeaders, bucketName,
                websiteRedirectHeader, contentType, cacheControl,
                contentDisposition, contentEncoding, log, cb);
        } else if (client.clientType === 'ztore') { //<2>
            return client.createMPU(key, metaHeaders, bucketName,
                websiteRedirectHeader, contentType, cacheControl,
                contentDisposition, contentEncoding, log, cb);
        }
        return cb();
    };
    (...) //<1>

1. Code that is already there
2. Where the `createMPU()` of your client is actually called

Add functional tests
~~~~~~~~~~~~~~~~~~~~

* :file:`tests/functional/aws-node-sdk/test/multipleBackend/initMPU/{{BackendName}}InitMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/listParts/{{BackendName}}ListPart.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/{{BackendName}}AbortMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/{{BackendName}}CompleteMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/{{BackendName}}UploadPart.js`

Adding support in Orbit, Zenko's UI for simplified Multi Cloud Management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This can only be done by our core developers' team. Once your backend
integration is merged, you may open a feature request on the
`Zenko repository`_, and we will get back to you after we evaluate
feasibility and maintainability.

.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new
@@ -1,43 +0,0 @@
======================
S3-Compatible Backends
======================

Adding Support in CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is the easiest case for backend support integration: there is nothing to do
but configuration! Follow the steps described in our
:ref:`use-public-cloud` and make sure you:

- set ``details.awsEndpoint`` to your storage provider endpoint;

- use ``details.credentials`` and *not* ``details.credentialsProfile`` to set your
  credentials for that S3-compatible backend.

For example, if you're using a Wasabi bucket as a backend, then your region
definition for that backend will look something like:

::

    "wasabi-bucket-zenkobucket": {
        "type": "aws_s3",
        "legacyAwsBehavior": true,
        "details": {
            "awsEndpoint": "s3.wasabisys.com",
            "bucketName": "zenkobucket",
            "bucketMatch": true,
            "credentials": {
                "accessKey": "\\{YOUR_WASABI_ACCESS_KEY}",
                "secretKey": "\\{YOUR_WASABI_SECRET_KEY}"
            }
        }
    },

Adding Support in Zenko Orbit
#############################

This can only be done by our core developers' team. If that's what you're
after, open a feature request on the `Zenko repository`_, and we will
get back to you after we evaluate feasibility and maintainability.

.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new
@@ -1,12 +1,11 @@
 Scality Zenko CloudServer
-=========================
+==================

 .. _user-docs:

 .. toctree::
    :maxdepth: 2
    :caption: Documentation
-   :glob:

    CONTRIBUTING
    GETTING_STARTED

@@ -15,4 +14,3 @@ Scality Zenko CloudServer
    DOCKER
    INTEGRATIONS
    ARCHITECTURE
-   developers/*
@@ -1,2 +0,0 @@
Sphinx >= 1.7.5
recommonmark >= 0.4.0
@@ -1,119 +0,0 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
# tox -e pip-compile
#
alabaster==0.7.12 \
    --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
    --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 \
    # via sphinx
babel==2.6.0 \
    --hash=sha256:6778d85147d5d85345c14a26aada5e478ab04e39b078b0745ee6870c2b5cf669 \
    --hash=sha256:8cba50f48c529ca3fa18cf81fa9403be176d374ac4d60738b839122dfaaa3d23 \
    # via sphinx
certifi==2018.10.15 \
    --hash=sha256:339dc09518b07e2fa7eda5450740925974815557727d6bd35d319c1524a04a4c \
    --hash=sha256:6d58c986d22b038c8c0df30d639f23a3e6d172a05c3583e766f4c0b785c0986a \
    # via requests
chardet==3.0.4 \
    --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
    --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
    # via requests
commonmark==0.5.4 \
    --hash=sha256:34d73ec8085923c023930dfc0bcd1c4286e28a2a82de094bb72fabcc0281cbe5 \
    # via recommonmark
docutils==0.14 \
    --hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
    --hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
    --hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 \
    # via recommonmark, sphinx
idna==2.7 \
    --hash=sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e \
    --hash=sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16 \
    # via requests
imagesize==1.1.0 \
    --hash=sha256:3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8 \
    --hash=sha256:f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5 \
    # via sphinx
jinja2==2.10 \
    --hash=sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd \
    --hash=sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4 \
    # via sphinx
markupsafe==1.1.0 \
    --hash=sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432 \
    --hash=sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b \
    --hash=sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9 \
    --hash=sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af \
    --hash=sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834 \
    --hash=sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd \
    --hash=sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d \
    --hash=sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7 \
    --hash=sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b \
    --hash=sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3 \
    --hash=sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c \
    --hash=sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2 \
    --hash=sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7 \
    --hash=sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36 \
    --hash=sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1 \
    --hash=sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e \
    --hash=sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1 \
    --hash=sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c \
    --hash=sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856 \
    --hash=sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550 \
    --hash=sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492 \
    --hash=sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672 \
    --hash=sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401 \
    --hash=sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6 \
    --hash=sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6 \
    --hash=sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c \
    --hash=sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd \
    --hash=sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1 \
    # via jinja2
packaging==18.0 \
    --hash=sha256:0886227f54515e592aaa2e5a553332c73962917f2831f1b0f9b9f4380a4b9807 \
    --hash=sha256:f95a1e147590f204328170981833854229bb2912ac3d5f89e2a8ccd2834800c9 \
    # via sphinx
pygments==2.2.0 \
    --hash=sha256:78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d \
    --hash=sha256:dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc \
    # via sphinx
pyparsing==2.3.0 \
    --hash=sha256:40856e74d4987de5d01761a22d1621ae1c7f8774585acae358aa5c5936c6c90b \
    --hash=sha256:f353aab21fd474459d97b709e527b5571314ee5f067441dc9f88e33eecd96592 \
    # via packaging
pytz==2018.7 \
    --hash=sha256:31cb35c89bd7d333cd32c5f278fca91b523b0834369e757f4c5641ea252236ca \
    --hash=sha256:8e0f8568c118d3077b46be7d654cc8167fa916092e28320cde048e54bfc9f1e6 \
    # via babel
recommonmark==0.4.0 \
    --hash=sha256:6e29c723abcf5533842376d87c4589e62923ecb6002a8e059eb608345ddaff9d \
    --hash=sha256:cd8bf902e469dae94d00367a8197fb7b81fcabc9cfb79d520e0d22d0fbeaa8b7
requests==2.20.1 \
    --hash=sha256:65b3a120e4329e33c9889db89c80976c5272f56ea92d3e74da8a463992e3ff54 \
    --hash=sha256:ea881206e59f41dbd0bd445437d792e43906703fff75ca8ff43ccdb11f33f263 \
    # via sphinx
six==1.11.0 \
    --hash=sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9 \
    --hash=sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb \
    # via packaging, sphinx
snowballstemmer==1.2.1 \
    --hash=sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128 \
    --hash=sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89 \
    # via sphinx
sphinx==1.8.2 \
    --hash=sha256:120732cbddb1b2364471c3d9f8bfd4b0c5b550862f99a65736c77f970b142aea \
    --hash=sha256:b348790776490894e0424101af9c8413f2a86831524bd55c5f379d3e3e12ca64
sphinxcontrib-websupport==1.1.0 \
    --hash=sha256:68ca7ff70785cbe1e7bccc71a48b5b6d965d79ca50629606c7861a21b206d9dd \
    --hash=sha256:9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9 \
    # via sphinx
typing==3.6.6 \
    --hash=sha256:4027c5f6127a6267a435201981ba156de91ad0d1d98e9ddc2aa173453453492d \
    --hash=sha256:57dcf675a99b74d64dacf6fba08fb17cf7e3d5fdff53d4a30ea2a5e7e52543d4 \
    --hash=sha256:a4c8473ce11a65999c8f59cb093e70686b6c84c98df58c1dae9b3b196089858a \
    # via sphinx
urllib3==1.24.1 \
    --hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
    --hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
    # via requests
@@ -1,46 +0,0 @@
package main

import (
    "fmt"
    "time"
    "bytes"
    "net/http"
    "net/url"
    "io/ioutil"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
    // Input AWS access key, secret key
    aws_access_key_id := "accessKey1"
    aws_secret_access_key := "verySecretKey1"
    endpoint := "http://localhost:8000"
    bucket_name := "bucketname"
    searchQuery := url.QueryEscape("x-amz-meta-color=blue")
    buf := bytes.NewBuffer([]byte{})

    requestUrl := fmt.Sprintf("%s/%s?search=%s",
        endpoint, bucket_name, searchQuery)

    request, err := http.NewRequest("GET", requestUrl, buf)
    if err != nil {
        panic(err)
    }
    reader := bytes.NewReader(buf.Bytes())
    credentials := credentials.NewStaticCredentials(aws_access_key_id,
        aws_secret_access_key, "")
    signer := v4.NewSigner(credentials)
    signer.Sign(request, reader, "s3", "us-east-1", time.Now())
    client := &http.Client{}
    resp, err := client.Do(request)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(body))
}
@@ -1,28 +0,0 @@
const { S3 } = require('aws-sdk');
const config = {
    sslEnabled: false,
    endpoint: 'http://127.0.0.1:8000',
    signatureCache: false,
    signatureVersion: 'v4',
    region: 'us-east-1',
    s3ForcePathStyle: true,
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
};
const s3Client = new S3(config);

const encodedSearch =
    encodeURIComponent('x-amz-meta-color="blue"');
const req = s3Client.listObjects({ Bucket: 'bucketname' });

// the build event
req.on('build', () => {
    req.httpRequest.path = `${req.httpRequest.path}?search=${encodedSearch}`;
});
req.on('success', res => {
    process.stdout.write(`Result ${res.data}`);
});
req.on('error', err => {
    process.stdout.write(`Error ${err}`);
});
req.send();
@@ -1,79 +0,0 @@
import datetime
import hashlib
import hmac
import urllib
# pip install requests
import requests

access_key = 'accessKey1'
secret_key = 'verySecretKey1'

method = 'GET'
service = 's3'
host = 'localhost:8000'
region = 'us-east-1'
canonical_uri = '/bucketname'
query = 'x-amz-meta-color=blue'
canonical_querystring = 'search=%s' % (urllib.quote(query))
algorithm = 'AWS4-HMAC-SHA256'

t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')

# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python


def sign(key, msg):
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()


def getSignatureKey(key, date_stamp, regionName, serviceName):
    kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
    kRegion = sign(kDate, regionName)
    kService = sign(kRegion, serviceName)
    kSigning = sign(kService, 'aws4_request')
    return kSigning


payload_hash = hashlib.sha256('').hexdigest()

canonical_headers = \
    'host:{0}\nx-amz-content-sha256:{1}\nx-amz-date:{2}\n' \
    .format(host, payload_hash, amz_date)

signed_headers = 'host;x-amz-content-sha256;x-amz-date'

canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
    .format(method, canonical_uri, canonical_querystring, canonical_headers,
            signed_headers, payload_hash)
print(canonical_request)

credential_scope = '{0}/{1}/{2}/aws4_request' \
    .format(date_stamp, region, service)

string_to_sign = '{0}\n{1}\n{2}\n{3}' \
    .format(algorithm, amz_date, credential_scope,
            hashlib.sha256(canonical_request).hexdigest())

signing_key = getSignatureKey(secret_key, date_stamp, region, service)

signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
                     hashlib.sha256).hexdigest()

authorization_header = \
    '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
    .format(algorithm, access_key, credential_scope, signed_headers, signature)

# The 'host' header is added automatically by the Python 'requests' library.
headers = {
    'X-Amz-Content-Sha256': payload_hash,
    'X-Amz-Date': amz_date,
    'Authorization': authorization_header
}

endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring

r = requests.get(endpoint, headers=headers)
print(r.text)
@@ -1,4 +1,4 @@
-FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
+FROM registry.scality.com/federation/nodesvc-base:7.10.6.0

 ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
 ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json

@@ -14,10 +14,8 @@ RUN rm -f ~/.gitconfig && \
     git config --global --add safe.directory . && \
     git lfs install && \
     GIT_LFS_SKIP_SMUDGE=1 && \
-    yarn global add typescript && \
     yarn install --frozen-lockfile --production --network-concurrency 1 && \
-    yarn cache clean --all && \
-    yarn global remove typescript
+    yarn cache clean --all

 # run symlinking separately to avoid yarn installation errors
 # we might have to check if the symlinking is really needed!
index.js (12 changed lines)
@@ -1,10 +1,10 @@
 'use strict'; // eslint-disable-line strict

-require('werelogs').stderrUtils.catchAndTimestampStderr(
-    undefined,
-    // Do not exit as workers have their own listener that will exit
-    // But primary don't have another listener
-    require('cluster').isPrimary ? 1 : null,
-);
+/**
+ * Catch uncaught exceptions and add timestamp to aid debugging
+ */
+process.on('uncaughtException', err => {
+    process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
+});

 require('./lib/server.js')();
lib/Config.js (1102 changed lines): file diff suppressed because it is too large.
@@ -7,8 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
 const bucketDeleteWebsite = require('./bucketDeleteWebsite');
 const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
 const bucketDeletePolicy = require('./bucketDeletePolicy');
-const bucketDeleteQuota = require('./bucketDeleteQuota');
-const { bucketGet } = require('./bucketGet');
+const bucketGet = require('./bucketGet');
 const bucketGetACL = require('./bucketGetACL');
 const bucketGetCors = require('./bucketGetCors');
 const bucketGetVersioning = require('./bucketGetVersioning');

@@ -18,7 +17,6 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
 const bucketGetNotification = require('./bucketGetNotification');
 const bucketGetObjectLock = require('./bucketGetObjectLock');
 const bucketGetPolicy = require('./bucketGetPolicy');
-const bucketGetQuota = require('./bucketGetQuota');
 const bucketGetEncryption = require('./bucketGetEncryption');
 const bucketHead = require('./bucketHead');
 const { bucketPut } = require('./bucketPut');

@@ -35,7 +33,6 @@ const bucketPutNotification = require('./bucketPutNotification');
 const bucketPutEncryption = require('./bucketPutEncryption');
 const bucketPutPolicy = require('./bucketPutPolicy');
 const bucketPutObjectLock = require('./bucketPutObjectLock');
-const bucketUpdateQuota = require('./bucketUpdateQuota');
 const bucketGetReplication = require('./bucketGetReplication');
 const bucketDeleteReplication = require('./bucketDeleteReplication');
 const corsPreflight = require('./corsPreflight');

@@ -43,11 +40,10 @@ const completeMultipartUpload = require('./completeMultipartUpload');
 const initiateMultipartUpload = require('./initiateMultipartUpload');
 const listMultipartUploads = require('./listMultipartUploads');
 const listParts = require('./listParts');
-const metadataSearch = require('./metadataSearch');
 const { multiObjectDelete } = require('./multiObjectDelete');
 const multipartDelete = require('./multipartDelete');
 const objectCopy = require('./objectCopy');
-const { objectDelete } = require('./objectDelete');
+const objectDelete = require('./objectDelete');
 const objectDeleteTagging = require('./objectDeleteTagging');
 const objectGet = require('./objectGet');
 const objectGetACL = require('./objectGetACL');

@@ -62,17 +58,16 @@ const objectPutTagging = require('./objectPutTagging');
 const objectPutPart = require('./objectPutPart');
 const objectPutCopyPart = require('./objectPutCopyPart');
 const objectPutRetention = require('./objectPutRetention');
-const objectRestore = require('./objectRestore');
 const prepareRequestContexts
     = require('./apiUtils/authorization/prepareRequestContexts');
 const serviceGet = require('./serviceGet');
 const vault = require('../auth/vault');
-const website = require('./website');
+const websiteGet = require('./websiteGet');
+const websiteHead = require('./websiteHead');
 const writeContinue = require('../utilities/writeContinue');
 const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
 const parseCopySource = require('./apiUtils/object/parseCopySource');
 const { tagConditionKeyAuth } = require('./apiUtils/authorization/tagConditionKeys');
-const { isRequesterASessionUser } = require('./apiUtils/authorization/permissionChecks');
 const checkHttpHeadersSize = require('./apiUtils/object/checkHttpHeadersSize');

 const monitoringMap = policies.actionMaps.actionMonitoringMapS3;

@@ -85,10 +80,6 @@ const api = {
         // Attach the apiMethod method to the request, so it can used by monitoring in the server
         // eslint-disable-next-line no-param-reassign
         request.apiMethod = apiMethod;
-        // Array of end of API callbacks, used to perform some logic
-        // at the end of an API.
-        // eslint-disable-next-line no-param-reassign
-        request.finalizerHooks = [];

         const actionLog = monitoringMap[apiMethod];
         if (!actionLog &&

@@ -197,27 +188,21 @@ const api = {
         return async.waterfall([
             next => auth.server.doAuth(
-                request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
+                request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
                     if (err) {
-                        // VaultClient returns standard errors, but the route requires
-                        // Arsenal errors
-                        const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
                         log.trace('authentication error', { error: err });
-                        return next(arsenalError);
+                        return next(err);
                     }
-                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
+                    return next(null, userInfo, authorizationResults, streamingV4Params);
                 }, 's3', requestContexts),
-            (userInfo, authorizationResults, streamingV4Params, infos, next) => {
+            (userInfo, authorizationResults, streamingV4Params, next) => {
                 const authNames = { accountName: userInfo.getAccountDisplayName() };
                 if (userInfo.isRequesterAnIAMUser()) {
                     authNames.userName = userInfo.getIAMdisplayName();
                 }
-                if (isRequesterASessionUser(userInfo)) {
-                    authNames.sessionName = userInfo.getShortid().split(':')[1];
-                }
                 log.addDefaultFields(authNames);
                 if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
-                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
+                    return next(null, userInfo, authorizationResults, streamingV4Params);
}
|
}
|
||||||
// issue 100 Continue to the client
|
// issue 100 Continue to the client
|
||||||
writeContinue(request, response);
|
writeContinue(request, response);
|
||||||
|
@ -248,12 +233,12 @@ const api = {
|
||||||
}
|
}
|
||||||
// Convert array of post buffers into one string
|
// Convert array of post buffers into one string
|
||||||
request.post = Buffer.concat(post, postLength).toString();
|
request.post = Buffer.concat(post, postLength).toString();
|
||||||
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
|
return next(null, userInfo, authorizationResults, streamingV4Params);
|
||||||
});
|
});
|
||||||
return undefined;
|
return undefined;
|
||||||
},
|
},
|
||||||
// Tag condition keys require information from CloudServer for evaluation
|
// Tag condition keys require information from CloudServer for evaluation
|
||||||
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
|
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
|
||||||
authorizationResults,
|
authorizationResults,
|
||||||
request,
|
request,
|
||||||
requestContexts,
|
requestContexts,
|
||||||
|
@ -264,14 +249,13 @@ const api = {
|
||||||
log.trace('tag authentication error', { error: err });
|
log.trace('tag authentication error', { error: err });
|
||||||
return next(err);
|
return next(err);
|
||||||
}
|
}
|
||||||
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
|
return next(null, userInfo, authResultsWithTags, streamingV4Params);
|
||||||
},
|
},
|
||||||
),
|
),
|
||||||
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
|
], (err, userInfo, authorizationResults, streamingV4Params) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return callback(err);
|
return callback(err);
|
||||||
}
|
}
|
||||||
request.accountQuotas = infos?.accountQuota;
|
|
||||||
if (authorizationResults) {
|
if (authorizationResults) {
|
||||||
const checkedResults = checkAuthResults(authorizationResults);
|
const checkedResults = checkAuthResults(authorizationResults);
|
||||||
if (checkedResults instanceof Error) {
|
if (checkedResults instanceof Error) {
|
||||||
|
@ -288,23 +272,19 @@ const api = {
|
||||||
return acc;
|
return acc;
|
||||||
}, {});
|
}, {});
|
||||||
}
|
}
|
||||||
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
|
|
||||||
(hook, done) => hook(err, done),
|
|
||||||
() => callback(err, ...results));
|
|
||||||
|
|
||||||
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
|
||||||
request._response = response;
|
request._response = response;
|
||||||
return this[apiMethod](userInfo, request, streamingV4Params,
|
return this[apiMethod](userInfo, request, streamingV4Params,
|
||||||
log, methodCallback, authorizationResults);
|
log, callback, authorizationResults);
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
|
||||||
return this[apiMethod](userInfo, request, sourceBucket,
|
return this[apiMethod](userInfo, request, sourceBucket,
|
||||||
sourceObject, sourceVersionId, log, methodCallback);
|
sourceObject, sourceVersionId, log, callback);
|
||||||
}
|
}
|
||||||
if (apiMethod === 'objectGet') {
|
if (apiMethod === 'objectGet') {
|
||||||
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
|
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
|
||||||
}
|
}
|
||||||
return this[apiMethod](userInfo, request, log, methodCallback);
|
return this[apiMethod](userInfo, request, log, callback);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
bucketDelete,
|
bucketDelete,
|
||||||
|
@ -325,21 +305,18 @@ const api = {
|
||||||
bucketPutCors,
|
bucketPutCors,
|
||||||
bucketPutVersioning,
|
bucketPutVersioning,
|
||||||
bucketPutTagging,
|
bucketPutTagging,
|
||||||
bucketDeleteTagging,
|
|
||||||
bucketGetTagging,
|
bucketGetTagging,
|
||||||
bucketPutWebsite,
|
bucketPutWebsite,
|
||||||
bucketPutReplication,
|
bucketPutReplication,
|
||||||
bucketGetReplication,
|
bucketGetReplication,
|
||||||
bucketDeleteReplication,
|
bucketDeleteReplication,
|
||||||
bucketDeleteQuota,
|
|
||||||
bucketPutLifecycle,
|
bucketPutLifecycle,
|
||||||
bucketUpdateQuota,
|
|
||||||
bucketGetLifecycle,
|
bucketGetLifecycle,
|
||||||
bucketDeleteLifecycle,
|
bucketDeleteLifecycle,
|
||||||
bucketPutPolicy,
|
bucketPutPolicy,
|
||||||
bucketGetPolicy,
|
bucketGetPolicy,
|
||||||
bucketGetQuota,
|
|
||||||
bucketDeletePolicy,
|
bucketDeletePolicy,
|
||||||
|
bucketDeleteTagging,
|
||||||
bucketPutObjectLock,
|
bucketPutObjectLock,
|
||||||
bucketPutNotification,
|
bucketPutNotification,
|
||||||
bucketGetNotification,
|
bucketGetNotification,
|
||||||
|
@ -349,7 +326,6 @@ const api = {
|
||||||
initiateMultipartUpload,
|
initiateMultipartUpload,
|
||||||
listMultipartUploads,
|
listMultipartUploads,
|
||||||
listParts,
|
listParts,
|
||||||
metadataSearch,
|
|
||||||
multiObjectDelete,
|
multiObjectDelete,
|
||||||
multipartDelete,
|
multipartDelete,
|
||||||
objectDelete,
|
objectDelete,
|
||||||
|
@ -368,10 +344,9 @@ const api = {
|
||||||
objectPutPart,
|
objectPutPart,
|
||||||
objectPutCopyPart,
|
objectPutCopyPart,
|
||||||
objectPutRetention,
|
objectPutRetention,
|
||||||
objectRestore,
|
|
||||||
serviceGet,
|
serviceGet,
|
||||||
websiteGet: website,
|
websiteGet,
|
||||||
websiteHead: website,
|
websiteHead,
|
||||||
};
|
};
|
||||||
|
|
||||||
module.exports = api;
|
module.exports = api;
|
||||||
|
|
|
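The removed side of the hunks above wires a per-request finalizer mechanism: `request.finalizerHooks` collects end-of-API callbacks, and `methodCallback` drains them before handing the result to the route callback. A minimal sketch of that pattern, using the `async` library as the diff does (everything outside the `async.forEachLimit` call shape is illustrative):

```js
const async = require('async');

// Wrap a route callback so registered finalizer hooks run first.
// Up to 5 hooks run in parallel; each sees the API error, and the
// original error/results are passed through to `callback` unchanged.
function makeMethodCallback(request, callback) {
    return (err, ...results) => async.forEachLimit(
        request.finalizerHooks, 5,
        (hook, done) => hook(err, done),
        () => callback(err, ...results),
    );
}

// Usage: an API handler registers cleanup work to run when it ends.
const request = { finalizerHooks: [] };
request.finalizerHooks.push((err, done) => {
    console.log('api finished, error:', err ? err.message : null);
    done();
});
const methodCallback = makeMethodCallback(request, (err, res) => {
    console.log('route callback sees:', err, res);
});
methodCallback(null, 'ok'); // hooks run first, then the route callback
```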
@@ -1,43 +1,16 @@
 const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
-const { errors } = require('arsenal');
-const { parseCIDR, isValid } = require('ipaddr.js');
 const constants = require('../../../../constants');
 const { config } = require('../../../Config');

 const {
-    allAuthedUsersId,
-    bucketOwnerActions,
-    logId,
-    publicId,
-    arrayOfAllowed,
-    assumedRoleArnResourceType,
-    backbeatLifecycleSessionName,
-    actionsToConsiderAsObjectPut,
+    allAuthedUsersId, bucketOwnerActions, logId, publicId,
+    assumedRoleArnResourceType, backbeatLifecycleSessionName, arrayOfAllowed,
 } = constants;

 // whitelist buckets to allow public read on objects
 const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
     ? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];

-function getServiceAccountProperties(canonicalID) {
-    const canonicalIDArray = canonicalID.split('/');
-    const serviceName = canonicalIDArray[canonicalIDArray.length - 1];
-    return constants.serviceAccountProperties[serviceName];
-}
-
-function isServiceAccount(canonicalID) {
-    return getServiceAccountProperties(canonicalID) !== undefined;
-}
-
-function isRequesterASessionUser(authInfo) {
-    const regexpAssumedRoleArn = /^arn:aws:sts::[0-9]{12}:assumed-role\/.*$/;
-    return regexpAssumedRoleArn.test(authInfo.getArn());
-}
-
-function isRequesterNonAccountUser(authInfo) {
-    return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
-}
-
 /**
  * Checks the access control for a given bucket based on the request type and user's canonical ID.
  *
@@ -51,21 +24,17 @@ function isRequesterNonAccountUser(authInfo) {

 function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
     // Same logic applies on the Versioned APIs, so let's simplify it.
-    let requestTypeParsed = requestType.endsWith('Version') ?
+    const requestTypeParsed = requestType.endsWith('Version') ?
         requestType.slice(0, 'Version'.length * -1) : requestType;
-    requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
-        'objectPut' : requestTypeParsed;
-    const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
-        'objectPut' : mainApiCall;
     if (bucket.getOwner() === canonicalID) {
         return true;
     }
-    if (parsedMainApiCall === 'objectGet') {
+    if (mainApiCall === 'objectGet') {
         if (requestTypeParsed === 'objectGetTagging') {
             return true;
         }
     }
-    if (parsedMainApiCall === 'objectPut') {
+    if (mainApiCall === 'objectPut') {
         if (arrayOfAllowed.includes(requestTypeParsed)) {
             return true;
         }
@@ -141,18 +110,14 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
     // authorization check should just return true so can move on to check
     // rights at the object level.
     return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
         || requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
 }

 function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
     isUserUnauthenticated, mainApiCall) {
     const bucketOwner = bucket.getOwner();
-    const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
-        'objectPut' : requestType;
-    const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
-        'objectPut' : mainApiCall;
     // acls don't distinguish between users and accounts, so both should be allowed
-    if (bucketOwnerActions.includes(requestTypeParsed)
+    if (bucketOwnerActions.includes(requestType)
         && (bucketOwner === canonicalID)) {
         return true;
     }
@@ -161,9 +126,9 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
     }

     // Backward compatibility
-    if (parsedMainApiCall === 'objectGet') {
+    if (mainApiCall === 'objectGet') {
         if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
-            && requestTypeParsed === 'objectGetTagging') {
+            && requestType === 'objectGetTagging') {
             return true;
         }
     }
@@ -172,7 +137,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
         return false;
     }

-    if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
+    if (requestType === 'objectGet' || requestType === 'objectHead') {
         if (objectMD.acl.Canned === 'public-read'
             || objectMD.acl.Canned === 'public-read-write'
             || (objectMD.acl.Canned === 'authenticated-read'
@@ -198,11 +163,11 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs

     // User is already authorized on the bucket for FULL_CONTROL or WRITE or
     // bucket has canned ACL public-read-write
-    if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
+    if (requestType === 'objectPut' || requestType === 'objectDelete') {
         return true;
     }

-    if (requestTypeParsed === 'objectPutACL') {
+    if (requestType === 'objectPutACL') {
         if ((objectMD.acl.Canned === 'bucket-owner-full-control'
             && bucketOwner === canonicalID)
             || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@@ -218,7 +183,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
         }
     }

-    if (requestTypeParsed === 'objectGetACL') {
+    if (requestType === 'objectGetACL') {
         if ((objectMD.acl.Canned === 'bucket-owner-full-control'
             && bucketOwner === canonicalID)
             || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@@ -239,7 +204,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
     const bucketAcl = bucket.getAcl();
     const allowPublicReads = publicReadBuckets.includes(bucket.getName())
         && bucketAcl.Canned === 'public-read'
-        && (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
+        && (requestType === 'objectGet' || requestType === 'objectHead');
     if (allowPublicReads) {
         return true;
     }
@@ -373,7 +338,7 @@ function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner,
 }

 function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
-    actionImplicitDeniesInput = {}, isWebsite = false) {
+    actionImplicitDeniesInput = {}) {
     const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
     const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
     const mainApiCall = requestTypes[0];
@@ -388,24 +353,15 @@ function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, lo
     let requesterIsNotUser = true;
     let arn = null;
     if (authInfo) {
-        requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
+        requesterIsNotUser = !authInfo.isRequesterAnIAMUser();
         arn = authInfo.getArn();
     }
     // if the bucket owner is an account, users should not have default access
-    if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
+    if ((bucket.getOwner() === canonicalID) && requesterIsNotUser) {
         results[_requestType] = actionImplicitDenies[_requestType] === false;
         return results[_requestType];
     }
     const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
-    // In case of error bucket access is checked with bucketGet
-    // For website, bucket policy only uses objectGet and ignores bucketGet
-    // https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
-    // bucketGet should be used to check acl but switched to objectGet for bucket policy
-    if (isWebsite && _requestType === 'bucketGet') {
-        // eslint-disable-next-line no-param-reassign
-        _requestType = 'objectGet';
-        actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
-    }
     return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
         request, aclPermission, results, actionImplicitDenies);
     });
@@ -425,12 +381,12 @@ function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, aut
         arn = authInfo.getArn();
     }
     return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
         request, true, results, actionImplicitDenies);
     });
 }

 function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
-    actionImplicitDeniesInput = {}, isWebsite = false) {
+    actionImplicitDeniesInput = {}) {
     const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
     const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
     const results = {};
@@ -443,31 +399,27 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI
         ? _requestType.slice(0, -7) : _requestType;
     const bucketOwner = bucket.getOwner();
     if (!objectMD) {
-        // check bucket has read access
-        // 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
-        let permission = 'bucketGet';
-        if (actionsToConsiderAsObjectPut.includes(_requestType)) {
-            permission = 'objectPut';
-        }
-        results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
-            actionImplicitDenies, isWebsite);
         // User is already authorized on the bucket for FULL_CONTROL or WRITE or
         // bucket has canned ACL public-read-write
-        if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
-            && results[_requestType] === false) {
+        if (parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete') {
             results[_requestType] = actionImplicitDenies[_requestType] === false;
+            return results[_requestType];
         }
+        // check bucket has read access
+        // 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
+        results[_requestType] = isBucketAuthorized(bucket, 'bucketGet', canonicalID, authInfo, log, request,
+            actionImplicitDenies);
         return results[_requestType];
     }
     let requesterIsNotUser = true;
     let arn = null;
     let isUserUnauthenticated = false;
     if (authInfo) {
-        requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
+        requesterIsNotUser = !authInfo.isRequesterAnIAMUser();
         arn = authInfo.getArn();
         isUserUnauthenticated = arn === undefined;
     }
-    if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
+    if (objectMD['owner-id'] === canonicalID && requesterIsNotUser) {
         results[_requestType] = actionImplicitDenies[_requestType] === false;
         return results[_requestType];
     }
@@ -476,8 +428,8 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI
     // - account is the bucket owner
     // - requester is account, not user
     if (bucketOwnerActions.includes(parsedMethodName)
         && (bucketOwner === canonicalID)
         && requesterIsNotUser) {
         results[_requestType] = actionImplicitDenies[_requestType] === false;
         return results[_requestType];
     }
@@ -515,46 +467,11 @@ function validatePolicyResource(bucketName, policy) {
 }

 function checkIp(value) {
-    const errString = 'Invalid IP address in Conditions';
-
-    const values = Array.isArray(value) ? value : [value];
-
-    for (let i = 0; i < values.length; i++) {
-        // these preliminary checks are validating the provided
-        // ip address against ipaddr.js, the library we use when
-        // evaluating IP condition keys. It ensures compatibility,
-        // but additional checks are required to enforce the right
-        // notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
-        // we would accept different ip formats, which is not
-        // standard in an AWS use case.
-        try {
-            try {
-                parseCIDR(values[i]);
-            } catch (err) {
-                isValid(values[i]);
-            }
-        } catch (err) {
-            return errString;
-        }
-
-        // Apply the existing IP validation logic to each element
-        const validateIpRegex = ip => {
-            if (constants.ipv4Regex.test(ip)) {
-                return ip.split('.').every(part => parseInt(part, 10) <= 255);
-            }
-            if (constants.ipv6Regex.test(ip)) {
-                return ip.split(':').every(part => part.length <= 4);
-            }
-            return false;
-        };
-
-        if (validateIpRegex(values[i]) !== true) {
-            return errString;
-        }
-    }
-
-    // If the function hasn't returned by now, all elements are valid
-    return null;
+    const isValid = /^(\d{1,3}\.){3}\d{1,3}(\/\d{1,2})?$/.test(value);
+    if (isValid) {
+        return null;
+    }
+    return 'Invalid IP address in Conditions';
 }

 // This function checks all bucket policy conditions if the values provided
@@ -567,33 +484,29 @@ function validatePolicyConditions(policy) {
     // keys where value type does not seem to be checked by AWS:
     // - s3:object-lock-remaining-retention-days

-    if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
-        return null;
-    }
-
     // there can be multiple statements in the policy, each with a Condition enclosure
     for (let i = 0; i < policy.Statement.length; i++) {
         const s = policy.Statement[i];
         if (s.Condition) {
             const conditionOperators = Object.keys(s.Condition);
             // there can be multiple condition operations in the Condition enclosure
-            // eslint-disable-next-line no-restricted-syntax
-            for (const conditionOperator of conditionOperators) {
+            for (let j = 0; j < conditionOperators.length; j++) {
+                const conditionOperator = conditionOperators[j];
                 const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
                 const conditionValue = s.Condition[conditionOperator][conditionKey];
                 const validCondition = validConditions.find(validCondition =>
                     validCondition.conditionKey === conditionKey
                 );
-                // AWS returns does not return an error if the condition starts with 'aws:'
-                // so we reproduce this behaviour
+                // this is the seen behaviour on AWS... don't ask me why
                 if (!validCondition && !conditionKey.startsWith('aws:')) {
-                    return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
+                    return 'Policy has an invalid condition key';
                 }
+                let conditionValueTypeError;
                 if (validCondition && validCondition.conditionValueTypeChecker) {
-                    const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
-                    if (conditionValueTypeError) {
-                        return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
-                    }
+                    conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
+                }
+                if (conditionValueTypeError) {
+                    return conditionValueTypeError;
                 }
             }
         }
     }
@@ -628,10 +541,6 @@ function isLifecycleSession(arn) {
 module.exports = {
     isBucketAuthorized,
     isObjAuthorized,
-    getServiceAccountProperties,
-    isServiceAccount,
-    isRequesterASessionUser,
-    isRequesterNonAccountUser,
     checkBucketAcls,
     checkObjectAcls,
     validatePolicyResource,
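One hunk above worth pausing on is `checkIp`: the removed side validates each condition value through ipaddr.js (`parseCIDR`, with `isValid` as fallback) plus extra notation checks, while the kept side reduces everything to a single IPv4/CIDR regex. A rough sketch of the difference, with the regex taken from the diff and the ipaddr.js path simplified:

```js
const { parseCIDR, isValid } = require('ipaddr.js');

// Kept side: dotted-quad IPv4, optionally with a /nn suffix.
// Note the regex does not range-check octets.
const ipv4OrCidr = /^(\d{1,3}\.){3}\d{1,3}(\/\d{1,2})?$/;

// Removed side, simplified: defer to ipaddr.js, which range-checks
// octets and also understands IPv6 and CIDR notation.
function checkWithIpaddr(value) {
    try {
        parseCIDR(value); // accepts 'a.b.c.d/nn'
    } catch (err) {
        if (!isValid(value)) { // accepts plain addresses
            return 'Invalid IP address in Conditions';
        }
    }
    return null;
}

console.log(ipv4OrCidr.test('10.0.0.0/8')); // true
console.log(ipv4OrCidr.test('999.1.1.1'));  // true -- the regex is laxer
console.log(checkWithIpaddr('999.1.1.1'));  // 'Invalid IP address in Conditions'
```

Also note that the removed side accepted an array of values (`Array.isArray(value) ? value : [value]`), which the regex version no longer handles.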
@@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
             apiMethod, 's3');
     }

-    if (apiMethod === 'bucketPut') {
+    if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
         return null;
     }

@@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,

     const requestContexts = [];

-    if (apiMethod === 'multiObjectDelete') {
-        // MultiObjectDelete does not require any authorization when evaluating
-        // the API. Instead, we authorize each object passed.
-        // But in order to get any relevant information from the authorization service
-        // for example, the account quota, we must send a request context object
-        // with no `specificResource`. We expect the result to be an implicit deny.
-        // In the API, we then ignore these authorization results, and we can use
-        // any information returned, e.g., the quota.
-        const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
-        requestContexts.push(requestContextMultiObjectDelete);
-    } else if (apiMethodAfterVersionCheck === 'objectCopy'
+    if (apiMethodAfterVersionCheck === 'objectCopy'
         || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
         const objectGetAction = sourceVersionId ? 'objectGetVersion' :
             'objectGet';
@@ -157,59 +147,20 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
             generateRequestContext('objectGet');
         requestContexts.push(getObjectRequestContext);
     } else if (apiMethodAfterVersionCheck === 'objectPut') {
-        // if put object with version
-        if (request.headers['x-scal-s3-version-id'] ||
-            request.headers['x-scal-s3-version-id'] === '') {
-            const putVersionRequestContext =
-                generateRequestContext('objectPutVersion');
-            requestContexts.push(putVersionRequestContext);
-        } else {
-            const putRequestContext =
-                generateRequestContext(apiMethodAfterVersionCheck);
-            requestContexts.push(putRequestContext);
-            // if put object (versioning) with tag set
-            if (request.headers['x-amz-tagging']) {
-                const putTaggingRequestContext =
-                    generateRequestContext('objectPutTagging');
-                requestContexts.push(putTaggingRequestContext);
-            }
-            if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) {
-                const putLegalHoldStatusAction =
-                    generateRequestContext('objectPutLegalHold');
-                requestContexts.push(putLegalHoldStatusAction);
-            }
-            // if put object (versioning) with ACL
-            if (isHeaderAcl(request.headers)) {
-                const putAclRequestContext =
-                    generateRequestContext('objectPutACL');
-                requestContexts.push(putAclRequestContext);
-            }
-            if (request.headers['x-amz-object-lock-mode']) {
-                const putObjectLockRequestContext =
-                    generateRequestContext('objectPutRetention');
-                requestContexts.push(putObjectLockRequestContext);
-            }
-            if (request.headers['x-amz-version-id']) {
-                const putObjectVersionRequestContext =
-                    generateRequestContext('objectPutTaggingVersion');
-                requestContexts.push(putObjectVersionRequestContext);
-            }
-        }
+        const putRequestContext =
+            generateRequestContext(apiMethodAfterVersionCheck);
+        requestContexts.push(putRequestContext);
+        // if put object (versioning) with tag set
+        if (request.headers['x-amz-tagging']) {
+            const putTaggingRequestContext =
+                generateRequestContext('objectPutTagging');
+            requestContexts.push(putTaggingRequestContext);
         }
-    } else if (apiMethodAfterVersionCheck === 'initiateMultipartUpload' ||
-        apiMethodAfterVersionCheck === 'objectPutPart' ||
-        apiMethodAfterVersionCheck === 'completeMultipartUpload'
-    ) {
-        if (request.headers['x-scal-s3-version-id'] ||
-            request.headers['x-scal-s3-version-id'] === '') {
-            const putVersionRequestContext =
-                generateRequestContext('objectPutVersion');
-            requestContexts.push(putVersionRequestContext);
-        } else {
-            const putRequestContext =
-                generateRequestContext(apiMethodAfterVersionCheck);
-            requestContexts.push(putRequestContext);
-        }
+        if (['ON', 'OFF'].includes(request.headers['x-amz-object-lock-legal-hold-status'])) {
+            const putLegalHoldStatusAction =
+                generateRequestContext('objectPutLegalHold');
+            requestContexts.push(putLegalHoldStatusAction);
+        }

         // if put object (versioning) with ACL
         if (isHeaderAcl(request.headers)) {
             const putAclRequestContext =
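The `objectPut` hunk above shows how the kept branch assembles authorization contexts: one base context for the put, plus one extra context per recognised header. A condensed sketch of that accumulation, with `generateRequestContext` stubbed since its definition sits outside this diff:

```js
// Stub standing in for the real generateRequestContext helper.
const generateRequestContext = apiMethod => ({ apiMethod });

// One PUT request can require several policy evaluations: the base
// objectPut, plus one per optional feature carried in the headers.
function contextsForObjectPut(headers) {
    const requestContexts = [generateRequestContext('objectPut')];
    if (headers['x-amz-tagging']) {
        requestContexts.push(generateRequestContext('objectPutTagging'));
    }
    if (['ON', 'OFF'].includes(headers['x-amz-object-lock-legal-hold-status'])) {
        requestContexts.push(generateRequestContext('objectPutLegalHold'));
    }
    return requestContexts;
}

// A tagged PUT with a legal hold needs three authorizations:
console.log(contextsForObjectPut({
    'x-amz-tagging': 'color=blue',
    'x-amz-object-lock-legal-hold-status': 'ON',
}).map(rc => rc.apiMethod));
// [ 'objectPut', 'objectPutTagging', 'objectPutLegalHold' ]
```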
@@ -60,8 +60,6 @@ function updateRequestContextsWithTags(request, requestContexts, apiMethod, log,
             log.trace('error processing tag condition key evaluation');
             return cb(err);
         }
-        // FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter
-        // eslint-disable-next-line no-restricted-syntax
         for (const rc of requestContexts) {
             rc.setNeedTagEval(true);
             if (requestTagsQuery) {
@@ -10,15 +10,13 @@ const { parseBucketEncryptionHeaders } = require('./bucketEncryption');
 const metadata = require('../../../metadata/wrapper');
 const kms = require('../../../kms/wrapper');
 const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
-const { isServiceAccount } = require('../authorization/permissionChecks');

 const usersBucket = constants.usersBucket;
 const oldUsersBucket = constants.oldUsersBucket;
-const zenkoSeparator = constants.zenkoSeparator;
 const userBucketOwner = 'admin';


-function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
+function addToUsersBucket(canonicalID, bucketName, log, cb) {
     // BACKWARD: Simplify once do not have to deal with old
     // usersbucket name and old splitter

@@ -30,10 +28,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
         const splitter = usersBucketAttrs ?
             constants.splitter : constants.oldSplitter;
         let key = createKeyForUserBucket(canonicalID, splitter, bucketName);
-        const omVal = {
-            creationDate: new Date().toJSON(),
-            ingestion: bucketMD.getIngestion(),
-        };
+        const omVal = { creationDate: new Date().toJSON() };
         // If the new format usersbucket does not exist, try to put the
         // key in the old usersBucket using the old splitter.
         // Otherwise put the key in the new format usersBucket
@@ -41,7 +36,7 @@ function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
             usersBucket : oldUsersBucket;
         return metadata.putObjectMD(usersBucketBeingCalled, key,
             omVal, {}, log, err => {
-                if (err?.is?.NoSuchBucket) {
+                if (err && err.is.NoSuchBucket) {
                     // There must be no usersBucket so createBucket
                     // one using the new format
                     log.trace('users bucket does not exist, ' +
@@ -96,7 +91,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
             return callback(err);
         }
         log.trace('created bucket in metadata');
-        return addToUsersBucket(canonicalID, bucketName, bucket, log, err => {
+        return addToUsersBucket(canonicalID, bucketName, log, err => {
             if (err) {
                 return callback(err);
             }
@@ -119,7 +114,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
  */
 function cleanUpBucket(bucketMD, canonicalID, log, callback) {
     const bucketName = bucketMD.getName();
-    return addToUsersBucket(canonicalID, bucketName, bucketMD, log, err => {
+    return addToUsersBucket(canonicalID, bucketName, log, err => {
         if (err) {
             return callback(err);
         }
@@ -170,28 +165,15 @@ function createBucket(authInfo, bucketName, headers,
     const ownerDisplayName =
         authInfo.getAccountDisplayName();
     const creationDate = new Date().toJSON();
-    const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true';
     const headerObjectLock = headers['x-amz-bucket-object-lock-enabled'];
     const objectLockEnabled
         = headerObjectLock && headerObjectLock.toLowerCase() === 'true';
     const bucket = new BucketInfo(bucketName, canonicalID, ownerDisplayName,
         creationDate, BucketInfo.currentModelVersion(), null, null, null, null,
-        null, null, null, null, null, null, null, null, null, isNFSEnabled,
-        null, null, objectLockEnabled);
-    let locationConstraintVal = null;
+        null, null, null, null, null, null, null, null, objectLockEnabled);

-    if (locationConstraint) {
-        const [locationConstraintStr, ingestion] =
-            locationConstraint.split(zenkoSeparator);
-        if (locationConstraintStr) {
-            locationConstraintVal = locationConstraintStr;
-            bucket.setLocationConstraint(locationConstraintStr);
-        }
-        if (ingestion === 'ingest') {
-            bucket.enableIngestion();
-            //automatically enable versioning for ingestion buckets
-            bucket.setVersioningConfiguration({ Status: 'Enabled' });
-        }
+    if (locationConstraint !== undefined) {
+        bucket.setLocationConstraint(locationConstraint);
     }
     if (objectLockEnabled) {
         // default versioning configuration AWS sets
@@ -241,8 +223,7 @@ function createBucket(authInfo, bucketName, headers,
     }
     const existingBucketMD = results.getAnyExistingBucketInfo;
     if (existingBucketMD instanceof BucketInfo &&
-        existingBucketMD.getOwner() !== canonicalID &&
-        !isServiceAccount(canonicalID)) {
+        existingBucketMD.getOwner() !== canonicalID) {
         // return existingBucketMD to collect cors headers
         return cb(errors.BucketAlreadyExists, existingBucketMD);
     }
@@ -273,7 +254,7 @@ function createBucket(authInfo, bucketName, headers,
     // error unless old AWS behavior (us-east-1)
     // Existing locationConstraint must have legacyAwsBehavior === true
     // New locationConstraint should have legacyAwsBehavior === true
-    if (isLegacyAWSBehavior(locationConstraintVal) &&
+    if (isLegacyAWSBehavior(locationConstraint) &&
         isLegacyAWSBehavior(existingBucketMD.getLocationConstraint())) {
         log.trace('returning 200 instead of 409 to mirror us-east-1');
         return cb(null, existingBucketMD);
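The removed `createBucket` block splits the location constraint on `zenkoSeparator` so that a suffix can opt the bucket into ingestion mode, which also forces versioning on. A sketch of that parsing, assuming the separator is `':'` (the `ingest` literal is from the diff; the separator value itself is not shown in it):

```js
const zenkoSeparator = ':'; // assumption: value of constants.zenkoSeparator

// Sketch of the removed parsing: 'us-east-2:ingest' selects the
// location and flags the bucket for ingestion in one value.
function parseLocationConstraint(locationConstraint) {
    const [locationConstraintStr, ingestion] =
        locationConstraint.split(zenkoSeparator);
    return {
        location: locationConstraintStr || null,
        // ingestion buckets also get versioning enabled automatically
        ingest: ingestion === 'ingest',
    };
}

console.log(parseLocationConstraint('us-east-2'));
// { location: 'us-east-2', ingest: false }
console.log(parseLocationConstraint('us-east-2:ingest'));
// { location: 'us-east-2', ingest: true }
```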
@@ -93,7 +93,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
         log, (err, objectsListRes) => {
             // If no shadow bucket ever created, no ongoing MPU's, so
             // continue with deletion
-            if (err?.is.NoSuchBucket) {
+            if (err && err.is.NoSuchBucket) {
                 return next();
             }
             if (err) {
@@ -30,9 +30,6 @@ function bucketShield(bucket, requestType) {
     // Otherwise return an error to the client
     if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
         (requestType !== 'objectPut' &&
-            requestType !== 'initiateMultipartUpload' &&
-            requestType !== 'objectPutPart' &&
-            requestType !== 'completeMultipartUpload' &&
             requestType !== 'bucketPutACL' &&
             requestType !== 'bucketDelete')) {
         return true;
@@ -1,26 +0,0 @@
-const { errors } = require('arsenal');
-
-function checkPreferredLocations(location, locationConstraints, log) {
-    const retError = loc => {
-        const errMsg = 'value of the location you are attempting to set - ' +
-            `${loc} - is not listed in the locationConstraint config`;
-        log.trace(`locationConstraint is invalid - ${errMsg}`,
-            { locationConstraint: loc });
-        return errors.InvalidLocationConstraint.customizeDescription(errMsg);
-    };
-    if (typeof location === 'string' && !locationConstraints[location]) {
-        return retError(location);
-    }
-    if (typeof location === 'object') {
-        const { read, write } = location;
-        if (!locationConstraints[read]) {
-            return retError(read);
-        }
-        if (!locationConstraints[write]) {
-            return retError(write);
-        }
-    }
-    return null;
-}
-
-module.exports = checkPreferredLocations;
@@ -11,11 +11,11 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
     metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
         // If the object representing the bucket is not in the
         // users bucket just continue
-        if (error?.is.NoSuchKey) {
+        if (error && error.is.NoSuchKey) {
             return cb(null);
         // BACKWARDS COMPATIBILITY: Remove this once no longer
         // have old user bucket format
-        } else if (error?.is.NoSuchBucket) {
+        } else if (error && error.NoSuchBucket) {
             const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
                 oldSplitter, bucketName);
             return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
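Several hunks in this range trade `err?.is.NoSuchKey`-style guards for `err && err.is.NoSuchKey`. Those two agree for any error that actually carries Arsenal's `is` map, but note the `NoSuchBucket` branch above lands on `error && error.NoSuchBucket`, which reads the flag off the error itself rather than off `error.is`. A quick illustration with plain objects standing in for Arsenal errors:

```js
// Shape of an Arsenal-style error for this sketch.
const noSuchBucket = { is: { NoSuchBucket: true } };

// Optional chaining and the && guard agree on the normal path:
console.log(noSuchBucket?.is.NoSuchBucket);                // true
console.log(noSuchBucket && noSuchBucket.is.NoSuchBucket); // true

// But without `.is`, the flag is missing, so that branch never fires:
console.log(Boolean(noSuchBucket && noSuchBucket.NoSuchBucket)); // false
```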
@@ -15,21 +15,11 @@ function getNotificationConfiguration(parsedXml) {
     }
     const targets = new Set(config.bucketNotificationDestinations.map(t => t.resource));
     const notifConfigTargets = notifConfig.queueConfig.map(t => t.queueArn.split(':')[5]);
-    // getting invalid targets
-    const invalidTargets = [];
-    notifConfigTargets.forEach((t, i) => {
-        if (!targets.has(t)) {
-            invalidTargets.push({
-                ArgumentName: notifConfig.queueConfig[i].queueArn,
-                ArgumentValue: 'The destination queue does not exist',
-            });
-        }
-    });
-    if (invalidTargets.length > 0) {
-        const errDesc = 'Unable to validate the following destination configurations';
-        let error = errors.InvalidArgument.customizeDescription(errDesc);
-        error = error.addMetadataEntry('invalidArguments', invalidTargets);
-        return { error };
+    if (!notifConfigTargets.every(t => targets.has(t))) {
+        // TODO: match the error message to AWS's response along with
+        // the request destination name in the response
+        const errDesc = 'Unable to validate the destination configuration';
+        return { error: errors.InvalidArgument.customizeDescription(errDesc) };
     }
     return notifConfig;
 }
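On the removed side above, `getNotificationConfiguration` reports every unknown queue ARN at once by attaching an `invalidArguments` metadata entry to a single error, instead of the one generic message kept on the other side. A sketch of just the accumulation step (plain data, no Arsenal dependency):

```js
// Collect every destination whose resource is not configured.
function findInvalidTargets(queueConfig, targets) {
    const invalidTargets = [];
    queueConfig.forEach(qc => {
        // the resource name is the 6th ':'-separated ARN field
        if (!targets.has(qc.queueArn.split(':')[5])) {
            invalidTargets.push({
                ArgumentName: qc.queueArn,
                ArgumentValue: 'The destination queue does not exist',
            });
        }
    });
    return invalidTargets;
}

const targets = new Set(['queue1']);
console.log(findInvalidTargets([
    { queueArn: 'arn:scality:bucketnotif:::queue1' },
    { queueArn: 'arn:scality:bucketnotif:::queue2' },
], targets));
// [ { ArgumentName: 'arn:scality:bucketnotif:::queue2',
//     ArgumentValue: 'The destination queue does not exist' } ]
```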
@@ -1,19 +0,0 @@
-/**
- * parse LIKE expressions
- * @param {string} regex - regex pattern
- * @return {object} MongoDB search object
- */
-function parseLikeExpression(regex) {
-    if (typeof regex !== 'string') {
-        return null;
-    }
-    const split = regex.split('/');
-    if (split.length < 3 || split[0] !== '') {
-        return { $regex: regex };
-    }
-    const pattern = split.slice(1, split.length - 1).join('/');
-    const regexOpt = split[split.length - 1];
-    return { $regex: new RegExp(pattern), $options: regexOpt };
-}
-
-module.exports = parseLikeExpression;
@@ -1,85 +0,0 @@
-const parseLikeExpression = require('./parseLikeExpression');
-
-/*
-This code is based on code from https://github.com/olehch/sqltomongo
-with the following license:
-
-The MIT License (MIT)
-
-Copyright (c) 2016 Oleh
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-*/
-
-/**
- * A helper object to map SQL-like naming to MongoDB query syntax
- */
-const exprMapper = {
-    '=': '$eq',
-    '!=': '$ne',
-    '<>': '$ne',
-    '>': '$gt',
-    '<': '$lt',
-    '>=': '$gte',
-    '<=': '$lte',
-    'LIKE': '$regex',
-};
-
-/*
- * Parses object with WHERE clause recursively
- * and generates MongoDB `find` query object
- */
-function parseWhere(root) {
-    const operator = Object.keys(root)[0];
-
-    // extract leaf binary expressions
-    if (operator === 'AND') {
-        const e1 = parseWhere(root[operator][0]);
-        const e2 = parseWhere(root[operator][1]);
-
-        // eslint-disable-next-line
-        return { '$and' : [
-            e1,
-            e2,
-        ] };
-    } else if (operator === 'OR') {
-        const e1 = parseWhere(root[operator][0]);
-        const e2 = parseWhere(root[operator][1]);
-
-        // eslint-disable-next-line
-        return { '$or' : [
-            e1,
-            e2,
-        ] };
-    }
-    const field = root[operator][0];
-    const value = root[operator][1];
-    const expr = exprMapper[operator];
-    const obj = {};
-
-    if (operator === 'LIKE') {
-        obj[`value.${field}`] = parseLikeExpression(value);
-    } else {
-        obj[`value.${field}`] = { [expr]: value };
-    }
-
-    return obj;
-}
-
-module.exports = parseWhere;
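For reference, the deleted `parseWhere` above recursively maps each SQL operator onto its MongoDB counterpart via `exprMapper` and prefixes every field with `value.`. A worked example of the translation it performed:

```js
// AST shape consumed by the deleted parseWhere, as produced by
// sql-where-parser for:
//   `content-length` > 5 AND `x-amz-meta-color` = "blue"
const ast = {
    AND: [
        { '>': ['content-length', 5] },
        { '=': ['x-amz-meta-color', 'blue'] },
    ],
};

// Corresponding MongoDB find() filter produced by parseWhere:
const filter = {
    $and: [
        { 'value.content-length': { $gt: 5 } },
        { 'value.x-amz-meta-color': { $eq: 'blue' } },
    ],
};
console.log(JSON.stringify(filter, null, 2));
```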
@@ -1,32 +0,0 @@
-const config = require('../../../Config').config;
-
-/**
- * Validates that the replication configuration contains a preferred
- * read location if the bucket location is a transient source
- *
- * @param {object} repConfig - replication configuration
- * @param {object} bucket - bucket metadata
- *
- * @return {boolean} validity of replication configuration with
- * transient source
- */
-function validateReplicationConfig(repConfig, bucket) {
-    const bucketLocationName = bucket.getLocationConstraint();
-    if (!repConfig || !repConfig.rules) {
-        return false;
-    }
-    const bucketLocation = config.locationConstraints[bucketLocationName];
-    if (!bucketLocation.isTransient) {
-        return true;
-    }
-    return repConfig.rules.every(rule => {
-        if (!rule.storageClass) {
-            return true;
-        }
-        const storageClasses = rule.storageClass.split(',');
-        return storageClasses.some(
-            site => site.endsWith(':preferred_read'));
-    });
-}
-
-module.exports = validateReplicationConfig;
@ -1,96 +0,0 @@
const Parser = require('sql-where-parser');
const { errors } = require('arsenal');
const objModel = require('arsenal').models.ObjectMD;

const BINARY_OP = 2;
const sqlConfig = {
    operators: [
        {
            '=': BINARY_OP,
            '<': BINARY_OP,
            '>': BINARY_OP,
            '<>': BINARY_OP,
            '<=': BINARY_OP,
            '>=': BINARY_OP,
            '!=': BINARY_OP,
        },
        { LIKE: BINARY_OP },
        { AND: BINARY_OP },
        { OR: BINARY_OP },
    ],
    tokenizer: {
        shouldTokenize: ['(', ')', '=', '!=', '<', '>', '<=', '>=', '<>'],
        shouldMatch: ['"', '\'', '`'],
        shouldDelimitBy: [' ', '\n', '\r', '\t'],
    },
};
const parser = new Parser(sqlConfig);

function _validateTree(whereClause, possibleAttributes) {
    let invalidAttribute;

    function _searchTree(node) {
        if (typeof node !== 'object') {
            invalidAttribute = node;
        } else {
            const operator = Object.keys(node)[0];
            if (operator === 'AND' || operator === 'OR') {
                _searchTree(node[operator][0]);
                _searchTree(node[operator][1]);
            } else {
                const field = node[operator][0];
                if (!field.startsWith('tags.') &&
                    !possibleAttributes[field] &&
                    !field.startsWith('replicationInfo.') &&
                    !field.startsWith('x-amz-meta-')) {
                    invalidAttribute = field;
                }
            }
        }
    }
    _searchTree(whereClause);
    return invalidAttribute;
}

/**
 * validateSearchParams - validate value of ?search= in request
 * @param {string} searchParams - value of search params in request
 * which should be just a SQL WHERE clause
 * For metadata: x-amz-meta-color=\"blue\"
 * For tags: tags.x-amz-meta-color=\"blue\"
 * For replication status: replication-status=\"PENDING\"
 * For any other attribute: `content-length`=5
 * @return {undefined | error} undefined if validates or arsenal error if not
 */
function validateSearchParams(searchParams) {
    let ast;
    try {
        // allow using 'replicationStatus' as search param to increase
        // ease of use, pending metadata search rework
        // eslint-disable-next-line no-param-reassign
        searchParams = searchParams.replace(
            'replication-status', 'replicationInfo.status');
        ast = parser.parse(searchParams);
    } catch (e) {
        if (e) {
            return {
                error: errors.InvalidArgument
                    .customizeDescription('Invalid sql where clause ' +
                        'sent as search query'),
            };
        }
    }
    const possibleAttributes = objModel.getAttributes();
    const invalidAttribute = _validateTree(ast, possibleAttributes);
    if (invalidAttribute) {
        return {
            error: errors.InvalidArgument
                .customizeDescription('Search param ' +
                    `contains unknown attribute: ${invalidAttribute}`) };
    }
    return {
        ast,
    };
}

module.exports = validateSearchParams;
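Hypothetical calls showing the two result shapes (the query strings are assumptions for illustration):

validateSearchParams('x-amz-meta-color="blue"');
// -> { ast } for a well-formed clause over a known attribute
validateSearchParams('replication-status="PENDING"');
// -> 'replication-status' is first rewritten to 'replicationInfo.status'
validateSearchParams('unknownfield="x"');
// -> { error } with 'Search param contains unknown attribute: unknownfield'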
@ -0,0 +1,233 @@
const { config } = require('../../../Config');
const { legacyLocations } = require('../../../../constants.js');
const { locationConstraints } = config;
const escapeForXml = require('arsenal').s3middleware.escapeForXml;

class BackendInfo {
    /**
     * Represents the info necessary to evaluate which data backend to use
     * on a data put call.
     * @constructor
     * @param {string | undefined} objectLocationConstraint - location constraint
     * for object based on user meta header
     * @param {string | undefined} bucketLocationConstraint - location
     * constraint for bucket based on bucket metadata
     * @param {string} requestEndpoint - endpoint to which request was made
     * @param {string | undefined} legacyLocationConstraint - legacy location
     * constraint
     */
    constructor(objectLocationConstraint, bucketLocationConstraint,
        requestEndpoint, legacyLocationConstraint) {
        this._objectLocationConstraint = objectLocationConstraint;
        this._bucketLocationConstraint = bucketLocationConstraint;
        this._requestEndpoint = requestEndpoint;
        this._legacyLocationConstraint = legacyLocationConstraint;
        return this;
    }

    /**
     * validate proposed location constraint against config
     * @param {string | undefined} locationConstraint - value of user
     * metadata location constraint header or bucket location constraint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if valid, false if not
     */
    static isValidLocationConstraint(locationConstraint, log) {
        if (Object.keys(config.locationConstraints).
            indexOf(locationConstraint) < 0) {
            log.trace('proposed locationConstraint is invalid',
                { locationConstraint });
            return false;
        }
        return true;
    }

    /**
     * validate that request endpoint is listed in the restEndpoint config
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if present, false if not
     */
    static isRequestEndpointPresent(requestEndpoint, log) {
        if (Object.keys(config.restEndpoints).indexOf(requestEndpoint) < 0) {
            log.trace('requestEndpoint does not match config restEndpoints',
                { requestEndpoint });
            return false;
        }
        return true;
    }

    /**
     * validate that locationConstraint for request Endpoint matches
     * one config locationConstraint
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if matches, false if not
     */
    static isRequestEndpointValueValid(requestEndpoint, log) {
        if (Object.keys(config.locationConstraints).indexOf(config
            .restEndpoints[requestEndpoint]) < 0) {
            log.trace('the default locationConstraint for request' +
                'Endpoint does not match any config locationConstraint',
                { requestEndpoint });
            return false;
        }
        return true;
    }

    /**
     * validate that s3 server is running with a file or memory backend
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if running with file/mem backend, false if not
     */
    static isMemOrFileBackend(requestEndpoint, log) {
        if (config.backends.data === 'mem' ||
            config.backends.data === 'file') {
            log.trace('use data backend for the location', {
                dataBackend: config.backends.data,
                method: 'isMemOrFileBackend',
            });
            return true;
        }
        return false;
    }

    /**
     * validate requestEndpoint against config or mem/file data backend
     * - if there is no match for the request endpoint in the config
     * restEndpoints and data backend is set to mem or file we will use this
     * data backend for the location.
     * - if locationConstraint for request Endpoint does not match
     * any config locationConstraint, we will return an error
     * @param {string} requestEndpoint - request endpoint
     * @param {object} log - werelogs logger
     * @return {boolean} - true if valid, false if not
     */
    static isValidRequestEndpointOrBackend(requestEndpoint, log) {
        if (!BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
            return BackendInfo.isMemOrFileBackend(requestEndpoint, log);
        }
        return BackendInfo.isRequestEndpointValueValid(requestEndpoint, log);
    }

    /**
     * validate controlling BackendInfo Parameter
     * @param {string | undefined} objectLocationConstraint - value of user
     * metadata location constraint header
     * @param {string | null} bucketLocationConstraint - location
     * constraint from bucket metadata
     * @param {string} requestEndpoint - endpoint of request
     * @param {object} log - werelogs logger
     * @return {object} - location constraint validity
     */
    static controllingBackendParam(objectLocationConstraint,
        bucketLocationConstraint, requestEndpoint, log) {
        if (objectLocationConstraint) {
            if (BackendInfo.isValidLocationConstraint(objectLocationConstraint,
                log)) {
                log.trace('objectLocationConstraint is valid');
                return { isValid: true };
            }
            log.trace('objectLocationConstraint is invalid');
            return { isValid: false, description: 'Object Location Error - ' +
                `Your object location "${escapeForXml(objectLocationConstraint)}" ` +
                'is not in your location config - Please update.' };
        }
        if (bucketLocationConstraint) {
            if (BackendInfo.isValidLocationConstraint(bucketLocationConstraint,
                log)) {
                log.trace('bucketLocationConstraint is valid');
                return { isValid: true };
            }
            log.trace('bucketLocationConstraint is invalid');
            return { isValid: false, description: 'Bucket Location Error - ' +
                `Your bucket location "${escapeForXml(bucketLocationConstraint)}"` +
                ' is not in your location config - Please update.' };
        }
        const legacyLocationConstraint =
            BackendInfo.getLegacyLocationConstraint();
        if (legacyLocationConstraint) {
            log.trace('legacy location is valid');
            return { isValid: true, legacyLocationConstraint };
        }
        if (!BackendInfo.isValidRequestEndpointOrBackend(requestEndpoint,
            log)) {
            return { isValid: false, description: 'Endpoint Location Error - ' +
                `Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
                'in your config OR the default location constraint for request ' +
                `endpoint "${escapeForXml(requestEndpoint)}" does not ` +
                'match any config locationConstraint - Please update.' };
        }
        if (BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
            return { isValid: true };
        }
        return { isValid: true, defaultedToDataBackend: true };
    }

    /**
     * Return legacyLocationConstraint
     * @return {string | undefined} legacyLocationConstraint;
     */
    static getLegacyLocationConstraint() {
        return legacyLocations.find(ll => locationConstraints[ll]);
    }

    /**
     * Return objectLocationConstraint
     * @return {string | undefined} objectLocationConstraint;
     */
    getObjectLocationConstraint() {
        return this._objectLocationConstraint;
    }

    /**
     * Return bucketLocationConstraint
     * @return {string | undefined} bucketLocationConstraint;
     */
    getBucketLocationConstraint() {
        return this._bucketLocationConstraint;
    }

    /**
     * Return requestEndpoint
     * @return {string} requestEndpoint;
     */
    getRequestEndpoint() {
        return this._requestEndpoint;
    }

    /**
     * Return locationConstraint that should be used with put request
     * Order of priority is:
     * (1) objectLocationConstraint,
     * (2) bucketLocationConstraint,
     * (3) legacyLocationConstraint,
     * (4) default locationConstraint for requestEndpoint if requestEndpoint
     *     is listed in restEndpoints in config.json
     * (5) default data backend
     * @return {string} locationConstraint;
     */
    getControllingLocationConstraint() {
        const objectLC = this.getObjectLocationConstraint();
        const bucketLC = this.getBucketLocationConstraint();
        const reqEndpoint = this.getRequestEndpoint();
        if (objectLC) {
            return objectLC;
        }
        if (bucketLC) {
            return bucketLC;
        }
        if (this._legacyLocationConstraint) {
            return this._legacyLocationConstraint;
        }
        if (config.restEndpoints[reqEndpoint]) {
            return config.restEndpoints[reqEndpoint];
        }
        return config.backends.data;
    }
}

module.exports = {
    BackendInfo,
};
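A minimal sketch of the selection flow, assuming hypothetical constraint and endpoint names and a werelogs request logger `log`:

const check = BackendInfo.controllingBackendParam(
    'us-east-1', undefined, 's3.example.com', log);
if (check.isValid) {
    const info = new BackendInfo('us-east-1', undefined,
        's3.example.com', check.legacyLocationConstraint);
    // priority: object header, then bucket, legacy, endpoint default,
    // and finally the default data backend
    const location = info.getControllingLocationConstraint(); // 'us-east-1'
}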
@ -14,7 +14,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
         bucketName,
         objectKey,
         uploadId,
-        preciseRequestType: request.apiMethods || 'multipartDelete',
+        preciseRequestType: 'multipartDelete',
         request,
     };
     // For validating the request at the destinationBucket level

@ -73,6 +73,15 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
                 return next(null, mpuBucket, destBucket, skipDataDelete);
             });
         },
+        function sendAbortPut(mpuBucket, destBucket, skipDataDelete, next) {
+            services.sendAbortMPUPut(bucketName, objectKey, uploadId, log,
+                err => {
+                    if (err) {
+                        return next(err, destBucket);
+                    }
+                    return next(null, mpuBucket, destBucket, skipDataDelete);
+                });
+        },
         function getPartLocations(mpuBucket, destBucket, skipDataDelete,
             next) {
             services.getMPUparts(mpuBucket.getName(), uploadId, log,

@ -87,6 +96,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
         },
         function deleteData(mpuBucket, storedParts, destBucket,
             skipDataDelete, next) {
+            // for Azure we do not need to delete data
             if (skipDataDelete) {
                 return next(null, mpuBucket, storedParts, destBucket);
             }
@ -1,19 +0,0 @@
const { zenkoIDHeader } = require('arsenal').constants;

const _config = require('../../../Config').config;

/**
 * applyZenkoUserMD - if request is within a Zenko deployment, apply user
 * metadata called "zenko-source" to the object
 * @param {Object} metaHeaders - user metadata object
 * @return {undefined}
 */
function applyZenkoUserMD(metaHeaders) {
    if (process.env.REMOTE_MANAGEMENT_DISABLE === '0' &&
        !metaHeaders[zenkoIDHeader]) {
        // eslint-disable-next-line no-param-reassign
        metaHeaders[zenkoIDHeader] = _config.getPublicInstanceId();
    }
}

module.exports = applyZenkoUserMD;
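A sketch of the effect, assuming a Zenko deployment (REMOTE_MANAGEMENT_DISABLE set to '0'):

const metaHeaders = {};
applyZenkoUserMD(metaHeaders);
// metaHeaders now maps zenkoIDHeader to config.getPublicInstanceId();
// a header already present is left untouched, so the stamp is applied once.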
@ -1,27 +0,0 @@
/**
 * checkReadLocation - verify that a bucket's default read location exists
 * for a specified read data locator
 * @param {Config} config - Config object
 * @param {string} locationName - location constraint
 * @param {string} objectKey - object key
 * @param {string} bucketName - bucket name
 * @return {Object | null} return object containing location information
 * if location exists; otherwise, null
 */
function checkReadLocation(config, locationName, objectKey, bucketName) {
    const readLocation = config.getLocationConstraint(locationName);
    if (readLocation) {
        const bucketMatch = readLocation.details &&
            readLocation.details.bucketMatch;
        const backendKey = bucketMatch ? objectKey :
            `${bucketName}/${objectKey}`;
        return {
            location: locationName,
            key: backendKey,
            locationType: readLocation.type,
        };
    }
    return null;
}

module.exports = checkReadLocation;
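Hypothetical inputs showing how bucketMatch drives the backend key (the location name and its type are assumptions):

const loc = checkReadLocation(config, 'aws-loc', 'photo.jpg', 'mybucket');
// details.bucketMatch false -> { location: 'aws-loc',
//     key: 'mybucket/photo.jpg', locationType: 'aws_s3' }
// details.bucketMatch true  -> key stays 'photo.jpg'
// unknown location          -> null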
@ -1,247 +0,0 @@
/*
 * Code based on Yutaka Oishi (Fujifilm) contributions
 * Date: 11 Sep 2020
 */
const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;

const { scaledMsPerDay } = config.getTimeOptions();

/**
 * Get response header "x-amz-restore"
 * Called by objectHead.js
 * @param {object} objMD - object's metadata
 * @returns {string|undefined} x-amz-restore
 */
function getAmzRestoreResHeader(objMD) {
    if (objMD.archive &&
        objMD.archive.restoreRequestedAt &&
        !objMD.archive.restoreCompletedAt) {
        // Avoid race condition by relying on the `archive` MD of the object
        // and return the right header after a RESTORE request.
        // eslint-disable-next-line
        return `ongoing-request="true"`;
    }
    if (objMD['x-amz-restore']) {
        if (objMD['x-amz-restore']['expiry-date']) {
            const utcDateTime = new Date(objMD['x-amz-restore']['expiry-date']).toUTCString();
            // eslint-disable-next-line
            return `ongoing-request="${objMD['x-amz-restore']['ongoing-request']}", expiry-date="${utcDateTime}"`;
        }
    }
    return undefined;
}

/**
 * Check if restore can be done.
 *
 * @param {ObjectMD} objectMD - object metadata
 * @param {object} log - werelogs logger
 * @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
 */
function _validateStartRestore(objectMD, log) {
    if (objectMD.archive?.restoreCompletedAt) {
        if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
            // return InvalidObjectState error if the restored object is expired
            // but restore info md of this object has not yet been cleared
            log.debug('The restored object already expired.',
                {
                    archive: objectMD.archive,
                    method: '_validateStartRestore',
                });
            return errors.InvalidObjectState;
        }

        // If the object is already restored, no further check is needed.
        // Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
        // been reset.
        return undefined;
    }
    const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
    if (!isLocationCold) {
        // return InvalidObjectState error if the object is not in cold storage:
        // either the location has no cold flag or the flag is explicitly false
        log.debug('The bucket of the object is not in a cold storage location.',
            {
                isLocationCold,
                method: '_validateStartRestore',
            });
        return errors.InvalidObjectState;
    }
    if (objectMD.archive?.restoreRequestedAt) {
        // return RestoreAlreadyInProgress error if the object is currently being restored:
        // archive.restoreRequestedAt exists and archive.restoreCompletedAt does not yet exist
        log.debug('The object is currently being restored.',
            {
                archive: objectMD.archive,
                method: '_validateStartRestore',
            });
        return errors.RestoreAlreadyInProgress;
    }
    return undefined;
}

/**
 * Check if "put version id" is allowed
 *
 * @param {ObjectMD} objMD - object metadata
 * @param {string} versionId - object's version id
 * @param {object} log - werelogs logger
 * @return {ArsenalError|undefined} - undefined if "put version id" is allowed
 */
function validatePutVersionId(objMD, versionId, log) {
    if (!objMD) {
        const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
        log.error('error no object metadata found', { method: 'validatePutVersionId', versionId });
        return err;
    }

    if (objMD.isDeleteMarker) {
        log.error('version is a delete marker', { method: 'validatePutVersionId', versionId });
        return errors.MethodNotAllowed;
    }

    const isLocationCold = locationConstraints[objMD.dataStoreName]?.isCold;
    if (!isLocationCold) {
        log.error('The object data is not stored in a cold storage location.',
            {
                isLocationCold,
                dataStoreName: objMD.dataStoreName,
                method: 'validatePutVersionId',
            });
        return errors.InvalidObjectState;
    }

    // make sure object archive restoration is in progress
    // NOTE: we do not use putObjectVersion to update the restoration period.
    if (!objMD.archive || !objMD.archive.restoreRequestedAt || !objMD.archive.restoreRequestedDays
        || objMD.archive.restoreCompletedAt || objMD.archive.restoreWillExpireAt) {
        log.error('object archive restoration is not in progress',
            { method: 'validatePutVersionId', versionId });
        return errors.InvalidObjectState;
    }

    return undefined;
}

/**
 * Check if the object is already restored, and update the expiration date accordingly:
 * > After restoring an archived object, you can update the restoration period by reissuing the
 * > request with a new period. Amazon S3 updates the restoration period relative to the current
 * > time.
 *
 * @param {ObjectMD} objectMD - object metadata
 * @param {object} log - werelogs logger
 * @return {boolean} - true if the object is already restored
 */
function _updateObjectExpirationDate(objectMD, log) {
    // Check if the restoreCompletedAt field exists.
    // Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
    // checked earlier in the process, so checking again here would create weird states
    const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
    log.debug('The restore status of the object.', {
        isObjectAlreadyRestored,
        method: 'isObjectAlreadyRestored'
    });
    if (isObjectAlreadyRestored) {
        const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
        expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));

        /* eslint-disable no-param-reassign */
        objectMD.archive.restoreWillExpireAt = expiryDate;
        objectMD['x-amz-restore'] = {
            'ongoing-request': false,
            'expiry-date': expiryDate,
        };
        /* eslint-enable no-param-reassign */
    }
    return isObjectAlreadyRestored;
}

/**
 * Update restore expiration date.
 *
 * @param {ObjectMD} objectMD - objectMD instance
 * @param {object} restoreParam - restore request parameters
 * @param {object} log - werelogs logger
 * @return {ArsenalError|undefined} internal error if object MD is not valid
 *
 */
function _updateRestoreInfo(objectMD, restoreParam, log) {
    if (!objectMD.archive) {
        log.debug('objectMD.archive doesn\'t exist', {
            objectMD,
            method: '_updateRestoreInfo'
        });
        return errors.InternalError.customizeDescription('Archive metadata is missing.');
    }
    /* eslint-disable no-param-reassign */
    objectMD.archive.restoreRequestedAt = new Date();
    objectMD.archive.restoreRequestedDays = restoreParam.days;
    objectMD.originOp = 's3:ObjectRestore:Post';
    /* eslint-enable no-param-reassign */
    if (!ObjectMDArchive.isValid(objectMD.archive)) {
        log.debug('archive is not valid', {
            archive: objectMD.archive,
            method: '_updateRestoreInfo'
        });
        return errors.InternalError.customizeDescription('Invalid archive metadata.');
    }
    return undefined;
}

/**
 * Start to restore the object.
 * If x-amz-restore does not exist, add it to objectMD (x-amz-restore = false),
 * calculate the restore expiry-date and add it to objectMD.
 * Called by objectRestore.js
 *
 * @param {ObjectMD} objectMD - objectMD instance
 * @param {object} restoreParam - restore request parameters
 * @param {object} log - werelogs logger
 * @param {function} cb - callback
 * @return {undefined}
 *
 */
function startRestore(objectMD, restoreParam, log, cb) {
    log.info('Validating if restore can be done or not.');
    const checkResultError = _validateStartRestore(objectMD, log);
    if (checkResultError) {
        return cb(checkResultError);
    }
    log.info('Updating restore information.');
    const updateResultError = _updateRestoreInfo(objectMD, restoreParam, log);
    if (updateResultError) {
        return cb(updateResultError);
    }
    const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
    return cb(null, isObjectAlreadyRestored);
}

/**
 * Checks if object data is available or if it's in cold storage.
 * @param {ObjectMD} objMD Object metadata
 * @returns {ArsenalError|null} error if object data is not available
 */
function verifyColdObjectAvailable(objMD) {
    // return error when object is cold
    if (objMD.archive &&
        // Object is in cold backend
        (!objMD.archive.restoreRequestedAt ||
        // Object is being restored
        (objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
        const err = errors.InvalidObjectState
            .customizeDescription('The operation is not valid for the object\'s storage class');
        return err;
    }
    return null;
}

module.exports = {
    startRestore,
    getAmzRestoreResHeader,
    validatePutVersionId,
    verifyColdObjectAvailable,
};
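A sketch of the expiry arithmetic used by _updateObjectExpirationDate for an already-restored object (the date and day count are hypothetical):

const restoreRequestedAt = new Date('2020-09-11T00:00:00Z');
const restoreRequestedDays = 5;
const expiryDate = new Date(restoreRequestedAt.getTime() +
    (restoreRequestedDays * scaledMsPerDay));
// expiryDate feeds both archive.restoreWillExpireAt and the 'x-amz-restore'
// header: { 'ongoing-request': false, 'expiry-date': expiryDate }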
@ -5,22 +5,49 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
 const constants = require('../../../../constants');
 const { data } = require('../../../data/wrapper');
 const services = require('../../../services');
+const logger = require('../../../utilities/logger');
 const { dataStore } = require('./storeObject');
 const locationConstraintCheck = require('./locationConstraintCheck');
-const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
+const { versioningPreprocessing } = require('./versioning');
 const removeAWSChunked = require('./removeAWSChunked');
 const getReplicationInfo = require('./getReplicationInfo');
 const { config } = require('../../../Config');
 const validateWebsiteHeader = require('./websiteServing')
     .validateWebsiteHeader;
-const applyZenkoUserMD = require('./applyZenkoUserMD');
-const { externalBackends, versioningNotImplBackends } = constants;
+const {
+    externalBackends, versioningNotImplBackends, zenkoIDHeader,
+} = constants;
 
 const externalVersioningErrorMessage = 'We do not currently support putting ' +
-    'a versioned object to a location-constraint of type Azure or GCP.';
+    'a versioned object to a location-constraint of type Azure.';
+
+/**
+ * Retro-propagation is where S3C ingestion will re-ingest an object whose
+ * request originated from Zenko.
+ * To avoid this, Zenko requests which create objects/versions will be tagged
+ * with a user-metadata header defined in constants.zenkoIDHeader. When
+ * ingesting objects into Zenko, we can determine if this object has already
+ * been created in Zenko.
+ * Delete marker requests cannot specify user-metadata fields, so we instead
+ * rely on checking the "user-agent" to see the origin of a request.
+ * If delete marker, and user-agent came from a Zenko client, we add the
+ * user-metadata field to the object metadata.
+ * @param {Object} metaHeaders - user metadata object
+ * @param {http.ClientRequest} request - client request with user-agent header
+ * @param {Boolean} isDeleteMarker - delete marker indicator
+ * @return {undefined}
+ */
+function _checkAndApplyZenkoMD(metaHeaders, request, isDeleteMarker) {
+    const userAgent = request.headers['user-agent'];
+
+    if (isDeleteMarker && userAgent && userAgent.includes('Zenko')) {
+        // eslint-disable-next-line no-param-reassign
+        metaHeaders[zenkoIDHeader] = 'zenko';
+    }
+}
 
 function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
-    metadataStoreParams, dataToDelete, log, requestMethod, callback) {
+    metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
     services.metadataStoreObject(bucketName, dataGetInfo,
         cipherBundle, metadataStoreParams, (err, result) => {
             if (err) {

@ -30,7 +57,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
             const newDataStoreName = Array.isArray(dataGetInfo) ?
                 dataGetInfo[0].dataStoreName : null;
             return data.batchDelete(dataToDelete, requestMethod,
-                newDataStoreName, log, err => callback(err, result));
+                newDataStoreName, deleteLog, err => callback(err, result));
         }
         return callback(null, result);
     });

@ -52,7 +79,6 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
 * credentialScope (to be used for streaming v4 auth if applicable)
 * @param {(object|null)} overheadField - fields to be included in metadata overhead
 * @param {RequestLogger} log - logger instance
-* @param {string} originOp - Origin operation
 * @param {function} callback - callback function
 * @return {undefined} and call callback with (err, result) -
 * result.contentMD5 - content md5 of new object or version

@ -60,10 +86,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
 */
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
    canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
-    overheadField, log, originOp, callback) {
-    const putVersionId = request.headers['x-scal-s3-version-id'];
-    const isPutVersion = putVersionId || putVersionId === '';
-
+    overheadField, log, callback) {
     const size = isDeleteMarker ? 0 : request.parsedContentLength;
     // although the request method may actually be 'DELETE' if creating a
     // delete marker, for our purposes we consider this to be a 'PUT'

@ -86,9 +109,9 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
         });
         return process.nextTick(() => callback(metaHeaders));
     }
-    // if the request occurs within a Zenko deployment, we place a user-metadata
-    // field on the object
-    applyZenkoUserMD(metaHeaders);
+    // if receiving a request from Zenko for a delete marker, we place a
+    // user-metadata field on the object
+    _checkAndApplyZenkoMD(metaHeaders, request, isDeleteMarker);
 
     log.trace('meta headers', { metaHeaders, method: 'objectPut' });
     const objectKeyContext = {

@ -114,26 +137,11 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
         size,
         headers,
         isDeleteMarker,
-        replicationInfo: getReplicationInfo(
-            objectKey, bucketMD, false, size, null, null, authInfo),
+        replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size, null, null, authInfo, isDeleteMarker),
         overheadField,
         log,
     };
 
-    // For Azure BlobStorage API compatibility
-    // If an object already exists copy/repair creation-time
-    // creation-time must remain static after an object is created
-    // --> EVEN FOR VERSIONS <--
-    if (objMD) {
-        if (objMD['creation-time']) {
-            metadataStoreParams.creationTime = objMD['creation-time'];
-        } else {
-            // If creation-time is not set (for old objects)
-            // fall back to the last modified and store it back to the db
-            metadataStoreParams.creationTime = objMD['last-modified'];
-        }
-    }
-
     if (!isDeleteMarker) {
         metadataStoreParams.contentType = request.headers['content-type'];
         metadataStoreParams.cacheControl = request.headers['cache-control'];

@ -143,7 +151,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
             removeAWSChunked(request.headers['content-encoding']);
         metadataStoreParams.expires = request.headers.expires;
         metadataStoreParams.tagging = request.headers['x-amz-tagging'];
-        metadataStoreParams.originOp = originOp;
+        metadataStoreParams.originOp = 's3:ObjectCreated:Put';
         const defaultObjectLockConfiguration
             = bucketMD.getObjectLockConfiguration();
         if (defaultObjectLockConfiguration) {

@ -158,7 +166,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
         // eslint-disable-next-line no-param-reassign
         request.headers[constants.objectLocationConstraintHeader] =
             objMD[constants.objectLocationConstraintHeader];
-        metadataStoreParams.originOp = originOp;
+        metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated';
     }
 
     const backendInfoObj =

@ -197,41 +205,13 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
     const dontSkipBackend = externalBackends;
     /* eslint-enable camelcase */
 
-    const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
-    const mdOnlySize = request.headers['x-amz-meta-size'];
+    const requestLogger =
+        logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
 
     return async.waterfall([
         function storeData(next) {
-            if (size === 0) {
-                if (!dontSkipBackend[locationType]) {
-                    metadataStoreParams.contentMD5 = constants.emptyFileMd5;
-                    return next(null, null, null);
-                }
-                // Handle mdOnlyHeader as a metadata only operation. If
-                // the object in question is actually 0 byte or has a body size
-                // then handle normally.
-                if (mdOnlyHeader === 'true' && mdOnlySize > 0) {
-                    log.debug('metadata only operation x-amz-meta-mdonly');
-                    const md5 = request.headers['x-amz-meta-md5chksum']
-                        ? new Buffer(request.headers['x-amz-meta-md5chksum'],
-                        'base64').toString('hex') : null;
-                    const numParts = request.headers['x-amz-meta-md5numparts'];
-                    let _md5;
-                    if (numParts === undefined) {
-                        _md5 = md5;
-                    } else {
-                        _md5 = `${md5}-${numParts}`;
-                    }
-                    const versionId = request.headers['x-amz-meta-version-id'];
-                    const dataGetInfo = {
-                        key: objectKey,
-                        dataStoreName: location,
-                        dataStoreType: locationType,
-                        dataStoreVersionId: versionId,
-                        dataStoreMD5: _md5,
-                    };
-                    return next(null, dataGetInfo, _md5);
-                }
+            if (size === 0 && !dontSkipBackend[locationType]) {
+                metadataStoreParams.contentMD5 = constants.emptyFileMd5;
+                return next(null, null, null);
             }
             return dataStore(objectKeyContext, cipherBundle, request, size,
                 streamingV4Params, backendInfo, log, next);

@ -256,19 +236,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
                 dataGetInfoArr[0].cipheredDataKey =
                     cipherBundle.cipheredDataKey;
             }
-            if (mdOnlyHeader === 'true') {
-                metadataStoreParams.size = mdOnlySize;
-                dataGetInfoArr[0].size = mdOnlySize;
-            }
             metadataStoreParams.contentMD5 = calculatedHash;
             return next(null, dataGetInfoArr);
         },
         function getVersioningInfo(infoArr, next) {
-            // if x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
-            if (isPutVersion) {
-                const options = overwritingVersioning(objMD, metadataStoreParams);
-                return process.nextTick(() => next(null, options, infoArr));
-            }
             return versioningPreprocessing(bucketName, bucketMD,
                 metadataStoreParams.objectKey, objMD, log, (err, options) => {
                     if (err) {

@ -294,7 +265,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
             }
             return _storeInMDandDeleteData(bucketName, infoArr,
                 cipherBundle, metadataStoreParams,
-                options.dataToDelete, log, requestMethod, next);
+                options.dataToDelete, requestLogger, requestMethod, next);
         },
     ], callback);
 }
@ -4,25 +4,23 @@ const {
     LifecycleDateTime,
     LifecycleUtils,
 } = require('arsenal').s3middleware.lifecycleHelpers;
-const { config } = require('../../../Config');
-
-const {
-    expireOneDayEarlier,
-    transitionOneDayEarlier,
-    timeProgressionFactor,
-    scaledMsPerDay,
-} = config.getTimeOptions();
+
+// moves lifecycle transition deadlines 1 day earlier, mostly for testing
+const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
+// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
+const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
 
 const lifecycleDateTime = new LifecycleDateTime({
     transitionOneDayEarlier,
     expireOneDayEarlier,
-    timeProgressionFactor,
 });
 
-const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
+const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
+
+const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
 
 function calculateDate(objDate, expDays, datetime) {
-    return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
+    return new Date(datetime.getTimestamp(objDate) + expDays * oneDay);
 }
 
 function formatExpirationHeader(date, id) {
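On the `+` side of this hunk the deadline is plain calendar arithmetic; a hypothetical 30-day expiration:

const when = calculateDate('2024-01-01T00:00:00Z', 30, lifecycleDateTime);
// -> 2024-01-31T00:00:00.000Z, i.e. timestamp + 30 * oneDay;
// the `-` side instead scales by scaledMsPerDay from config.getTimeOptions().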
@ -1,51 +0,0 @@
const { errors } = require('arsenal');

/**
 * getReplicationBackendDataLocator - compares given location constraint to
 * replication backends
 * @param {object} locationObj - object containing location information
 * @param {string} locationObj.location - name of location constraint
 * @param {string} locationObj.key - keyname of object in location constraint
 * @param {string} locationObj.locationType - type of location constraint
 * @param {object} replicationInfo - information about object replication
 * @param {array} replicationInfo.backends - array containing information about
 * each replication location
 * @param {string} replicationInfo.backends[].site - name of replication
 * location
 * @param {string} replicationInfo.backends[].status - status of replication
 * @param {string} replicationInfo.backends[].dataStoreVersionId - version id
 * of object at replication location
 * @return {object} res - response object
 * {array} [res.dataLocator] - if COMPLETED status: array
 * containing the cloud location,
 * undefined otherwise
 * {string} [res.status] - replication status if no error
 * {string} [res.reason] - reason message if PENDING/FAILED
 * {Error} [res.error] - defined if object is not replicated to
 * location passed in locationObj
 */
function getReplicationBackendDataLocator(locationObj, replicationInfo) {
    const repBackendResult = {};
    const locMatch = replicationInfo.backends.find(
        backend => backend.site === locationObj.location);
    if (!locMatch) {
        repBackendResult.error = errors.InvalidLocationConstraint.
            customizeDescription('Object is not replicated to location ' +
                'passed in location header');
        return repBackendResult;
    }
    repBackendResult.status = locMatch.status;
    if (['PENDING', 'FAILED'].includes(locMatch.status)) {
        repBackendResult.reason =
            `Object replication to specified backend is ${locMatch.status}`;
        return repBackendResult;
    }
    repBackendResult.dataLocator = [{
        key: locationObj.key,
        dataStoreName: locationObj.location,
        dataStoreType: locationObj.locationType,
        dataStoreVersionId: locMatch.dataStoreVersionId }];
    return repBackendResult;
}

module.exports = getReplicationBackendDataLocator;
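Hypothetical inputs showing the main result shapes (the site name and version id values are assumptions):

const locationObj = { location: 'aws-site', key: 'bucket/key', locationType: 'aws_s3' };
const replicationInfo = {
    backends: [{ site: 'aws-site', status: 'COMPLETED', dataStoreVersionId: 'v1' }],
};
getReplicationBackendDataLocator(locationObj, replicationInfo);
// -> { status: 'COMPLETED', dataLocator: [{ key, dataStoreName, ... }] }
// status PENDING/FAILED -> { status, reason } only; unknown site -> { error }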
@ -1,7 +1,5 @@
 const s3config = require('../../../Config').config;
-const { isServiceAccount, getServiceAccountProperties } =
-    require('../authorization/permissionChecks');
-const { replicationBackends } = require('arsenal').constants;
+const { isLifecycleSession } = require('../authorization/permissionChecks.js');
 
 function _getBackend(objectMD, site) {
     const backends = objectMD ? objectMD.replicationInfo.backends : [];

@ -23,31 +21,25 @@ function _getStorageClasses(rule) {
     }
     const { replicationEndpoints } = s3config;
     // If no storage class, use the given default endpoint or the sole endpoint
-    if (replicationEndpoints.length > 0) {
+    if (replicationEndpoints.length > 1) {
         const endPoint =
-            replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0];
+            replicationEndpoints.find(endpoint => endpoint.default);
         return [endPoint.site];
     }
-    return undefined;
+    return [replicationEndpoints[0].site];
 }
 
 function _getReplicationInfo(rule, replicationConfig, content, operationType,
-    objectMD, bucketMD) {
+    objectMD) {
     const storageTypes = [];
     const backends = [];
     const storageClasses = _getStorageClasses(rule);
-    if (!storageClasses) {
-        return undefined;
-    }
     storageClasses.forEach(storageClass => {
-        const storageClassName =
-            storageClass.endsWith(':preferred_read') ?
-            storageClass.split(':')[0] : storageClass;
-        const location = s3config.locationConstraints[storageClassName];
-        if (location && replicationBackends[location.type]) {
+        const location = s3config.locationConstraints[storageClass];
+        if (location && ['aws_s3', 'azure'].includes(location.type)) {
             storageTypes.push(location.type);
         }
-        backends.push(_getBackend(objectMD, storageClassName));
+        backends.push(_getBackend(objectMD, storageClass));
     });
     if (storageTypes.length > 0 && operationType) {
         content.push(operationType);

@ -60,7 +52,6 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
         storageClass: storageClasses.join(','),
         role: replicationConfig.role,
         storageType: storageTypes.join(','),
-        isNFS: bucketMD.isNFS(),
     };
 }
 

@ -74,45 +65,26 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
 * @param {string} operationType - The type of operation to replicate
 * @param {object} objectMD - The object metadata
 * @param {AuthInfo} [authInfo] - authentication info of object owner
+* @param {boolean} [isDeleteMarker] - whether creating a delete marker
 * @return {undefined}
 */
function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType,
-    objectMD, authInfo) {
+    objectMD, authInfo, isDeleteMarker) {
     const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA'];
     const config = bucketMD.getReplicationConfiguration();
-
-    // Do not replicate object in the following cases:
-    //
-    // - bucket does not have a replication configuration
-    //
-    // - replication configuration does not apply to the object
-    //   (i.e. no rule matches object prefix)
-    //
-    // - replication configuration applies to the object (i.e. a rule matches
-    //   object prefix) but the status is disabled
-    //
-    // - object owner is an internal service account like Lifecycle,
-    //   unless the account properties explicitly allow it to
-    //   replicate like MD ingestion (because we do not want to
-    //   replicate objects created from actions triggered by internal
-    //   services, by design)
-
+    // If bucket does not have a replication configuration, do not replicate.
     if (config) {
-        let doReplicate = false;
-        if (!authInfo || !isServiceAccount(authInfo.getCanonicalID())) {
-            doReplicate = true;
-        } else {
-            const serviceAccountProps = getServiceAccountProperties(
-                authInfo.getCanonicalID());
-            doReplicate = serviceAccountProps.canReplicate;
-        }
-        if (doReplicate) {
-            const rule = config.rules.find(
-                rule => (objKey.startsWith(rule.prefix) && rule.enabled));
-            if (rule) {
-                return _getReplicationInfo(
-                    rule, config, content, operationType, objectMD, bucketMD);
-            }
-        }
+        // If deleting an object due to a lifecycle action,
+        // the delete marker is not replicated to the destination buckets.
+        if (isDeleteMarker && authInfo && isLifecycleSession(authInfo.getArn())) {
+            return undefined;
+        }
+        const rule = config.rules.find(rule =>
+            (objKey.startsWith(rule.prefix) && rule.enabled));
+        if (rule) {
+            return _getReplicationInfo(rule, config, content, operationType,
+                objectMD);
+        }
     }
     return undefined;
@ -1,7 +1,6 @@
-const { errors, models } = require('arsenal');
+const { errors } = require('arsenal');
 
-const { BackendInfo } = models;
-const { config } = require('../../../Config');
+const { BackendInfo } = require('./BackendInfo');
 const constants = require('../../../../constants');
 
 /**

@ -30,7 +29,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
     const bucketLocationConstraint = bucket.getLocationConstraint();
     const requestEndpoint = request.parsedHost;
 
-    const controllingBackend = BackendInfo.controllingBackendParam(config,
+    const controllingBackend = BackendInfo.controllingBackendParam(
         objectLocationConstraint, bucketLocationConstraint,
         requestEndpoint, log);
     if (!controllingBackend.isValid) {

@ -40,7 +39,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
         };
         return backendInfoObj;
     }
-    const backendInfo = new BackendInfo(config, objectLocationConstraint,
+    const backendInfo = new BackendInfo(objectLocationConstraint,
         bucketLocationConstraint, requestEndpoint,
         controllingBackend.legacyLocationConstraint);
     backendInfoObj = {
@ -1,37 +0,0 @@
|
||||||
const { errors } = require('arsenal');
|
|
||||||
|
|
||||||
const { config } = require('../../../Config');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* locationHeaderCheck - compares 'x-amz-location-constraint' header
|
|
||||||
* to location constraints in config
|
|
||||||
* @param {object} headers - request headers
|
|
||||||
* @param {string} objectKey - key name of object
|
|
||||||
* @param {string} bucketName - name of bucket
|
|
||||||
* @return {undefined|Object} returns error, object, or undefined
|
|
||||||
* @return {string} return.location - name of location constraint
|
|
||||||
* @return {string} return.key - name of object at location constraint
|
|
||||||
* @return {string} - return.locationType - type of location constraint
|
|
||||||
*/
|
|
||||||
function locationHeaderCheck(headers, objectKey, bucketName) {
|
|
||||||
const location = headers['x-amz-location-constraint'];
|
|
||||||
if (location) {
|
|
||||||
const validLocation = config.locationConstraints[location];
|
|
||||||
if (!validLocation) {
|
|
||||||
return errors.InvalidLocationConstraint.customizeDescription(
|
|
||||||
'Invalid location constraint specified in header');
|
|
||||||
}
|
|
||||||
const bucketMatch = validLocation.details.bucketMatch;
|
|
||||||
const backendKey = bucketMatch ? objectKey :
|
|
||||||
`${bucketName}/${objectKey}`;
|
|
||||||
return {
|
|
||||||
location,
|
|
||||||
key: backendKey,
|
|
||||||
locationType: validLocation.type,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
// no location header was passed
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = locationHeaderCheck;
|
|
|
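For reference, the key-mapping rule implemented by the helper deleted above can be exercised on its own. This is a minimal sketch, not the real module: the `locationConstraints` object stands in for the real CloudServer config, and only the `details.bucketMatch` flag drives the result.

// Hypothetical stand-in for config.locationConstraints
const locationConstraints = {
    'us-east-1': { type: 'file', details: { bucketMatch: true } },
    'azure-backend': { type: 'azure', details: { bucketMatch: false } },
};

function backendKeyFor(location, bucketName, objectKey) {
    const validLocation = locationConstraints[location];
    if (!validLocation) {
        return new Error('Invalid location constraint specified in header');
    }
    // bucketMatch: true  -> the object keeps its own key on the backend
    // bucketMatch: false -> the key is prefixed with the bucket name
    return validLocation.details.bucketMatch
        ? objectKey
        : `${bucketName}/${objectKey}`;
}

console.log(backendKeyFor('us-east-1', 'photos', 'cat.jpg'));     // 'cat.jpg'
console.log(backendKeyFor('azure-backend', 'photos', 'cat.jpg')); // 'photos/cat.jpg'
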
@@ -22,13 +22,8 @@ function locationKeysHaveChanged(prev, curr) {
         return curr.every(v => v.key !== prev);
     }
     const keysMap = {};
-    prev.forEach(v => {
-        if (!keysMap[v.dataStoreType]) {
-            keysMap[v.dataStoreType] = {};
-        }
-        keysMap[v.dataStoreType][v.key] = true;
-    });
-    return curr.every(v => !(keysMap[v.dataStoreType] && keysMap[v.dataStoreType][v.key]));
+    prev.forEach(v => { keysMap[v.key] = true; });
+    return curr.every(v => !keysMap[v.key]);
 }
 
 module.exports = locationKeysHaveChanged;

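The behavioral difference in this hunk is subtle: the removed variant deduplicates per (dataStoreType, key) pair, while the retained one deduplicates per key alone. A self-contained sketch of both, with hypothetical sample data:

function changedPerKey(prev, curr) {
    const keysMap = {};
    prev.forEach(v => { keysMap[v.key] = true; });
    return curr.every(v => !keysMap[v.key]);
}

function changedPerTypeAndKey(prev, curr) {
    const keysMap = {};
    prev.forEach(v => {
        if (!keysMap[v.dataStoreType]) {
            keysMap[v.dataStoreType] = {};
        }
        keysMap[v.dataStoreType][v.key] = true;
    });
    return curr.every(v => !(keysMap[v.dataStoreType] && keysMap[v.dataStoreType][v.key]));
}

const prev = [{ key: 'k1', dataStoreType: 'aws' }];
const curr = [{ key: 'k1', dataStoreType: 'azure' }];
// Same key reused on a different backend type: only the per-type
// variant treats the locations as having changed.
console.log(changedPerKey(prev, curr));        // false
console.log(changedPerTypeAndKey(prev, curr)); // true
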
@@ -5,7 +5,6 @@ const { config } = require('../../../Config');
 const vault = require('../../../auth/vault');
 const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
 
-const { scaledMsPerDay } = config.getTimeOptions();
 /**
  * Calculates retain until date for the locked object version
  * @param {object} retention - includes days or years retention period
@@ -21,9 +20,8 @@ function calculateRetainUntilDate(retention) {
     const date = moment();
     // Calculate the number of days to retain the lock on the object
     const retainUntilDays = days || years * 365;
-    const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
     const retainUntilDate
-        = date.add(retainUntilDaysInMs, 'ms');
+        = date.add(retainUntilDays, 'days');
     return retainUntilDate.toISOString();
 }
 /**
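The hunk above swaps a scaled-milliseconds computation for moment's plain day arithmetic. A minimal sketch of the retained computation, assuming `days` and `years` are destructured from the retention object earlier in the function (that guard and destructuring are not shown in the hunk):

const moment = require('moment');

function calculateRetainUntilDate(retention) {
    const { days, years } = retention;
    if (!days && !years) {
        return undefined; // assumed guard; not visible in the hunk
    }
    const date = moment();
    // days take precedence; years convert at a flat 365 days/year
    const retainUntilDays = days || years * 365;
    return date.add(retainUntilDays, 'days').toISOString();
}

console.log(calculateRetainUntilDate({ days: 30 })); // now + 30 days
console.log(calculateRetainUntilDate({ years: 1 })); // now + 365 days
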
@@ -306,7 +304,7 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
                 return cb(err);
             }
             const explicitDenyExists = authorizationResults.some(
-                authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
+                authzResult => authzResult.isAllowed === false && authzResult.isImplicit === false);
             if (explicitDenyExists) {
                 log.trace('authorization check failed for user',
                     {
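Both sides of this hunk compute the same predicate; the new side simply spells `!authzResult.isImplicit` as an explicit comparison. A tiny sketch over hypothetical Vault authorization results shows why the distinction matters: only an explicit deny blocks the governance bypass.

const authorizationResults = [
    { action: 's3:BypassGovernanceRetention', isAllowed: false, isImplicit: true },
    { action: 's3:PutObject', isAllowed: true, isImplicit: false },
];

const explicitDenyExists = authorizationResults.some(
    authzResult => authzResult.isAllowed === false && authzResult.isImplicit === false);

console.log(explicitDenyExists); // false: the only deny here is implicit
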
@@ -1,172 +0,0 @@
-const async = require('async');
-const { errors, s3middleware } = require('arsenal');
-
-const { allowedRestoreObjectRequestTierValues } = require('../../../../constants');
-const coldStorage = require('./coldStorage');
-const monitoring = require('../../../utilities/monitoringHandler');
-const { pushMetric } = require('../../../utapi/utilities');
-const { decodeVersionId } = require('./versioning');
-const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
-const { parseRestoreRequestXml } = s3middleware.objectRestore;
-const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
-
-/**
- * Check if tier is supported
- * @param {object} restoreInfo - restore information
- * @returns {ArsenalError|undefined} return NotImplemented error if tier not support
- */
-function checkTierSupported(restoreInfo) {
-    if (!allowedRestoreObjectRequestTierValues.includes(restoreInfo.tier)) {
-        return errors.NotImplemented;
-    }
-    return undefined;
-}
-
-/**
- * POST Object restore process
- *
- * @param {MetadataWrapper} metadata - metadata wrapper
- * @param {object} mdUtils - utility object to treat metadata
- * @param {AuthInfo} userInfo - Instance of AuthInfo class with requester's info
- * @param {IncomingMessage} request - request info
- * @param {object} log - Werelogs logger
- * @param {function} callback callback function
- * @return {undefined}
- */
-function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
-    const METHOD = 'objectRestore';
-
-    const { bucketName, objectKey } = request;
-
-    log.debug('processing request', { method: METHOD });
-
-    const decodedVidResult = decodeVersionId(request.query);
-    if (decodedVidResult instanceof Error) {
-        log.trace('invalid versionId query',
-            {
-                method: METHOD,
-                versionId: request.query.versionId,
-                error: decodedVidResult,
-            });
-        return process.nextTick(() => callback(decodedVidResult));
-    }
-
-    let isObjectRestored = false;
-
-    const mdValueParams = {
-        authInfo: userInfo,
-        bucketName,
-        objectKey,
-        versionId: decodedVidResult,
-        requestType: request.apiMethods || 'restoreObject',
-        /**
-         * Restoring an object might not cause any impact on
-         * the storage, if the object is already restored: in
-         * this case, the duration is extended. We disable the
-         * quota evaluation and trigger it manually.
-         */
-        checkQuota: false,
-        request,
-    };
-
-    return async.waterfall([
-        // get metadata of bucket and object
-        function validateBucketAndObject(next) {
-            return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
-                log, (err, bucketMD, objectMD) => {
-                    if (err) {
-                        log.trace('request authorization failed', { method: METHOD, error: err });
-                        return next(err);
-                    }
-                    // Call back error if object metadata could not be obtained
-                    if (!objectMD) {
-                        const err = decodedVidResult ? errors.NoSuchVersion : errors.NoSuchKey;
-                        log.trace('error no object metadata found', { method: METHOD, error: err });
-                        return next(err, bucketMD);
-                    }
-                    // If object metadata is delete marker,
-                    // call back NoSuchKey or MethodNotAllowed depending on specifying versionId
-                    if (objectMD.isDeleteMarker) {
-                        let err = errors.NoSuchKey;
-                        if (decodedVidResult) {
-                            err = errors.MethodNotAllowed;
-                        }
-                        log.trace('version is a delete marker', { method: METHOD, error: err });
-                        return next(err, bucketMD, objectMD);
-                    }
-                    log.info('it acquired the object metadata.', {
-                        'method': METHOD,
-                    });
-                    return next(null, bucketMD, objectMD);
-                });
-        },
-
-        // generate restore param obj from xml of request body and check tier validity
-        function parseRequestXmlAndCheckTier(bucketMD, objectMD, next) {
-            log.trace('parsing object restore information');
-            return parseRestoreRequestXml(request.post, log, (err, restoreInfo) => {
-                if (err) {
-                    return next(err, bucketMD, objectMD, restoreInfo);
-                }
-                log.info('it parsed xml of the request body.', { method: METHOD, value: restoreInfo });
-                const checkTierResult = checkTierSupported(restoreInfo);
-                if (checkTierResult instanceof Error) {
-                    return next(checkTierResult);
-                }
-                return next(null, bucketMD, objectMD, restoreInfo);
-            });
-        },
-        // start restore process
-        function startRestore(bucketMD, objectMD, restoreInfo, next) {
-            return coldStorage.startRestore(objectMD, restoreInfo, log,
-                (err, _isObjectRestored) => {
-                    isObjectRestored = _isObjectRestored;
-                    return next(err, bucketMD, objectMD);
-                });
-        },
-        function evaluateQuotas(bucketMD, objectMD, next) {
-            if (isObjectRestored) {
-                return next(null, bucketMD, objectMD);
-            }
-            const actions = Array.isArray(mdValueParams.requestType) ?
-                mdValueParams.requestType : [mdValueParams.requestType];
-            const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
-            return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
-                false, log, err => next(err, bucketMD, objectMD));
-        },
-        function updateObjectMD(bucketMD, objectMD, next) {
-            const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
-            metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
-                log, err => next(err, bucketMD, objectMD));
-        },
-    ],
-    (err, bucketMD) => {
-        // generate CORS response header
-        const responseHeaders = collectCorsHeaders(request.headers.origin, request.method, bucketMD);
-        if (err) {
-            log.trace('error processing request',
-                {
-                    method: METHOD,
-                    error: err,
-                });
-            monitoring.promMetrics(
-                'POST', bucketName, err.code, 'restoreObject');
-            return callback(err, err.code, responseHeaders);
-        }
-        pushMetric('restoreObject', log, {
-            userInfo,
-            bucket: bucketName,
-        });
-        if (isObjectRestored) {
-            monitoring.promMetrics(
-                'POST', bucketName, '200', 'restoreObject');
-            return callback(null, 200, responseHeaders);
-        }
-        monitoring.promMetrics(
-            'POST', bucketName, '202', 'restoreObject');
-        return callback(null, 202, responseHeaders);
-    });
-}
-
-module.exports = objectRestore;

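One subtlety worth noting in the deleted handler: quota evaluation is disabled during metadata validation (`checkQuota: false`) and triggered manually after `coldStorage.startRestore`, because re-restoring an already-restored object only extends its duration and writes no new bytes (the handler then answers 200 instead of 202). As a simplified stand-in for that accounting, not the actual quota code:

function quotaBytesForRestore(isObjectRestored, objectMD) {
    if (isObjectRestored) {
        return 0; // duration extension: no storage impact, answer 200
    }
    // a fresh restore writes the full object back to hot storage, answer 202
    return Number.parseInt(objectMD['content-length'], 10);
}

console.log(quotaBytesForRestore(false, { 'content-length': '1048576' })); // 1048576
console.log(quotaBytesForRestore(true, { 'content-length': '1048576' }));  // 0
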
@@ -0,0 +1,241 @@
+const { errors } = require('arsenal');
+const crypto = require('crypto');
+const constants = require('../../../../constants');
+
+/**
+ * createAggregateETag - creates ETag from concatenated MPU part ETags to
+ * mimic AWS
+ * @param {string} concatETags - string of concatenated MPU part ETags
+ * @param {array} partList - list of parts to complete MPU with
+ * @return {string} aggregateETag - final complete MPU obj ETag
+ */
+function createAggregateETag(concatETags, partList) {
+    // AWS documentation is unclear on what the MD5 is that it returns
+    // in the response for a complete multipart upload request.
+    // The docs state that they might or might not
+    // return the MD5 of the complete object. It appears
+    // they are returning the MD5 of the parts' MD5s so that is
+    // what we have done here. We:
+    // 1) concatenate the hex version of the
+    // individual ETags
+    // 2) convert the concatenated hex to binary
+    // 3) take the md5 of the binary
+    // 4) create the hex digest of the md5
+    // 5) add '-' plus the number of parts at the end
+
+    // Convert the concatenated hex ETags to binary
+    const bufferedHex = Buffer.from(concatETags, 'hex');
+    // Convert the buffer to a binary string
+    const binaryString = bufferedHex.toString('binary');
+    // Get the md5 of the binary string
+    const md5Hash = crypto.createHash('md5');
+    md5Hash.update(binaryString, 'binary');
+    // Get the hex digest of the md5
+    let aggregateETag = md5Hash.digest('hex');
+    // Add the number of parts at the end
+    aggregateETag = `${aggregateETag}-${partList.length}`;
+
+    return aggregateETag;
+}
+
+/**
+ * generateMpuPartStorageInfo - generates info needed for storage of
+ * completed MPU object
+ * @param {array} filteredPartList - list of parts filtered from metadata
+ * @return {object} partsInfo - contains three keys: aggregateETag,
+ * dataLocations, and calculatedSize
+ */
+function generateMpuPartStorageInfo(filteredPartList) {
+    // Assemble array of part locations, aggregate size
+    // and build string to create aggregate ETag
+    let calculatedSize = 0;
+    const dataLocations = [];
+    let concatETags = '';
+    const partsInfo = {};
+
+    filteredPartList.forEach((storedPart, index) => {
+        const partETagWithoutQuotes =
+            storedPart.ETag.slice(1, -1);
+        const dataStoreETag = `${index + 1}:${partETagWithoutQuotes}`;
+        concatETags += partETagWithoutQuotes;
+
+        // If part was put by a regular put part rather than a
+        // copy it is always one location. With a put part
+        // copy, could be multiple locations so loop over array
+        // of locations.
+        for (let j = 0; j < storedPart.locations.length; j++) {
+            // If the piece has parts (was a put part object
+            // copy) each piece will have a size attribute.
+            // Otherwise, the piece was put by a regular put
+            // part and the size the of the piece is the full
+            // part size.
+            const location = storedPart.locations[j];
+            // If there is no location, move on
+            if (!location || typeof location !== 'object') {
+                continue;
+            }
+            let pieceSize = Number.parseInt(storedPart.size, 10);
+            if (location.size) {
+                pieceSize = Number.parseInt(location.size, 10);
+            }
+            const pieceRetrievalInfo = {
+                key: location.key,
+                size: pieceSize,
+                start: calculatedSize,
+                dataStoreName: location.dataStoreName,
+                dataStoreETag,
+                cryptoScheme: location.sseCryptoScheme,
+                cipheredDataKey: location.sseCipheredDataKey,
+            };
+            dataLocations.push(pieceRetrievalInfo);
+            // eslint-disable-next-line no-param-reassign
+            calculatedSize += pieceSize;
+        }
+    });
+
+    partsInfo.aggregateETag =
+        createAggregateETag(concatETags, filteredPartList);
+    partsInfo.dataLocations = dataLocations;
+    partsInfo.calculatedSize = calculatedSize;
+    return partsInfo;
+}
+
+/**
+ * validateAndFilterMpuParts - validates part list sent by user and filters
+ * parts stored in metadata against user part list
+ * @param {array} storedParts - array of parts stored in metadata
+ * @param {array} jsonList - array of parts sent by user for completion
+ * @param {string} mpuOverviewKey - metadata mpu key
+ * @param {string} splitter - mpu key divider
+ * @param {object} log - Werelogs instance
+ * @return {object} filtersPartsObj - contains 3 keys: partList, keysToDelete,
+ * and extraPartLocations
+ */
+function validateAndFilterMpuParts(storedParts, jsonList, mpuOverviewKey,
+    splitter, log) {
+    let storedPartsCopy = [];
+    const filteredPartsObj = {};
+    filteredPartsObj.partList = [];
+
+    const keysToDelete = [];
+    storedParts.forEach(item => {
+        keysToDelete.push(item.key);
+        storedPartsCopy.push({
+            // In order to delete the part listing in the shadow
+            // bucket, need the full key
+            key: item.key,
+            ETag: `"${item.value.ETag}"`,
+            size: item.value.Size,
+            locations: Array.isArray(item.value.partLocations) ?
+                item.value.partLocations : [item.value.partLocations],
+        });
+    });
+    keysToDelete.push(mpuOverviewKey);
+
+    // Check list sent to make sure valid
+    const partLength = jsonList.Part.length;
+    // A user can put more parts than they end up including
+    // in the completed MPU but there cannot be more
+    // parts in the complete message than were already put
+    if (partLength > storedPartsCopy.length) {
+        filteredPartsObj.error = errors.InvalidPart;
+        return filteredPartsObj;
+    }
+
+    let extraParts = [];
+    const extraPartLocations = [];
+
+    for (let i = 0; i < partLength; i++) {
+        const part = jsonList.Part[i];
+        const partNumber = Number.parseInt(part.PartNumber[0], 10);
+        // If the complete list of parts sent with
+        // the complete multipart upload request is not
+        // in ascending order return an error
+        if (i > 0) {
+            const previousPartNumber =
+                Number.parseInt(jsonList.Part[i - 1].PartNumber[0], 10);
+            if (partNumber <= previousPartNumber) {
+                filteredPartsObj.error = errors.InvalidPartOrder;
+                return filteredPartsObj;
+            }
+        }
+
+        let isPartUploaded = false;
+        while (storedPartsCopy.length > 0 && !isPartUploaded) {
+            const storedPart = storedPartsCopy[0];
+            const storedPartNumber =
+                Number.parseInt(storedPart.key.split(splitter)[1], 10);
+
+            if (storedPartNumber === partNumber) {
+                isPartUploaded = true;
+                filteredPartsObj.partList.push(storedPart);
+
+                let partETag = part.ETag[0].replace(/['"]/g, '');
+                // some clients send base64, convert to hex
+                // 32 chars = 16 bytes(2 chars-per-byte) = 128 bits of
+                // MD5 hex
+                if (partETag.length !== 32) {
+                    const buffered = Buffer.from(part.ETag[0], 'base64')
+                        .toString('hex');
+                    partETag = `${buffered}`;
+                }
+                partETag = `"${partETag}"`;
+                // If list of parts sent with complete mpu request contains
+                // a part ETag that does not match the ETag for the part
+                // stored in metadata, return an error
+                if (partETag !== storedPart.ETag) {
+                    filteredPartsObj.error = errors.InvalidPart;
+                    return filteredPartsObj;
+                }
+
+                // If any part other than the last part is less than
+                // 5MB, return an error
+                const storedPartSize =
+                    Number.parseInt(storedPart.size, 10);
+                // allow smaller parts for testing
+                if (process.env.MPU_TESTING) {
+                    log.info('MPU_TESTING env variable setting',
+                        { setting: process.env.MPU_TESTING });
+                }
+                if (process.env.MPU_TESTING !== 'yes' &&
+                    i < jsonList.Part.length - 1 &&
+                    storedPartSize < constants.minimumAllowedPartSize) {
+                    log.debug('part too small on complete mpu');
+                    filteredPartsObj.error = errors.EntityTooSmall;
+                    return filteredPartsObj;
+                }
+
+                storedPartsCopy = storedPartsCopy.splice(1);
+            } else {
+                extraParts.push(storedPart);
+                storedPartsCopy = storedPartsCopy.splice(1);
+            }
+        }
+        if (!isPartUploaded) {
+            filteredPartsObj.error = errors.InvalidPart;
+            return filteredPartsObj;
+        }
+    }
+    extraParts = extraParts.concat(storedPartsCopy);
+    // if extra parts, need to delete the data when done with completing
+    // mpu so extract the info to delete here
+    if (extraParts.length > 0) {
+        extraParts.forEach(part => {
+            const locations = part.locations;
+            locations.forEach(location => {
+                if (!location || typeof location !== 'object') {
+                    return;
+                }
+                extraPartLocations.push(location);
+            });
+        });
+    }
+    filteredPartsObj.keysToDelete = keysToDelete;
+    filteredPartsObj.extraPartLocations = extraPartLocations;
+    return filteredPartsObj;
+}
+
+module.exports = {
+    generateMpuPartStorageInfo,
+    validateAndFilterMpuParts,
+};

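A worked example of the five-step aggregate-ETag computation added above, for a hypothetical two-part upload. Updating the hash with the Buffer directly is byte-equivalent to the 'binary'-string round trip in the committed code.

const crypto = require('crypto');

function aggregateETag(partETags) {
    // 1) concatenate the hex part ETags; 2) decode hex to bytes;
    // 3) md5 the bytes; 4) hex digest; 5) append '-' + part count
    const concatETags = partETags.join('');
    const md5OfMd5s = crypto.createHash('md5')
        .update(Buffer.from(concatETags, 'hex'))
        .digest('hex');
    return `${md5OfMd5s}-${partETags.length}`;
}

// MD5 digests of two hypothetical parts
const parts = [
    crypto.createHash('md5').update('part one data').digest('hex'),
    crypto.createHash('md5').update('part two data').digest('hex'),
];
console.log(aggregateETag(parts)); // '<32 hex chars>-2', the AWS MPU ETag shape
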
@@ -4,40 +4,13 @@ const async = require('async');
 const metadata = require('../../../metadata/wrapper');
 const { config } = require('../../../Config');
 
-const { scaledMsPerDay } = config.getTimeOptions();
-
 const versionIdUtils = versioning.VersionID;
 // Use Arsenal function to generate a version ID used internally by metadata
 // for null versions that are created before bucket versioning is configured
 const nonVersionedObjId =
     versionIdUtils.getInfVid(config.replicationGroupId);
 
-/** decodeVID - decode the version id
- * @param {string} versionId - version ID
- * @return {(Error|string|undefined)} - return Invalid Argument if decryption
- * fails due to improper format, otherwise undefined or the decoded version id
- */
-function decodeVID(versionId) {
-    if (versionId === 'null') {
-        return versionId;
-    }
-
-    let decoded;
-    const invalidErr = errors.InvalidArgument.customizeDescription('Invalid version id specified');
-    try {
-        decoded = versionIdUtils.decode(versionId);
-    } catch (err) {
-        return invalidErr;
-    }
-
-    if (decoded instanceof Error) {
-        return invalidErr;
-    }
-
-    return decoded;
-}
-
-/** decodeVersionId - decode the version id from a query object
+/** decodedVidResult - decode the version id from a query object
  * @param {object} [reqQuery] - request query object
  * @param {string} [reqQuery.versionId] - version ID sent in request query
  * @return {(Error|string|undefined)} - return Invalid Argument if decryption
@@ -47,7 +20,16 @@ function decodeVersionId(reqQuery) {
     if (!reqQuery || !reqQuery.versionId) {
         return undefined;
     }
-    return decodeVID(reqQuery.versionId);
+    let versionId = reqQuery.versionId;
+    if (versionId === 'null') {
+        return versionId;
+    }
+    versionId = versionIdUtils.decode(versionId);
+    if (versionId instanceof Error) {
+        return errors.InvalidArgument
+            .customizeDescription('Invalid version id specified');
+    }
+    return versionId;
 }
 
 /** getVersionIdResHeader - return encrypted version ID if appropriate
@@ -61,7 +43,8 @@ function getVersionIdResHeader(verCfg, objectMD) {
         if (objectMD.isNull || !objectMD.versionId) {
             return 'null';
         }
-        return versionIdUtils.encode(objectMD.versionId);
+        return versionIdUtils.encode(objectMD.versionId,
+            config.versionIdEncodingType);
     }
     return undefined;
 }
@@ -210,7 +193,7 @@ function processVersioningState(mst, vstat, nullVersionCompatMode) {
         // null keys are used, which is used as an optimization to
         // avoid having to check the versioned key since there can
        // be no more versioned key to clean up
-        if (mst.isNull && mst.versionId && !mst.isNull2) {
+        if (mst.isNull && !mst.isNull2) {
            const delOptions = { versionId: mst.versionId };
            return { options, delOptions };
        }
@@ -241,7 +224,7 @@ function processVersioningState(mst, vstat, nullVersionCompatMode) {
         if (masterIsNull) {
             // if master is a null version or a non-versioned key,
             // copy it to a new null key
-            const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId;
+            const nullVersionId = mst.isNull ? mst.versionId : nonVersionedObjId;
             if (nullVersionCompatMode) {
                 options.extraMD = {
                     nullVersionId,
@@ -460,93 +443,6 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
     return options;
 }
-
-/**
- * Keep metadatas when the object is restored from cold storage
- * but remove the specific ones we don't want to keep
- * @param {object} objMD - obj metadata
- * @param {object} metadataStoreParams - custom built object containing resource details.
- * @return {undefined}
- */
-function restoreMetadata(objMD, metadataStoreParams) {
-    /* eslint-disable no-param-reassign */
-    const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
-    // We need to keep user metadata and tags
-    Object.keys(objMD).forEach(key => {
-        if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
-            metadataStoreParams.metaHeaders[key] = objMD[key];
-        }
-    });
-
-    if (objMD['x-amz-website-redirect-location']) {
-        if (!metadataStoreParams.headers) {
-            metadataStoreParams.headers = {};
-        }
-        metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
-    }
-
-    if (objMD.replicationInfo) {
-        metadataStoreParams.replicationInfo = objMD.replicationInfo;
-    }
-
-    if (objMD.legalHold) {
-        metadataStoreParams.legalHold = objMD.legalHold;
-    }
-
-    if (objMD.acl) {
-        metadataStoreParams.acl = objMD.acl;
-    }
-
-    metadataStoreParams.creationTime = objMD['creation-time'];
-    metadataStoreParams.lastModifiedDate = objMD['last-modified'];
-    metadataStoreParams.taggingCopy = objMD.tags;
-}
-
-/** overwritingVersioning - return versioning information for S3 to handle
- * storing version metadata with a specific version id.
- * @param {object} objMD - obj metadata
- * @param {object} metadataStoreParams - custom built object containing resource details.
- * @return {object} options
- * options.versionId - specific versionId to overwrite in metadata
- * options.isNull - (true/undefined) whether new version is null or not
- * options.nullVersionId - if storing a null version in version history, the
- * version id of the null version
- */
-function overwritingVersioning(objMD, metadataStoreParams) {
-    metadataStoreParams.updateMicroVersionId = true;
-    metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
-
-    // set correct originOp
-    metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
-
-    // update restore
-    const days = objMD.archive?.restoreRequestedDays;
-    const now = Date.now();
-    metadataStoreParams.archive = {
-        archiveInfo: objMD.archive?.archiveInfo,
-        restoreRequestedAt: objMD.archive?.restoreRequestedAt,
-        restoreRequestedDays: objMD.archive?.restoreRequestedDays,
-        restoreCompletedAt: new Date(now),
-        restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
-    };
-
-    /* eslint-enable no-param-reassign */
-
-    const versionId = objMD.versionId || undefined;
-    const options = {
-        versionId,
-        isNull: objMD.isNull,
-    };
-    if (objMD.nullVersionId) {
-        options.extraMD = {
-            nullVersionId: objMD.nullVersionId,
-        };
-    }
-
-    restoreMetadata(objMD, metadataStoreParams);
-
-    return options;
-}
 
 module.exports = {
     decodeVersionId,
     getVersionIdResHeader,
@@ -556,6 +452,4 @@ module.exports = {
     versioningPreprocessing,
     getVersionSpecificMetadataOptions,
     preprocessingVersioningDelete,
-    overwritingVersioning,
-    decodeVID,
 };

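The inlined decoder above has three outcomes worth seeing side by side: undefined when the query carries no version id, the literal string 'null' passed through, and either the decoded id or an InvalidArgument error otherwise. A sketch with `decode` as a hypothetical stand-in for Arsenal's versionIdUtils.decode, which returns an Error on malformed input:

function decodeVersionId(reqQuery, decode) {
    if (!reqQuery || !reqQuery.versionId) {
        return undefined;           // no version id in the query
    }
    let versionId = reqQuery.versionId;
    if (versionId === 'null') {
        return versionId;           // the literal 'null' version
    }
    versionId = decode(versionId);
    if (versionId instanceof Error) {
        return new Error('Invalid version id specified');
    }
    return versionId;
}

const decode = s => (/^[0-9a-f]+$/.test(s) ? s : new Error('bad format'));
console.log(decodeVersionId(undefined, decode));                 // undefined
console.log(decodeVersionId({ versionId: 'null' }, decode));     // 'null'
console.log(decodeVersionId({ versionId: 'abc123' }, decode));   // 'abc123'
console.log(decodeVersionId({ versionId: 'not hex!' }, decode)); // Error
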
@@ -101,33 +101,8 @@ function validateWebsiteHeader(header) {
         header.startsWith('http://') || header.startsWith('https://'));
 }
 
-/**
- * appendWebsiteIndexDocument - append index to objectKey if necessary
- * @param {object} request - normalized request object
- * @param {string} indexDocumentSuffix - index document from website config
- * @param {boolean} force - flag to force append index
- * @return {undefined}
- */
-function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
-    const reqObjectKey = request.objectKey ? request.objectKey : '';
-    /* eslint-disable no-param-reassign */
-
-    // find index document if "directory" sent in request
-    if (reqObjectKey.endsWith('/')) {
-        request.objectKey += indexDocumentSuffix;
-    // find index document if no key provided
-    } else if (reqObjectKey === '') {
-        request.objectKey = indexDocumentSuffix;
-    // force for redirect 302 on folder without trailing / that has an index
-    } else if (force) {
-        request.objectKey += `/${indexDocumentSuffix}`;
-    }
-    /* eslint-enable no-param-reassign */
-}
-
 module.exports = {
     findRoutingRule,
     extractRedirectInfo,
     validateWebsiteHeader,
-    appendWebsiteIndexDocument,
 };

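The removed helper had three branches: a trailing-slash "directory" key, an empty key, and the forced case used for the 302 redirect on a folder without a trailing slash. A standalone sketch that returns the resulting key instead of mutating the request object:

function appendIndex(objectKey, indexDocumentSuffix, force = false) {
    // "directory" sent in request
    if (objectKey.endsWith('/')) {
        return objectKey + indexDocumentSuffix;
    }
    // no key provided
    if (objectKey === '') {
        return indexDocumentSuffix;
    }
    // forced: folder without trailing slash that has an index
    if (force) {
        return `${objectKey}/${indexDocumentSuffix}`;
    }
    return objectKey;
}

console.log(appendIndex('docs/', 'index.html'));      // 'docs/index.html'
console.log(appendIndex('', 'index.html'));           // 'index.html'
console.log(appendIndex('docs', 'index.html', true)); // 'docs/index.html'
console.log(appendIndex('docs', 'index.html'));       // 'docs' (unchanged)
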
@@ -1,314 +0,0 @@
-const async = require('async');
-const { errors } = require('arsenal');
-const monitoring = require('../../../utilities/monitoringHandler');
-const {
-    actionNeedQuotaCheckCopy,
-    actionNeedQuotaCheck,
-    actionWithDataDeletion,
-} = require('arsenal').policies;
-const { config } = require('../../../Config');
-const QuotaService = require('../../../quotas/quotas');
-
-/**
- * Process the bytes to write based on the request and object metadata
- * @param {string} apiMethod - api method
- * @param {BucketInfo} bucket - bucket info
- * @param {string} versionId - version id of the object
- * @param {number} contentLength - content length of the object
- * @param {object} objMD - object metadata
- * @param {object} destObjMD - destination object metadata
- * @return {number} processed content length
- */
-function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
-    let bytes = contentLength;
-    if (apiMethod === 'objectRestore') {
-        // object is being restored
-        bytes = Number.parseInt(objMD['content-length'], 10);
-    } else if (!bytes && objMD?.['content-length']) {
-        if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
-            if (!destObjMD || bucket.isVersioningEnabled()) {
-                // object is being copied
-                bytes = Number.parseInt(objMD['content-length'], 10);
-            } else if (!bucket.isVersioningEnabled()) {
-                // object is being copied and replaces the target
-                bytes = Number.parseInt(objMD['content-length'], 10) -
-                    Number.parseInt(destObjMD['content-length'], 10);
-            }
-        } else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
-            // object is being deleted
-            bytes = -Number.parseInt(objMD['content-length'], 10);
-        }
-    } else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
-        // object is being replaced: store the diff, if the bucket is not versioned
-        bytes = bytes - Number.parseInt(objMD['content-length'], 10);
-    }
-    return bytes || 0;
-}
-
-/**
- * Checks if a metric is stale based on the provided parameters.
- *
- * @param {Object} metric - The metric object to check.
- * @param {string} resourceType - The type of the resource.
- * @param {string} resourceName - The name of the resource.
- * @param {string} action - The action being performed.
- * @param {number} inflight - The number of inflight requests.
- * @param {Object} log - The logger object.
- * @returns {boolean} Returns true if the metric is stale, false otherwise.
- */
-function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
-    if (metric.date && Date.now() - new Date(metric.date).getTime() >
-        QuotaService.maxStaleness) {
-        log.warn('Stale metrics from the quota service, allowing the request', {
-            resourceType,
-            resourceName,
-            action,
-            inflight,
-        });
-        monitoring.requestWithQuotaMetricsUnavailable.inc();
-        return true;
-    }
-    return false;
-}
-
-/**
- * Evaluates quotas for a bucket and an account and update inflight count.
- *
- * @param {number} bucketQuota - The quota limit for the bucket.
- * @param {number} accountQuota - The quota limit for the account.
- * @param {object} bucket - The bucket object.
- * @param {object} account - The account object.
- * @param {number} inflight - The number of inflight requests.
- * @param {number} inflightForCheck - The number of inflight requests for checking quotas.
- * @param {string} action - The action being performed.
- * @param {object} log - The logger object.
- * @param {function} callback - The callback function to be called when evaluation is complete.
- * @returns {object} - The result of the evaluation.
- */
-function _evaluateQuotas(
-    bucketQuota,
-    accountQuota,
-    bucket,
-    account,
-    inflight,
-    inflightForCheck,
-    action,
-    log,
-    callback,
-) {
-    let bucketQuotaExceeded = false;
-    let accountQuotaExceeded = false;
-    const creationDate = new Date(bucket.getCreationDate()).getTime();
-    return async.parallel({
-        bucketQuota: parallelDone => {
-            if (bucketQuota > 0) {
-                return QuotaService.getUtilizationMetrics('bucket',
-                    `${bucket.getName()}_${creationDate}`, null, {
-                        action,
-                        inflight,
-                    }, (err, bucketMetrics) => {
-                        if (err || inflight < 0) {
-                            return parallelDone(err);
-                        }
-                        if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
-                            bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
-                            log.debug('Bucket quota exceeded', {
-                                bucket: bucket.getName(),
-                                action,
-                                inflight,
-                                quota: bucketQuota,
-                                bytesTotal: bucketMetrics.bytesTotal,
-                            });
-                            bucketQuotaExceeded = true;
-                        }
-                        return parallelDone();
-                    });
-            }
-            return parallelDone();
-        },
-        accountQuota: parallelDone => {
-            if (accountQuota > 0 && account?.account) {
-                return QuotaService.getUtilizationMetrics('account',
-                    account.account, null, {
-                        action,
-                        inflight,
-                    }, (err, accountMetrics) => {
-                        if (err || inflight < 0) {
-                            return parallelDone(err);
-                        }
-                        if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
-                            accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
-                            log.debug('Account quota exceeded', {
-                                accountId: account.account,
-                                action,
-                                inflight,
-                                quota: accountQuota,
-                                bytesTotal: accountMetrics.bytesTotal,
-                            });
-                            accountQuotaExceeded = true;
-                        }
-                        return parallelDone();
-                    });
-            }
-            return parallelDone();
-        },
-    }, err => {
-        if (err) {
-            log.warn('Error evaluating quotas', {
-                error: err.name,
-                description: err.message,
-                isInflightDeletion: inflight < 0,
-            });
-        }
-        return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
-    });
-}
-
-/**
- * Monitors the duration of quota evaluation for a specific API method.
- *
- * @param {string} apiMethod - The name of the API method being monitored.
- * @param {string} type - The type of quota being evaluated.
- * @param {string} code - The code associated with the quota being evaluated.
- * @param {number} duration - The duration of the quota evaluation in nanoseconds.
- * @returns {undefined} - Returns nothing.
- */
-function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
-    monitoring.quotaEvaluationDuration.labels({
-        action: apiMethod,
-        type,
-        code,
-    }).observe(duration / 1e9);
-}
-
-/**
- *
- * @param {Request} request - request object
- * @param {BucketInfo} bucket - bucket object
- * @param {Account} account - account object
- * @param {array} apiNames - action names: operations to authorize
- * @param {string} apiMethod - the main API call
- * @param {number} inflight - inflight bytes
- * @param {boolean} isStorageReserved - Flag to check if the current quota, minus
- * the incoming bytes, are under the limit.
- * @param {Logger} log - logger
- * @param {function} callback - callback function
- * @returns {boolean} - true if the quota is valid, false otherwise
- */
-function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
-    if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
-        return callback(null);
-    }
-    let type;
-    let bucketQuotaExceeded = false;
-    let accountQuotaExceeded = false;
-    let quotaEvaluationDuration;
-    const requestStartTime = process.hrtime.bigint();
-    const bucketQuota = bucket.getQuota();
-    const accountQuota = account?.quota || 0;
-    const shouldSendInflights = config.isQuotaInflightEnabled();
-
-    if (bucketQuota && accountQuota) {
-        type = 'bucket+account';
-    } else if (bucketQuota) {
-        type = 'bucket';
-    } else {
-        type = 'account';
-    }
-
-    if (actionWithDataDeletion[apiMethod]) {
-        type = 'delete';
-    }
-
-    if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
-        if (bucketQuota > 0 || accountQuota > 0) {
-            log.warn('quota is set for a bucket, but the quota service is disabled', {
-                bucketName: bucket.getName(),
-            });
-            monitoring.requestWithQuotaMetricsUnavailable.inc();
-        }
-        return callback(null);
-    }
-
-    if (isStorageReserved) {
-        // eslint-disable-next-line no-param-reassign
-        inflight = 0;
-    }
-
-    return async.forEach(apiNames, (apiName, done) => {
-        // Object copy operations first check the target object,
-        // meaning the source object, containing the current bytes,
-        // is checked second. This logic handles these APIs calls by
-        // ensuring the bytes are positives (i.e., not an object
-        // replacement).
-        if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
-            // eslint-disable-next-line no-param-reassign
-            inflight = Math.abs(inflight);
-        } else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
-            return done();
-        }
-        // When inflights are disabled, the sum of the current utilization metrics
-        // and the current bytes are compared with the quota. The current bytes
-        // are not sent to the utilization service. When inflights are enabled,
-        // the sum of the current utilization metrics only are compared with the
-        // quota. They include the current inflight bytes sent in the request.
-        let _inflights = shouldSendInflights ? inflight : undefined;
-        const inflightForCheck = shouldSendInflights ? 0 : inflight;
-        return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
-            inflightForCheck, apiName, log,
-            (err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
-                if (err) {
-                    return done(err);
-                }
-
-                bucketQuotaExceeded = _bucketQuotaExceeded;
-                accountQuotaExceeded = _accountQuotaExceeded;
-
-                // Inflights are inverted: in case of cleanup, we just re-issue
-                // the same API call.
-                if (_inflights) {
-                    _inflights = -_inflights;
-                }
-
-                request.finalizerHooks.push((errorFromAPI, _done) => {
-                    const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
-                    const quotaCleanUpStartTime = process.hrtime.bigint();
-                    // Quotas are cleaned only in case of error in the API
-                    async.waterfall([
-                        cb => {
-                            if (errorFromAPI) {
-                                return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
-                                    null, apiName, log, cb);
-                            }
-                            return cb();
-                        },
-                    ], () => {
-                        monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
-                            Number(process.hrtime.bigint() - quotaCleanUpStartTime));
-                        return _done();
-                    });
-                });
-
-                return done();
-            });
-    }, err => {
-        quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
-        if (err) {
-            log.warn('Error getting metrics from the quota service, allowing the request', {
-                error: err.name,
-                description: err.message,
-            });
-        }
-        if (!actionWithDataDeletion[apiMethod] &&
-            (bucketQuotaExceeded || accountQuotaExceeded)) {
-            return callback(errors.QuotaExceeded);
-        }
-        return callback();
-    });
-}
-
-module.exports = {
-    processBytesToWrite,
-    isMetricStale,
-    validateQuotas,
-};

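The byte-accounting rules in the deleted processBytesToWrite reduce to a small set of deltas. A simplified restatement, not the production code: object metadata is reduced to plain byte counts, the objectRestore branch is omitted, and the sample numbers are hypothetical.

function quotaDelta(apiMethod, versioned, versionId, newBytes, objBytes, destBytes) {
    if (!newBytes && objBytes !== undefined) {
        if (apiMethod === 'objectCopy') {
            // copy to a versioned bucket (or a fresh key) adds the full size;
            // copy over an existing key on a non-versioned bucket stores the diff
            return (destBytes === undefined || versioned) ? objBytes : objBytes - destBytes;
        }
        if (!versioned || versionId) {
            return -objBytes; // delete frees the object's bytes
        }
        return 0;
    }
    if (newBytes && objBytes !== undefined && !versioned) {
        return newBytes - objBytes; // in-place replacement stores the diff
    }
    return newBytes || 0;
}

console.log(quotaDelta('objectPut', false, null, 500, 300));     // 200
console.log(quotaDelta('objectDelete', false, null, 0, 300));    // -300
console.log(quotaDelta('objectCopy', false, null, 0, 300, 100)); // 200
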
@@ -3,7 +3,7 @@ const constants = require('../../../constants');
 const services = require('../../services');
 const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
 const { pushMetric } = require('../../utapi/utilities');
-const monitoring = require('../../utilities/monitoringHandler');
+const monitoring = require('../../utilities/metrics');
 const { getLocationConstraintErrorMessage, processCurrents,
     validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
 const { config } = require('../../Config');

@@ -4,7 +4,7 @@ const services = require('../../services');
 const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
 const { pushMetric } = require('../../utapi/utilities');
 const versionIdUtils = versioning.VersionID;
-const monitoring = require('../../utilities/monitoringHandler');
+const monitoring = require('../../utilities/metrics');
 const { getLocationConstraintErrorMessage, processNonCurrents,
     validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
 const { config } = require('../../Config');

@@ -3,7 +3,7 @@ const constants = require('../../../constants');
 const services = require('../../services');
 const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
 const { pushMetric } = require('../../utapi/utilities');
-const monitoring = require('../../utilities/monitoringHandler');
+const monitoring = require('../../utilities/metrics');
 const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
 const { config } = require('../../Config');
 

@@ -4,7 +4,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
 const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
-const monitoring = require('../utilities/monitoringHandler');
+const monitoring = require('../utilities/metrics');
 
 /**
 * bucketDelete - DELETE bucket (currently supports only non-versioned buckets)

@@ -6,7 +6,7 @@ const { isBucketAuthorized } =
     require('./apiUtils/authorization/permissionChecks');
 const metadata = require('../metadata/wrapper');
 const { pushMetric } = require('../utapi/utilities');
-const monitoring = require('../utilities/monitoringHandler');
+const monitoring = require('../utilities/metrics');
 
 const requestType = 'bucketDeleteCors';
 
@@ -38,8 +38,8 @@ function bucketDeleteCors(authInfo, request, log, callback) {
         }
         log.trace('found bucket in metadata');
 
-        if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
-            authInfo, log, request, request.actionImplicitDenies)) {
+        if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request,
+            request.actionImplicitDenies)) {
             log.debug('access denied for user on bucket', {
                 requestType,
                 method: 'bucketDeleteCors',

@@ -21,7 +21,7 @@ function bucketDeleteEncryption(authInfo, request, log, callback) {
     const metadataValParams = {
         authInfo,
         bucketName,
-        requestType: request.apiMethods || 'bucketDeleteEncryption',
+        requestType: 'bucketDeleteEncryption',
         request,
     };
 

@@ -2,7 +2,7 @@ const metadata = require('../metadata/wrapper');
 const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-const monitoring = require('../utilities/monitoringHandler');
+const monitoring = require('../utilities/metrics');
 
 /**
 * bucketDeleteLifecycle - Delete the bucket Lifecycle configuration
@@ -18,7 +18,7 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
     const metadataValParams = {
         authInfo,
         bucketName,
-        requestType: request.apiMethods || 'bucketDeleteLifecycle',
+        requestType: 'bucketDeleteLifecycle',
         request,
     };
     return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {