Compare commits
2 Commits: developmen ... FT/AntoraS

Author | SHA1 | Date
---|---|---
LaureVergeron | 9de151abeb |
LaureVergeron | 5e62766a9c |
@@ -1,9 +1,3 @@
 node_modules
 localData/*
 localMetadata/*
-# Keep the .git/HEAD file in order to properly report version
-.git/objects
-.github
-.tox
-coverage
-.DS_Store

.eslintrc
@@ -1,54 +1 @@
-{
-    "extends": "scality",
-    "plugins": [
-        "mocha"
-    ],
-    "rules": {
-        "import/extensions": "off",
-        "lines-around-directive": "off",
-        "no-underscore-dangle": "off",
-        "indent": "off",
-        "object-curly-newline": "off",
-        "operator-linebreak": "off",
-        "function-paren-newline": "off",
-        "import/newline-after-import": "off",
-        "prefer-destructuring": "off",
-        "implicit-arrow-linebreak": "off",
-        "no-bitwise": "off",
-        "dot-location": "off",
-        "comma-dangle": "off",
-        "no-undef-init": "off",
-        "global-require": "off",
-        "import/no-dynamic-require": "off",
-        "class-methods-use-this": "off",
-        "no-plusplus": "off",
-        "no-else-return": "off",
-        "object-property-newline": "off",
-        "import/order": "off",
-        "no-continue": "off",
-        "no-tabs": "off",
-        "lines-between-class-members": "off",
-        "prefer-spread": "off",
-        "no-lonely-if": "off",
-        "no-useless-escape": "off",
-        "no-restricted-globals": "off",
-        "no-buffer-constructor": "off",
-        "import/no-extraneous-dependencies": "off",
-        "space-unary-ops": "off",
-        "no-useless-return": "off",
-        "no-unexpected-multiline": "off",
-        "no-mixed-operators": "off",
-        "newline-per-chained-call": "off",
-        "operator-assignment": "off",
-        "spaced-comment": "off",
-        "comma-style": "off",
-        "no-restricted-properties": "off",
-        "new-parens": "off",
-        "no-multi-spaces": "off",
-        "quote-props": "off",
-        "mocha/no-exclusive-tests": "error",
-    },
-    "parserOptions": {
-        "ecmaVersion": 2020
-    }
-}
+{ "extends": "scality" }

@@ -1,32 +1,19 @@
-# General support information
+# Issue template
 
-GitHub Issues are **reserved** for actionable bug reports (including
-documentation inaccuracies), and feature requests.
-**All questions** (regarding configuration, use cases, performance, community,
-events, setup and usage recommendations, among other things) should be asked on
-the **[Zenko Forum](http://forum.zenko.io/)**.
-
-> Questions opened as GitHub issues will systematically be closed, and moved to
-> the [Zenko Forum](http://forum.zenko.io/).
-
---------------------------------------------------------------------------------
-
-## Avoiding duplicates
-
-When reporting a new issue/requesting a feature, make sure that we do not have
-any duplicates already open:
-
-- search the issue list for this repository (use the search bar, select
-  "Issues" on the left pane after searching);
-- if there is a duplicate, please do not open your issue, and add a comment
-  to the existing issue instead.
-
---------------------------------------------------------------------------------
+If you are reporting a new issue, make sure that we do not have any
+duplicates already open. You can ensure this by searching the issue list for
+this repository. If there is a duplicate, please close your issue and add a
+comment to the existing issue instead.
+
+## General support information
+
+GitHub Issues are reserved for actionable bug reports and feature requests.
+General questions should be sent to the
+[S3 scality server Forum](http://forum.scality.com/).
 
 ## Bug report information
 
-(delete this section (everything between the lines) if you're not reporting a bug
-but requesting a feature)
+(delete this section if not applicable)
 
 ### Description
 
@@ -42,22 +29,13 @@ Describe the results you received
 
 ### Expected result
 
-Describe the results you expected
+Describe the results you expecteds
 
-### Additional information
+### Additional information: (Node.js version, Docker version, etc)
 
-- Node.js version,
-- Docker version,
-- yarn version,
-- distribution/OS,
-- optional: anything else you deem helpful to us.
-
---------------------------------------------------------------------------------
-
 ## Feature Request
 
-(delete this section (everything between the lines) if you're not requesting
-a feature but reporting a bug)
+(delete this section if not applicable)
 
 ### Proposal
 
@@ -74,14 +52,3 @@ What you would like to happen
 
 ### Use case
 
 Please provide use cases for changing the current behavior
-
-### Additional information
-
-- Is this request for your company? Y/N
-- If Y: Company name:
-- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
-- Are you willing to contribute this feature yourself?
-- Position/Title:
-- How did you hear about us?
-
---------------------------------------------------------------------------------

@@ -1,43 +0,0 @@
----
-name: "Setup CI environment"
-description: "Setup Cloudserver CI environment"
-
-runs:
-  using: composite
-  steps:
-    - name: Setup etc/hosts
-      shell: bash
-      run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
-    - name: Setup Credentials
-      shell: bash
-      run: bash .github/scripts/credentials.bash
-    - name: Setup job artifacts directory
-      shell: bash
-      run: |-
-        set -exu;
-        mkdir -p /tmp/artifacts/${JOB_NAME}/;
-    - uses: actions/setup-node@v4
-      with:
-        node-version: '16'
-        cache: 'yarn'
-    - name: install dependencies
-      shell: bash
-      run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
-    - uses: actions/cache@v3
-      with:
-        path: ~/.cache/pip
-        key: ${{ runner.os }}-pip
-    - uses: actions/setup-python@v4
-      with:
-        python-version: 3.9
-    - name: Setup python2 test environment
-      shell: bash
-      run: |
-        sudo apt-get install -y libdigest-hmac-perl
-        pip install 's3cmd==2.3.0'
-    - name: fix sproxyd.conf permissions
-      shell: bash
-      run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
-    - name: ensure fuse kernel module is loaded (for sproxyd)
-      shell: bash
-      run: sudo modprobe fuse

@@ -1,25 +0,0 @@
-FROM ceph/daemon:v3.2.1-stable-3.2-mimic-centos-7
-
-ENV CEPH_DAEMON demo
-ENV CEPH_DEMO_DAEMONS mon,mgr,osd,rgw
-
-ENV CEPH_DEMO_UID zenko
-ENV CEPH_DEMO_ACCESS_KEY accessKey1
-ENV CEPH_DEMO_SECRET_KEY verySecretKey1
-ENV CEPH_DEMO_BUCKET zenkobucket
-
-ENV CEPH_PUBLIC_NETWORK 0.0.0.0/0
-ENV MON_IP 0.0.0.0
-ENV NETWORK_AUTO_DETECT 4
-ENV RGW_CIVETWEB_PORT 8001
-
-RUN rm /etc/yum.repos.d/tcmu-runner.repo
-
-ADD ./entrypoint-wrapper.sh /
-RUN chmod +x /entrypoint-wrapper.sh && \
-    yum install -y python-pip && \
-    yum clean all && \
-    pip install awscli && \
-    rm -rf /root/.cache/pip
-
-ENTRYPOINT [ "/entrypoint-wrapper.sh" ]

@@ -1,37 +0,0 @@
-#!/bin/sh
-
-touch /artifacts/ceph.log
-mkfifo /tmp/entrypoint_output
-# We run this in the background so that we can tail the RGW log after init,
-# because entrypoint.sh never returns
-
-# The next line will be needed when ceph builds 3.2.2 so I'll leave it here
-# bash /opt/ceph-container/bin/entrypoint.sh > /tmp/entrypoint_output &
-
-bash /entrypoint.sh > /tmp/entrypoint_output &
-entrypoint_pid="$!"
-while read -r line; do
-    echo $line
-    # When we find this line server has started
-    if [ -n "$(echo $line | grep 'Creating bucket')" ]; then
-        break
-    fi
-done < /tmp/entrypoint_output
-
-# Make our buckets - CEPH_DEMO_BUCKET is set to force the "Creating bucket" message, but unused
-s3cmd mb s3://cephbucket s3://cephbucket2
-
-mkdir /root/.aws
-cat > /root/.aws/credentials <<EOF
-[default]
-aws_access_key_id = accessKey1
-aws_secret_access_key = verySecretKey1
-EOF
-
-# Enable versioning on them
-for bucket in cephbucket cephbucket2; do
-    echo "Enabling versiong for $bucket"
-    aws --endpoint http://127.0.0.1:8001 s3api put-bucket-versioning --bucket $bucket --versioning Status=Enabled
-done
-tail -f /var/log/ceph/client.rgw.*.log | tee -a /artifacts/ceph.log
-wait $entrypoint_pid

@@ -1,11 +0,0 @@
-#!/bin/sh
-
-# This script is needed because RADOS Gateway
-# will open the port before beginning to serve traffic
-# causing wait_for_local_port.bash to exit immediately
-
-echo 'Waiting for ceph'
-while [ -z "$(curl 127.0.0.1:8001 2>/dev/null)" ]; do
-    sleep 1
-    echo -n "."
-done

@@ -1,10 +0,0 @@
----
-version: 2
-updates:
-  - package-ecosystem: npm
-    directory: "/"
-    schedule:
-      interval: daily
-      time: "13:00"
-    open-pull-requests-limit: 10
-    target-branch: "development/7.4"

@@ -1,36 +0,0 @@
-azurebackend_AZURE_STORAGE_ACCESS_KEY
-azurebackend_AZURE_STORAGE_ACCOUNT_NAME
-azurebackend_AZURE_STORAGE_ENDPOINT
-azurebackend2_AZURE_STORAGE_ACCESS_KEY
-azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
-azurebackend2_AZURE_STORAGE_ENDPOINT
-azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
-azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
-azurebackendmismatch_AZURE_STORAGE_ENDPOINT
-azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
-azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
-azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
-azuretest_AZURE_BLOB_ENDPOINT
-b2backend_B2_ACCOUNT_ID
-b2backend_B2_STORAGE_ACCESS_KEY
-GOOGLE_SERVICE_EMAIL
-GOOGLE_SERVICE_KEY
-AWS_S3_BACKEND_ACCESS_KEY
-AWS_S3_BACKEND_SECRET_KEY
-AWS_S3_BACKEND_ACCESS_KEY_2
-AWS_S3_BACKEND_SECRET_KEY_2
-AWS_GCP_BACKEND_ACCESS_KEY
-AWS_GCP_BACKEND_SECRET_KEY
-AWS_GCP_BACKEND_ACCESS_KEY_2
-AWS_GCP_BACKEND_SECRET_KEY_2
-b2backend_B2_STORAGE_ENDPOINT
-gcpbackend2_GCP_SERVICE_EMAIL
-gcpbackend2_GCP_SERVICE_KEY
-gcpbackend2_GCP_SERVICE_KEYFILE
-gcpbackend_GCP_SERVICE_EMAIL
-gcpbackend_GCP_SERVICE_KEY
-gcpbackendmismatch_GCP_SERVICE_EMAIL
-gcpbackendmismatch_GCP_SERVICE_KEY
-gcpbackend_GCP_SERVICE_KEYFILE
-gcpbackendmismatch_GCP_SERVICE_KEYFILE
-gcpbackendnoproxy_GCP_SERVICE_KEYFILE

@@ -1,92 +0,0 @@
-services:
-  cloudserver:
-    image: ${CLOUDSERVER_IMAGE}
-    command: sh -c "yarn start > /artifacts/s3.log"
-    network_mode: "host"
-    volumes:
-      - /tmp/ssl:/ssl
-      - /tmp/ssl-kmip:/ssl-kmip
-      - ${HOME}/.aws/credentials:/root/.aws/credentials
-      - /tmp/artifacts/${JOB_NAME}:/artifacts
-    environment:
-      - CI=true
-      - ENABLE_LOCAL_CACHE=true
-      - REDIS_HOST=0.0.0.0
-      - REDIS_PORT=6379
-      - REPORT_TOKEN=report-token-1
-      - REMOTE_MANAGEMENT_DISABLE=1
-      - HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
-      - DATA_HOST=0.0.0.0
-      - METADATA_HOST=0.0.0.0
-      - S3BACKEND
-      - S3DATA
-      - S3METADATA
-      - MPU_TESTING
-      - S3VAULT
-      - S3_LOCATION_FILE
-      - ENABLE_UTAPI_V2
-      - BUCKET_DENY_FILTER
-      - S3KMS
-      - S3KMIP_PORT
-      - S3KMIP_HOSTS
-      - S3KMIP-COMPOUND_CREATE
-      - S3KMIP_BUCKET_ATTRIBUTE_NAME
-      - S3KMIP_PIPELINE_DEPTH
-      - S3KMIP_KEY
-      - S3KMIP_CERT
-      - S3KMIP_CA
-      - MONGODB_HOSTS=0.0.0.0:27018
-      - MONGODB_RS=rs0
-      - DEFAULT_BUCKET_KEY_FORMAT
-      - METADATA_MAX_CACHED_BUCKETS
-      - ENABLE_NULL_VERSION_COMPAT_MODE
-      - SCUBA_HOST
-      - SCUBA_PORT
-      - SCUBA_HEALTHCHECK_FREQUENCY
-      - S3QUOTA
-      - QUOTA_ENABLE_INFLIGHTS
-    env_file:
-      - creds.env
-    depends_on:
-      - redis
-    extra_hosts:
-      - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
-      - "pykmip.local:127.0.0.1"
-  redis:
-    image: redis:alpine
-    network_mode: "host"
-  squid:
-    network_mode: "host"
-    profiles: ['ci-proxy']
-    image: scality/ci-squid
-    command: >-
-      sh -c 'mkdir -p /ssl &&
-      openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
-      -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
-      -keyout /ssl/myca.pem -out /ssl/myca.pem &&
-      cp /ssl/myca.pem /ssl/CA.pem &&
-      squid -f /etc/squid/squid.conf -N -z &&
-      squid -f /etc/squid/squid.conf -NYCd 1'
-    volumes:
-      - /tmp/ssl:/ssl
-  pykmip:
-    network_mode: "host"
-    profiles: ['pykmip']
-    image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
-    volumes:
-      - /tmp/artifacts/${JOB_NAME}:/artifacts
-  mongo:
-    network_mode: "host"
-    profiles: ['mongo', 'ceph']
-    image: ${MONGODB_IMAGE}
-  ceph:
-    network_mode: "host"
-    profiles: ['ceph']
-    image: ghcr.io/scality/cloudserver/ci-ceph
-  sproxyd:
-    network_mode: "host"
-    profiles: ['sproxyd']
-    image: sproxyd-standalone
-    build: ./sproxyd
-    user: 0:0
-    privileged: yes

@@ -1,28 +0,0 @@
-FROM mongo:5.0.21
-
-ENV USER=scality \
-    HOME_DIR=/home/scality \
-    CONF_DIR=/conf \
-    DATA_DIR=/data
-
-# Set up directories and permissions
-RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
-    mkdir /logs; \
-    adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
-
-# Set up environment variables and directories for scality user
-RUN mkdir ${CONF_DIR} && \
-    chown -R ${USER} ${CONF_DIR} && \
-    chown -R ${USER} ${DATA_DIR}
-
-# copy the mongo config file
-COPY /conf/mongod.conf /conf/mongod.conf
-COPY /conf/mongo-run.sh /conf/mongo-run.sh
-COPY /conf/initReplicaSet /conf/initReplicaSet.js
-
-EXPOSE 27017/tcp
-EXPOSE 27018
-
-# Set up CMD
-ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
-CMD ["bash", "/conf/mongo-run.sh"]

@@ -1,4 +0,0 @@
-rs.initiate({
-    _id: "rs0",
-    members: [{ _id: 0, host: "127.0.0.1:27018" }]
-});

@@ -1,10 +0,0 @@
-#!/bin/bash
-set -exo pipefail
-
-init_RS() {
-    sleep 5
-    mongo --port 27018 /conf/initReplicaSet.js
-}
-init_RS &
-
-mongod --bind_ip_all --config=/conf/mongod.conf

@@ -1,15 +0,0 @@
-storage:
-  journal:
-    enabled: true
-  engine: wiredTiger
-  dbPath: "/data/db"
-processManagement:
-  fork: false
-net:
-  port: 27018
-  bindIp: 0.0.0.0
-replication:
-  replSetName: "rs0"
-  enableMajorityReadConcern: true
-security:
-  authorization: disabled

@@ -1,3 +0,0 @@
-FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
-ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
-RUN chown root:root /conf/sproxyd0.conf

@@ -1,26 +0,0 @@
-fastcgi_param QUERY_STRING $query_string;
-fastcgi_param REQUEST_METHOD $request_method;
-fastcgi_param CONTENT_TYPE $content_type;
-fastcgi_param CONTENT_LENGTH $content_length;
-
-#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
-fastcgi_param SCRIPT_NAME /var/www;
-fastcgi_param PATH_INFO $document_uri;
-
-fastcgi_param REQUEST_URI $request_uri;
-fastcgi_param DOCUMENT_URI $document_uri;
-fastcgi_param DOCUMENT_ROOT $document_root;
-fastcgi_param SERVER_PROTOCOL $server_protocol;
-fastcgi_param HTTPS $https if_not_empty;
-
-fastcgi_param GATEWAY_INTERFACE CGI/1.1;
-fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
-
-fastcgi_param REMOTE_ADDR $remote_addr;
-fastcgi_param REMOTE_PORT $remote_port;
-fastcgi_param SERVER_ADDR $server_addr;
-fastcgi_param SERVER_PORT $server_port;
-fastcgi_param SERVER_NAME $server_name;
-
-# PHP only, required if PHP was built with --enable-force-cgi-redirect
-fastcgi_param REDIRECT_STATUS 200;

@@ -1,88 +0,0 @@
-worker_processes 1;
-error_log /logs/error.log;
-user root root;
-events {
-    worker_connections 1000;
-    reuse_port on;
-    multi_accept on;
-}
-worker_rlimit_nofile 20000;
-http {
-    root /var/www/;
-    upstream sproxyds {
-        least_conn;
-        keepalive 40;
-        server 127.0.0.1:20000;
-    }
-    server {
-        client_max_body_size 0;
-        client_body_timeout 150;
-        client_header_timeout 150;
-        postpone_output 0;
-        client_body_postpone_size 0;
-        keepalive_requests 1100;
-        keepalive_timeout 300s;
-        server_tokens off;
-        default_type application/octet-stream;
-        gzip off;
-        tcp_nodelay on;
-        tcp_nopush on;
-        sendfile on;
-        listen 81;
-        server_name localhost;
-        rewrite ^/arc/(.*)$ /dc1/$1 permanent;
-        location ~* ^/proxy/(.*)$ {
-            rewrite ^/proxy/(.*)$ /$1 last;
-        }
-        allow 127.0.0.1;
-
-        deny all;
-        set $usermd '-';
-        set $sentusermd '-';
-        set $elapsed_ms '-';
-        set $now '-';
-        log_by_lua '
-            if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
-                ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
-            end
-            if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
-                ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
-            end
-            local elapsed_ms = tonumber(ngx.var.request_time)
-            if not ( elapsed_ms == nil) then
-                elapsed_ms = elapsed_ms * 1000
-                ngx.var.elapsed_ms = tostring(elapsed_ms)
-            end
-            local time = tonumber(ngx.var.msec) * 1000
-            ngx.var.now = time
-        ';
-        log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
-            '"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
-            '"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
-            '"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
-            '"contentType":"$content_type","s3Address":"$remote_addr",'
-            '"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
-            '"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
-            '"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
-            '"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
-            '"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
-            '"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
-        access_log /dev/stdout irm;
-        error_log /dev/stdout error;
-        location / {
-            proxy_request_buffering off;
-            fastcgi_request_buffering off;
-            fastcgi_no_cache 1;
-            fastcgi_cache_bypass 1;
-            fastcgi_buffering off;
-            fastcgi_ignore_client_abort on;
-            fastcgi_keep_conn on;
-            include fastcgi_params;
-            fastcgi_pass sproxyds;
-            fastcgi_next_upstream error timeout;
-            fastcgi_send_timeout 285s;
-            fastcgi_read_timeout 285s;
-        }
-    }
-}

@@ -1,12 +0,0 @@
-{
-    "general": {
-        "ring": "DATA",
-        "port": 20000,
-        "syslog_facility": "local0"
-    },
-    "ring_driver:0": {
-        "alias": "dc1",
-        "type": "local",
-        "queue_path": "/tmp/ring-objs"
-    },
-}

@@ -1,43 +0,0 @@
-[supervisord]
-nodaemon = true
-loglevel = info
-logfile = %(ENV_LOG_DIR)s/supervisord.log
-pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
-logfile_maxbytes = 20MB
-logfile_backups = 2
-
-[unix_http_server]
-file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
-
-[program:nginx]
-directory=%(ENV_SUP_RUN_DIR)s
-command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
-stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
-stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
-stdout_logfile_maxbytes=100MB
-stdout_logfile_backups=7
-stderr_logfile_maxbytes=100MB
-stderr_logfile_backups=7
-autorestart=true
-autostart=true
-user=root
-
-[program:sproxyd]
-directory=%(ENV_SUP_RUN_DIR)s
-process_name=%(program_name)s-%(process_num)s
-numprocs=1
-numprocs_start=0
-command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
-stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
-stdout_logfile_maxbytes=100MB
-stdout_logfile_backups=7
-redirect_stderr=true
-autorestart=true
-autostart=true
-user=root

@@ -1,29 +0,0 @@
-FROM python:3.10-alpine
-
-RUN apk add --no-cache \
-    libressl && \
-    apk add --no-cache --virtual .build-deps \
-    python3-dev \
-    libffi-dev \
-    libressl-dev \
-    sqlite-dev \
-    build-base \
-    curl
-
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-ENV PATH="/root/.cargo/bin:${PATH}"
-
-RUN pip3 install -U pip && \
-    pip3 install pykmip requests && \
-    apk del .build-deps && \
-    mkdir /pykmip
-
-
-ADD ./bin /usr/local/bin
-ADD ./certs /ssl
-ADD policy.json /etc/pykmip/policies/policy.json
-ADD server.conf /etc/pykmip/server.conf
-ADD docker-entrypoint.sh /
-RUN chmod +x /docker-entrypoint.sh
-
-ENTRYPOINT ["/docker-entrypoint.sh"]

@@ -1,156 +0,0 @@
-#!/usr/bin/env python
-
-from cryptography import x509
-from cryptography.hazmat import backends
-from cryptography.hazmat.primitives import hashes
-from cryptography.hazmat.primitives import serialization
-from cryptography.hazmat.primitives.asymmetric import rsa
-
-import datetime
-import argparse
-import sys
-
-
-def get_args():
-    parser = argparse.ArgumentParser(
-        prog=sys.argv[0],
-        description='Tool to generate a x509 CA root, server and client certs')
-    parser.add_argument('-c', '--common-name', action='store',
-                        default='localhost',
-                        help='Set the common name for the server-side cert')
-    return parser.parse_args()
-
-
-def create_rsa_private_key(key_size=2048, public_exponent=65537):
-    private_key = rsa.generate_private_key(
-        public_exponent=public_exponent,
-        key_size=key_size,
-        backend=backends.default_backend()
-    )
-    return private_key
-
-
-def create_self_signed_certificate(subject_name,
-                                   private_key,
-                                   days_valid=36500):
-    subject = x509.Name([
-        x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
-        x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name)
-    ])
-    certificate = x509.CertificateBuilder().subject_name(
-        subject
-    ).issuer_name(
-        subject
-    ).public_key(
-        private_key.public_key()
-    ).serial_number(
-        x509.random_serial_number()
-    ).not_valid_before(
-        datetime.datetime.utcnow()
-    ).not_valid_after(
-        datetime.datetime.utcnow() + datetime.timedelta(days=days_valid)
-    ).add_extension(
-        x509.BasicConstraints(True, None),
-        critical=True
-    ).sign(private_key, hashes.SHA256(), backends.default_backend())
-
-    return certificate
-
-
-def create_certificate(subject_name,
-                       private_key,
-                       signing_certificate,
-                       signing_key,
-                       days_valid=36500,
-                       client_auth=False):
-    subject = x509.Name([
-        x509.NameAttribute(x509.NameOID.ORGANIZATION_NAME, u"Scality"),
-        x509.NameAttribute(x509.NameOID.COMMON_NAME, subject_name)
-    ])
-    builder = x509.CertificateBuilder().subject_name(
-        subject
-    ).issuer_name(
-        signing_certificate.subject
-    ).public_key(
-        private_key.public_key()
-    ).serial_number(
-        x509.random_serial_number()
-    ).not_valid_before(
-        datetime.datetime.utcnow()
-    ).not_valid_after(
-        datetime.datetime.utcnow() + datetime.timedelta(days=days_valid)
-    )
-
-    if client_auth:
-        builder = builder.add_extension(
-            x509.ExtendedKeyUsage([x509.ExtendedKeyUsageOID.CLIENT_AUTH]),
-            critical=True
-        )
-
-    certificate = builder.sign(
-        signing_key,
-        hashes.SHA256(),
-        backends.default_backend()
-    )
-    return certificate
-
-
-def main(common_name):
-    root_key = create_rsa_private_key()
-    root_certificate = create_self_signed_certificate(
-        u"Root CA",
-        root_key
-    )
-
-    server_key = create_rsa_private_key()
-    server_certificate = create_certificate(
-        common_name,
-        server_key,
-        root_certificate,
-        root_key
-    )
-
-    john_doe_client_key = create_rsa_private_key()
-    john_doe_client_certificate = create_certificate(
-        u"John Doe",
-        john_doe_client_key,
-        root_certificate,
-        root_key,
-        client_auth=True
-    )
-
-    with open("certs/kmip-ca.pem", "wb") as f:
-        f.write(
-            root_certificate.public_bytes(
-                serialization.Encoding.PEM
-            )
-        )
-    with open("certs/kmip-key.pem", "wb") as f:
-        f.write(server_key.private_bytes(
-            encoding=serialization.Encoding.PEM,
-            format=serialization.PrivateFormat.PKCS8,
-            encryption_algorithm=serialization.NoEncryption()
-        ))
-    with open("certs/kmip-cert.pem", "wb") as f:
-        f.write(
-            server_certificate.public_bytes(
-                serialization.Encoding.PEM
-            )
-        )
-    with open("certs/kmip-client-key.pem", "wb") as f:
-        f.write(john_doe_client_key.private_bytes(
-            encoding=serialization.Encoding.PEM,
-            format=serialization.PrivateFormat.PKCS8,
-            encryption_algorithm=serialization.NoEncryption()
-        ))
-    with open("certs/kmip-client-cert.pem", "wb") as f:
-        f.write(
-            john_doe_client_certificate.public_bytes(
-                serialization.Encoding.PEM
-            )
-        )
-
-
-if __name__ == '__main__':
-    args = get_args()
-    main(args.common_name)

@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging  # noqa: E402
-
-logging.basicConfig(level=logging.DEBUG)
-
-from kmip.services.server import server  # noqa: E402
-
-if __name__ == '__main__':
-    print('Starting PyKMIP server on 0.0.0.0:5696')
-    server.main()

@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC6zCCAdOgAwIBAgIUPIpMY95b4HjKAk+FyydZApAEFskwDQYJKoZIhvcNAQEL
-BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
-MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJDEQMA4GA1UECgwHU2NhbGl0eTEQ
-MA4GA1UEAwwHUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-AKqLFEsWtfRTxnoZrQe63tq+rQnVgninHMahRmXkzyjK/uNhoKnIh8bXdTC/eCZ6
-FBROqBYNL0TJb0HDv1FzcZS1UCUldRqTlvr6wZb0pfrp40fvztsqQgAh1t/Blg5i
-Zv5+ESSlNs5rWbFTxtq+FbMW/ERYTrVfnMkBiLg4Gq0HwID9a5jvJatzrrno2s1m
-OfZCT3HaE3tMZ6vvYuoamvLNdvdH+9KeTmBCursfNejt0rSGjIqfi6DvFJSayydQ
-is5DMSTbCLGdKQmA85VfEQmlQ8v0232WDSd6gVfp2tthDEDHnCbgWkEd1vsTyS85
-ubdt5v4CWGOWV+mu3bf8xM0CAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq
-hkiG9w0BAQsFAAOCAQEARTjc2zV/ol1/LsSzZy6l1R0uFBmR2KumH+Se1Yq2vKpY
-Dv6xmrvmjOUr5RBO77nRhIgdcQA+LyAg8ii2Dfzc8r1RTD+j1bYOxESXctBOBcXM
-Chy6FEBydR6m7S8qQyL+caJWO1WZWp2tapcm6sUG1oRVznWtK1/SHKIzOBwsmJ07
-79KsCJ6wf9tzD05EDTI2QhAObE9/thy+zc8l8cmv9A6p3jKkx9rwXUttSUqTn0CW
-w45bgKg6+DDcrhZ+MATbzuTfhuA4NFUTzK7KeX9sMuOV03Zs8SA3VhAOXmu063M3
-0f9X7P/0RmGTTp7GGCqEINcZdbLh3k7CpFb2Ox998Q==
------END CERTIFICATE-----

@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC2zCCAcOgAwIBAgIUIlE8UAkqQ+6mbJDtrt9kkmi8aJYwDQYJKoZIhvcNAQEL
-BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
-MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowKTEQMA4GA1UECgwHU2NhbGl0eTEV
-MBMGA1UEAwwMcHlrbWlwLmxvY2FsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
-CgKCAQEAtxr7pq/lnzVeZz4z52Yc3DeaPqjNfRSyW5cPUlT7ABXFb7+tja7K2C7u
-DYVK+Q+2yJCQwYJY47aKJB++ewam9t2V8Xy0Z8S+0I2ImCwuyeihaD/f6uJZRzms
-ycdECH22BA6tCPlQLnlboRiZzI6rcIvXAbUMvLvFm3nyYIs9qidExRnfyMjISknM
-V+83LT5QW4IcHgKYqzdz2ZmOnk+f4wmMmitcivTdIZCL8Z0cxr7BJlOh5JZ/V5uj
-WUXeNa+ttW0RKKBlg9T+wj0JvwoJBPZTmsMAy3tI9tjLg3DwGYKsflbFeU2tebXI
-gncGFZ/dFxj331GGtq3kz1PzAUYf2wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB1
-8HgJ0fu6/pCrDxAm90eESFjmaTFyTN8q00zhq4Cb3zAT9KMWzAygkZ9n4ZFgELPo
-7kBE2H6RcDdoBmjVYd8HnBloDdYzYbncKgt5YBvxRaMSF4/l65BM8wjatyXErqnH
-QLLTRe5AuF0/F0KtPeDQ2JFVu8dZ35W3fyKGPRsEdVOSCTHROmqpGhZCpscyUP4W
-Hb0dBTESQ9mQHw14OCaaahARd0X5WdcA/E+m0fpGqj1rQCXS+PrRcSLe1E1hqPlK
-q/hXSXD5nybwipktELvJCbB7l4HmJr2pIpldeR5+ef68Cs8hqs6DRlsJX9sK2ng+
-TFe5v6SCarqZ9kFvr6Yp
------END CERTIFICATE-----

@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC8zCCAdugAwIBAgIUBs6nVXQXhrFbClub3aSLg72/DiYwDQYJKoZIhvcNAQEL
-BQAwJDEQMA4GA1UECgwHU2NhbGl0eTEQMA4GA1UEAwwHUm9vdCBDQTAgFw0yMTA0
-MDkwMDI4MTFaGA8yMTIxMDMxNjAwMjgxMVowJTEQMA4GA1UECgwHU2NhbGl0eTER
-MA8GA1UEAwwISm9obiBEb2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
-AQC6neSYoBoWh/i2mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4L
-jLLRsPGt9KkJlUhHGWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jt
-UDnQUwshj+hRFe0iKAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsH
-PVju9yZBYzYhwAMyYFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFip
-yR2Fh3WGSwWh45HgMT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+p
-SMLm0T4iNxedQWBtDM7ts4EjAgMBAAGjGjAYMBYGA1UdJQEB/wQMMAoGCCsGAQUF
-BwMCMA0GCSqGSIb3DQEBCwUAA4IBAQCMi9HEhZc5jHJMj18Wq00fZy4O9XtjCe0J
-nntW9tzi3rTQcQWKA7i9uVdDoCg+gMFVxWMvV7luFEUc/VYV1v8hFfbIFygzFsZY
-xwv4GQaIwbsgzD+oziia53w0FSuNL0uE0MeKvrt3yzHxCxylHyl+TQd/UdAtAo+k
-RL1sI0mBZx5qo6d1J7ZMCxzAGaT7KjnJvziFr/UbfSNnwDsxsUwGaI1ZeAxJN8DI
-zTrg3f3lrrmHcauEgKnuQwIqaMZR6veG6RkjtcYSlJYID1irkE6njs7+wivOAkzt
-fBt/0PD76FmAI0VArgU/zDB8dGyYzrq39W749LuEfm1TPmlnUtDr
------END CERTIFICATE-----

@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6neSYoBoWh/i2
-mBpduJnTlXacpJ0iQqLezvcGy8qR0s/48mtfV2IRGTNVsq4LjLLRsPGt9KkJlUhH
-GWhG00cBGEsIiJiBUr+WrEsO04ME/Sk76kX8wk/t9Oljl7jtUDnQUwshj+hRFe0i
-KAyE65JIutu5EiiNtOqMzbVgPNfNniAaGlrgwByJaS9arzsHPVju9yZBYzYhwAMy
-YFcXUGrgvHRCHKmxBi4QmV7DX4TeN4l9TrCyEmqDev4PRFipyR2Fh3WGSwWh45Hg
-MT+Jp6Uv6yI4wMXWJAcNkHdx1OhjBoUQrkavvdeVEnCwjQ+pSMLm0T4iNxedQWBt
-DM7ts4EjAgMBAAECggEANNXdUeUKXdSzcycPV/ea/c+0XFcy8e9B46lfQTpTqQOx
-xD8GbWD1L/gdk6baJgT43+ukEWdSsJbmdtLXti29Ta8OF2VtIDhIbCVtvs3dq3zt
-vrvugsiVDr8nkP306qOrKrNIVIFE+igmEmSaXsu/h/33ladxeeV9/s2DC7NOOjWN
-Mu4KYr5BBbu3qAavdzbrcz7Sch+GzsYqK/pBounCTQu3o9E4TSUcmcsasWmtHN3u
-e6G2UjObdzEW7J0wWvvtJ0wHQUVRueHfqwqKf0dymcZ3xOlx3ZPhKPz5n4F1UGUt
-RQaNazqs5SzZpUgDuPw4k8h/aCHK21Yexw/l4+O9KQKBgQD1WZSRK54zFoExBQgt
-OZSBNZW3Ibti5lSiF0M0g+66yNZSWfPuABEH0tu5CXopdPDXo4kW8NLGEqQStWTX
-RGK0DE9buEL3eebOfjIdS2IZ3t3dX3lMypplVCj4HzAgITlweSH1LLTyAtaaOpwa
-jksqfcn5Zw+XGkyc6GBBVaZetQKBgQDCt6Xf/g26+zjvHscjdzsfBhnYvTOrr6+F
-xqFFxOEOocGr+mL7UTAs+a9m/6lOWhlagk+m+TIZNL8o3IN7KFTYxPYPxTiewgVE
-rIm3JBmPxRiPn01P3HrtjaqfzsXF30j3ele7ix5OxieZq4vsW7ZXP3GZE34a08Ov
-12sE1DlvdwKBgQDzpYQOLhyqazzcqzyVfMrnDYmiFVN7QXTmiudobWRUBUIhAcdl
-oJdJB7K/rJOuO704x+RJ7dnCbZyWH6EGzZifaGIemXuXO21jvpqR0NyZCGOXhUp2
-YfS1j8AntwEZxyS9du2sBjui4gKvomiHTquChOxgSmKHEcznPTTpbN8MyQKBgF5F
-LVCZniolkLXsL7tS8VOez4qoZ0i6wP7CYLf3joJX+/z4N023S9yqcaorItvlMRsp
-tciAIyoi6F2vDRTmPNXJ3dtav4PVKVnLMs1w89MwOCjoljSQ6Q7zpGTEZenbpWbz
-W2BYBS9cLjXu4MpoyInLFINo9YeleLs8TvrCiKAXAoGBANsduqLnlUW/f5zDb5Fe
-SB51+KhBjsVIeYmU+8xtur9Z7IxZXK28wpoEsm7LmX7Va5dERjI+tItBiJ5+Unu1
-Xs2ljDg35ARKHs0dWBJGpbnZg4dbT6xpIL4YMPXm1Zu++PgRpxPIMn646xqd8GlH
-bavm6Km/fXNG58xus+EeLpV5
------END PRIVATE KEY-----

@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3Gvumr+WfNV5n
-PjPnZhzcN5o+qM19FLJblw9SVPsAFcVvv62NrsrYLu4NhUr5D7bIkJDBgljjtook
-H757Bqb23ZXxfLRnxL7QjYiYLC7J6KFoP9/q4llHOazJx0QIfbYEDq0I+VAueVuh
-GJnMjqtwi9cBtQy8u8WbefJgiz2qJ0TFGd/IyMhKScxX7zctPlBbghweApirN3PZ
-mY6eT5/jCYyaK1yK9N0hkIvxnRzGvsEmU6Hkln9Xm6NZRd41r621bREooGWD1P7C
-PQm/CgkE9lOawwDLe0j22MuDcPAZgqx+VsV5Ta15tciCdwYVn90XGPffUYa2reTP
-U/MBRh/bAgMBAAECggEABCvcMcbuDztzBB0Zp5re63Fk1SqZS9Et4wJE+hYvhaf5
-UHtoY8LoohYnnC0+MQBXpKgOdCoZBk8BRKNofnr/UL5pjQ/POFH2GuAujXDsO/NN
-wgc6fapcaE/7DLm6ZgsfG2aOMJclaXmgScI6trtFUpIM+t/6A06vyMP1bpeddwPW
-Fqu7NvpDiEcTRUGd+z1JooYgUhGgC7peYUx5+9zqFrwoDBKxnUOnz3BkDsXBy3qm
-65Vu0BSjuJzf6vVMpNGUHY6JXjopVNWku+JAX0wD+iikOd5sziNVdIj1fnZ+IHIf
-7G5h5owHpvSGzJFQ18/g5VHtJdCm+4WQSnbSJRsCAQKBgQDu4IH8yspyeH44fhoS
-PAp/OtILqSP+Da0zAp2LbhrOgyzyuSTdEAYyptqjqHS6QkB1Bu1H44FS0BYUxRXc
-iu2e9AndiLVCGngsE7TpA/ZVLN1B0LEZEHjM6p4d6zZM6iveKVnPAOkTWTBAgzCt
-b31nj4jL8PdlPKQil1AMrOlRAQKBgQDEOwshzIdr2Iy6B/n4CuBViEtwnbAd5f/c
-atA9bcfF8kCahokJsI4eCCLgBwDZpYKD+v0AwOBlacF6t6TX+vdlJsi5EP7uxZ22
-ILsuWqVm/0H77PACuckc5/qLZoGGC81l0DhnpoeMEb6r/TKOo5xAK1gxdlwNNrq+
-nP1zdZnU2wKBgBAS92xFUR4m0YeHpMV5WNN658t1FEDyNqdqE6PgQtmGpi2nG73s
-aB5cb/X3TfOCpce6MZlWy8sAyZuYL4Jprte1YDySCHBsS43bvZ64b4kHvdPB8UjY
-fOh9GSq2Oy8tysnmSm7NhuGQbNjKeyoQiIXBeNkQW/VqATl6qR5RPFoBAoGACNqV
-JQBCd/Y8W0Ry3eM3vgQ5SyqCQMcY5UwYez0Rz3efvJknY72InAhH8o2+VxOlsOjJ
-M5iAR3MfHLdeg7Q6J2E5m0gOCJ34ALi3WV8TqXMI+iH1rlnNnjVFU7bbTz4HFXnw
-oZSc9w/x53a0KkVtjmOmRg0OGDaI9ILG2MfMmhMCgYB8ZqJtX8qZ2TqKU3XdLZ4z
-T2N7xMFuKohWP420r5jKm3Xw85IC+y1SUTB9XGcL79r2eJzmzmdKQ3A3sf3oyUH3
-RdYWxtKcZ5PAE8hVRtn1ETZqUgxASGOUn/6w0npkYSOXPU5bc0W6RSLkjES0i+c3
-fv3OMNI8qpmQhEjpHHQS1g==
------END PRIVATE KEY-----

@@ -1,3 +0,0 @@
-#!/bin/sh
-
-python3 /usr/local/bin/run_server.py 2>&1 | tee -a /artifacts/pykmip.log

@@ -1,168 +0,0 @@
-{
-  "example": {
-    "preset": {
-      "CERTIFICATE": {
-        "LOCATE": "ALLOW_ALL",
-        "CHECK": "ALLOW_ALL",
-        "GET": "ALLOW_ALL",
-        "GET_ATTRIBUTES": "ALLOW_ALL",
-        "GET_ATTRIBUTE_LIST": "ALLOW_ALL",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_ALL",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      },
-      "SYMMETRIC_KEY": {
-        "REKEY": "ALLOW_OWNER",
-        "REKEY_KEY_PAIR": "ALLOW_OWNER",
-        "DERIVE_KEY": "ALLOW_OWNER",
-        "LOCATE": "ALLOW_OWNER",
-        "CHECK": "ALLOW_OWNER",
-        "GET": "ALLOW_OWNER",
-        "GET_ATTRIBUTES": "ALLOW_OWNER",
-        "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_OWNER",
-        "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      },
-      "PUBLIC_KEY": {
-        "LOCATE": "ALLOW_ALL",
-        "CHECK": "ALLOW_ALL",
-        "GET": "ALLOW_ALL",
-        "GET_ATTRIBUTES": "ALLOW_ALL",
-        "GET_ATTRIBUTE_LIST": "ALLOW_ALL",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_ALL",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      },
-      "PRIVATE_KEY": {
-        "REKEY": "ALLOW_OWNER",
-        "REKEY_KEY_PAIR": "ALLOW_OWNER",
-        "DERIVE_KEY": "ALLOW_OWNER",
-        "LOCATE": "ALLOW_OWNER",
-        "CHECK": "ALLOW_OWNER",
-        "GET": "ALLOW_OWNER",
-        "GET_ATTRIBUTES": "ALLOW_OWNER",
-        "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_OWNER",
-        "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      },
-      "SPLIT_KEY": {
-        "REKEY": "ALLOW_OWNER",
-        "REKEY_KEY_PAIR": "ALLOW_OWNER",
-        "DERIVE_KEY": "ALLOW_OWNER",
-        "LOCATE": "ALLOW_OWNER",
-        "CHECK": "ALLOW_OWNER",
-        "GET": "ALLOW_OWNER",
-        "GET_ATTRIBUTES": "ALLOW_OWNER",
-        "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_OWNER",
-        "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      },
-      "TEMPLATE": {
-        "LOCATE": "ALLOW_OWNER",
-        "GET": "ALLOW_OWNER",
-        "GET_ATTRIBUTES": "ALLOW_OWNER",
-        "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER"
-      },
-      "SECRET_DATA": {
-        "REKEY": "ALLOW_OWNER",
-        "REKEY_KEY_PAIR": "ALLOW_OWNER",
-        "DERIVE_KEY": "ALLOW_OWNER",
-        "LOCATE": "ALLOW_OWNER",
-        "CHECK": "ALLOW_OWNER",
-        "GET": "ALLOW_OWNER",
-        "GET_ATTRIBUTES": "ALLOW_OWNER",
-        "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_OWNER",
-        "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      },
-      "OPAQUE_DATA": {
-        "REKEY": "ALLOW_OWNER",
-        "REKEY_KEY_PAIR": "ALLOW_OWNER",
-        "DERIVE_KEY": "ALLOW_OWNER",
-        "LOCATE": "ALLOW_OWNER",
-        "CHECK": "ALLOW_OWNER",
-        "GET": "ALLOW_OWNER",
-        "GET_ATTRIBUTES": "ALLOW_OWNER",
-        "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_OWNER",
-        "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      },
-      "PGP_KEY": {
-        "REKEY": "ALLOW_OWNER",
-        "REKEY_KEY_PAIR": "ALLOW_OWNER",
-        "DERIVE_KEY": "ALLOW_OWNER",
-        "LOCATE": "ALLOW_OWNER",
-        "CHECK": "ALLOW_OWNER",
-        "GET": "ALLOW_OWNER",
-        "GET_ATTRIBUTES": "ALLOW_OWNER",
-        "GET_ATTRIBUTE_LIST": "ALLOW_OWNER",
-        "ADD_ATTRIBUTE": "ALLOW_OWNER",
-        "MODIFY_ATTRIBUTE": "ALLOW_OWNER",
-        "DELETE_ATTRIBUTE": "ALLOW_OWNER",
-        "OBTAIN_LEASE": "ALLOW_OWNER",
-        "GET_USAGE_ALLOCATION": "ALLOW_OWNER",
-        "ACTIVATE": "ALLOW_OWNER",
-        "REVOKE": "ALLOW_OWNER",
-        "DESTROY": "ALLOW_OWNER",
-        "ARCHIVE": "ALLOW_OWNER",
-        "RECOVER": "ALLOW_OWNER"
-      }
-    }
-  }
-}

@@ -1,15 +0,0 @@
-[server]
-hostname=0.0.0.0
-port=5696
-certificate_path=/ssl/kmip-cert.pem
-key_path=/ssl/kmip-key.pem
-ca_path=/ssl/kmip-ca.pem
-auth_suite=TLS1.2
-policy_path=/etc/pykmip/policies
-enable_tls_client_auth=True
-database_path=/pykmip/pykmip.db
-tls_cipher_suites=
-    TLS_RSA_WITH_AES_128_CBC_SHA256
-    TLS_RSA_WITH_AES_256_CBC_SHA256
-    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384
-logging_level=DEBUG

@@ -1,20 +0,0 @@
-#!/bin/bash -x
-set -x #echo on
-set -e #exit at the first error
-
-mkdir -p $HOME/.aws
-
-cat >>$HOME/.aws/credentials <<EOF
-[default]
-aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
-aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY
-[default_2]
-aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY_2
-aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY_2
-[google]
-aws_access_key_id = $AWS_GCP_BACKEND_ACCESS_KEY
-aws_secret_access_key = $AWS_GCP_BACKEND_SECRET_KEY
-[google_2]
-aws_access_key_id = $AWS_GCP_BACKEND_ACCESS_KEY_2
-aws_secret_access_key = $AWS_GCP_BACKEND_SECRET_KEY_2
-EOF

@@ -1,35 +0,0 @@
-name: Test alerts
-
-on:
-  push:
-    branches-ignore:
-      - 'development/**'
-      - 'q/*/**'
-
-jobs:
-  run-alert-tests:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        tests:
-          - name: 1 minute interval tests
-            file: monitoring/alerts.test.yaml
-
-          - name: 10 seconds interval tests
-            file: monitoring/alerts.10s.test.yaml
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-
-      - name: Render and test ${{ matrix.tests.name }}
-        uses: scality/action-prom-render-test@1.0.3
-        with:
-          alert_file_path: monitoring/alerts.yaml
-          test_file_path: ${{ matrix.tests.file }}
-          alert_inputs: |
-            namespace=zenko
-            service=artesca-data-connector-s3api-metrics
-            reportJob=artesca-data-ops-report-handler
-            replicas=3
-          github_token: ${{ secrets.GITHUB_TOKEN }}

@@ -1,25 +0,0 @@
----
-name: codeQL
-
-on:
-  push:
-    branches: [w/**, q/*]
-  pull_request:
-    branches: [development/*, stabilization/*, hotfix/*]
-  workflow_dispatch:
-
-jobs:
-  analyze:
-    name: Static analysis with CodeQL
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3
-        with:
-          languages: javascript, python, ruby
-
-      - name: Build and analyze
-        uses: github/codeql-action/analyze@v3

@@ -1,16 +0,0 @@
----
-name: dependency review
-
-on:
-  pull_request:
-    branches: [development/*, stabilization/*, hotfix/*]
-
-jobs:
-  dependency-review:
-    runs-on: ubuntu-latest
-    steps:
-      - name: 'Checkout Repository'
-        uses: actions/checkout@v4
-
-      - name: 'Dependency Review'
-        uses: actions/dependency-review-action@v4

@@ -1,80 +0,0 @@
----
-name: release
-run-name: release ${{ inputs.tag }}
-
-on:
-  workflow_dispatch:
-    inputs:
-      tag:
-        description: 'Tag to be released'
-        required: true
-
-env:
-  PROJECT_NAME: ${{ github.event.repository.name }}
-
-jobs:
-  build-federation-image:
-    runs-on: ubuntu-20.04
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
-      - name: Login to GitHub Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
-      - name: Build and push image for federation
-        uses: docker/build-push-action@v5
-        with:
-          push: true
-          context: .
-          file: images/svc-base/Dockerfile
-          tags: |
-            ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
-          cache-from: type=gha,scope=federation
-          cache-to: type=gha,mode=max,scope=federation
-
-  release:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-
-      - name: Set up Docker Buildk
-        uses: docker/setup-buildx-action@v3
-
-      - name: Login to Registry
-        uses: docker/login-action@v3
-        with:
-          registry: ghcr.io
-          username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
-
-      - name: Push dashboards into the production namespace
-        run: |
-          oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
-            dashboard.json:application/grafana-dashboard+json \
-            alerts.yaml:application/prometheus-alerts+yaml
-        working-directory: monitoring
-
-      - name: Build and push
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          push: true
-          tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
-
-      - name: Create Release
-        uses: softprops/action-gh-release@v2
-        env:
-          GITHUB_TOKEN: ${{ github.token }}
-        with:
-          name: Release ${{ github.event.inputs.tag }}
-          tag_name: ${{ github.event.inputs.tag }}
-          generate_release_notes: true
-          target_commitish: ${{ github.sha }}

@ -1,533 +0,0 @@
|
||||||
---
|
|
||||||
name: tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
push:
|
|
||||||
branches-ignore:
|
|
||||||
- 'development/**'
|
|
||||||
- 'q/*/**'
|
|
||||||
|
|
||||||
env:
|
|
  # Secrets
  azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackend_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
  azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
  azurebackend2_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
  azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
    ${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
  azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
    ${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
  azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
    ${{ secrets.AZURE_STORAGE_ENDPOINT }}
  azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
  b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
  b2backend_B2_STORAGE_ACCESS_KEY: >-
    ${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
  GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
  AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
  AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
  AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
  AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
  AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
  AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
  AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
  b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
  gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
  gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
  gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
  gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
  gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
  gcpbackendmismatch_GCP_SERVICE_KEY: >-
    ${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
  gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
  # Configs
  ENABLE_LOCAL_CACHE: "true"
  REPORT_TOKEN: "report-token-1"
  REMOTE_MANAGEMENT_DISABLE: "1"
  # https://github.com/git-lfs/git-lfs/issues/5749
  GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
  linting-coverage:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '16'
          cache: yarn
      - name: install dependencies
        run: yarn install --frozen-lockfile --network-concurrency 1
      - uses: actions/setup-python@v5
        with:
          python-version: '3.9'
      - uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip
      - name: Install python deps
        run: pip install flake8
      - name: Lint Javascript
        run: yarn run --silent lint -- --max-warnings 0
      - name: Lint Markdown
        run: yarn run --silent lint_md
      - name: Lint python
        run: flake8 $(git ls-files "*.py")
      - name: Lint Yaml
        run: yamllint -c yamllint.yml $(git ls-files "*.yml")
      - name: Unit Coverage
        run: |
          set -ex
          mkdir -p $CIRCLE_TEST_REPORTS/unit
          yarn test
          yarn run test_legacy_location
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
          CIRCLE_TEST_REPORTS: /tmp
          CIRCLE_ARTIFACTS: /tmp
          CI_REPORTS: /tmp
      - name: Unit Coverage logs
        run: find /tmp/unit -exec cat {} \;
      - name: preparing junit files for upload
        run: |
          mkdir -p artifacts/junit
          find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
        if: always()
      - name: Upload files to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: artifacts
        if: always()
  build:
    runs-on: ubuntu-20.04
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Build and push cloudserver image
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .
          provenance: false
          tags: |
            ghcr.io/${{ github.repository }}:${{ github.sha }}
          labels: |
            git.repository=${{ github.repository }}
            git.commit-sha=${{ github.sha }}
          cache-from: type=gha,scope=cloudserver
          cache-to: type=gha,mode=max,scope=cloudserver
      - name: Build and push pykmip image
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .github/pykmip
          tags: |
            ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
          labels: |
            git.repository=${{ github.repository }}
            git.commit-sha=${{ github.sha }}
          cache-from: type=gha,scope=pykmip
          cache-to: type=gha,mode=max,scope=pykmip
      - name: Build and push MongoDB
        uses: docker/build-push-action@v5
        with:
          push: true
          context: .github/docker/mongodb
          tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
          cache-from: type=gha,scope=mongodb
          cache-to: type=gha,mode=max,scope=mongodb
  multiple-backend:
    runs-on: ubuntu-latest
    needs: build
    env:
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      S3BACKEND: mem
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      S3DATA: multiple
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Login to Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile sproxyd up -d
        working-directory: .github/docker
      - name: Run multiple backend test
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          bash wait_for_local_port.bash 81 40
          yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  mongo-v0-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: mem
      MPU_TESTING: "yes"
      S3METADATA: mongodb
      S3KMS: file
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      DEFAULT_BUCKET_KEY_FORMAT: v0
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile mongo up -d
        working-directory: .github/docker
      - name: Run functional tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  mongo-v1-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: mem
      MPU_TESTING: "yes"
      S3METADATA: mongodb
      S3KMS: file
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
      DEFAULT_BUCKET_KEY_FORMAT: v1
      METADATA_MAX_CACHED_BUCKETS: 1
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile mongo up -d
        working-directory: .github/docker
      - name: Run functional tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
          yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  file-ft-tests:
    strategy:
      matrix:
        include:
          - job-name: file-ft-tests
    name: ${{ matrix.job-name }}
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      MPU_TESTING: "yes"
      JOB_NAME: ${{ matrix.job-name }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup matrix job artifacts directory
        shell: bash
        run: |
          set -exu
          mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
      - name: Setup CI services
        run: docker compose up -d
        working-directory: .github/docker
      - name: Run file ft tests
        run: |-
          set -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  utapi-v2-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      ENABLE_UTAPI_V2: t
      S3BACKEND: mem
      BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose up -d
        working-directory: .github/docker
      - name: Run file utapi v2 tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  quota-tests:
    runs-on: ubuntu-latest
    needs: build
    strategy:
      matrix:
        inflights:
          - name: "With Inflights"
            value: "true"
          - name: "Without Inflights"
            value: "false"
    env:
      S3METADATA: mongodb
      S3BACKEND: mem
      S3QUOTA: scuba
      QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
      SCUBA_HOST: localhost
      SCUBA_PORT: 8100
      SCUBA_HEALTHCHECK_FREQUENCY: 100
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Setup CI services
        run: docker compose --profile mongo up -d
        working-directory: .github/docker
      - name: Run quota tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  kmip-ft-tests:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: file
      S3VAULT: mem
      MPU_TESTING: "yes"
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - name: Copy KMIP certs
        run: cp -r ./certs /tmp/ssl-kmip
        working-directory: .github/pykmip
      - name: Setup CI services
        run: docker compose --profile pykmip up -d
        working-directory: .github/docker
      - name: Run file KMIP tests
        run: |-
          set -ex -o pipefail;
          bash wait_for_local_port.bash 8000 40
          bash wait_for_local_port.bash 5696 40
          yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
  ceph-backend-test:
    runs-on: ubuntu-latest
    needs: build
    env:
      S3BACKEND: mem
      S3DATA: multiple
      S3KMS: file
      CI_CEPH: 'true'
      MPU_TESTING: "yes"
      S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
      JOB_NAME: ${{ github.job }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Login to GitHub Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ github.token }}
      - name: Setup CI environment
        uses: ./.github/actions/setup-ci
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: '2.5.9'
      - name: Install Ruby dependencies
        run: |
          gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
      - name: Install Java dependencies
        run: |
          sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
      - name: Setup CI services
        run: docker compose --profile ceph up -d
        working-directory: .github/docker
        env:
          S3METADATA: mongodb
      - name: Run Ceph multiple backend tests
        run: |-
          set -ex -o pipefail;
          bash .github/ceph/wait_for_ceph.sh
          bash wait_for_local_port.bash 27018 40
          bash wait_for_local_port.bash 8000 40
          yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
          S3METADATA: mem
      - name: Run Java tests
        run: |-
          set -ex -o pipefail;
          mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
        working-directory: tests/functional/jaws
      - name: Run Ruby tests
        run: |-
          set -ex -o pipefail;
          rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
        working-directory: tests/functional/fog
      - name: Run Javascript AWS SDK tests
        run: |-
          set -ex -o pipefail;
          yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
          yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
        env:
          S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
          S3BACKEND: file
          S3VAULT: mem
          S3METADATA: mongodb
      - name: Upload logs to artifacts
        uses: scality/action-artifacts@v4
        with:
          method: upload
          url: https://artifacts.scality.net
          user: ${{ secrets.ARTIFACTS_USER }}
          password: ${{ secrets.ARTIFACTS_PASSWORD }}
          source: /tmp/artifacts
        if: always()
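All of the functional-test jobs above follow the same pattern: start the compose services from `.github/docker`, wait for the relevant ports, then run a yarn test target. As a rough sketch only, the multiple-backend job could be reproduced locally along these lines, assuming the compose file can resolve `CLOUDSERVER_IMAGE` and `MONGODB_IMAGE` to images you have already built:

```shell
# Sketch of a local run of the "multiple-backend" job; env values taken from the job definition above.
export S3BACKEND=mem S3DATA=multiple
export S3_LOCATION_FILE=tests/locationConfig/locationConfigTests.json
(cd .github/docker && docker compose --profile sproxyd up -d)
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test
```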
@@ -22,14 +22,6 @@ coverage
 # Compiled binary addons (http://nodejs.org/api/addons.html)
 build/Release

-# Sphinx build dir
-_build
-
 # Dependency directory
 # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git
 node_modules
-yarn.lock
-.tox
-
-# Junit directory
-junit
Dockerfile

@@ -1,60 +1,23 @@
-ARG NODE_VERSION=16.20-bullseye-slim
-
-FROM node:${NODE_VERSION} as builder
-
-WORKDIR /usr/src/app
-
-RUN apt-get update \
-    && apt-get install -y --no-install-recommends \
-        build-essential \
-        ca-certificates \
-        curl \
-        git \
-        gnupg2 \
-        jq \
-        python3 \
-        ssh \
-        wget \
-        libffi-dev \
-        zlib1g-dev \
-    && apt-get clean \
-    && mkdir -p /root/ssh \
-    && ssh-keyscan -H github.com > /root/ssh/known_hosts
-
-ENV PYTHON=python3
-COPY package.json yarn.lock /usr/src/app/
-RUN npm install typescript -g
-RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
-
-################################################################################
-FROM node:${NODE_VERSION}
-
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    jq \
-    && rm -rf /var/lib/apt/lists/*
-
-ENV NO_PROXY localhost,127.0.0.1
-ENV no_proxy localhost,127.0.0.1
-
-EXPOSE 8000
-EXPOSE 8002
-
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    jq \
-    tini \
-    && rm -rf /var/lib/apt/lists/*
-
+FROM node:6-slim
+MAINTAINER Giorgio Regni <gr@scality.com>
 WORKDIR /usr/src/app

 # Keep the .git directory in order to properly report version
 COPY . /usr/src/app
-COPY --from=builder /usr/src/app/node_modules ./node_modules/

+RUN apt-get update \
+    && apt-get install -y jq python git build-essential --no-install-recommends \
+    && npm install --production \
+    && apt-get autoremove --purge -y python git build-essential \
+    && rm -rf /var/lib/apt/lists/* \
+    && npm cache clear \
+    && rm -rf ~/.node-gyp \
+    && rm -rf /tmp/npm-*

 VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]

-ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
-CMD [ "yarn", "start" ]
+ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
+CMD [ "npm", "start" ]
+
+EXPOSE 8000
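Either variant of the Dockerfile is built and run the same way; a minimal sketch (the image tag is arbitrary, and the volume paths are the ones declared in the Dockerfile above):

```shell
# Build the image from the repository root and run it with persistent data/metadata volumes.
docker build -t cloudserver:local .
docker run -d --name cloudserver -p 8000:8000 \
    -v "$(pwd)/localData:/usr/src/app/localData" \
    -v "$(pwd)/localMetadata:/usr/src/app/localMetadata" \
    cloudserver:local
```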
@@ -7,16 +7,16 @@ COPY . /usr/src/app

 RUN apt-get update \
     && apt-get install -y jq python git build-essential --no-install-recommends \
-    && yarn install --production \
+    && npm install --production \
     && apt-get autoremove --purge -y python git build-essential \
     && rm -rf /var/lib/apt/lists/* \
-    && yarn cache clean \
+    && npm cache clear \
     && rm -rf ~/.node-gyp \
-    && rm -rf /tmp/yarn-*
+    && rm -rf /tmp/npm-*

 ENV S3BACKEND mem

 ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
-CMD [ "yarn", "start" ]
+CMD [ "npm", "start" ]

 EXPOSE 8000
@@ -1,7 +1,6 @@
 # S3 Healthcheck

-Scality S3 exposes a healthcheck route `/live` on the port used
-for the metrics (defaults to port 8002) which returns a
+Scality S3 exposes a healthcheck route `/_/healthcheck` which returns a
 response with HTTP code

 - 200 OK
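A quick way to exercise either healthcheck route described above (a sketch only: the `/live` route sits on the metrics port, 8002 by default, while the older `/_/healthcheck` route is served on the S3 port, assumed to be the default 8000 here):

```shell
# Newer route, served on the metrics port
curl -i http://localhost:8002/live
# Older route, served on the main S3 port
curl -i http://localhost:8000/_/healthcheck
```

Both are expected to answer with a 200 OK when the server is healthy.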
README.md

@@ -1,7 +1,12 @@
-# Zenko CloudServer with Vitastor Backend
+# Zenko CloudServer

 ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)

+[![CircleCI][badgepub]](https://circleci.com/gh/scality/S3)
+[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/S3)
+[![Docker Pulls][badgedocker]](https://hub.docker.com/r/scality/s3server/)
+[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
+
 ## Overview

 CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@@ -11,71 +16,126 @@ Scality’s Open Source Multi-Cloud Data Controller.
 CloudServer provides a single AWS S3 API interface to access multiple
 backend data storage both on-premise or public in the cloud.

-This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
-backend support.
-
-## Quick Start with Vitastor
-
-Vitastor Backend is in experimental status, however you can already try to
-run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
-it works too 😊.
-
-Installation instructions:
-
-### Install Vitastor
-
-Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
-
-### Install Zenko with Vitastor Backend
-
-- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
-- Install dependencies: `npm install --omit dev` or just `npm install`
-- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
-- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
-  You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
-- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
-
-### Install and Configure MongoDB
-
-Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
-
-### Setup Zenko
-
-- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
-- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
-- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
-- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
-- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
-  and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
-  access keys, but it's not published, so let's use a file for now.
-- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
-  You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
-  in this file.
-
-Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
-instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
-
-### Start Zenko
-
-Start the S3 server with: `node index.js`
-
-If you use default settings, Zenko CloudServer starts on port 8000.
-The default access key is `accessKey1` with a secret key of `verySecretKey1`.
-
-Now you can access your S3 with `s3cmd` or `geesefs`:
-
-```
-s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
-```
-
-```
-AWS_ACCESS_KEY_ID=accessKey1 \
-AWS_SECRET_ACCESS_KEY=verySecretKey1 \
-geesefs --endpoint http://localhost:8000 testbucket mountdir
-```
-
-# Author & License
-
-- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
-- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
-  (a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
+CloudServer is useful for Developers, either to run as part of a
+continuous integration test environment to emulate the AWS S3 service locally
+or as an abstraction layer to develop object storage enabled
+application on the go.
+
+## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
+
+## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
+
+## Docker
+
+[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/scality/s3server/)
+
+## Contributing
+
+In order to contribute, please follow the
+[Contributing Guidelines](
+https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
+
+## Installation
+
+### Dependencies
+
+Building and running the Zenko CloudServer requires node.js 6.9.5 and npm v3.
+Up-to-date versions can be found at
+[Nodesource](https://github.com/nodesource/distributions).
+
+### Clone source code
+
+```shell
+git clone https://github.com/scality/S3.git
+```
+
+### Install js dependencies
+
+Go to the ./S3 folder,
+
+```shell
+npm install
+```
+
+If you get an error regarding installation of the diskUsage module,
+please install g++.
+
+If you get an error regarding level-down bindings, try clearing your npm cache:
+
+```shell
+npm cache clear
+```
+
+## Run it with a file backend
+
+```shell
+npm start
+```
+
+This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
+9991 are also open locally for internal transfer of metadata and data,
+respectively.
+
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+By default the metadata files will be saved in the
+localMetadata directory and the data files will be saved
+in the localData directory within the ./S3 directory on your
+machine. These directories have been pre-created within the
+repository. If you would like to save the data or metadata in
+different locations of your choice, you must specify them with absolute paths.
+So, when starting the server:
+
+```shell
+mkdir -m 700 $(pwd)/myFavoriteDataPath
+mkdir -m 700 $(pwd)/myFavoriteMetadataPath
+export S3DATAPATH="$(pwd)/myFavoriteDataPath"
+export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
+npm start
+```
+
+## Run it with multiple data backends
+
+```shell
+export S3DATA='multiple'
+npm start
+```
+
+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+With multiple backends, you have the ability to
+choose where each object will be saved by setting
+the following header with a locationConstraint on
+a PUT request:
+
+```shell
+'x-amz-meta-scal-location-constraint':'myLocationConstraint'
+```
+
+If no header is sent with a PUT object request, the
+location constraint of the bucket will determine
+where the data is saved. If the bucket has no location
+constraint, the endpoint of the PUT request will be
+used to determine location.
+
+See the Configuration section in our documentation
+[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
+to learn how to set location constraints.
+
+## Run it with an in-memory backend
+
+```shell
+npm run mem_backend
+```
+
+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
+[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
+[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
+[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
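Condensing the Vitastor setup steps from the removed side of the README into one place, purely as a sketch: the pool and image names below are the ones used in those steps, and the pool ID and metadata image still have to be copied into `locationConfig.json` by hand afterwards.

```shell
# Vitastor side: a dedicated data pool plus a small volume-metadata image
vitastor-cli create-pool s3-data
vitastor-cli ls-pools --detail s3-data     # note the pool ID for locationConfig.json (pool_id)
vitastor-cli create -s 10G s3-volume-meta  # referenced as metadata_image in locationConfig.json

# Zenko side: start from the provided templates, then edit pool_id / metadata_image and the keys
cp config.json.vitastor config.json
cp authdata.json.example authdata.json
cp locationConfig.json.vitastor locationConfig.json

# Start the S3 server (listens on port 8000 by default)
node index.js
```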
@@ -1,2 +1,2 @@
 ---
-theme: jekyll-theme-modernist
+theme: jekyll-theme-minimal
@@ -1,4 +0,0 @@
-#!/usr/bin/env node
-'use strict'; // eslint-disable-line strict
-
-require('../lib/nfs/utilities.js').createBucketWithNFSEnabled();
@@ -13,26 +13,20 @@ function _performSearch(host,
                         port,
                         bucketName,
                         query,
-                        listVersions,
                         accessKey,
                         secretKey,
-                        sessionToken,
                         verbose, ssl) {
     const escapedSearch = encodeURIComponent(query);
     const options = {
         host,
         port,
         method: 'GET',
-        path: `/${bucketName}/?search=${escapedSearch}${listVersions ? '&&versions' : ''}`,
+        path: `/${bucketName}/?search=${escapedSearch}`,
         headers: {
             'Content-Length': 0,
         },
         rejectUnauthorized: false,
-        versions: '',
     };
-    if (sessionToken) {
-        options.headers['x-amz-security-token'] = sessionToken;
-    }
     const transport = ssl ? https : http;
     const request = transport.request(options, response => {
         if (verbose) {

@@ -61,9 +55,9 @@ function _performSearch(host,
     // generateV4Headers expects request object with path that does not
     // include query
     request.path = `/${bucketName}`;
-    const requestData = listVersions ? { search: query, versions: '' } : { search: query };
-    auth.client.generateV4Headers(request, requestData, accessKey, secretKey, 's3');
-    request.path = `/${bucketName}?search=${escapedSearch}${listVersions ? '&&versions' : ''}`;
+    auth.client.generateV4Headers(request, { search: query },
+        accessKey, secretKey, 's3');
+    request.path = `/${bucketName}?search=${escapedSearch}`;
     if (verbose) {
         logger.info('request headers', { headers: request._headers });
     }

@@ -82,17 +76,15 @@ function searchBucket() {
         .version('0.0.1')
         .option('-a, --access-key <accessKey>', 'Access key id')
         .option('-k, --secret-key <secretKey>', 'Secret access key')
-        .option('-t, --session-token <sessionToken>', 'Session token')
         .option('-b, --bucket <bucket>', 'Name of the bucket')
         .option('-q, --query <query>', 'Search query')
         .option('-h, --host <host>', 'Host of the server')
         .option('-p, --port <port>', 'Port of the server')
        .option('-s', '--ssl', 'Enable ssl')
-        .option('-l, --list-versions', 'List all versions of the objects that meet the search query, ' +
-            'otherwise only list the latest version')
         .option('-v, --verbose')
         .parse(process.argv);
-    const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } =
+    const { host, port, accessKey, secretKey, bucket, query, verbose, ssl } =
         commander;

     if (!host || !port || !accessKey || !secretKey || !bucket || !query) {

@@ -101,7 +93,7 @@ function searchBucket() {
         process.exit(1);
     }

-    _performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose,
+    _performSearch(host, port, bucket, query, accessKey, secretKey, verbose,
         ssl);
}

@@ -0,0 +1,57 @@
---
general:
  branches:
    ignore:
      - /^ultron\/.*/   # Ignore ultron/* branches
  artifacts:
    - coverage/

machine:
  node:
    version: 6.9.5
  services:
    - redis
    - docker
  ruby:
    version: "2.4.1"
  environment:
    CXX: g++-4.9
    ENABLE_LOCAL_CACHE: true
    REPORT_TOKEN: report-token-1
    REMOTE_MANAGEMENT_DISABLE: 1
  hosts:
    bucketwebsitetester.s3-website-us-east-1.amazonaws.com: 127.0.0.1
  post:
    - curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
    - echo "deb https://dl.yarnpkg.com/debian/ stable main" |
      sudo tee /etc/apt/sources.list.d/yarn.list
    - sudo apt-get update && sudo apt-get install yarn -y

dependencies:
  override:
    - docker run -d -p 27018:27018 -p 27019:27018 -p 27020:27018
      --name ci-mongo scality/ci-mongo:3.4
    - rm -rf node_modules
    - yarn install --pure-lockfile
  post:
    - sudo pip install flake8 yamllint
    - sudo pip install s3cmd==1.6.1
    # fog and ruby testing dependencies
    - gem install fog-aws -v 1.3.0
    - gem install mime-types -v 3.1
    - gem install rspec -v 3.5
    - gem install json
    - gem install digest
    # java sdk dependencies
    - sudo apt-get install -y -q default-jdk

test:
  override:
    - openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509
      -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy"
      -keyout /tmp/CA.pem -out /tmp/CA.pem
    - docker run --name squid-proxy -d --net=host
      -v /tmp/CA.pem:/ssl/myca.pem:ro -p 3129:3129 scality/ci-squid
    - bash tests.bash:
        parallel: true
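Stepping back to the bucket-search tool shown a few files above: with the `--list-versions` and `--session-token` options present, an invocation could look roughly like the following. This is only an illustration; the script path and the query string are made up, while the flags, keys and endpoint are the ones appearing elsewhere in this document.

```shell
# Hypothetical invocation of the metadata search helper (path and query are placeholders).
node bin/search_bucket.js \
    --host localhost --port 8000 \
    --access-key accessKey1 --secret-key verySecretKey1 \
    --bucket testbucket \
    --query 'x-amz-meta-color="blue"' \
    --list-versions --verbose
```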
@@ -41,16 +41,5 @@
             "access": "replicationKey1",
             "secret": "replicationSecretKey1"
         }]
-    },
-    {
-        "name": "Lifecycle",
-        "email": "inspector@lifecycle.info",
-        "arn": "arn:aws:iam::123456789016:root",
-        "canonicalID": "http://acs.zenko.io/accounts/service/lifecycle",
-        "shortid": "123456789016",
-        "keys": [{
-            "access": "lifecycleKey1",
-            "secret": "lifecycleSecretKey1"
-        }]
     }]
 }
@@ -1,10 +1,7 @@
 {
     "port": 8000,
     "listenOn": [],
-    "metricsPort": 8002,
-    "metricsListenOn": [],
     "replicationGroupId": "RG001",
-    "workers": 4,
     "restEndpoints": {
         "localhost": "us-east-1",
         "127.0.0.1": "us-east-1",

@@ -42,10 +39,6 @@
         "host": "localhost",
         "port": 8900
     },
-    "workflowEngineOperator": {
-        "host": "localhost",
-        "port": 3001
-    },
     "cdmi": {
         "host": "localhost",
         "port": 81,

@@ -53,7 +46,7 @@
         "readonly": true
     },
     "bucketd": {
-        "bootstrap": ["localhost:9000"]
+        "bootstrap": ["localhost"]
     },
     "vaultd": {
         "host": "localhost",

@@ -75,10 +68,6 @@
         "host": "localhost",
         "port": 9991
     },
-    "pfsClient": {
-        "host": "localhost",
-        "port": 9992
-    },
     "metadataDaemon": {
         "bindAddress": "localhost",
         "port": 9990

@@ -87,57 +76,15 @@
         "bindAddress": "localhost",
         "port": 9991
     },
-    "pfsDaemon": {
-        "bindAddress": "localhost",
-        "port": 9992
-    },
     "recordLog": {
         "enabled": true,
         "recordLogName": "s3-recordlog"
     },
     "mongodb": {
         "replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020",
         "writeConcern": "majority",
         "replicaSet": "rs0",
         "readPreference": "primary",
         "database": "metadata"
-    },
-    "authdata": "authdata.json",
-    "backends": {
-        "auth": "file",
-        "data": "file",
-        "metadata": "mongodb",
-        "kms": "file",
-        "quota": "none"
-    },
-    "externalBackends": {
-        "aws_s3": {
-            "httpAgent": {
-                "keepAlive": false,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        },
-        "gcp": {
-            "httpAgent": {
-                "keepAlive": true,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        }
-    },
-    "requests": {
-        "viaProxy": false,
-        "trustedProxyCIDRs": [],
-        "extractClientIPFromHeader": ""
-    },
-    "bucketNotificationDestinations": [
-        {
-            "resource": "target1",
-            "type": "dummy",
-            "host": "localhost:6000"
-        }
-    ]
+    }
 }
@@ -1,71 +0,0 @@
{
    "port": 8000,
    "listenOn": [],
    "metricsPort": 8002,
    "metricsListenOn": [],
    "replicationGroupId": "RG001",
    "restEndpoints": {
        "localhost": "STANDARD",
        "127.0.0.1": "STANDARD",
        "yourhostname.ru": "STANDARD"
    },
    "websiteEndpoints": [
        "static.yourhostname.ru"
    ],
    "replicationEndpoints": [ {
        "site": "zenko",
        "servers": ["127.0.0.1:8000"],
        "default": true
    } ],
    "log": {
        "logLevel": "info",
        "dumpLevel": "error"
    },
    "healthChecks": {
        "allowFrom": ["127.0.0.1/8", "::1"]
    },
    "backends": {
        "metadata": "mongodb"
    },
    "mongodb": {
        "replicaSetHosts": "127.0.0.1:27017",
        "writeConcern": "majority",
        "replicaSet": "rs0",
        "readPreference": "primary",
        "database": "s3",
        "authCredentials": {
            "username": "s3",
            "password": ""
        }
    },
    "externalBackends": {
        "aws_s3": {
            "httpAgent": {
                "keepAlive": false,
                "keepAliveMsecs": 1000,
                "maxFreeSockets": 256,
                "maxSockets": null
            }
        },
        "gcp": {
            "httpAgent": {
                "keepAlive": true,
                "keepAliveMsecs": 1000,
                "maxFreeSockets": 256,
                "maxSockets": null
            }
        }
    },
    "requests": {
        "viaProxy": false,
        "trustedProxyCIDRs": [],
        "extractClientIPFromHeader": ""
    },
    "bucketNotificationDestinations": [
        {
            "resource": "target1",
            "type": "dummy",
            "host": "localhost:6000"
        }
    ]
}
constants.js

@@ -77,60 +77,45 @@ const constants = {
     maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 110100480 :
         5368709120,

-    // Max size allowed in a single put object request is 5GB
-    // https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
-    maximumAllowedUploadSize: 5368709120,
-
     // AWS states max size for user-defined metadata (x-amz-meta- headers) is
     // 2 KB: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
     // In testing, AWS seems to allow up to 88 more bytes, so we do the same.
     maximumMetaHeadersSize: 2136,

-    // Maximum HTTP headers size allowed
-    maxHttpHeadersSize: 14122,
-
     // hex digest of sha256 hash of empty string:
     emptyStringHash: crypto.createHash('sha256')
         .update('', 'binary').digest('hex'),

     // Queries supported by AWS that we do not currently support.
-    // Non-bucket queries
     unsupportedQueries: [
         'accelerate',
         'analytics',
         'inventory',
+        'list-type',
         'logging',
         'metrics',
-        'policyStatus',
-        'publicAccessBlock',
+        'notification',
+        'policy',
         'requestPayment',
+        'restore',
         'torrent',
     ],

     // Headers supported by AWS that we do not currently support.
     unsupportedHeaders: [
+        'x-amz-server-side-encryption',
         'x-amz-server-side-encryption-customer-algorithm',
+        'x-amz-server-side-encryption-aws-kms-key-id',
         'x-amz-server-side-encryption-context',
         'x-amz-server-side-encryption-customer-key',
         'x-amz-server-side-encryption-customer-key-md5',
     ],

     // user metadata header to set object locationConstraint
-    objectLocationConstraintHeader: 'x-amz-storage-class',
-    lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
+    objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
     legacyLocations: ['sproxyd', 'legacy'],
-    // declare here all existing service accounts and their properties
-    // (if any, otherwise an empty object)
-    serviceAccountProperties: {
-        replication: {},
-        lifecycle: {},
-        gc: {},
-        'md-ingestion': {
-            canReplicate: true,
-        },
-    },
     /* eslint-disable camelcase */
-    externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
+    externalBackends: { aws_s3: true, azure: true, gcp: true },
+    replicationBackends: { aws_s3: true, azure: true, gcp: true },
     // some of the available data backends (if called directly rather
     // than through the multiple backend gateway) need a key provided
     // as a string as first parameter of the get/delete methods.
@@ -149,100 +134,6 @@ const constants = {
     azureAccountNameRegex: /^[a-z0-9]{3,24}$/,
     base64Regex: new RegExp('^(?:[A-Za-z0-9+/]{4})*' +
         '(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$'),
-    productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
-    // location constraint delimiter
-    zenkoSeparator: ':',
-    // user metadata applied on zenko objects
-    zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
-    bucketOwnerActions: [
-        'bucketDeleteCors',
-        'bucketDeleteLifecycle',
-        'bucketDeletePolicy',
-        'bucketDeleteReplication',
-        'bucketDeleteWebsite',
-        'bucketGetCors',
-        'bucketGetLifecycle',
-        'bucketGetLocation',
-        'bucketGetPolicy',
-        'bucketGetReplication',
-        'bucketGetVersioning',
-        'bucketGetWebsite',
-        'bucketPutCors',
-        'bucketPutLifecycle',
-        'bucketPutPolicy',
-        'bucketPutReplication',
-        'bucketPutVersioning',
-        'bucketPutWebsite',
-        'objectDeleteTagging',
-        'objectGetTagging',
-        'objectPutTagging',
-        'objectPutLegalHold',
-        'objectPutRetention',
-    ],
-    // response header to be sent when there are invalid
-    // user metadata in the object's metadata
-    invalidObjectUserMetadataHeader: 'x-amz-missing-meta',
-    // Bucket specific queries supported by AWS that we do not currently support
-    // these queries may or may not be supported at object level
-    unsupportedBucketQueries: [
-    ],
-    suppressedUtapiEventFields: [
-        'object',
-        'location',
-        'versionId',
-    ],
-    allowedUtapiEventFilterFields: [
-        'operationId',
-        'location',
-        'account',
-        'user',
-        'bucket',
-    ],
-    arrayOfAllowed: [
-        'objectPutTagging',
-        'objectPutLegalHold',
-        'objectPutRetention',
-    ],
-    allowedUtapiEventFilterStates: ['allow', 'deny'],
-    allowedRestoreObjectRequestTierValues: ['Standard'],
-    lifecycleListing: {
-        CURRENT_TYPE: 'current',
-        NON_CURRENT_TYPE: 'noncurrent',
-        ORPHAN_DM_TYPE: 'orphan',
-    },
-    multiObjectDeleteConcurrency: 50,
-    maxScannedLifecycleListingEntries: 10000,
-    overheadField: [
-        'content-length',
-        'owner-id',
-        'versionId',
-        'isNull',
-        'isDeleteMarker',
-    ],
-    unsupportedSignatureChecksums: new Set([
-        'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
-        'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
-        'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
-        'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
-    ]),
-    supportedSignatureChecksums: new Set([
-        'UNSIGNED-PAYLOAD',
-        'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
-    ]),
-    ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
-    ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
-    // The AWS assumed Role resource type
-    assumedRoleArnResourceType: 'assumed-role',
-    // Session name of the backbeat lifecycle assumed role session.
-    backbeatLifecycleSessionName: 'backbeat-lifecycle',
-    actionsToConsiderAsObjectPut: [
-        'initiateMultipartUpload',
-        'objectPutPart',
-        'completeMultipartUpload',
-    ],
-    // if requester is not bucket owner, bucket policy actions should be denied with
-    // MethodNotAllowed error
-    onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
 };

 module.exports = constants;
@@ -4,30 +4,16 @@ const arsenal = require('arsenal');
 const { config } = require('./lib/Config.js');
 const logger = require('./lib/utilities/logger');

-process.on('uncaughtException', err => {
-    logger.fatal('caught error', {
-        error: err.message,
-        stack: err.stack,
-        workerId: this.worker ? this.worker.id : undefined,
-        workerPid: this.worker ? this.worker.process.pid : undefined,
-    });
-    process.exit(1);
-});
-
 if (config.backends.data === 'file' ||
     (config.backends.data === 'multiple' &&
      config.backends.metadata !== 'scality')) {
-    const dataServer = new arsenal.network.rest.RESTServer({
-        bindAddress: config.dataDaemon.bindAddress,
+    const dataServer = new arsenal.network.rest.RESTServer(
+        { bindAddress: config.dataDaemon.bindAddress,
         port: config.dataDaemon.port,
-        dataStore: new arsenal.storage.data.file.DataFileStore({
-            dataPath: config.dataDaemon.dataPath,
-            log: config.log,
-            noSync: config.dataDaemon.noSync,
-            noCache: config.dataDaemon.noCache,
-        }),
-        log: config.log,
-    });
+        dataStore: new arsenal.storage.data.file.DataFileStore(
+            { dataPath: config.dataDaemon.dataPath,
+              log: config.log }),
+          log: config.log });
     dataServer.setup(err => {
         if (err) {
             logger.error('Error initializing REST data server',
@ -10,7 +10,7 @@ JQ_FILTERS_CONFIG="."
|
||||||
# for multiple endpoint locations
|
# for multiple endpoint locations
|
||||||
if [[ "$ENDPOINT" ]]; then
|
if [[ "$ENDPOINT" ]]; then
|
||||||
IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
|
IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
|
||||||
for host in "${HOST_NAMES[@]}"; do
|
for host in "${HOST_NAMES[@]}"; do
|
||||||
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
|
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
|
||||||
done
|
done
|
||||||
echo "Host name has been modified to ${HOST_NAMES[@]}"
|
echo "Host name has been modified to ${HOST_NAMES[@]}"
|
||||||
|
@@ -71,14 +71,9 @@ fi
 if [[ "$LISTEN_ADDR" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsDaemon.bindAddress=\"$LISTEN_ADDR\""
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .listenOn=[\"$LISTEN_ADDR:8000\"]"
 fi
 
-if [[ "$REPLICATION_GROUP_ID" ]] ; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .replicationGroupId=\"$REPLICATION_GROUP_ID\""
-fi
-
 if [[ "$DATA_HOST" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataClient.host=\"$DATA_HOST\""
 fi
@@ -87,10 +82,6 @@ if [[ "$METADATA_HOST" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
 fi
 
-if [[ "$PFSD_HOST" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsClient.host=\"$PFSD_HOST\""
-fi
-
 if [[ "$MONGODB_HOSTS" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
 fi
@@ -103,42 +94,19 @@ if [[ "$MONGODB_DATABASE" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.database=\"$MONGODB_DATABASE\""
 fi
 
-if [ -z "$REDIS_HA_NAME" ]; then
-    REDIS_HA_NAME='mymaster'
-fi
-
-if [[ "$REDIS_SENTINELS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.name=\"$REDIS_HA_NAME\""
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.sentinels=\"$REDIS_SENTINELS\""
-elif [[ "$REDIS_HOST" ]]; then
+if [[ "$REDIS_HOST" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.host=\"$REDIS_HOST\""
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.port=6379"
 fi
 
-if [[ "$REDIS_PORT" ]] && [[ ! "$REDIS_SENTINELS" ]]; then
+if [[ "$REDIS_PORT" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.port=$REDIS_PORT"
 fi
 
-if [[ "$REDIS_SENTINELS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.name=\"$REDIS_HA_NAME\""
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.sentinels=\"$REDIS_SENTINELS\""
-elif [[ "$REDIS_HA_HOST" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.host=\"$REDIS_HA_HOST\""
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=6379"
-fi
-
-if [[ "$REDIS_HA_PORT" ]] && [[ ! "$REDIS_SENTINELS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=$REDIS_HA_PORT"
-fi
-
 if [[ "$RECORDLOG_ENABLED" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .recordLog.enabled=true"
 fi
 
-if [[ "$STORAGE_LIMIT_ENABLED" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.metrics[.utapi.metrics | length]=\"location\""
-fi
-
 if [[ "$CRR_METRICS_HOST" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.host=\"$CRR_METRICS_HOST\""
 fi
@@ -147,62 +115,6 @@ if [[ "$CRR_METRICS_PORT" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
 fi
 
-if [[ "$WE_OPERATOR_HOST" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.host=\"$WE_OPERATOR_HOST\""
-fi
-
-if [[ "$WE_OPERATOR_PORT" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.port=$WE_OPERATOR_PORT"
-fi
-
-if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
-fi
-
-# external backends http(s) agent config
-
-# AWS
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAlive=$AWS_S3_HTTPAGENT_KEEPALIVE"
-fi
-
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAliveMsecs=$AWS_S3_HTTPAGENT_KEEPALIVE_MS"
-fi
-
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
-fi
-
-if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxFreeSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
-fi
-
-#GCP
-if [[ "$GCP_HTTPAGENT_KEEPALIVE" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAlive=$GCP_HTTPAGENT_KEEPALIVE"
-fi
-
-if [[ "$GCP_HTTPAGENT_KEEPALIVE_MS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAliveMsecs=$GCP_HTTPAGENT_KEEPALIVE_MS"
-fi
-
-if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
-fi
-
-if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
-fi
-
-if [[ -n "$BUCKET_DENY_FILTER" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
-fi
-
-if [[ "$TESTING_MODE" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
-fi
-
 if [[ $JQ_FILTERS_CONFIG != "." ]]; then
     jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
     mv config.json.tmp config.json
@@ -1,993 +0,0 @@

.. role:: raw-latex(raw)
   :format: latex
..

Architecture
++++++++++++

Versioning
==========

This document describes Zenko CloudServer's support for the AWS S3 Bucket
Versioning feature.

AWS S3 Bucket Versioning
------------------------

See AWS documentation for a description of the Bucket Versioning
feature:

- `Bucket
  Versioning <http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html>`__
- `Object
  Versioning <http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html>`__

This document assumes familiarity with the details of Bucket Versioning,
including null versions and delete markers, described in the above
links.

Implementation of Bucket Versioning in Zenko CloudServer
---------------------------------------------------------

Overview of Metadata and API Component Roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Each version of an object is stored as a separate key in metadata. The
S3 API interacts with the metadata backend to store, retrieve, and
delete version metadata.

The implementation of versioning within the metadata backend is naive.
The metadata backend does not evaluate any information about bucket or
version state (whether versioning is enabled or suspended, and whether a
version is a null version or delete marker). The S3 front-end API
manages the logic regarding versioning information, and sends
instructions to metadata to handle the basic CRUD operations for version
metadata.

The role of the S3 API can be broken down into the following:

- put and delete version data
- store extra information about a version, such as whether it is a
  delete marker or null version, in the object's metadata
- send instructions to metadata backend to store, retrieve, update and
  delete version metadata based on bucket versioning state and version
  metadata
- encode version ID information to return in responses to requests, and
  decode version IDs sent in requests

The implementation of Bucket Versioning in S3 is described in this
document in two main parts. The first section, `"Implementation of
Bucket Versioning in
Metadata" <#implementation-of-bucket-versioning-in-metadata>`__,
describes the way versions are stored in metadata, and the metadata
options for manipulating version metadata.

The second section, `"Implementation of Bucket Versioning in
API" <#implementation-of-bucket-versioning-in-api>`__, describes the way
the metadata options are used in the API within S3 actions to create new
versions, update their metadata, and delete them. The management of null
versions and creation of delete markers is also described in this
section.

Implementation of Bucket Versioning in Metadata
-----------------------------------------------

As mentioned above, each version of an object is stored as a separate
key in metadata. We use version identifiers as the suffix for the keys
of the object versions, and a special version (the `"Master
Version" <#master-version>`__) to represent the latest version.

An example of what the metadata keys might look like for an object
``foo/bar`` with three versions (with ``.`` representing a null character):

+------------------------------------------------------+
| key                                                  |
+======================================================+
| foo/bar                                              |
+------------------------------------------------------+
| foo/bar.098506163554375999999PARIS  0.a430a1f85c6ec  |
+------------------------------------------------------+
| foo/bar.098506163554373999999PARIS  0.41b510cd0fdf8  |
+------------------------------------------------------+
| foo/bar.098506163554373999998PARIS  0.f9b82c166f695  |
+------------------------------------------------------+

The most recent version created is represented above in the key
``foo/bar`` and is the master version. This special version is described
further in the section `"Master Version" <#master-version>`__.

Version ID and Metadata Key Format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The version ID is generated by the metadata backend, and encoded in a
hexadecimal string format by S3 before sending a response to a request.
S3 also decodes the hexadecimal string received from a request before
sending to metadata to retrieve a particular version.

The format of a ``version_id`` is: ``ts`` ``rep_group_id`` ``seq_id``
where:

- ``ts``: is the combination of epoch and an increasing number
- ``rep_group_id``: is the name of deployment(s) considered one unit
  used for replication
- ``seq_id``: is a unique value based on metadata information.

The format of a key in metadata for a version is:

``object_name separator version_id`` where:

- ``object_name``: is the key of the object in metadata
- ``separator``: we use the ``null`` character (``0x00`` or ``\0``) as
  the separator between the ``object_name`` and the ``version_id`` of a
  key
- ``version_id``: is the version identifier; this encodes the ordering
  information in the format described above as metadata orders keys
  alphabetically

An example of a key in metadata:
``foo\01234567890000777PARIS  1234.123456`` indicating that this specific
version of ``foo`` was the ``000777``\ th entry created during the epoch
``1234567890`` in the replication group ``PARIS`` with ``1234.123456``
as ``seq_id``.

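As an aside, the key format above can be illustrated with a couple of small
helpers (these are illustrative only, not the actual Arsenal utilities):

.. code:: js

    // Illustration of the versioned-key format: object_name + '\0' + version_id.
    const VID_SEP = '\u0000'; // the null-character separator

    // Build the metadata key for one specific version of an object.
    function formatVersionKey(objectName, versionId) {
        return `${objectName}${VID_SEP}${versionId}`;
    }

    // Split a metadata key back into object name and version ID.
    function parseVersionKey(key) {
        const idx = key.indexOf(VID_SEP);
        if (idx === -1) {
            return { objectName: key }; // master version key, no version ID
        }
        return { objectName: key.slice(0, idx), versionId: key.slice(idx + 1) };
    }

    // The example key from the text above:
    const key = formatVersionKey('foo', '1234567890000777PARIS  1234.123456');
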
Master Version
~~~~~~~~~~~~~~

We store a copy of the latest version of an object's metadata using
``object_name`` as the key; this version is called the master version.
The master version of each object facilitates the standard GET
operation, which would otherwise need to scan among the list of versions
of an object for its latest version.

The following table shows the layout of all versions of ``foo`` in the
first example stored in the metadata (with dot ``.`` representing the
null separator):

+----------+---------+
| key      | value   |
+==========+=========+
| foo      | B       |
+----------+---------+
| foo.v2   | B       |
+----------+---------+
| foo.v1   | A       |
+----------+---------+

Metadata Versioning Options
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Zenko CloudServer sends instructions to the metadata engine about whether to
create a new version or overwrite, retrieve, or delete a specific
version by sending values for special options in PUT, GET, or DELETE
calls to metadata. The metadata engine can also list versions in the
database, which is used by Zenko CloudServer to list object versions.

These only describe the basic CRUD operations that the metadata engine
can handle. How these options are used by the S3 API to generate and
update versions is described more comprehensively in `"Implementation of
Bucket Versioning in
API" <#implementation-of-bucket-versioning-in-api>`__.

Note: all operations (PUT and DELETE) that generate a new version of an
object will return the ``version_id`` of the new version to the API.

PUT
^^^

- no options: original PUT operation, will update the master version
- ``versioning: true`` create a new version of the object, then update
  the master version with this version.
- ``versionId: <versionId>`` create or update a specific version (for updating
  version's ACL or tags, or remote updates in geo-replication)

  * if the version identified by ``versionId`` happens to be the latest
    version, the master version will be updated as well
  * if the master version is not as recent as the version identified by
    ``versionId``, as may happen with cross-region replication, the master
    will be updated as well
  * note that with ``versionId`` set to an empty string ``''``, it will
    overwrite the master version only (same as no options, but the master
    version will have a ``versionId`` property set in its metadata like
    any other version). The ``versionId`` will never be exposed to an
    external user, but setting this internal-only ``versionId`` enables
    Zenko CloudServer to find this version later if it is no longer the master.
    This option of ``versionId`` set to ``''`` is used for creating null
    versions once versioning has been suspended, which is discussed in
    `"Null Version Management" <#null-version-management>`__.

In general, only one option is used at a time. When ``versionId`` and
``versioning`` are both set, only the ``versionId`` option will have an effect.

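As a rough illustration of how these options are passed, the sketch below
assumes a metadata wrapper exposing a
``putObjectMD(bucketName, objectName, objectMD, params, log, callback)`` call;
the wrapper, bucket name, object metadata and version ID used here are
placeholders, not the exact CloudServer internals.

.. code:: js

    // Sketch only: maps the PUT options described above onto metadata calls.
    function putExamples(metadata, bucketName, objectMD, log) {
        const cb = err => {
            if (err) {
                log.error('metadata PUT failed', { error: err });
            }
        };

        // 1. No option: overwrite the master version only.
        metadata.putObjectMD(bucketName, 'foo', objectMD, {}, log, cb);

        // 2. versioning: true -- create a new version, then update the master.
        metadata.putObjectMD(bucketName, 'foo', objectMD,
            { versioning: true }, log, cb);

        // 3. versionId: <id> -- create or update one specific version
        //    (for ACL or tag updates, or remote updates in geo-replication).
        metadata.putObjectMD(bucketName, 'foo', objectMD,
            { versionId: '98506163554375999999PARIS  0.a430a1f85c6ec' }, log, cb);

        // 4. versionId: '' -- overwrite the master version only, while
        //    recording an internal-only versionId in its metadata
        //    (used for null versions, see "Null Version Management").
        metadata.putObjectMD(bucketName, 'foo', objectMD,
            { versionId: '' }, log, cb);
    }
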
DELETE
^^^^^^

- no options: original DELETE operation, will delete the master version
- ``versionId: <versionId>`` delete a specific version

A deletion targeting the latest version of an object has to:

- delete the specified version identified by ``versionId``
- replace the master version with a version that is a placeholder for
  deletion

  - this version contains a special keyword, 'isPHD', to indicate the
    master version was deleted and needs to be updated

- initiate a repair operation to update the value of the master
  version:

  - involves listing the versions of the object and getting the latest
    version to replace the placeholder delete version
  - if no more versions exist, metadata deletes the master version,
    removing the key from metadata

Note: all of this happens in metadata before responding to the front-end API,
and only when the metadata engine is instructed by Zenko CloudServer to delete
a specific version or the master version.
See section `"Delete Markers" <#delete-markers>`__ for a description of what
happens when a Delete Object request is sent to the S3 API.

GET
^^^

- no options: original GET operation, will get the master version
- ``versionId: <versionId>`` retrieve a specific version

The implementation of a GET operation does not change compared to the
standard version. A standard GET without versioning information would
get the master version of a key. A version-specific GET would retrieve
the specific version identified by the key for that version.

LIST
^^^^

For a standard LIST on a bucket, metadata iterates through the keys by
using the separator (``\0``, represented by ``.`` in examples) as an
extra delimiter. For a listing of all versions of a bucket, there is no
change compared to the original listing function. Instead, the API
component returns all the keys in a List Objects call and filters for
just the keys of the master versions in a List Object Versions call.

For example, a standard LIST operation against the keys in a table below
would return from metadata the list of
``[ foo/bar, bar, qux/quz, quz ]``.

+--------------+
| key          |
+==============+
| foo/bar      |
+--------------+
| foo/bar.v2   |
+--------------+
| foo/bar.v1   |
+--------------+
| bar          |
+--------------+
| qux/quz      |
+--------------+
| qux/quz.v2   |
+--------------+
| qux/quz.v1   |
+--------------+
| quz          |
+--------------+
| quz.v2       |
+--------------+
| quz.v1       |
+--------------+

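As an illustration of how master versions can be told apart from individual
version keys (the actual listing algorithms live in the Arsenal repository,
see the Listing section below), the keys that do not contain the null
separator are exactly the master keys:

.. code:: js

    // Illustration only: filter master-version keys out of a raw key listing.
    const VID_SEP = '\u0000'; // null separator between object name and version ID

    // The keys from the table above, with '.' replaced by the real separator.
    const allKeys = [
        'foo/bar', `foo/bar${VID_SEP}v2`, `foo/bar${VID_SEP}v1`,
        'bar',
        'qux/quz', `qux/quz${VID_SEP}v2`, `qux/quz${VID_SEP}v1`,
        'quz', `quz${VID_SEP}v2`, `quz${VID_SEP}v1`,
    ];

    // Master versions are exactly the keys without the separator.
    const masterKeys = allKeys.filter(key => !key.includes(VID_SEP));
    // => [ 'foo/bar', 'bar', 'qux/quz', 'quz' ]
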
Implementation of Bucket Versioning in API
------------------------------------------

Object Metadata Versioning Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To access all the information needed to properly handle all cases that
may exist in versioned operations, the API stores certain
versioning-related information in the metadata attributes of each
version's object metadata.

These are the versioning-related metadata properties:

- ``isNull``: whether the version being stored is a null version.
- ``nullVersionId``: the unencoded version ID of the latest null
  version that existed before storing a non-null version.
- ``isDeleteMarker``: whether the version being stored is a delete
  marker.

The metadata engine also sets one additional metadata property when
creating the version.

- ``versionId``: the unencoded version ID of the version being stored.

Null versions and delete markers are described in further detail in
their own subsections.

Creation of New Versions
~~~~~~~~~~~~~~~~~~~~~~~~

When versioning is enabled in a bucket, APIs which normally result in
the creation of objects, such as Put Object, Complete Multipart Upload
and Copy Object, will generate new versions of objects.

Zenko CloudServer creates a new version and updates the master version using the
``versioning: true`` option in PUT calls to the metadata engine. As an
example, when two consecutive Put Object requests are sent to the Zenko
CloudServer for a versioning-enabled bucket with the same key names, there
are two corresponding metadata PUT calls with the ``versioning`` option
set to true.

The PUT calls to metadata and resulting keys are shown below:

(1) PUT foo (first put), versioning: ``true``

+----------+---------+
| key      | value   |
+==========+=========+
| foo      | A       |
+----------+---------+
| foo.v1   | A       |
+----------+---------+

(2) PUT foo (second put), versioning: ``true``

+----------+---------+
| key      | value   |
+==========+=========+
| foo      | B       |
+----------+---------+
| foo.v2   | B       |
+----------+---------+
| foo.v1   | A       |
+----------+---------+

Null Version Management
^^^^^^^^^^^^^^^^^^^^^^^

In a bucket without versioning, or when versioning is suspended, putting
an object with the same name twice should result in the previous object
being overwritten. This is managed with null versions.

Only one null version should exist at any given time, and it is
identified in Zenko CloudServer requests and responses with the version
id "null".

Case 1: Putting Null Versions
'''''''''''''''''''''''''''''

With respect to metadata, since the null version is overwritten by
subsequent null versions, the null version is initially stored in the
master key alone, as opposed to being stored in the master key and a new
version. Zenko CloudServer checks if versioning is suspended or has never been
configured, and sets the ``versionId`` option to ``''`` in PUT calls to
the metadata engine when creating a new null version.

If the master version is a null version, Zenko CloudServer also sends a DELETE
call to metadata prior to the PUT, in order to clean up any pre-existing null
versions which may, in certain edge cases, have been stored as a separate
version. [1]_

The tables below summarize the calls to metadata and the resulting keys if
we put an object 'foo' twice, when versioning has not been enabled or is
suspended.

(1) PUT foo (first put), versionId: ``''``

+--------------+---------+
| key          | value   |
+==============+=========+
| foo (null)   | A       |
+--------------+---------+

(2A) DELETE foo (clean-up delete before second put),
versionId: ``<version id of master version>``

+--------------+---------+
| key          | value   |
+==============+=========+
|              |         |
+--------------+---------+

(2B) PUT foo (second put), versionId: ``''``

+--------------+---------+
| key          | value   |
+==============+=========+
| foo (null)   | B       |
+--------------+---------+

The S3 API also sets the ``isNull`` attribute to ``true`` in the version
metadata before storing the metadata for these null versions.

.. [1] Some examples of these cases are: (1) when there is a null version
   that is the second-to-latest version, and the latest version has been
   deleted, causing metadata to repair the master value with the value of
   the null version and (2) when putting object tag or ACL on a null
   version that is the master version, as explained in `"Behavior of
   Object-Targeting APIs" <#behavior-of-object-targeting-apis>`__.

Case 2: Preserving Existing Null Versions in Versioning-Enabled Bucket
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

Null versions are preserved when new non-null versions are created after
versioning has been enabled or re-enabled.

If the master version is the null version, the S3 API preserves the
current null version by storing it as a new key ``(3A)`` in a separate
PUT call to metadata, prior to overwriting the master version ``(3B)``.
This implies the null version may not necessarily be the latest or
master version.

To determine whether the master version is a null version, the S3 API
checks if the master version's ``isNull`` property is set to ``true``,
or if the ``versionId`` attribute of the master version is undefined
(indicating it is a null version that was put before bucket versioning
was configured).

Continuing the example from Case 1, if we enabled versioning and put
another object, the calls to metadata and resulting keys would resemble
the following:

(3A) PUT foo, versionId: ``<versionId of master version>`` if defined or
``<non-versioned object id>``

+-----------------+---------+
| key             | value   |
+=================+=========+
| foo             | B       |
+-----------------+---------+
| foo.v1 (null)   | B       |
+-----------------+---------+

(3B) PUT foo, versioning: ``true``

+-----------------+---------+
| key             | value   |
+=================+=========+
| foo             | C       |
+-----------------+---------+
| foo.v2          | C       |
+-----------------+---------+
| foo.v1 (null)   | B       |
+-----------------+---------+

To prevent issues with concurrent requests, Zenko CloudServer ensures the null
version is stored with the same version ID by using ``versionId`` option.
Zenko CloudServer sets the ``versionId`` option to the master version's
``versionId`` metadata attribute value during the PUT. This creates a new
version with the same version ID of the existing null master version.

The null version's ``versionId`` attribute may be undefined because it
was generated before the bucket versioning was configured. In that case,
a version ID is generated using the max epoch and sequence values
possible so that the null version will be properly ordered as the last
entry in a metadata listing. This value ("non-versioned object id") is
used in the PUT call with the ``versionId`` option.

Case 3: Overwriting a Null Version That is Not Latest Version
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

Normally when versioning is suspended, Zenko CloudServer uses the
``versionId: ''`` option in a PUT to metadata to create a null version.
This also overwrites an existing null version if it is the master version.

However, if there is a null version that is not the latest version,
Zenko CloudServer cannot rely on the ``versionId: ''`` option, since it will
not overwrite the existing null version. Instead, before creating a new null
version, the Zenko CloudServer API must send a separate DELETE call to metadata
specifying the version id of the current null version for delete.

To do this, when storing a null version (3A above) before storing a new
non-null version, Zenko CloudServer records the version's ID in the
``nullVersionId`` attribute of the non-null version. For steps 3A and 3B above,
these are the values stored in the ``nullVersionId`` of each version's metadata:

(3A) PUT foo, versioning: ``true``

+-----------------+---------+-----------------------+
| key             | value   | value.nullVersionId   |
+=================+=========+=======================+
| foo             | B       | undefined             |
+-----------------+---------+-----------------------+
| foo.v1 (null)   | B       | undefined             |
+-----------------+---------+-----------------------+

(3B) PUT foo, versioning: ``true``

+-----------------+---------+-----------------------+
| key             | value   | value.nullVersionId   |
+=================+=========+=======================+
| foo             | C       | v1                    |
+-----------------+---------+-----------------------+
| foo.v2          | C       | v1                    |
+-----------------+---------+-----------------------+
| foo.v1 (null)   | B       | undefined             |
+-----------------+---------+-----------------------+

If defined, the ``nullVersionId`` of the master version is used with the
``versionId`` option in a DELETE call to metadata if a Put Object
request is received when versioning is suspended in a bucket.

(4A) DELETE foo, versionId: ``<nullVersionId of master version>`` (v1)

+----------+---------+
| key      | value   |
+==========+=========+
| foo      | C       |
+----------+---------+
| foo.v2   | C       |
+----------+---------+

Then the master version is overwritten with the new null version:

(4B) PUT foo, versionId: ``''``

+--------------+---------+
| key          | value   |
+==============+=========+
| foo (null)   | D       |
+--------------+---------+
| foo.v2       | C       |
+--------------+---------+

The ``nullVersionId`` attribute is also used to retrieve the correct
version when the version ID "null" is specified in certain object-level
APIs, described further in the section `"Null Version
Mapping" <#null-version-mapping>`__.

Specifying Versions in APIs for Putting Versions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Since Zenko CloudServer does not allow an overwrite of existing version data,
Put Object, Complete Multipart Upload and Copy Object return
``400 InvalidArgument`` if a specific version ID is specified in the
request query, e.g. for a ``PUT /foo?versionId=v1`` request.

PUT Example
~~~~~~~~~~~

When Zenko CloudServer receives a request to PUT an object:

- It checks first if versioning has been configured
- If it has not been configured, Zenko CloudServer proceeds to put the new
  data, puts the metadata by overwriting the master version, and proceeds to
  delete any pre-existing data

If versioning has been configured, Zenko CloudServer checks the following:

Versioning Enabled
^^^^^^^^^^^^^^^^^^

If versioning is enabled and there is existing object metadata:

- If the master version is a null version (``isNull: true``) or has no
  version ID (put before versioning was configured):

  - store the null version metadata as a new version
  - create a new version and overwrite the master version

    - set ``nullVersionId``: version ID of the null version that was
      stored

If versioning is enabled and the master version is not null; or there is
no existing object metadata:

- create a new version and store it, and overwrite the master version

Versioning Suspended
^^^^^^^^^^^^^^^^^^^^

If versioning is suspended and there is existing object metadata:

- If the master version has no version ID:

  - overwrite the master version with the new metadata (PUT ``versionId: ''``)
  - delete previous object data

- If the master version is a null version:

  - delete the null version using the ``versionId`` metadata attribute of the
    master version (PUT ``versionId: <versionId of master object MD>``)
  - put a new null version (PUT ``versionId: ''``)

- If master is not a null version and ``nullVersionId`` is defined in
  the object's metadata:

  - delete the current null version metadata and data
  - overwrite the master version with the new metadata

If there is no existing object metadata, create the new null version as
the master version.

In each of the above cases, set ``isNull`` metadata attribute to true
when creating the new null version.

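As an illustration only (the helper names and signatures below are made up,
not CloudServer's actual internals), the suspended-versioning PUT decision
above can be sketched as follows:

.. code:: js

    // Sketch of the "Versioning Suspended" decision tree described above.
    // masterMD is the existing master version's metadata (or undefined);
    // putNullVersion/deleteVersion stand in for the metadata PUT/DELETE calls.
    // In every branch the new null version is stored with isNull: true.
    function putWhenVersioningSuspended(masterMD, putNullVersion, deleteVersion) {
        if (!masterMD) {
            // No existing object metadata: the new null version becomes the master.
            return putNullVersion({ versionId: '' });
        }
        if (masterMD.versionId === undefined) {
            // Master has no version ID (put before versioning was configured):
            // overwrite it in place.
            return putNullVersion({ versionId: '' });
        }
        if (masterMD.isNull) {
            // Master is a null version: remove the old null version first,
            // then put the new null version.
            deleteVersion({ versionId: masterMD.versionId });
            return putNullVersion({ versionId: '' });
        }
        if (masterMD.nullVersionId !== undefined) {
            // A null version exists but is not the master: delete it before
            // overwriting the master with the new null version.
            deleteVersion({ versionId: masterMD.nullVersionId });
        }
        return putNullVersion({ versionId: '' });
    }
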
Behavior of Object-Targeting APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

API methods which can target existing objects or versions, such as Get
Object, Head Object, Get Object ACL, Put Object ACL, Copy Object and
Copy Part, will perform the action on the latest version of an object if
no version ID is specified in the request query or relevant request
header (``x-amz-copy-source-version-id`` for Copy Object and Copy Part
APIs).

Two exceptions are the Delete Object and Multi-Object Delete APIs, which
will instead attempt to create delete markers, described in the
following section, if no version ID is specified.

No versioning options are necessary to retrieve the latest version from
metadata, since the master version is stored in a key with the name of
the object. However, when updating the latest version, such as with the
Put Object ACL API, Zenko CloudServer sets the ``versionId`` option in the
PUT call to metadata to the value stored in the object metadata's ``versionId``
attribute. This is done in order to update the metadata both in the
master version and the version itself, if it is not a null version. [2]_

When a version id is specified in the request query for these APIs, e.g.
``GET /foo?versionId=v1``, Zenko CloudServer will attempt to decode the version
ID and perform the action on the appropriate version. To do so, the API sets
the value of the ``versionId`` option to the decoded version ID in the
metadata call.

Delete Markers
^^^^^^^^^^^^^^

If versioning has not been configured for a bucket, the Delete Object
and Multi-Object Delete APIs behave as their standard APIs.

If versioning has been configured, Zenko CloudServer deletes object or version
data only if a specific version ID is provided in the request query, e.g.
``DELETE /foo?versionId=v1``.

If no version ID is provided, S3 creates a delete marker by creating a
0-byte version with the metadata attribute ``isDeleteMarker: true``. The
S3 API will return a ``404 NoSuchKey`` error in response to requests
getting or heading an object whose latest version is a delete marker.

To restore a previous version as the latest version of an object, the
delete marker must be deleted, by the same process as deleting any other
version.

The response varies when targeting an object whose latest version is a
delete marker for other object-level APIs that can target existing
objects and versions, without specifying the version ID.

- Get Object, Head Object, Get Object ACL, Object Copy and Copy Part
  return ``404 NoSuchKey``.
- Put Object ACL and Put Object Tagging return
  ``405 MethodNotAllowed``.

These APIs respond to requests specifying the version ID of a delete
marker with the error ``405 MethodNotAllowed``, in general. Copy Part
and Copy Object respond with ``400 Invalid Request``.

See section `"Delete Example" <#delete-example>`__ for a summary.

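For illustration, a delete marker is simply a version whose metadata carries
``isDeleteMarker: true`` and no data; the sketch below is indicative only,
and the fields other than the 0-byte length and ``isDeleteMarker`` are
placeholders rather than the exact attribute set stored by CloudServer:

.. code:: js

    // Indicative shape of a delete marker's version metadata.
    const deleteMarkerMD = {
        'content-length': 0,   // a delete marker carries no object data
        isDeleteMarker: true,  // distinguishes it from a regular version
        versionId: '98506163554375999999PARIS  0.a430a1f85c6ec', // assigned by metadata
        'last-modified': new Date().toJSON(),
    };
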
Null Version Mapping
^^^^^^^^^^^^^^^^^^^^

When the null version is specified in a request with the version ID
"null", the S3 API must use the ``nullVersionId`` stored in the latest
version to retrieve the current null version, if the null version is not
the latest version.

Thus, getting the null version is a two step process:

1. Get the latest version of the object from metadata. If the latest
   version's ``isNull`` property is ``true``, then use the latest
   version's metadata. Otherwise,
2. Get the null version of the object from metadata, using the internal
   version ID of the current null version stored in the latest version's
   ``nullVersionId`` metadata attribute.

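The two-step lookup can be sketched as follows; ``getObjectMD`` stands in for
the metadata GET call and is an assumption for illustration, not the exact
CloudServer signature:

.. code:: js

    // Sketch of the two-step lookup for version ID "null" described above.
    function getNullVersionMD(bucketName, objectName, getObjectMD, log, cb) {
        // Step 1: read the master (latest) version's metadata.
        getObjectMD(bucketName, objectName, {}, log, (err, masterMD) => {
            if (err) {
                return cb(err);
            }
            if (masterMD.isNull || masterMD.versionId === undefined) {
                // The latest version is itself the null version.
                return cb(null, masterMD);
            }
            if (masterMD.nullVersionId === undefined) {
                // No null version currently exists for this object.
                return cb(new Error('NoSuchVersion'));
            }
            // Step 2: fetch the null version through the nullVersionId pointer.
            return getObjectMD(bucketName, objectName,
                { versionId: masterMD.nullVersionId }, log, cb);
        });
    }
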
DELETE Example
~~~~~~~~~~~~~~

The following steps are used in the delete logic for delete marker
creation:

- If versioning has not been configured: attempt to delete the object
- If request is version-specific delete request: attempt to delete the
  version
- otherwise, if not a version-specific delete request and versioning
  has been configured:

  - create a new 0-byte content-length version
  - in version's metadata, set an 'isDeleteMarker' property to true

- Return the version ID of any version deleted or any delete marker
  created
- Set response header ``x-amz-delete-marker`` to true if a delete
  marker was deleted or created

The Multi-Object Delete API follows the same logic for each of the
objects or versions listed in an XML request. Note that a delete request
can result in the creation of a deletion marker even if the object
requested to delete does not exist in the first place.

Object-level APIs which can target existing objects and versions perform
the following checks regarding delete markers:

- If not a version-specific request and versioning has been configured,
  check the metadata of the latest version

  - If the 'isDeleteMarker' property is set to true, return
    ``404 NoSuchKey`` or ``405 MethodNotAllowed``

- If it is a version-specific request, check the object metadata of the
  requested version

  - If the ``isDeleteMarker`` property is set to true, return
    ``405 MethodNotAllowed`` or ``400 InvalidRequest``

.. [2] If it is a null version, this call will overwrite the null version
   if it is stored in its own key (``foo\0<versionId>``). If the null
   version is stored only in the master version, this call will both
   overwrite the master version *and* create a new key
   (``foo\0<versionId>``), resulting in the edge case referred to by the
   previous footnote [1]_.

Data-metadata daemon Architecture and Operational guide
========================================================

This document presents the architecture of the data-metadata daemon
(dmd) used for the community edition of Zenko CloudServer. It also provides a
guide on how to operate it.

The dmd is responsible for storing and retrieving Zenko CloudServer data and
metadata, and is accessed by Zenko CloudServer connectors through socket.io
(metadata) and REST (data) APIs.

It has been designed such that more than one Zenko CloudServer connector can
access the same buckets by communicating with the dmd. It also means that
the dmd can be hosted on a separate container or machine.

Operation
---------

Startup
~~~~~~~

The simplest deployment is still to launch with ``yarn start``; this will
start one instance of the Zenko CloudServer connector and will listen on the
locally bound dmd ports 9990 and 9991 (by default, see below).

The dmd can be started independently from the Zenko CloudServer by running this
command in the Zenko CloudServer directory:

::

    yarn run start_dmd

This will open two ports:

- one is based on socket.io and is used for metadata transfers (9990 by
  default)

- the other is a REST interface used for data transfers (9991 by
  default)

Then, one or more instances of Zenko CloudServer without the dmd can be started
elsewhere with:

.. code:: sh

    yarn run start_s3server

Configuration
~~~~~~~~~~~~~

Most configuration happens in ``config.json`` for Zenko CloudServer; local
storage paths can be changed where the dmd is started using environment
variables, like before: ``S3DATAPATH`` and ``S3METADATAPATH``.

In ``config.json``, the following sections are used to configure access
to the dmd through separate configuration of the data and metadata
access:

::

    "metadataClient": {
        "host": "localhost",
        "port": 9990
    },
    "dataClient": {
        "host": "localhost",
        "port": 9991
    },

To run a remote dmd, you have to do the following:

- change both ``"host"`` attributes to the IP or host name where the
  dmd is run.

- Modify the ``"bindAddress"`` attributes in ``"metadataDaemon"`` and
  ``"dataDaemon"`` sections where the dmd is run to accept remote
  connections (e.g. ``"::"``)

Architecture
------------

This section gives a bit more insight on how it works internally.

.. figure:: ./images/data_metadata_daemon_arch.png
   :alt: Architecture diagram

   ./images/data\_metadata\_daemon\_arch.png

Metadata on socket.io
~~~~~~~~~~~~~~~~~~~~~

This communication is based on an RPC system based on socket.io events
sent by Zenko CloudServer connectors, received by the DMD and acknowledged back
to the Zenko CloudServer connector.

The actual payload sent through socket.io is a JSON-serialized form of
the RPC call name and parameters, along with some additional information
like the request UIDs, and the sub-level information, sent as object
attributes in the JSON request.

With introduction of versioning support, the updates are now gathered in
the dmd for some number of milliseconds max, before being batched as a
single write to the database. This is done server-side, so the API is
meant to send individual updates.

Four RPC commands are available to clients: ``put``, ``get``, ``del``
and ``createReadStream``. They more or less map the parameters accepted
by the corresponding calls in the LevelUp implementation of LevelDB.
They differ in the following:

- The ``sync`` option is ignored (under the hood, puts are gathered
  into batches which have their ``sync`` property enforced when they
  are committed to the storage)

- Some additional versioning-specific options are supported

- ``createReadStream`` becomes asynchronous, takes an additional
  callback argument and returns the stream in the second callback
  parameter

Debugging the socket.io exchanges can be achieved by running the daemon
with ``DEBUG='socket.io*'`` environment variable set.

One parameter controls the timeout value after which RPC commands sent
end with a timeout error; it can be changed either:

- via the ``DEFAULT_CALL_TIMEOUT_MS`` option in
  ``lib/network/rpc/rpc.js``

- or in the constructor call of the ``MetadataFileClient`` object (in
  ``lib/metadata/bucketfile/backend.js``) as ``callTimeoutMs``.

Default value is 30000.

A specific implementation deals with streams, currently used for listing
a bucket. Streams emit ``"stream-data"`` events that pack one or more
items in the listing, and a special ``"stream-end"`` event when done.
Flow control is achieved by allowing a certain number of "in flight"
packets that have not received an ack yet (5 by default). Two options
can tune the behavior (for better throughput or getting it more robust
on weak networks); they have to be set in ``mdserver.js`` file directly,
as there is no support in ``config.json`` for now for those options:

- ``streamMaxPendingAck``: max number of pending ack events not yet
  received (default is 5)

- ``streamAckTimeoutMs``: timeout for receiving an ack after an output
  stream packet is sent to the client (default is 5000)

Data exchange through the REST data port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Data is read and written with REST semantics.

The web server recognizes a base path in the URL of ``/DataFile`` to be
a request to the data storage service.

PUT
^^^

A PUT on ``/DataFile`` URL and contents passed in the request body will
write a new object to the storage.

On success, a ``201 Created`` response is returned and the new URL to
the object is returned via the ``Location`` header (e.g.
``Location: /DataFile/50165db76eecea293abfd31103746dadb73a2074``). The
raw key can then be extracted simply by removing the leading
``/DataFile`` service information from the returned URL.

GET
^^^

A GET is simply issued with REST semantics, e.g.:

::

    GET /DataFile/50165db76eecea293abfd31103746dadb73a2074 HTTP/1.1

A GET request can ask for a specific range. Range support is complete
except for multiple byte ranges.

DELETE
^^^^^^

DELETE is similar to GET, except that a ``204 No Content`` response is
returned on success.

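As a minimal sketch (assuming the default data port 9991 from the
Configuration section above; headers and error handling are simplified),
a data PUT followed by extraction of the raw key could look like this:

.. code:: js

    // Sketch only: write an object through the dmd REST data port and
    // recover the raw key from the Location header of the 201 response.
    const http = require('http');

    const body = Buffer.from('hello world');
    const req = http.request({
        host: 'localhost',
        port: 9991,           // dataDaemon port (default)
        method: 'PUT',
        path: '/DataFile',
        headers: { 'Content-Length': body.length },
    }, res => {
        // e.g. Location: /DataFile/50165db76eecea293abfd31103746dadb73a2074
        const location = res.headers.location || '';
        const rawKey = location.replace(/^\/DataFile\//, '');
        console.log(res.statusCode, rawKey);
    });
    req.end(body);
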
Listing
=======

Listing Types
-------------

We use three different types of metadata listing for various operations.
Here are the scenarios we use each for:

- 'Delimiter' - when no versions are possible in the bucket since it is
  an internally-used only bucket which is not exposed to a user.
  Namely,

  1. to list objects in the "user's bucket" to respond to a GET SERVICE
     request and
  2. to do internal listings on an MPU shadow bucket to complete multipart
     upload operations.

- 'DelimiterVersion' - to list all versions in a bucket
- 'DelimiterMaster' - to list just the master versions of objects in a
  bucket

Algorithms
----------

The algorithms for each listing type can be found in the open-source
`scality/Arsenal <https://github.com/scality/Arsenal>`__ repository, in
`lib/algos/list <https://github.com/scality/Arsenal/tree/master/lib/algos/list>`__.

Encryption
==========

With CloudServer, there are two possible methods of at-rest encryption.
(1) We offer bucket level encryption where Scality CloudServer itself handles at-rest
encryption for any object that is in an 'encrypted' bucket, regardless of what
the location-constraint for the data is and
(2) If the location-constraint specified for the data is of type AWS,
you can choose to use AWS server side encryption.

Note: bucket level encryption is not available on the standard AWS
S3 protocol, so normal AWS S3 clients will not provide the option to send a
header when creating a bucket. We have created a simple tool to enable you
to easily create an encrypted bucket.

Example:
--------

Creating encrypted bucket using our encrypted bucket tool in the bin directory

.. code:: shell

    ./create_encrypted_bucket.js -a accessKey1 -k verySecretKey1 -b bucketname -h localhost -p 8000

AWS backend
------------

With real AWS S3 as a location-constraint, you have to configure the
location-constraint as follows

.. code:: json

    "awsbackend": {
        "type": "aws_s3",
        "legacyAwsBehavior": true,
        "details": {
            "serverSideEncryption": true,
            ...
        }
    },

Then, every time an object is put to that data location, we pass the following
header to AWS: ``x-amz-server-side-encryption: AES256``

Note: due to these options, it is possible to configure encryption by both
CloudServer and AWS S3 (if you put an object to a CloudServer bucket which has
the encryption flag AND the location-constraint for the data is AWS S3 with
serverSideEncryption set to true).

@@ -1,146 +0,0 @@

# Bucket Policy Documentation

## Description

Bucket policy is a method of controlling access to a user's account at the
resource level.
There are three associated APIs:

- PUT Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTpolicy.html)
- GET Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETpolicy.html)
- DELETE Bucket policy (see https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEpolicy.html)

More information on bucket policies in general can be found at
https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html.

## Requirements

To prevent loss of access to a bucket, the root owner of a bucket will always
be able to perform any of the three bucket policy-related operations, even
if permission is explicitly denied.
All other users must have permission to perform the desired operation.

## Design

On a PUTBucketPolicy request, the user provides a policy in JSON format.
The policy is evaluated against our policy schema in Arsenal and, once
validated, is stored as part of the bucket's metadata.
On a GETBucketPolicy request, the policy is retrieved from the bucket's
metadata.
On a DELETEBucketPolicy request, the policy is deleted from the bucket's
metadata.

All other APIs are updated to check if a bucket policy is attached to the bucket
the request is made on. If there is a policy, user authorization to perform
the requested action is checked.

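For illustration, a policy can be attached with any S3-compatible client; the
snippet below uses the AWS SDK for JavaScript, and the endpoint, credentials,
bucket name and ARNs are placeholders only:

```js
// Sketch: attach a bucket policy to a CloudServer bucket.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000', // CloudServer endpoint (placeholder)
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

const policy = {
    Version: '2012-10-17',
    Statement: [{
        Effect: 'Allow',
        Principal: { AWS: 'arn:aws:iam::123456789012:root' },
        Action: 's3:GetObject',
        Resource: 'arn:aws:s3:::examplebucket/*',
    }],
};

// The policy travels as a JSON string; once validated against the Arsenal
// schema it is stored in the bucket's metadata.
s3.putBucketPolicy({
    Bucket: 'examplebucket',
    Policy: JSON.stringify(policy),
}, err => {
    if (err) {
        console.error('putBucketPolicy failed:', err);
    } else {
        console.log('bucket policy attached');
    }
});
```
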
### Differences Between Bucket and IAM Policies

IAM policies are attached to an IAM identity and define what actions that
identity is allowed to or denied from doing on what resource.
Bucket policies attach only to buckets and define what actions are allowed or
denied for which principals on that bucket. Permissions specified in a bucket
policy apply to all objects in that bucket unless otherwise specified.

Besides their attachment origins, the main structural difference between
IAM policy and bucket policy is the requirement of a "Principal" element in
bucket policies. This field is redundant in IAM policies.

### Policy Validation
|
|
||||||
|
|
||||||
For general guidelines for bucket policy structure, see examples here:
|
|
||||||
https://docs.aws.amazon.com/AmazonS3/latest/dev//example-bucket-policies.html.
|
|
||||||
|
|
||||||
Each bucket policy statement object requires at least four keys:
|
|
||||||
"Effect", "Principle", "Resource", and "Action".
|
|
||||||
|
|
||||||
"Effect" defines the effect of the policy and can have a string value of either
|
|
||||||
"Allow" or "Deny".
|
|
||||||
"Resource" defines to which bucket or list of buckets a policy is attached.
|
|
||||||
An object within the bucket is also a valid resource. The element value can be
|
|
||||||
either a single bucket or object ARN string or an array of ARNs.
|
|
||||||
"Action" lists which action(s) the policy controls. Its value can also be either
|
|
||||||
a string or array of S3 APIs. Each action is the API name prepended by "s3:".
|
|
||||||
"Principle" specifies which user(s) are granted or denied access to the bucket
|
|
||||||
resource. Its value can be a string or an object containing an array of users.
|
|
||||||
Valid users can be identified with an account ARN, account id, or user ARN.
|
|
||||||
|
|
||||||
There are also two optional bucket policy statement keys: Sid and Condition.
|
|
||||||
|
|
||||||
"Sid" stands for "statement id". If this key is not included, one will be
|
|
||||||
generated for the statement.
|
|
||||||
"Condition" lists the condition under which a statement will take affect.
|
|
||||||
The possibilities are as follows:
|
|
||||||
|
|
||||||
- ArnEquals
|
|
||||||
- ArnEqualsIfExists
|
|
||||||
- ArnLike
|
|
||||||
- ArnLikeIfExists
|
|
||||||
- ArnNotEquals
|
|
||||||
- ArnNotEqualsIfExists
|
|
||||||
- ArnNotLike
|
|
||||||
- ArnNotLikeIfExists
|
|
||||||
- BinaryEquals
|
|
||||||
- BinaryEqualsIfExists
|
|
||||||
- BinaryNotEquals
|
|
||||||
- BinaryNotEqualsIfExists
|
|
||||||
- Bool
|
|
||||||
- BoolIfExists
|
|
||||||
- DateEquals
|
|
||||||
- DateEqualsIfExists
|
|
||||||
- DateGreaterThan
|
|
||||||
- DateGreaterThanEquals
|
|
||||||
- DateGreaterThanEqualsIfExists
|
|
||||||
- DateGreaterThanIfExists
|
|
||||||
- DateLessThan
|
|
||||||
- DateLessThanEquals
|
|
||||||
- DateLessThanEqualsIfExists
|
|
||||||
- DateLessThanIfExists
|
|
||||||
- DateNotEquals
|
|
||||||
- DateNotEqualsIfExists
|
|
||||||
- IpAddress
|
|
||||||
- IpAddressIfExists
|
|
||||||
- NotIpAddress
|
|
||||||
- NotIpAddressIfExists
|
|
||||||
- Null
|
|
||||||
- NumericEquals
|
|
||||||
- NumericEqualsIfExists
|
|
||||||
- NumericGreaterThan
|
|
||||||
- NumericGreaterThanEquals
|
|
||||||
- NumericGreaterThanEqualsIfExists
|
|
||||||
- NumericGreaterThanIfExists
|
|
||||||
- NumericLessThan
|
|
||||||
- NumericLessThanEquals
|
|
||||||
- NumericLessThanEqualsIfExists
|
|
||||||
- NumericLessThanIfExists
|
|
||||||
- NumericNotEquals
|
|
||||||
- NumericNotEqualsIfExists
|
|
||||||
- StringEquals
|
|
||||||
- StringEqualsIfExists
|
|
||||||
- StringEqualsIgnoreCase
|
|
||||||
- StringEqualsIgnoreCaseIfExists
|
|
||||||
- StringLike
|
|
||||||
- StringLikeIfExists
|
|
||||||
- StringNotEquals
|
|
||||||
- StringNotEqualsIfExists
|
|
||||||
- StringNotEqualsIgnoreCase
|
|
||||||
- StringNotEqualsIgnoreCaseIfExists
|
|
||||||
- StringNotLike
|
|
||||||
- StringNotLikeIfExists
|
|
||||||
|
|
||||||
The value of the Condition key will be an object containing the desired
|
|
||||||
condition name as that key. The value of the inner object can be a string, boolean,
|
|
||||||
number, or object, depending on the condition.
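Putting the required and optional keys together, a minimal sketch of a policy (the account ID, bucket name, and IP range are placeholders) can be written to the `policy.json` file used with `put-bucket-policy` above:

```
cat > policy.json << 'EOF'
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowAccountRead",
            "Effect": "Allow",
            "Principal": { "AWS": "arn:aws:iam::123456789012:root" },
            "Action": ["s3:GetObject", "s3:ListBucket"],
            "Resource": ["arn:aws:s3:::mybucket", "arn:aws:s3:::mybucket/*"],
            "Condition": { "IpAddress": { "aws:SourceIp": "10.0.0.0/24" } }
        }
    ]
}
EOF
```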
|
|
||||||
|
|
||||||
## Authorization with Multiple Access Control Mechanisms
|
|
||||||
|
|
||||||
In the case where multiple access control mechanisms (such as IAM policies,
|
|
||||||
bucket policies, and ACLs) refer to the same resource, the principle of
|
|
||||||
least-privilege is applied. Unless an action is explicitly allowed, access will
|
|
||||||
by default be denied. An explicit DENY in any policy will trump another
|
|
||||||
policy's ALLOW for an action. The request will only be allowed if at least one
|
|
||||||
policy specifies an ALLOW, and there is no overriding DENY.
|
|
||||||
|
|
||||||
The following diagram illustrates this logic:
|
|
||||||
|
|
||||||
![Access_Control_Authorization_Chart](./images/access_control_authorization.png)
|
|
345
docs/CLIENTS.rst
|
@ -1,345 +0,0 @@
|
||||||
Clients
|
|
||||||
=========
|
|
||||||
|
|
||||||
List of applications that have been tested with Zenko CloudServer.
|
|
||||||
|
|
||||||
GUI
|
|
||||||
~~~
|
|
||||||
|
|
||||||
`Cyberduck <https://cyberduck.io/?l=en>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
- https://www.youtube.com/watch?v=-n2MCt4ukUg
|
|
||||||
- https://www.youtube.com/watch?v=IyXHcu4uqgU
|
|
||||||
|
|
||||||
`Cloud Explorer <https://www.linux-toys.com/?p=945>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
- https://www.youtube.com/watch?v=2hhtBtmBSxE
|
|
||||||
|
|
||||||
`CloudBerry Lab <http://www.cloudberrylab.com>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
- https://youtu.be/IjIx8g\_o0gY
|
|
||||||
|
|
||||||
Command Line Tools
|
|
||||||
~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
`s3curl <https://github.com/rtdp/s3curl>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
https://github.com/scality/S3/blob/master/tests/functional/s3curl/s3curl.pl
|
|
||||||
|
|
||||||
`aws-cli <http://docs.aws.amazon.com/cli/latest/reference/>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
``~/.aws/credentials`` on Linux, OS X, or Unix or
|
|
||||||
``C:\Users\USERNAME\.aws\credentials`` on Windows
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
[default]
|
|
||||||
aws_access_key_id = accessKey1
|
|
||||||
aws_secret_access_key = verySecretKey1
|
|
||||||
|
|
||||||
``~/.aws/config`` on Linux, OS X, or Unix or
|
|
||||||
``C:\Users\USERNAME\.aws\config`` on Windows
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
[default]
|
|
||||||
region = us-east-1
|
|
||||||
|
|
||||||
Note: ``us-east-1`` is the default region, but you can specify any
|
|
||||||
region.
|
|
||||||
|
|
||||||
See all buckets:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
aws s3 ls --endpoint-url=http://localhost:8000
|
|
||||||
|
|
||||||
Create bucket:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
aws --endpoint-url=http://localhost:8000 s3 mb s3://mybucket
|
|
||||||
|
|
||||||
`s3cmd <http://s3tools.org/s3cmd>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
If using s3cmd as a client to S3 be aware that v4 signature format is
|
|
||||||
buggy in s3cmd versions < 1.6.1.
|
|
||||||
|
|
||||||
``~/.s3cfg`` on Linux, OS X, or Unix or ``C:\Users\USERNAME\.s3cfg`` on
|
|
||||||
Windows
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
[default]
|
|
||||||
access_key = accessKey1
|
|
||||||
secret_key = verySecretKey1
|
|
||||||
host_base = localhost:8000
|
|
||||||
host_bucket = %(bucket).localhost:8000
|
|
||||||
signature_v2 = False
|
|
||||||
use_https = False
|
|
||||||
|
|
||||||
See all buckets:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
s3cmd ls
|
|
||||||
|
|
||||||
`rclone <http://rclone.org/s3/>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
``~/.rclone.conf`` on Linux, OS X, or Unix or
|
|
||||||
``C:\Users\USERNAME\.rclone.conf`` on Windows
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
[remote]
|
|
||||||
type = s3
|
|
||||||
env_auth = false
|
|
||||||
access_key_id = accessKey1
|
|
||||||
secret_access_key = verySecretKey1
|
|
||||||
region = other-v2-signature
|
|
||||||
endpoint = http://localhost:8000
|
|
||||||
location_constraint =
|
|
||||||
acl = private
|
|
||||||
server_side_encryption =
|
|
||||||
storage_class =
|
|
||||||
|
|
||||||
See all buckets:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
rclone lsd remote:
|
|
||||||
|
|
||||||
JavaScript
|
|
||||||
~~~~~~~~~~
|
|
||||||
|
|
||||||
`AWS JavaScript SDK <http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. code:: javascript
|
|
||||||
|
|
||||||
const AWS = require('aws-sdk');
|
|
||||||
|
|
||||||
const s3 = new AWS.S3({
|
|
||||||
accessKeyId: 'accessKey1',
|
|
||||||
secretAccessKey: 'verySecretKey1',
|
|
||||||
endpoint: 'localhost:8000',
|
|
||||||
sslEnabled: false,
|
|
||||||
s3ForcePathStyle: true,
|
|
||||||
});
|
|
||||||
|
|
||||||
JAVA
|
|
||||||
~~~~
|
|
||||||
|
|
||||||
`AWS JAVA SDK <http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Client.html>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. code:: java
|
|
||||||
|
|
||||||
import com.amazonaws.auth.AWSCredentials;
|
|
||||||
import com.amazonaws.auth.BasicAWSCredentials;
|
|
||||||
import com.amazonaws.services.s3.AmazonS3;
|
|
||||||
import com.amazonaws.services.s3.AmazonS3Client;
|
|
||||||
import com.amazonaws.services.s3.S3ClientOptions;
|
|
||||||
import com.amazonaws.services.s3.model.Bucket;
|
|
||||||
|
|
||||||
public class S3 {
|
|
||||||
|
|
||||||
public static void main(String[] args) {
|
|
||||||
|
|
||||||
AWSCredentials credentials = new BasicAWSCredentials("accessKey1",
|
|
||||||
"verySecretKey1");
|
|
||||||
|
|
||||||
// Create a client connection based on credentials
|
|
||||||
AmazonS3 s3client = new AmazonS3Client(credentials);
|
|
||||||
s3client.setEndpoint("http://localhost:8000");
|
|
||||||
// Using path-style requests
|
|
||||||
// (deprecated) s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
|
|
||||||
s3client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
|
|
||||||
|
|
||||||
// Create bucket
|
|
||||||
String bucketName = "javabucket";
|
|
||||||
s3client.createBucket(bucketName);
|
|
||||||
|
|
||||||
// List off all buckets
|
|
||||||
for (Bucket bucket : s3client.listBuckets()) {
|
|
||||||
System.out.println(" - " + bucket.getName());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ruby
|
|
||||||
~~~~
|
|
||||||
|
|
||||||
`AWS SDK for Ruby - Version 2 <http://docs.aws.amazon.com/sdkforruby/api/>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. code:: ruby
|
|
||||||
|
|
||||||
require 'aws-sdk'
|
|
||||||
|
|
||||||
s3 = Aws::S3::Client.new(
|
|
||||||
:access_key_id => 'accessKey1',
|
|
||||||
:secret_access_key => 'verySecretKey1',
|
|
||||||
:endpoint => 'http://localhost:8000',
|
|
||||||
:force_path_style => true
|
|
||||||
)
|
|
||||||
|
|
||||||
resp = s3.list_buckets
|
|
||||||
|
|
||||||
`fog <http://fog.io/storage/>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. code:: ruby
|
|
||||||
|
|
||||||
require "fog"
|
|
||||||
|
|
||||||
connection = Fog::Storage.new(
|
|
||||||
{
|
|
||||||
:provider => "AWS",
|
|
||||||
:aws_access_key_id => 'accessKey1',
|
|
||||||
:aws_secret_access_key => 'verySecretKey1',
|
|
||||||
:endpoint => 'http://localhost:8000',
|
|
||||||
:path_style => true,
|
|
||||||
:scheme => 'http',
|
|
||||||
})
|
|
||||||
|
|
||||||
Python
|
|
||||||
~~~~~~
|
|
||||||
|
|
||||||
`boto2 <http://boto.cloudhackers.com/en/latest/ref/s3.html>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
import boto
|
|
||||||
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
|
|
||||||
|
|
||||||
|
|
||||||
connection = S3Connection(
|
|
||||||
aws_access_key_id='accessKey1',
|
|
||||||
aws_secret_access_key='verySecretKey1',
|
|
||||||
is_secure=False,
|
|
||||||
port=8000,
|
|
||||||
calling_format=OrdinaryCallingFormat(),
|
|
||||||
host='localhost'
|
|
||||||
)
|
|
||||||
|
|
||||||
connection.create_bucket('mybucket')
|
|
||||||
|
|
||||||
`boto3 <http://boto3.readthedocs.io/en/latest/index.html>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
Client integration
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
import boto3
|
|
||||||
|
|
||||||
client = boto3.client(
|
|
||||||
's3',
|
|
||||||
aws_access_key_id='accessKey1',
|
|
||||||
aws_secret_access_key='verySecretKey1',
|
|
||||||
endpoint_url='http://localhost:8000'
|
|
||||||
)
|
|
||||||
|
|
||||||
lists = client.list_buckets()
|
|
||||||
|
|
||||||
Full integration (with object mapping)
|
|
||||||
|
|
||||||
.. code:: python
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from botocore.utils import fix_s3_host
|
|
||||||
import boto3
|
|
||||||
|
|
||||||
os.environ['AWS_ACCESS_KEY_ID'] = "accessKey1"
|
|
||||||
os.environ['AWS_SECRET_ACCESS_KEY'] = "verySecretKey1"
|
|
||||||
|
|
||||||
s3 = boto3.resource(service_name='s3', endpoint_url='http://localhost:8000')
|
|
||||||
s3.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)
|
|
||||||
|
|
||||||
for bucket in s3.buckets.all():
|
|
||||||
print(bucket.name)
|
|
||||||
|
|
||||||
PHP
|
|
||||||
~~~
|
|
||||||
|
|
||||||
You should force path-style requests even though the v3 SDK advertises that it does so by default.
|
|
||||||
|
|
||||||
`AWS PHP SDK v3 <https://docs.aws.amazon.com/aws-sdk-php/v3/guide>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. code:: php
|
|
||||||
|
|
||||||
use Aws\S3\S3Client;
|
|
||||||
|
|
||||||
$client = S3Client::factory([
|
|
||||||
'region' => 'us-east-1',
|
|
||||||
'version' => 'latest',
|
|
||||||
'endpoint' => 'http://localhost:8000',
|
|
||||||
'use_path_style_endpoint' => true,
|
|
||||||
'credentials' => [
|
|
||||||
'key' => 'accessKey1',
|
|
||||||
'secret' => 'verySecretKey1'
|
|
||||||
]
|
|
||||||
]);
|
|
||||||
|
|
||||||
$client->createBucket(array(
|
|
||||||
'Bucket' => 'bucketphp',
|
|
||||||
));
|
|
||||||
|
|
||||||
Go
|
|
||||||
~~
|
|
||||||
|
|
||||||
`AWS Go SDK <https://github.com/aws/aws-sdk-go>`__
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
.. code:: go
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
|
||||||
"github.com/aws/aws-sdk-go/service/s3"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
os.Setenv("AWS_ACCESS_KEY_ID", "accessKey1")
|
|
||||||
os.Setenv("AWS_SECRET_ACCESS_KEY", "verySecretKey1")
|
|
||||||
endpoint := "http://localhost:8000"
|
|
||||||
timeout := time.Duration(10) * time.Second
|
|
||||||
sess := session.Must(session.NewSession())
|
|
||||||
|
|
||||||
// Create a context with a timeout that will abort the upload if it takes
|
|
||||||
// more than the passed in timeout.
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
svc := s3.New(sess, &aws.Config{
|
|
||||||
Region: aws.String(endpoints.UsEast1RegionID),
|
|
||||||
Endpoint: &endpoint,
|
|
||||||
})
|
|
||||||
|
|
||||||
out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
} else {
|
|
||||||
fmt.Println(out)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,24 +0,0 @@
|
||||||
Contributing
|
|
||||||
============
|
|
||||||
|
|
||||||
Need help?
|
|
||||||
----------
|
|
||||||
We're always glad to help out. Simply open a
|
|
||||||
`GitHub issue <https://github.com/scality/S3/issues>`__ and we'll give you
|
|
||||||
insight. If what you want is not available, and if you're willing to help us
|
|
||||||
out, we'll be happy to welcome you in the team, whether for a small fix or for
|
|
||||||
a larger feature development. Thanks for your interest!
|
|
||||||
|
|
||||||
Got an idea? Get started!
|
|
||||||
-------------------------
|
|
||||||
In order to contribute, please follow the `Contributing
|
|
||||||
Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__.
|
|
||||||
If anything is unclear to you, reach out to us on
|
|
||||||
`forum <https://forum.zenko.io/>`__ or via a GitHub issue.
|
|
||||||
|
|
||||||
Don't write code? There are other ways to help!
|
|
||||||
-----------------------------------------------
|
|
||||||
We're always eager to learn about our users' stories. If you can't contribute
|
|
||||||
code, but would love to help us, please shoot us an email at zenko@scality.com,
|
|
||||||
and tell us what our software enables you to do! Thanks for your time!
|
|
||||||
|
|
371
docs/DOCKER.rst
|
@ -1,371 +0,0 @@
|
||||||
Docker
|
|
||||||
======
|
|
||||||
|
|
||||||
.. _environment-variables:
|
|
||||||
|
|
||||||
Environment Variables
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
S3DATA
|
|
||||||
~~~~~~
|
|
||||||
|
|
||||||
S3DATA=multiple
|
|
||||||
^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
This variable enables running CloudServer with multiple data backends, defined
|
|
||||||
as regions.
|
|
||||||
|
|
||||||
For multiple data backends, a custom locationConfig.json file is required.
|
|
||||||
This file enables you to set custom regions. You must provide associated
|
|
||||||
restEndpoints for each custom region in config.json.
|
|
||||||
|
|
||||||
`Learn more about multiple-backend configurations <GETTING_STARTED.html#location-configuration>`__
|
|
||||||
|
|
||||||
If you are using Scality RING endpoints, refer to your customer documentation.
|
|
||||||
|
|
||||||
Running CloudServer with an AWS S3-Hosted Backend
|
|
||||||
"""""""""""""""""""""""""""""""""""""""""""""""""
|
|
||||||
|
|
||||||
To run CloudServer with an S3 AWS backend, add a new section to the
|
|
||||||
``locationConfig.json`` file with the ``aws_s3`` location type:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
(...)
|
|
||||||
"awsbackend": {
|
|
||||||
"type": "aws_s3",
|
|
||||||
"details": {
|
|
||||||
"awsEndpoint": "s3.amazonaws.com",
|
|
||||||
"bucketName": "yourawss3bucket",
|
|
||||||
"bucketMatch": true,
|
|
||||||
"credentialsProfile": "aws_hosted_profile"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
(...)
|
|
||||||
|
|
||||||
Edit your AWS credentials file to enable your preferred command-line tool.
|
|
||||||
This file must mention credentials for all backends in use. You can use
|
|
||||||
several profiles if multiple profiles are configured.
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
[default]
|
|
||||||
aws_access_key_id=accessKey1
|
|
||||||
aws_secret_access_key=verySecretKey1
|
|
||||||
[aws_hosted_profile]
|
|
||||||
aws_access_key_id={{YOUR_ACCESS_KEY}}
|
|
||||||
aws_secret_access_key={{YOUR_SECRET_KEY}}
|
|
||||||
|
|
||||||
As with locationConfig.json, the AWS credentials file must be mounted at
|
|
||||||
run time: ``-v ~/.aws/credentials:/root/.aws/credentials`` on Unix-like
|
|
||||||
systems (Linux, OS X, etc.), or
|
|
||||||
``-v C:\Users\USERNAME\.aws\credentials:/root/.aws/credentials`` on Windows
|
|
||||||
|
|
||||||
.. note:: One account cannot copy to another account with a source and
|
|
||||||
destination on real AWS unless the account associated with the
|
|
||||||
accessKey/secretKey pairs used for the destination bucket has source
|
|
||||||
bucket access privileges. To enable this, update ACLs directly on AWS.
|
|
||||||
|
|
||||||
S3BACKEND
|
|
||||||
~~~~~~~~~
|
|
||||||
|
|
||||||
S3BACKEND=file
|
|
||||||
^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
For stored file data to persist, you must mount Docker volumes
|
|
||||||
for both data and metadata. See :ref:`In Production with a Docker-Hosted CloudServer <in-production-w-a-Docker-hosted-cloudserver>`
|
|
||||||
|
|
||||||
S3BACKEND=mem
|
|
||||||
^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
This is ideal for testing: no data remains after the container is shut down.
|
|
||||||
|
|
||||||
ENDPOINT
|
|
||||||
~~~~~~~~
|
|
||||||
|
|
||||||
This variable specifies the endpoint. To direct CloudServer requests to
|
|
||||||
new.host.com, for example, specify the endpoint with:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -p 8000:8000 -e ENDPOINT=new.host.com zenko/cloudserver
|
|
||||||
|
|
||||||
.. note:: On Unix-like systems (Linux, OS X, etc.) edit /etc/hosts
|
|
||||||
to associate 127.0.0.1 with new.host.com.
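For example (assuming the ``new.host.com`` endpoint used above):

.. code-block:: shell

    $ echo "127.0.0.1 new.host.com" | sudo tee -a /etc/hosts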
|
|
||||||
|
|
||||||
REMOTE_MANAGEMENT_DISABLE
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
CloudServer is a part of `Zenko <https://www.zenko.io/>`__. When CloudServer runs standalone, it still tries to connect to Orbit (the browser-based graphical user interface for Zenko) by default.
|
|
||||||
|
|
||||||
Setting this variable to true (or 1) disables the automatic Orbit management and defaults to accessKey1 and verySecretKey1 for credentials:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -p 8000:8000 -e REMOTE_MANAGEMENT_DISABLE=1 zenko/cloudserver
|
|
||||||
|
|
||||||
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
These variables specify authentication credentials for an account named
|
|
||||||
“CustomAccount”.
|
|
||||||
|
|
||||||
Set account credentials for multiple accounts by editing conf/authdata.json
|
|
||||||
(see below for further details). To specify one set for personal use, set these
|
|
||||||
environment variables:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey \
|
|
||||||
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey zenko/cloudserver
|
|
||||||
|
|
||||||
.. note:: These variables take precedence over the contents of the authdata.json
|
|
||||||
file, which is then ignored.
|
|
||||||
|
|
||||||
.. note:: The ACCESS_KEY and SECRET_KEY environment variables are
|
|
||||||
deprecated.
|
|
||||||
|
|
||||||
LOG\_LEVEL
|
|
||||||
~~~~~~~~~~
|
|
||||||
|
|
||||||
This variable changes the log level. There are three levels: info, debug,
|
|
||||||
and trace. The default is info. Debug provides more detailed logs, and trace
|
|
||||||
provides the most detailed logs.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -p 8000:8000 -e LOG_LEVEL=trace zenko/cloudserver
|
|
||||||
|
|
||||||
SSL
|
|
||||||
~~~
|
|
||||||
|
|
||||||
When set to true, this variable runs CloudServer with SSL.
|
|
||||||
|
|
||||||
If SSL is set to true:
|
|
||||||
|
|
||||||
* The ENDPOINT environment variable must also be specified.
|
|
||||||
|
|
||||||
* On Unix-like systems (Linux, OS X, etc.), 127.0.0.1 must be associated with
|
|
||||||
<YOUR_ENDPOINT> in /etc/hosts.
|
|
||||||
|
|
||||||
.. Warning:: Self-signed certs with a CA generated within the container are
|
|
||||||
suitable for testing purposes only. Clients cannot trust them, and they may
|
|
||||||
disappear altogether on a container upgrade. The best security practice for
|
|
||||||
production environments is to use an extra container, such as
|
|
||||||
haproxy/nginx/stunnel, for SSL/TLS termination and to pull certificates
|
|
||||||
from a mounted volume, limiting what an exploit on either component
|
|
||||||
can expose.
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT> \
|
|
||||||
zenko/cloudserver
|
|
||||||
|
|
||||||
For more information about using CloudServer with SSL, see `Using SSL <GETTING_STARTED.html#Using SSL>`__
|
|
||||||
|
|
||||||
LISTEN\_ADDR
|
|
||||||
~~~~~~~~~~~~
|
|
||||||
|
|
||||||
This variable causes CloudServer and its data and metadata components to
|
|
||||||
listen on the specified address. This allows starting the data or metadata
|
|
||||||
servers as standalone services, for example.
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0
|
|
||||||
scality/s3server yarn run start_dataserver
|
|
||||||
|
|
||||||
|
|
||||||
DATA\_HOST and METADATA\_HOST
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
These variables configure the data and metadata servers to use,
|
|
||||||
usually when they are running on another host and only starting the stateless
|
|
||||||
Zenko CloudServer.
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -e DATA_HOST=cloudserver-data \
|
|
||||||
-e METADATA_HOST=cloudserver-metadata zenko/cloudserver yarn run start_s3server
|
|
||||||
|
|
||||||
REDIS\_HOST
|
|
||||||
~~~~~~~~~~~
|
|
||||||
|
|
||||||
Use this variable to connect to the redis cache server on another host than
|
|
||||||
localhost.
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -p 8000:8000 \
|
|
||||||
-e REDIS_HOST=my-redis-server.example.com zenko/cloudserver
|
|
||||||
|
|
||||||
REDIS\_PORT
|
|
||||||
~~~~~~~~~~~
|
|
||||||
|
|
||||||
Use this variable to connect to the Redis cache server on a port other
|
|
||||||
than the default 6379.
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name cloudserver -p 8000:8000 \
|
|
||||||
-e REDIS_PORT=6379 zenko/cloudserver
|
|
||||||
|
|
||||||
.. _tunables-and-setup-tips:
|
|
||||||
|
|
||||||
Tunables and Setup Tips
|
|
||||||
-----------------------
|
|
||||||
|
|
||||||
Using Docker Volumes
|
|
||||||
~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
CloudServer runs with a file backend by default, meaning that data is
|
|
||||||
stored inside the CloudServer’s Docker container.
|
|
||||||
|
|
||||||
For data and metadata to persist, data and metadata must be hosted in Docker
|
|
||||||
volumes outside the CloudServer’s Docker container. Otherwise, the data
|
|
||||||
and metadata are destroyed when the container is erased.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
|
|
||||||
-p 8000:8000 -d zenko/cloudserver
|
|
||||||
|
|
||||||
This command mounts the ./data host directory to the container
|
|
||||||
at /usr/src/app/localData and the ./metadata host directory to
|
|
||||||
the container at /usr/src/app/localMetaData.
|
|
||||||
|
|
||||||
.. tip:: These host directories can be mounted to any accessible mount
|
|
||||||
point, such as /mnt/data and /mnt/metadata, for example.
|
|
||||||
|
|
||||||
Adding, Modifying, or Deleting Accounts or Credentials
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
1. Create a customized authdata.json file locally based on /conf/authdata.json.
|
|
||||||
|
|
||||||
2. Use `Docker volumes <https://docs.docker.com/storage/volumes/>`__
|
|
||||||
to override the default ``authdata.json`` through a Docker file mapping.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d \
|
|
||||||
zenko/cloudserver
|
|
||||||
|
|
||||||
Specifying a Host Name
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
To specify a host name (for example, s3.domain.name), provide your own
|
|
||||||
`config.json <https://github.com/scality/cloudserver/blob/master/config.json>`__
|
|
||||||
file using `Docker volumes <https://docs.docker.com/storage/volumes/>`__.
|
|
||||||
|
|
||||||
First, add a new key-value pair to the restEndpoints section of your
|
|
||||||
config.json. Make the key the host name you want, and the value the default
|
|
||||||
location\_constraint for this endpoint.
|
|
||||||
|
|
||||||
For example, ``cloudserver.example.com`` is mapped to ``us-east-1``, which is one
|
|
||||||
of the ``location_constraints`` listed in your locationConfig.json file
|
|
||||||
`here <https://github.com/scality/S3/blob/master/locationConfig.json>`__.
|
|
||||||
|
|
||||||
For more information about location configuration, see:
|
|
||||||
`GETTING STARTED <GETTING_STARTED.html#location-configuration>`__
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
"restEndpoints": {
|
|
||||||
"localhost": "file",
|
|
||||||
"127.0.0.1": "file",
|
|
||||||
...
|
|
||||||
"cloudserver.example.com": "us-east-1"
|
|
||||||
},
|
|
||||||
|
|
||||||
Next, run CloudServer using a `Docker volume
|
|
||||||
<https://docs.docker.com/engine/tutorials/dockervolumes/>`__:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d zenko/cloudserver
|
|
||||||
|
|
||||||
The local ``config.json`` file overrides the default one through a Docker
|
|
||||||
file mapping.
|
|
||||||
|
|
||||||
Running as an Unprivileged User
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
CloudServer runs as root by default.
|
|
||||||
|
|
||||||
To change this, modify the Dockerfile and specify a user before the
|
|
||||||
entry point.
|
|
||||||
|
|
||||||
The user must exist within the container, and must own the
|
|
||||||
/usr/src/app directory for CloudServer to run.
|
|
||||||
|
|
||||||
For example, the following Dockerfile lines can be modified:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
...
|
|
||||||
&& groupadd -r -g 1001 scality \
|
|
||||||
&& useradd -u 1001 -g 1001 -d /usr/src/app -r scality \
|
|
||||||
&& chown -R scality:scality /usr/src/app
|
|
||||||
|
|
||||||
...
|
|
||||||
|
|
||||||
USER scality
|
|
||||||
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
|
|
||||||
|
|
||||||
.. _continuous-integration-with-docker-hosted-cloudserver:
|
|
||||||
|
|
||||||
Continuous Integration with a Docker-Hosted CloudServer
|
|
||||||
-------------------------------------------------------
|
|
||||||
|
|
||||||
When you start the Docker CloudServer image, you can adjust the
|
|
||||||
configuration of the CloudServer instance by passing one or more
|
|
||||||
environment variables on the ``docker run`` command line.
|
|
||||||
|
|
||||||
|
|
||||||
To run CloudServer for CI with custom locations (one in-memory,
|
|
||||||
one hosted on AWS), and custom credentials mounted:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run --name CloudServer -p 8000:8000 \
|
|
||||||
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
|
|
||||||
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
|
|
||||||
-v ~/.aws/credentials:/root/.aws/credentials \
|
|
||||||
-e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver
|
|
||||||
|
|
||||||
To run CloudServer for CI with custom locations, (one in-memory, one
|
|
||||||
hosted on AWS, and one file), and custom credentials `set as environment
|
|
||||||
variables <GETTING_STARTED.html#scality-access-key-id-and-scality-secret-access-key>`__):
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run --name CloudServer -p 8000:8000 \
|
|
||||||
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
|
|
||||||
-v ~/.aws/credentials:/root/.aws/credentials \
|
|
||||||
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
|
|
||||||
-e SCALITY_ACCESS_KEY_ID=accessKey1 \
|
|
||||||
-e SCALITY_SECRET_ACCESS_KEY=verySecretKey1 \
|
|
||||||
-e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver
|
|
||||||
|
|
||||||
.. _in-production-w-a-Docker-hosted-cloudserver:
|
|
||||||
|
|
||||||
In Production with a Docker-Hosted CloudServer
|
|
||||||
----------------------------------------------
|
|
||||||
|
|
||||||
Because data must persist in production settings, CloudServer offers
|
|
||||||
multiple-backend capabilities. This requires a custom endpoint
|
|
||||||
and custom credentials for local storage.
|
|
||||||
|
|
||||||
Customize these with:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ docker run -d --name CloudServer \
|
|
||||||
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
|
|
||||||
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
|
|
||||||
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
|
|
||||||
-v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple \
|
|
||||||
-e ENDPOINT=custom.endpoint.com \
|
|
||||||
-p 8000:8000 -d zenko/cloudserver \
|
|
|
@ -1,436 +0,0 @@
|
||||||
Getting Started
|
|
||||||
===============
|
|
||||||
|
|
||||||
.. figure:: ../res/scality-cloudserver-logo.png
|
|
||||||
:alt: Zenko CloudServer logo
|
|
||||||
|
|
||||||
|
|
||||||
Dependencies
|
|
||||||
------------
|
|
||||||
|
|
||||||
Building and running the Scality Zenko CloudServer requires node.js 10.x and
|
|
||||||
yarn v1.17.x. Up-to-date versions can be found at
|
|
||||||
`Nodesource <https://github.com/nodesource/distributions>`__.
|
|
||||||
|
|
||||||
Installation
|
|
||||||
------------
|
|
||||||
|
|
||||||
1. Clone the source code
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ git clone https://github.com/scality/cloudserver.git
|
|
||||||
|
|
||||||
2. Go to the cloudserver directory and use yarn to install the js dependencies.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ cd cloudserver
|
|
||||||
$ yarn install
|
|
||||||
|
|
||||||
Running CloudServer with a File Backend
|
|
||||||
---------------------------------------
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ yarn start
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000. Two additional ports, 9990
|
|
||||||
and 9991, are also open locally for internal transfer of metadata and
|
|
||||||
data, respectively.
|
|
||||||
|
|
||||||
The default access key is accessKey1. The secret key is verySecretKey1.
|
|
||||||
|
|
||||||
By default, metadata files are saved in the localMetadata directory and
|
|
||||||
data files are saved in the localData directory in the local ./cloudserver
|
|
||||||
directory. These directories are pre-created within the repository. To
|
|
||||||
save data or metadata in different locations, you must specify them using
|
|
||||||
absolute paths. Thus, when starting the server:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ mkdir -m 700 $(pwd)/myFavoriteDataPath
|
|
||||||
$ mkdir -m 700 $(pwd)/myFavoriteMetadataPath
|
|
||||||
$ export S3DATAPATH="$(pwd)/myFavoriteDataPath"
|
|
||||||
$ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
|
|
||||||
$ yarn start
|
|
||||||
|
|
||||||
Running CloudServer with Multiple Data Backends
|
|
||||||
-----------------------------------------------
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ export S3DATA='multiple'
|
|
||||||
$ yarn start
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000.
|
|
||||||
|
|
||||||
The default access key is accessKey1. The secret key is verySecretKey1.
|
|
||||||
|
|
||||||
With multiple backends, you can choose where each object is saved by setting
|
|
||||||
the following header with a location constraint in a PUT request:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
|
|
||||||
|
|
||||||
If no header is sent with a PUT object request, the bucket’s location
|
|
||||||
constraint determines where the data is saved. If the bucket has no
|
|
||||||
location constraint, the endpoint of the PUT request determines location.
|
|
||||||
|
|
||||||
See the Configuration_ section to set location constraints.
|
|
||||||
|
|
||||||
Run CloudServer with an In-Memory Backend
|
|
||||||
-----------------------------------------
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ yarn run mem_backend
|
|
||||||
|
|
||||||
This starts a Zenko CloudServer on port 8000.
|
|
||||||
|
|
||||||
The default access key is accessKey1. The secret key is verySecretKey1.
|
|
||||||
|
|
||||||
Run CloudServer with Vault User Management
|
|
||||||
------------------------------------------
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
export S3VAULT=vault
|
|
||||||
yarn start
|
|
||||||
|
|
||||||
Note: Vault is proprietary and must be accessed separately.
|
|
||||||
This starts a Zenko CloudServer using Vault for user management.
|
|
||||||
|
|
||||||
Run CloudServer for Continuous Integration Testing or in Production with Docker
|
|
||||||
-------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
Run Cloudserver with `DOCKER <DOCKER.html>`__
|
|
||||||
|
|
||||||
Testing
|
|
||||||
~~~~~~~
|
|
||||||
|
|
||||||
Run unit tests with the command:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ yarn test
|
|
||||||
|
|
||||||
Run multiple-backend unit tests with:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ CI=true S3DATA=multiple yarn start
|
|
||||||
$ yarn run multiple_backend_test
|
|
||||||
|
|
||||||
Run the linter with:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ yarn run lint
|
|
||||||
|
|
||||||
Running Functional Tests Locally
|
|
||||||
--------------------------------
|
|
||||||
|
|
||||||
To pass AWS and Azure backend tests locally, modify
|
|
||||||
tests/locationConfig/locationConfigTests.json so that ``awsbackend``
|
|
||||||
specifies the bucket name of a bucket you have access to based on your
|
|
||||||
credentials, and modify ``azurebackend`` with details for your Azure account.
|
|
||||||
|
|
||||||
The test suite requires two additional tools, **s3cmd** and **Redis**, to be
|
|
||||||
installed in the environment the tests are running in.
|
|
||||||
|
|
||||||
1. Install `s3cmd <http://s3tools.org/download>`__
|
|
||||||
|
|
||||||
2. Install `redis <https://redis.io/download>`__ and start Redis.
|
|
||||||
|
|
||||||
3. Add localCache section to ``config.json``:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
"localCache": {
|
|
||||||
"host": REDIS_HOST,
|
|
||||||
"port": REDIS_PORT
|
|
||||||
}
|
|
||||||
|
|
||||||
where ``REDIS_HOST`` is the Redis instance IP address (``"127.0.0.1"``
|
|
||||||
if Redis is running locally) and ``REDIS_PORT`` is the Redis instance
|
|
||||||
port (``6379`` by default)
|
|
||||||
|
|
||||||
4. Add the following to the local /etc/hosts file:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com
|
|
||||||
|
|
||||||
5. Start Zenko CloudServer in memory and run the functional tests:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ CI=true yarn run mem_backend
|
|
||||||
$ CI=true yarn run ft_test
|
|
||||||
|
|
||||||
.. _Configuration:
|
|
||||||
|
|
||||||
Configuration
|
|
||||||
-------------
|
|
||||||
|
|
||||||
There are three configuration files for Zenko CloudServer:
|
|
||||||
|
|
||||||
* ``conf/authdata.json``, for authentication.
|
|
||||||
|
|
||||||
* ``locationConfig.json``, to configure where data is saved.
|
|
||||||
|
|
||||||
* ``config.json``, for general configuration options.
|
|
||||||
|
|
||||||
.. _location-configuration:
|
|
||||||
|
|
||||||
Location Configuration
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
You must specify at least one locationConstraint in locationConfig.json
|
|
||||||
(or leave it as pre-configured).
|
|
||||||
|
|
||||||
You must also specify 'us-east-1' as a locationConstraint. If you put a
|
|
||||||
bucket to an unknown endpoint and do not specify a locationConstraint in
|
|
||||||
the PUT bucket call, us-east-1 is used.
|
|
||||||
|
|
||||||
For instance, the following locationConstraint saves data sent to
|
|
||||||
``myLocationConstraint`` to the file backend:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
"myLocationConstraint": {
|
|
||||||
"type": "file",
|
|
||||||
"legacyAwsBehavior": false,
|
|
||||||
"details": {}
|
|
||||||
},
|
|
||||||
|
|
||||||
Each locationConstraint must include the ``type``, ``legacyAwsBehavior``,
|
|
||||||
and ``details`` keys. ``type`` indicates which backend is used for that
|
|
||||||
region. Supported backends are mem, file, and scality. ``legacyAwsBehavior``
|
|
||||||
indicates whether the region behaves the same as the AWS S3 'us-east-1'
|
|
||||||
region. If the locationConstraint type is ``scality``, ``details`` must
|
|
||||||
contain connector information for sproxyd. If the locationConstraint type
|
|
||||||
is ``mem`` or ``file``, ``details`` must be empty.
|
|
||||||
|
|
||||||
Once locationConstraints is set in locationConfig.json, specify a default
|
|
||||||
locationConstraint for each endpoint.
|
|
||||||
|
|
||||||
For instance, the following sets the ``localhost`` endpoint to the
|
|
||||||
``myLocationConstraint`` data backend defined above:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
"restEndpoints": {
|
|
||||||
"localhost": "myLocationConstraint"
|
|
||||||
},
|
|
||||||
|
|
||||||
To use an endpoint other than localhost for Zenko CloudServer, the endpoint
|
|
||||||
must be listed in ``restEndpoints``. Otherwise, if the server is running
|
|
||||||
with a:
|
|
||||||
|
|
||||||
* **file backend**: The default location constraint is ``file``
|
|
||||||
* **memory backend**: The default location constraint is ``mem``
|
|
||||||
|
|
||||||
Endpoints
|
|
||||||
~~~~~~~~~
|
|
||||||
|
|
||||||
The Zenko CloudServer supports endpoints that are rendered in either:
|
|
||||||
|
|
||||||
* path style: http://myhostname.com/mybucket or
|
|
||||||
* hosted style: http://mybucket.myhostname.com
|
|
||||||
|
|
||||||
However, if an IP address is specified for the host, hosted-style requests
|
|
||||||
cannot reach the server. Use path-style requests in that case. For example,
|
|
||||||
if you are using the AWS SDK for JavaScript, instantiate your client like this:
|
|
||||||
|
|
||||||
.. code:: js
|
|
||||||
|
|
||||||
const s3 = new aws.S3({
|
|
||||||
endpoint: 'http://127.0.0.1:8000',
|
|
||||||
s3ForcePathStyle: true,
|
|
||||||
});
|
|
||||||
|
|
||||||
Setting Your Own Access and Secret Key Pairs
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Credentials can be set for many accounts by editing ``conf/authdata.json``,
|
|
||||||
but use the ``SCALITY_ACCESS_KEY_ID`` and ``SCALITY_SECRET_ACCESS_KEY``
|
|
||||||
environment variables to specify your own credentials.
|
|
||||||
|
|
||||||
_`scality-access-key-id-and-scality-secret-access-key`
|
|
||||||
|
|
||||||
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
These variables specify authentication credentials for an account named
|
|
||||||
“CustomAccount”.
|
|
||||||
|
|
||||||
.. note:: When these variables are set, anything in the ``authdata.json`` file is ignored.
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$ SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey yarn start
|
|
||||||
|
|
||||||
.. _Using_SSL:
|
|
||||||
|
|
||||||
Using SSL
|
|
||||||
~~~~~~~~~
|
|
||||||
|
|
||||||
To use https with your local CloudServer, you must set up
|
|
||||||
SSL certificates.
|
|
||||||
|
|
||||||
1. Deploy CloudServer using `our DockerHub page
|
|
||||||
<https://hub.docker.com/r/zenko/cloudserver/>`__ (run it with a file
|
|
||||||
backend).
|
|
||||||
|
|
||||||
.. Note:: If Docker is not installed locally, follow the
|
|
||||||
`instructions to install it for your distribution
|
|
||||||
<https://docs.docker.com/engine/installation/>`__
|
|
||||||
|
|
||||||
2. Update the CloudServer container’s config
|
|
||||||
|
|
||||||
Add your certificates to your container. To do this,
|
|
||||||
#. exec inside the CloudServer container.
|
|
||||||
|
|
||||||
#. Run ``$> docker ps`` to find the container’s ID (the corresponding
|
|
||||||
image name is ``scality/cloudserver``).
|
|
||||||
|
|
||||||
#. Copy the corresponding container ID (``894aee038c5e`` in the present
|
|
||||||
example), and run:
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$> docker exec -it 894aee038c5e bash
|
|
||||||
|
|
||||||
This puts you inside your container, using an interactive terminal.
|
|
||||||
|
|
||||||
3. Generate the SSL key and certificates. The paths where the different
|
|
||||||
files are stored are defined after the ``-out`` option in each of the
|
|
||||||
following commands.
|
|
||||||
|
|
||||||
#. Generate a private key for your certificate signing request (CSR):
|
|
||||||
|
|
||||||
.. code-block:: shell
|
|
||||||
|
|
||||||
$> openssl genrsa -out ca.key 2048
|
|
||||||
|
|
||||||
#. Generate a self-signed certificate for your local certificate
|
|
||||||
authority (CA):
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$> openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=scality.test"
|
|
||||||
|
|
||||||
#. Generate a key for the CloudServer:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$> openssl genrsa -out test.key 2048
|
|
||||||
|
|
||||||
#. Generate a CSR for CloudServer:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$> openssl req -new -key test.key -out test.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.scality.test"
|
|
||||||
|
|
||||||
#. Generate a certificate for CloudServer signed by the local CA:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$> openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 99999 -sha256
|
|
||||||
|
|
||||||
4. Update Zenko CloudServer ``config.json``. Add a ``certFilePaths``
|
|
||||||
section to ``./config.json`` with appropriate paths:
|
|
||||||
|
|
||||||
.. code:: json
|
|
||||||
|
|
||||||
"certFilePaths": {
|
|
||||||
"key": "./test.key",
|
|
||||||
"cert": "./test.crt",
|
|
||||||
"ca": "./ca.crt"
|
|
||||||
}
|
|
||||||
|
|
||||||
5. Run your container with the new config.
|
|
||||||
|
|
||||||
#. Exit the container by running ``$> exit``.
|
|
||||||
|
|
||||||
#. Restart the container with ``$> docker restart cloudserver``.
|
|
||||||
|
|
||||||
6. Update the host configuration by adding s3.scality.test
|
|
||||||
to /etc/hosts:
|
|
||||||
|
|
||||||
.. code:: bash
|
|
||||||
|
|
||||||
127.0.0.1 localhost s3.scality.test
|
|
||||||
|
|
||||||
7. Copy the local certificate authority (ca.crt in step 4) from your
|
|
||||||
container. Choose the path to save this file to (in the present
|
|
||||||
example, ``/root/ca.crt``), and run:
|
|
||||||
|
|
||||||
.. code:: shell
|
|
||||||
|
|
||||||
$> docker cp 894aee038c5e:/usr/src/app/ca.crt /root/ca.crt
|
|
||||||
|
|
||||||
.. note:: Your container ID will be different, and your path to
|
|
||||||
ca.crt may be different.
|
|
||||||
|
|
||||||
Test the Config
|
|
||||||
^^^^^^^^^^^^^^^
|
|
||||||
|
|
||||||
If aws-sdk is not installed, run ``$> yarn add aws-sdk``.
|
|
||||||
|
|
||||||
Paste the following script into a file named "test.js":
|
|
||||||
|
|
||||||
.. code:: js
|
|
||||||
|
|
||||||
const AWS = require('aws-sdk');
|
|
||||||
const fs = require('fs');
|
|
||||||
const https = require('https');
|
|
||||||
|
|
||||||
const httpOptions = {
|
|
||||||
agent: new https.Agent({
|
|
||||||
// path on your host of the self-signed certificate
|
|
||||||
ca: fs.readFileSync('./ca.crt', 'ascii'),
|
|
||||||
}),
|
|
||||||
};
|
|
||||||
|
|
||||||
const s3 = new AWS.S3({
|
|
||||||
httpOptions,
|
|
||||||
accessKeyId: 'accessKey1',
|
|
||||||
secretAccessKey: 'verySecretKey1',
|
|
||||||
// The endpoint must be s3.scality.test, else SSL will not work
|
|
||||||
endpoint: 'https://s3.scality.test:8000',
|
|
||||||
sslEnabled: true,
|
|
||||||
// With this setup, you must use path-style bucket access
|
|
||||||
s3ForcePathStyle: true,
|
|
||||||
});
|
|
||||||
|
|
||||||
const bucket = 'cocoriko';
|
|
||||||
|
|
||||||
s3.createBucket({ Bucket: bucket }, err => {
|
|
||||||
if (err) {
|
|
||||||
return console.log('err createBucket', err);
|
|
||||||
}
|
|
||||||
return s3.deleteBucket({ Bucket: bucket }, err => {
|
|
||||||
if (err) {
|
|
||||||
return console.log('err deleteBucket', err);
|
|
||||||
}
|
|
||||||
return console.log('SSL is cool!');
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
Now run this script with:
|
|
||||||
|
|
||||||
.. code::
|
|
||||||
|
|
||||||
$> nodejs test.js
|
|
||||||
|
|
||||||
On success, the script outputs ``SSL is cool!``.
|
|
||||||
|
|
||||||
|
|
||||||
.. |CircleCI| image:: https://circleci.com/gh/scality/S3.svg?style=svg
|
|
||||||
:target: https://circleci.com/gh/scality/S3
|
|
||||||
.. |Scality CI| image:: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
|
|
||||||
:target: http://ci.ironmann.io/gh/scality/S3
|
|
|
@ -1,69 +0,0 @@
|
||||||
# Get Bucket Version 2 Documentation
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
This feature implements version 2 of the GET Bucket (List Objects)
|
|
||||||
operation, following AWS specifications
|
|
||||||
(see https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html).
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
The user must have READ access to the bucket.
|
|
||||||
|
|
||||||
## Design
|
|
||||||
|
|
||||||
### Request
|
|
||||||
|
|
||||||
The `delimiter`, `encoding-type`, `max-keys`, and `prefix` request parameters
|
|
||||||
from GET Bucket v1 remain unchanged.
|
|
||||||
In order to specify v2, the parameter `list-type` must be included and
|
|
||||||
set to `2`.
|
|
||||||
The `marker` v1 parameter's functionality has been split in two and replaced by
|
|
||||||
`start-after` and `continuation-token` in v2. The `start-after` parameter is
|
|
||||||
a specific object key after which the API will return key names. It is only
|
|
||||||
valid in the first GET request. If both the `start-after` and
|
|
||||||
`continuation-token` parameters are included in a request, the API will
|
|
||||||
ignore the `start-after` parameter in favor of the `continuation-token`.
|
|
||||||
If the GET Bucket v2 response is truncated, a `NextContinuationToken` will
|
|
||||||
also be included. To list the next set of objects, the `NextContinuationToken`
|
|
||||||
can be used as the `continuation-token` in the next request. The continuation
|
|
||||||
token is an obfuscated string of 57 characters that CloudServer understands and
|
|
||||||
interprets.
|
|
||||||
By default, the v2 response does not include object owner information. To
|
|
||||||
include owner information like the default v1 response, use the `fetch-owner`
|
|
||||||
request parameter set to `true`.
|
|
||||||
|
|
||||||
### Response
|
|
||||||
|
|
||||||
The GET Bucket v1 and v2 responses are largely the same, with only a few changes.
|
|
||||||
The `NextMarker` v1 parameter has been replaced by the
|
|
||||||
`NextContinuationToken`. The `NextContinuationToken` is included with any
|
|
||||||
truncated response, even if no delimiter is sent in the request. Its value is an
|
|
||||||
obfuscated string that can be passed as the `continuation-token` in the next
|
|
||||||
request, which will be interpreted by CloudServer.
|
|
||||||
The `KeyCount` parameter is returned in every response. Its value is the
|
|
||||||
number of keys included in the response. It is always less than or equal to
|
|
||||||
the `MaxKeys` value.
|
|
||||||
If the `start-after` or `continuation-token` parameter is used in the
|
|
||||||
request, it is also included in the response.
|
|
||||||
By default, the v2 response does not include object owner information, unlike
|
|
||||||
the v1 response. See the `Request` section for including it.
|
|
||||||
|
|
||||||
### Continuation Token
|
|
||||||
|
|
||||||
An example continuation token:
|
|
||||||
|
|
||||||
```
|
|
||||||
NextContinuationToken: '1bunC4s+crlZNAAbKUGBLyajJUQKp22TOdUR6/01snxD2cZtjJD0ugA=='
|
|
||||||
```
|
|
||||||
|
|
||||||
In order to generate a comparable token, CloudServer uses base64 encoding to
|
|
||||||
obfuscate the key name of the next object to be listed.
|
|
||||||
Encoded continuation tokens are similarly decoded in order for listing to
|
|
||||||
continue from the correct object.
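As an illustration of the idea only (the key name below is made up, and the exact token format CloudServer produces may differ), base64 encoding and decoding a key name looks like:

```
echo -n "photos/2021/cat.jpg" | base64
# cGhvdG9zLzIwMjEvY2F0LmpwZw==
echo -n "cGhvdG9zLzIwMjEvY2F0LmpwZw==" | base64 --decode
# photos/2021/cat.jpg
```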
|
|
||||||
|
|
||||||
## Performing Get Bucket V2 Operation
|
|
||||||
|
|
||||||
When performing the GET Bucket V2 operation, if the request is built manually,
|
|
||||||
the parameter `list-type` must be included and set to `2`.
|
|
||||||
Using the AWS cli client, the command becomes `list-objects-v2`.
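For example, against a local CloudServer (the bucket name is a placeholder), a first page and a follow-up page can be requested with:

```
aws --endpoint-url=http://localhost:8000 s3api list-objects-v2 \
    --bucket mybucket --max-keys 2
# reuse the NextContinuationToken returned by the truncated response above
aws --endpoint-url=http://localhost:8000 s3api list-objects-v2 \
    --bucket mybucket --max-keys 2 --continuation-token <NextContinuationToken>
```

The `--start-after` and `--fetch-owner` options map to the corresponding request parameters described above.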
|
|
|
@ -1,565 +0,0 @@
|
||||||
Integrations
|
|
||||||
++++++++++++
|
|
||||||
|
|
||||||
High Availability
|
|
||||||
=================
|
|
||||||
|
|
||||||
`Docker Swarm <https://docs.docker.com/engine/swarm/>`__ is a clustering tool
|
|
||||||
developed by Docker for use with its containers. It can be used to start
|
|
||||||
services, which we define to ensure CloudServer's continuous availability to
|
|
||||||
end users. A swarm defines a manager and *n* workers among *n* + 1 servers.
|
|
||||||
|
|
||||||
This tutorial shows how to perform a basic setup with three servers, which
|
|
||||||
provides strong service resiliency, while remaining easy to use and
|
|
||||||
maintain. We will use NFS through Docker to share data and
|
|
||||||
metadata between the different servers.
|
|
||||||
|
|
||||||
Sections are labeled **On Server**, **On Clients**, or
|
|
||||||
**On All Machines**, referring respectively to NFS server, NFS clients, or
|
|
||||||
NFS server and clients. In the present example, the server’s IP address is
|
|
||||||
**10.200.15.113** and the client IP addresses are **10.200.15.96** and
|
|
||||||
**10.200.15.97**
|
|
||||||
|
|
||||||
1. Install Docker (on All Machines)
|
|
||||||
|
|
||||||
Docker 17.03.0-ce is used for this tutorial. Docker 1.12.6 and later will
|
|
||||||
likely work, but are not tested.
|
|
||||||
|
|
||||||
* On Ubuntu 14.04
|
|
||||||
Install Docker CE for Ubuntu as `documented at Docker
|
|
||||||
<https://docs.docker.com/install/linux/docker-ce/ubuntu/>`__.
|
|
||||||
Install the aufs dependency as recommended by Docker. The required
|
|
||||||
commands are:
|
|
||||||
|
|
||||||
.. code:: sh
|
|
||||||
|
|
||||||
$> sudo apt-get update
|
|
||||||
$> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
|
|
||||||
$> sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
|
|
||||||
$> curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
|
||||||
$> sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
|
||||||
$> sudo apt-get update
|
|
||||||
$> sudo apt-get install docker-ce
|
|
||||||
|
|
||||||
* On CentOS 7
|
|
||||||
Install Docker CE as `documented at Docker
|
|
||||||
<https://docs.docker.com/install/linux/docker-ce/centos/>`__.
|
|
||||||
The required commands are:
|
|
||||||
|
|
||||||
.. code:: sh
|
|
||||||
|
|
||||||
$> sudo yum install -y yum-utils
|
|
||||||
$> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
|
||||||
$> sudo yum makecache fast
|
|
||||||
$> sudo yum install docker-ce
|
|
||||||
$> sudo systemctl start docker
|
|
||||||
|
|
||||||
2. Install NFS on Client(s)
|
|
||||||
|
|
||||||
NFS clients mount Docker volumes over the NFS server’s shared folders.
|
|
||||||
If the NFS commons are installed, manual mounts are no longer needed.
|
|
||||||
|
|
||||||
* On Ubuntu 14.04
|
|
||||||
|
|
||||||
Install the NFS commons with apt-get:
|
|
||||||
|
|
||||||
.. code:: sh
|
|
||||||
|
|
||||||
$> sudo apt-get install nfs-common
|
|
||||||
|
|
||||||
* On CentOS 7
|
|
||||||
|
|
||||||
Install the NFS utils; then start required services:
|
|
||||||
|
|
||||||
.. code:: sh
|
|
||||||
|
|
||||||
$> yum install nfs-utils
|
|
||||||
$> sudo systemctl enable rpcbind
|
|
||||||
$> sudo systemctl enable nfs-server
|
|
||||||
$> sudo systemctl enable nfs-lock
|
|
||||||
$> sudo systemctl enable nfs-idmap
|
|
||||||
$> sudo systemctl start rpcbind
|
|
||||||
$> sudo systemctl start nfs-server
|
|
||||||
$> sudo systemctl start nfs-lock
|
|
||||||
$> sudo systemctl start nfs-idmap
|
|
||||||
|
|
||||||
3. Install NFS (on Server)
|
|
||||||
|
|
||||||
The NFS server hosts the data and metadata. The package(s) to install on it
|
|
||||||
differs from the package installed on the clients.
|
|
||||||
|
|
||||||
* On Ubuntu 14.04
|
|
||||||
|
|
||||||
Install the NFS server-specific package and the NFS commons:
|
|
||||||
|
|
||||||
.. code:: sh
|
|
||||||
|
|
||||||
$> sudo apt-get install nfs-kernel-server nfs-common
|
|
||||||
|
|
||||||
* On CentOS 7
|
|
||||||
|
|
||||||
Install the NFS utils and start the required services:
|
|
||||||
|
|
||||||
.. code:: sh
|
|
||||||
|
|
||||||
$> yum install nfs-utils
|
|
||||||
$> sudo systemctl enable rpcbind
|
|
||||||
$> sudo systemctl enable nfs-server
|
|
||||||
$> sudo systemctl enable nfs-lock
|
|
||||||
$> sudo systemctl enable nfs-idmap
|
|
||||||
$> sudo systemctl start rpcbind
|
|
||||||
$> sudo systemctl start nfs-server
|
|
||||||
$> sudo systemctl start nfs-lock
|
|
||||||
$> sudo systemctl start nfs-idmap
|
|
||||||
|
|
||||||
   For both distributions:

   #. Choose where the shared data and metadata from the local
      `CloudServer <http://www.zenko.io/cloudserver/>`__ shall be stored (the
      present example uses /var/nfs/data and /var/nfs/metadata). Set permissions
      for these folders for sharing over NFS:

      .. code:: sh

          $> mkdir -p /var/nfs/data /var/nfs/metadata
          $> chmod -R 777 /var/nfs/

   #. The /etc/exports file configures network permissions and r-w-x permissions
      for NFS access. Edit /etc/exports, adding the following lines:

      .. code:: sh

          /var/nfs/data 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
          /var/nfs/metadata 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)

      Ubuntu applies the no\_subtree\_check option by default, so both
      folders are declared with the same permissions, even though they're in
      the same tree.

   #. Export this new NFS table:

      .. code:: sh

          $> sudo exportfs -a

   #. Edit the ``MountFlags`` option in the Docker config in
      /lib/systemd/system/docker.service to enable NFS mounts from Docker
      volumes on other machines:

      .. code:: sh

          MountFlags=shared

   #. Restart the NFS server and Docker daemons to apply these changes.

      * On Ubuntu 14.04

        .. code:: sh

            $> sudo service nfs-kernel-server restart
            $> sudo service docker restart

      * On CentOS 7

        .. code:: sh

            $> sudo systemctl restart nfs-server
            $> sudo systemctl daemon-reload
            $> sudo systemctl restart docker

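   Before moving on, you can optionally verify the exports from the NFS
   server. This quick check is not part of the original procedure; ``exportfs``
   and ``showmount`` ship with the NFS server packages installed above:

   .. code:: sh

       # List the active exports and their options
       $> sudo exportfs -v
       # Ask the server which shares are visible to clients
       $> showmount -e localhost
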
4. Set Up a Docker Swarm

   * On all machines and distributions:

     Set up the Docker volumes to be mounted to the NFS server for CloudServer's
     data and metadata storage. The following commands must be replicated on all
     machines:

     .. code:: sh

         $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
         $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata

     There is no need to ``docker exec`` these volumes to mount them: the
     Docker Swarm manager does this when the Docker service is started.

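     To check that the volumes were created correctly on a given machine, you
     can use the standard Docker volume commands; this is an optional
     verification step, not part of the original tutorial:

     .. code:: sh

         # List the volumes known to this Docker host
         $> docker volume ls
         # Show the NFS options recorded for the data volume
         $> docker volume inspect data
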
   * On a server:

     To start a Docker service on a Docker Swarm cluster, initialize the cluster
     (that is, define a manager), prompt workers/nodes to join in, and then start
     the service.

     Initialize the swarm cluster, and review its response:

     .. code:: sh

         $> docker swarm init --advertise-addr 10.200.15.113

         Swarm initialized: current node (db2aqfu3bzfzzs9b1kfeaglmq) is now a manager.

         To add a worker to this swarm, run the following command:

             docker swarm join \
             --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka \
             10.200.15.113:2377

         To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

   * On clients:

     Copy and paste the command provided by your Docker Swarm init. A successful
     request/response will resemble:

     .. code:: sh

         $> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377

         This node joined a swarm as a worker.

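     To confirm that every machine has joined, you can run ``docker node ls``
     from the manager (the machine where ``docker swarm init`` was run); it
     only works on manager nodes and lists each node with its status and
     availability:

     .. code:: sh

         $> docker node ls
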
Set Up Docker Swarm on Clients on a Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Start the service on the Swarm cluster.

.. code:: sh

    $> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/cloudserver

On a successful installation, ``docker service ls`` returns the following
output:

.. code:: sh

    $> docker service ls
    ID            NAME  MODE        REPLICAS  IMAGE
    ocmggza412ft  s3    replicated  1/1       scality/cloudserver:latest

If the service does not start, consider disabling apparmor/SELinux.

Testing the High-Availability CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

On all machines (client/server) and distributions (Ubuntu and CentOS),
determine where CloudServer is running using ``docker ps``. CloudServer can
operate on any node of the Swarm cluster, manager or worker. When you find
it, you can kill it with ``docker stop <container id>``. It will respawn
on a different node. Now, if one server falls, or if Docker stops
unexpectedly, the end user will still be able to access the local CloudServer.

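A minimal run of this test might look as follows, assuming the service name
``s3`` used above; the container ID will differ on your cluster:

.. code:: sh

    # Find the node currently running the CloudServer task
    $> docker service ps s3
    # On that node, locate and stop the container
    $> docker ps --filter name=s3
    $> docker stop <container id>
    # Watch the task get rescheduled on another node
    $> docker service ps s3
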
Troubleshooting
~~~~~~~~~~~~~~~

To troubleshoot the service, run:

.. code:: sh

    $> docker service ps s3
    ID                         NAME      IMAGE                NODE                               DESIRED STATE  CURRENT STATE       ERROR
    0ar81cw4lvv8chafm8pw48wbc  s3.1      scality/cloudserver  localhost.localdomain.localdomain  Running        Running 7 days ago
    cvmf3j3bz8w6r4h0lf3pxo6eu   \_ s3.1  scality/cloudserver  localhost.localdomain.localdomain  Shutdown       Failed 7 days ago   "task: non-zero exit (137)"

If the error is truncated, view the error in detail by inspecting the
Docker task ID:

.. code:: sh

    $> docker inspect cvmf3j3bz8w6r4h0lf3pxo6eu

Off you go!
~~~~~~~~~~~

Let us know how you use this and if you'd like any specific developments
around it. Even better: come and contribute to our `GitHub repository
<https://github.com/scality/s3/>`__! We look forward to meeting you!

S3FS
====

You can export buckets as a filesystem with s3fs on CloudServer.

`s3fs <https://github.com/s3fs-fuse/s3fs-fuse>`__ is an open source
tool, available both on Debian and RedHat distributions, that enables
you to mount an S3 bucket on a filesystem-like backend. This tutorial uses
an Ubuntu 14.04 host to deploy and use s3fs over CloudServer.

Deploying Zenko CloudServer with SSL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

First, deploy CloudServer with a file backend using `our DockerHub page
<https://hub.docker.com/r/zenko/cloudserver>`__.

.. note::

   If Docker is not installed on your machine, follow
   `these instructions <https://docs.docker.com/engine/installation/>`__
   to install it for your distribution.

You must also set up SSL with CloudServer to use s3fs. See `Using SSL
<./GETTING_STARTED#Using_SSL>`__ for instructions.

s3fs Setup
~~~~~~~~~~

Installing s3fs
---------------

Follow the instructions in the s3fs `README
<https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation-from-pre-built-packages>`__.

Check that s3fs is properly installed. A version check should return
a response resembling:

.. code:: sh

    $> s3fs --version

    Amazon Simple Storage Service File System V1.80(commit:d40da2c) with OpenSSL
    Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
    License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>
    This is free software: you are free to change and redistribute it.
    There is NO WARRANTY, to the extent permitted by law.

Configuring s3fs
----------------

s3fs expects you to provide it with a password file. Our file is
``/etc/passwd-s3fs``. The structure for this file is
``ACCESSKEYID:SECRETKEYID``, so, for CloudServer, you can run:

.. code:: sh

    $> echo 'accessKey1:verySecretKey1' > /etc/passwd-s3fs
    $> chmod 600 /etc/passwd-s3fs

Using CloudServer with s3fs
---------------------------

1. Use /mnt/tests3fs as a mount point.

   .. code:: sh

       $> mkdir /mnt/tests3fs

2. Create a bucket on your local CloudServer. In the present example it is
   named "tests3fs".

   .. code:: sh

       $> s3cmd mb s3://tests3fs

3. Mount the bucket to your mount point with s3fs:

   .. code:: sh

       $> s3fs tests3fs /mnt/tests3fs -o passwd_file=/etc/passwd-s3fs -o url="https://s3.scality.test:8000/" -o use_path_request_style

   The structure of this command is:
   ``s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS``. Of these mandatory
   options:

   * ``passwd_file`` specifies the path to the password file.
   * ``url`` specifies the host name used by your SSL provider.
   * ``use_path_request_style`` forces the path style (by default,
     s3fs uses DNS-style subdomains).

   Once the bucket is mounted, files added to the mount point or
   objects added to the bucket will appear in both locations.

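To detach the bucket when you are done, unmount it like any other FUSE
filesystem. This step is not part of the original tutorial, but it follows
standard s3fs usage:

.. code:: sh

    $> fusermount -u /mnt/tests3fs
    # or, as root: umount /mnt/tests3fs
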
Example
-------

Create two files, and then a directory with a file in our mount point:

.. code:: sh

    $> touch /mnt/tests3fs/file1 /mnt/tests3fs/file2
    $> mkdir /mnt/tests3fs/dir1
    $> touch /mnt/tests3fs/dir1/file3

Now, use s3cmd to show what is in CloudServer:

.. code:: sh

    $> s3cmd ls -r s3://tests3fs

    2017-02-28 17:28         0   s3://tests3fs/dir1/
    2017-02-28 17:29         0   s3://tests3fs/dir1/file3
    2017-02-28 17:28         0   s3://tests3fs/file1
    2017-02-28 17:28         0   s3://tests3fs/file2

Now you can enjoy a filesystem view on your local CloudServer.

Duplicity
=========

How to back up your files with CloudServer.

Installing Duplicity and its Dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To install `Duplicity <http://duplicity.nongnu.org/>`__,
go to `this site <https://code.launchpad.net/duplicity/0.7-series>`__.
Download the latest tarball. Decompress it and follow the instructions
in the README.

.. code:: sh

    $> tar zxvf duplicity-0.7.11.tar.gz
    $> cd duplicity-0.7.11
    $> python setup.py install

You may receive error messages indicating the need to install some or all
of the following dependencies:

.. code:: sh

    $> apt-get install librsync-dev gnupg
    $> apt-get install python-dev python-pip python-lockfile
    $> pip install -U boto

Testing the Installation
------------------------

1. Check that CloudServer is running. Run ``$> docker ps``. You should
   see one container named ``scality/cloudserver``. If you do not, run
   ``$> docker start cloudserver`` and check again.

2. Duplicity uses a module called "Boto" to send requests to S3. Boto
   requires a configuration file located in ``/etc/boto.cfg`` to store
   your credentials and preferences. A minimal configuration
   you can fine tune `following these instructions
   <http://boto.cloudhackers.com/en/latest/getting_started.html>`__ is
   shown here:

   ::

       [Credentials]
       aws_access_key_id = accessKey1
       aws_secret_access_key = verySecretKey1

       [Boto]
       # If using SSL, set to True
       is_secure = False
       # If using SSL, unmute and provide absolute path to local CA certificate
       # ca_certificates_file = /absolute/path/to/ca.crt

   .. note:: To set up SSL with CloudServer, check out our `Using SSL
      <./GETTING_STARTED#Using_SSL>`__ in GETTING STARTED.

3. At this point all requirements to run CloudServer as a backend to Duplicity
   have been met. A local folder/file should back up to the local S3.
   Try it with the decompressed Duplicity folder:

   .. code:: sh

       $> duplicity duplicity-0.7.11 "s3://127.0.0.1:8000/testbucket/"

   .. note:: Duplicity will prompt for a symmetric encryption passphrase.
      Save it carefully, as you will need it to recover your data.
      Alternatively, you can add the ``--no-encryption`` flag
      and the data will be stored plain.

If this command is successful, you will receive an output resembling:

.. code:: sh

    --------------[ Backup Statistics ]--------------
    StartTime 1486486547.13 (Tue Feb  7 16:55:47 2017)
    EndTime 1486486547.40 (Tue Feb  7 16:55:47 2017)
    ElapsedTime 0.27 (0.27 seconds)
    SourceFiles 388
    SourceFileSize 6634529 (6.33 MB)
    NewFiles 388
    NewFileSize 6634529 (6.33 MB)
    DeletedFiles 0
    ChangedFiles 0
    ChangedFileSize 0 (0 bytes)
    ChangedDeltaSize 0 (0 bytes)
    DeltaEntries 388
    RawDeltaSize 6392865 (6.10 MB)
    TotalDestinationSizeChange 2003677 (1.91 MB)
    Errors 0
    -------------------------------------------------

Congratulations! You can now back up to your local S3 through Duplicity.

Automating Backups
------------------

The easiest way to back up files periodically is to write a bash script
and add it to your crontab. A suggested script follows.

.. code:: sh

    #!/bin/bash

    # Export your passphrase so you don't have to type anything
    export PASSPHRASE="mypassphrase"

    # To use a GPG key, put it here and uncomment the line below
    #GPG_KEY=

    # Define your backup bucket, with localhost specified
    DEST="s3://127.0.0.1:8000/testbucketcloudserver/"

    # Define the absolute path to the folder to back up
    SOURCE=/root/testfolder

    # Set to "full" for full backups, and "incremental" for incremental backups
    # Warning: you must perform one full backup before you can perform
    # incremental ones on top of it
    FULL=incremental

    # How long to keep backups. If you don't want to delete old backups, keep
    # this value empty; otherwise, the syntax is "1Y" for one year, "1M" for
    # one month, "1D" for one day.
    OLDER_THAN="1Y"

    # is_running checks whether Duplicity is currently completing a task
    is_running=$(ps -ef | grep duplicity | grep python | wc -l)

    # If Duplicity is already completing a task, this will not run
    if [ $is_running -eq 0 ]; then
        echo "Backup for ${SOURCE} started"

        # To delete backups older than a certain time, do it here
        if [ "$OLDER_THAN" != "" ]; then
            echo "Removing backups older than ${OLDER_THAN}"
            duplicity remove-older-than ${OLDER_THAN} ${DEST}
        fi

        # This is where the actual backup takes place
        echo "Backing up ${SOURCE}..."
        duplicity ${FULL} \
            ${SOURCE} ${DEST}
        # If you're using GPG, paste this in the command above
        # --encrypt-key=${GPG_KEY} --sign-key=${GPG_KEY} \
        # If you want to exclude a subfolder/file, put it below and
        # paste this in the command above
        # --exclude=/${SOURCE}/path_to_exclude \

        echo "Backup for ${SOURCE} complete"
        echo "------------------------------------"
    fi
    # Forget the passphrase...
    unset PASSPHRASE

Put this file in ``/usr/local/sbin/backup.sh``. Run ``crontab -e`` and
paste your configuration into the file that opens. If you're unfamiliar
with Cron, here is a good `HowTo
<https://help.ubuntu.com/community/CronHowto>`__. If the folder being
backed up is modified steadily during the work day, you can set
incremental backups every 5 minutes from 8 AM to 9 PM, Monday
through Friday, by pasting the following line into crontab:

.. code:: sh

    */5 8-20 * * 1-5 /usr/local/sbin/backup.sh

Adding or removing files from the folder being backed up will result in
incremental backups in the bucket.

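To check that the backups are actually usable, you can restore them with
Duplicity as well. The following is a sketch using the same ``DEST`` bucket as
the script above and an arbitrary target folder; Duplicity will ask for the
passphrase chosen earlier (or pass ``--no-encryption`` if the backups were
stored plain):

.. code:: sh

    # Restore the latest backup into /tmp/restored-testfolder
    $> duplicity "s3://127.0.0.1:8000/testbucketcloudserver/" /tmp/restored-testfolder
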
@@ -1,263 +0,0 @@

Metadata Search Documentation
=============================

Description
-----------

This feature enables searching the metadata of objects stored in Zenko.

Requirements
------------

* MongoDB

Design
------

The Metadata Search feature expands on the existing :code:`GET Bucket` S3 API by
enabling users to conduct metadata searches by adding the custom Zenko query
string parameter, :code:`search`. The :code:`search` parameter is structured as a pseudo
SQL WHERE clause, and supports basic SQL operators. For example:
:code:`"A=1 AND B=2 OR C=3"` (complex queries can be built using nesting
operators, :code:`(` and :code:`)`).

The search process is as follows:

* Zenko receives a :code:`GET` request.

  .. code::

      # regular getBucket request
      GET /bucketname HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

      # getBucket versions request
      GET /bucketname?versions HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

      # search getBucket request
      GET /bucketname?search=key%3Dsearch-item HTTP/1.1
      Host: 127.0.0.1:8000
      Date: Wed, 18 Oct 2018 17:50:00 GMT
      Authorization: authorization string

* If the request does *not* contain the :code:`search` query parameter, Zenko performs
  a normal bucket listing and returns an XML result containing the list of
  objects.

* If the request *does* contain the :code:`search` query parameter, Zenko parses and
  validates the search string.

  - If the search string is invalid, Zenko returns an :code:`InvalidArgument` error.

    .. code::

        <?xml version="1.0" encoding="UTF-8"?>
        <Error>
            <Code>InvalidArgument</Code>
            <Message>Invalid sql where clause sent as search query</Message>
            <Resource></Resource>
            <RequestId>d1d6afc64345a8e1198e</RequestId>
        </Error>

  - If the search string is valid, Zenko parses it and generates an abstract
    syntax tree (AST). The AST is then passed to the MongoDB backend to be
    used as the query filter for retrieving objects from a bucket that
    satisfy the requested search conditions. Zenko parses the filtered
    results and returns them as the response.

Metadata search results have the same structure as a :code:`GET Bucket` response:

.. code:: xml

    <?xml version="1.0" encoding="UTF-8"?>
    <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <Name>bucketname</Name>
        <Prefix/>
        <Marker/>
        <MaxKeys>1000</MaxKeys>
        <IsTruncated>false</IsTruncated>
        <Contents>
            <Key>objectKey</Key>
            <LastModified>2018-04-19T18:31:49.426Z</LastModified>
            <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
            <Size>0</Size>
            <Owner>
                <ID>79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be</ID>
                <DisplayName>Bart</DisplayName>
            </Owner>
            <StorageClass>STANDARD</StorageClass>
        </Contents>
        <Contents>
            ...
        </Contents>
    </ListBucketResult>

Performing Metadata Searches with Zenko
---------------------------------------

You can perform metadata searches by:

+ Using the :code:`search_bucket` tool in the
  `Scality/S3 <https://github.com/scality/S3>`_ GitHub repository.
+ Creating a signed HTTP request to Zenko in your preferred programming
  language.

Using the S3 Tool
+++++++++++++++++

After cloning the `Scality/S3 <https://github.com/scality/S3>`_ GitHub repository
and installing the necessary dependencies, run the following command in the S3
project's root directory to access the search tool:

.. code::

    node bin/search_bucket

This generates the following output:

.. code::

    Usage: search_bucket [options]

    Options:

      -V, --version                 output the version number
      -a, --access-key <accessKey>  Access key id
      -k, --secret-key <secretKey>  Secret access key
      -b, --bucket <bucket>         Name of the bucket
      -q, --query <query>           Search query
      -h, --host <host>             Host of the server
      -p, --port <port>             Port of the server
      -s --ssl
      -v, --verbose
      -h, --help                    output usage information

In the following examples, Zenko Server is accessible on endpoint
:code:`http://127.0.0.1:8000` and contains the bucket :code:`zenkobucket`.

.. code::

    # search for objects with metadata "blue"
    node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
        -q "x-amz-meta-color=blue" -h 127.0.0.1 -p 8000

    # search for objects tagged with "type=color"
    node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
        -q "tags.type=color" -h 127.0.0.1 -p 8000

Coding Examples
+++++++++++++++

Search requests can also be performed by making HTTP requests authenticated
with one of the AWS Signature schemes: version 2 or version 4.
For more about the authentication schemes, see:

* https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html

You can also view examples for making requests with Auth V4 in various
languages `here <../../../examples>`__.

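As an illustration, recent versions of ``curl`` (7.75 and later) can sign
requests with AWS Signature Version 4 on their own, which is enough to send a
``search`` request without writing any code. This sketch is not part of the
original tooling; adjust the credentials, region, and query to your deployment:

.. code:: sh

    curl --user accessKey1:verySecretKey1 \
         --aws-sigv4 "aws:amz:us-east-1:s3" \
         "http://127.0.0.1:8000/zenkobucket?search=x-amz-meta-color%3D%22blue%22"
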
Specifying Metadata Fields
~~~~~~~~~~~~~~~~~~~~~~~~~~

To search system metadata headers:

.. code::

    {system-metadata-key}{supported SQL op}{search value}

    # example
    key = blueObject
    size > 0
    key LIKE "blue.*"

To search custom user metadata:

.. code::

    # metadata must be prefixed with "x-amz-meta-"
    x-amz-meta-{user-metadata-key}{supported SQL op}{search value}

    # example
    x-amz-meta-color = blue
    x-amz-meta-color != red
    x-amz-meta-color LIKE "b.*"

To search tags:

.. code::

    # tag searches must be prefixed with "tags."
    tags.{tag-key}{supported SQL op}{search value}

    # example
    tags.type = color

Example queries:

.. code::

    # searching for objects with custom metadata "color"="blue" that are tagged
    # "type"="color"

    tags.type="color" AND x-amz-meta-color="blue"

    # searching for objects with the object key containing the substring "blue"
    # or (custom metadata "color"="blue" and tagged "type"="color")

    key LIKE '.*blue.*' OR (x-amz-meta-color="blue" AND tags.type="color")

Differences from SQL
++++++++++++++++++++

Zenko metadata search queries are similar to SQL-query :code:`WHERE` clauses, but
differ in that:

* They follow the :code:`PCRE` format
* They do not require values with hyphens to be enclosed in
  backticks, :code:``(`)``

  .. code::

      # SQL query
      `x-amz-meta-search-item` = `ice-cream-cone`

      # MD Search query
      x-amz-meta-search-item = ice-cream-cone

* Search queries do not support all SQL operators.

  .. code::

      # Supported SQL operators:
      =, <, >, <=, >=, !=, AND, OR, LIKE, <>

      # Unsupported SQL operators:
      NOT, BETWEEN, IN, IS, +, -, %, ^, /, *, !

Using Regular Expressions in Metadata Search
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Regular expressions in Zenko metadata search differ from SQL in the following
ways:

+ Wildcards are represented with :code:`.*` instead of :code:`%`.
+ Regex patterns must be wrapped in quotes. Failure to do this can lead to
  misinterpretation of patterns.
+ As with :code:`PCRE`, regular expressions can be entered in either the
  :code:`/pattern/` syntax or as the pattern itself if regex options are
  not required.

Example regular expressions:

.. code::

    # search for strings containing the substring "helloworld"
    ".*helloworld.*"
    "/.*helloworld.*/"
    "/.*helloworld.*/i"

@@ -1,21 +0,0 @@

# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SPHINXPROJ    = Zenko
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

@@ -1,161 +0,0 @@

# Object Lock Feature Test Plan

## Feature Component Description

Implementing Object Lock will introduce six new APIs:

- putObjectLockConfiguration
- getObjectLockConfiguration
- putObjectRetention
- getObjectRetention
- putObjectLegalHold
- getObjectLegalHold

Along with these APIs, putBucket, putObject, deleteObject, and multiObjectDelete
will be affected. In Arsenal, both the BucketInfo and ObjectMD models will be
updated. Bucket policy and IAM policy permissions will be updated to include
the new API actions.

## Functional Tests

### putBucket tests

- passing option to enable object lock updates bucket metadata and enables
  bucket versioning

### putBucketVersioning tests

- suspending versioning on bucket with object lock enabled returns error

### putObject tests

- putting retention configuration on object should be allowed
- putting invalid retention configuration returns error

### getObject tests

- getting object with retention information should include retention information

### copyObject tests

- copying object with retention information should include retention information

### initiateMultipartUpload tests

- mpu object initiated with retention information should include retention
  information

### putObjectLockConfiguration tests

- putting configuration as non-bucket-owner user returns AccessDenied error
- disabling object lock on bucket created with object lock returns error
- enabling object lock on bucket created without object lock returns
  InvalidBucketState error
- enabling object lock with token on bucket created without object lock succeeds
- putting valid object lock configuration when bucket does not have object
  lock enabled returns error (InvalidRequest?)
- putting valid object lock configuration updates bucket metadata
- putting invalid object lock configuration returns error
  - ObjectLockEnabled !== "Enabled"
  - Rule object doesn't contain DefaultRetention key
  - Mode !== "GOVERNANCE" or "COMPLIANCE"
  - Days are not an integer
  - Years are not an integer

### getObjectLockConfiguration tests

- getting configuration as non-bucket-owner user returns AccessDenied error
- getting configuration when none is set returns
  ObjectLockConfigurationNotFoundError error
- getting configuration returns correct object lock configuration for bucket

### putObjectRetention

- putting retention as non-bucket-owner user returns AccessDenied error
- putting retention on object in bucket without object lock enabled returns
  InvalidRequest error
- putting valid retention period updates object metadata

### getObjectRetention

- getting retention as non-bucket-owner user returns AccessDenied error
- getting retention when none is set returns NoSuchObjectLockConfiguration
  error
- getting retention returns correct object retention period

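A sketch of how the retention tests above might be driven against a local
CloudServer with the AWS CLI; the endpoint, bucket, and key names are
placeholders, not values taken from the test plan:

```sh
# Put a retention period on an object (GOVERNANCE mode)
aws s3api put-object-retention \
    --endpoint-url http://127.0.0.1:8000 \
    --bucket lock-enabled-bucket --key testobject \
    --retention '{"Mode": "GOVERNANCE", "RetainUntilDate": "2030-01-01T00:00:00Z"}'

# Read it back and check the returned mode and retain-until date
aws s3api get-object-retention \
    --endpoint-url http://127.0.0.1:8000 \
    --bucket lock-enabled-bucket --key testobject
```
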
### putObjectLegalHold

- putting legal hold as non-bucket-owner user returns AccessDenied error
- putting legal hold on object in bucket without object lock enabled returns
  InvalidRequest error
- putting valid legal hold updates object metadata

### getObjectLegalHold

- getting legal hold as non-bucket-owner user returns AccessDenied error
- getting legal hold when none is set returns NoSuchObjectLockConfiguration
  error
- getting legal hold returns correct object legal hold

## End to End Tests

### Scenarios

- Create bucket with object lock enabled. Put object. Put object lock
  configuration. Put another object.
  - Ensure object put before configuration does not have retention period set
  - Ensure object put after configuration does have retention period set

- Create bucket without object lock. Put object. Enable object lock with token
  and put object lock configuration. Put another object.
  - Ensure object put before configuration does not have retention period set
  - Ensure object put after configuration does have retention period set

- Create bucket with object lock enabled and put configuration with COMPLIANCE
  mode. Put object.
  - Ensure object cannot be deleted (returns AccessDenied error).
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled and put configuration with GOVERNANCE
  mode. Put object.
  - Ensure user without permission cannot delete object
  - Ensure user without permission cannot overwrite object
  - Ensure user with permission can delete object
  - Ensure user with permission can overwrite object
  - Ensure user with permission can lengthen retention period
  - Ensure user with permission cannot shorten retention period

- Create bucket with object lock enabled and put configuration. Edit bucket
  metadata so retention period is expired. Put object.
  - Ensure object can be deleted.
  - Ensure object can be overwritten.

- Create bucket with object lock enabled and put configuration. Edit bucket
  metadata so retention period is expired. Put object. Put new retention
  period on object.
  - Ensure object cannot be deleted.
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled and put configuration. Put object.
  Edit object metadata so retention period is past expiration.
  - Ensure object can be deleted.
  - Ensure object can be overwritten.

- Create bucket with object lock enabled and put configuration. Edit bucket
  metadata so retention period is expired. Put object. Put legal hold
  on object.
  - Ensure object cannot be deleted.
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled and put configuration. Put object.
  Check object retention. Change bucket object lock configuration.
  - Ensure object retention period has not changed with bucket configuration.

- Create bucket with object lock enabled. Put object with legal hold.
  - Ensure object cannot be deleted.
  - Ensure object cannot be overwritten.

- Create bucket with object lock enabled. Put object with legal hold. Remove
  legal hold.
  - Ensure object can be deleted.
  - Ensure object can be overwritten.

@@ -1,73 +0,0 @@

# Cloudserver Release Plan

## Docker Image Generation

Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:

* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard

With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
the build chain, and so on.

Tagged versions of cloudserver will be stored in the production namespace.

## How to Pull Docker Images

```sh
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```

## Release Process

To release a production image:

* Create a PR to bump the package version

  Update Cloudserver's `package.json` by bumping it to the relevant next
  version in a new PR. For example, if the last released version was
  `8.4.7`, the next version would be `8.4.8`.

  ```js
  {
      "name": "cloudserver",
      "version": "8.4.8", <--- Here
      [...]
  }
  ```

* Review & merge the PR

* Create the release on GitHub

  * Go to the Releases tab (https://github.com/scality/cloudserver/releases);
  * Click on the `Draft new release` button;
  * In the `tag` field, type the name of the release (`8.4.8`), and confirm
    to create the tag on publish;
  * Click on the `Generate release notes` button to fill the fields;
  * Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case);
  * Click `Publish release` to create the GitHub release and Git tag

  Notes:
  * the Git tag will be created automatically.
  * this should be done as soon as the PR is merged, so that the tag
    is put on the "version bump" commit.

* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force)

  * Branch Name: The one used for the tag earlier. In this example `development/8.4`
  * Override Stage: 'release'
  * Extra properties:
    * name: `'tag'`, value: `[release version]`, in this example `'8.4.8'`

* Release the version on Jira

  * Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
  * Create a next version
    * Name: `[next version]`, in this example `8.4.9`
  * Click `...` and select `Release` on the recently released version (`8.4.8`)
  * Fill in the field to move incomplete issues to the next version

@@ -1,398 +0,0 @@

.. _use-public-cloud:

Using Public Clouds as data backends
====================================

Introduction
------------

As stated in our `GETTING STARTED guide <GETTING_STARTED.html#location-configuration>`__,
new data backends can be added by creating a region (also called location
constraint) with the right endpoint and credentials.
This section of the documentation shows you how to set up our currently
supported public cloud backends:

- `Amazon S3 <#aws-s3-as-a-data-backend>`__ ;
- `Microsoft Azure <#microsoft-azure-as-a-data-backend>`__ .

For each public cloud backend, you will have to edit your CloudServer
:code:`locationConfig.json` and do a few setup steps on the applicable public
cloud backend.

AWS S3 as a data backend
------------------------

From the AWS S3 Console (or any AWS S3 CLI tool)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Create a bucket where you will host your data for this new location constraint.
This bucket must have versioning enabled:

- This is an option you may choose to activate at step 2 of Bucket Creation in
  the Console;
- With AWS CLI, use :code:`put-bucket-versioning` from the :code:`s3api`
  commands on your bucket of choice (see the sketch below);
- Using other tools, please refer to your tool's documentation.

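For instance, enabling versioning with the AWS CLI on the example bucket used
in this guide could look like the following sketch (credentials and region are
whatever your AWS profile already uses):

.. code:: shell

    $> aws s3api put-bucket-versioning --bucket zenkobucket \
           --versioning-configuration Status=Enabled
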
In this example, our bucket will be named ``zenkobucket`` and will have
versioning enabled.

From the CloudServer repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

locationConfig.json
^^^^^^^^^^^^^^^^^^^

Edit this file to add a new location constraint. This location constraint will
contain the information for the AWS S3 bucket to which you will be writing your
data whenever you create a CloudServer bucket in this location.
There are a few configurable options here:

- :code:`type` : set to :code:`aws_s3` to indicate this location constraint is
  writing data to AWS S3;
- :code:`legacyAwsBehavior` : set to :code:`true` to indicate this region should
  behave like AWS S3 :code:`us-east-1` region, set to :code:`false` to indicate
  this region should behave like any other AWS S3 region;
- :code:`bucketName` : set to an *existing bucket* in your AWS S3 Account; this
  is the bucket in which your data will be stored for this location constraint;
- :code:`awsEndpoint` : set to your bucket's endpoint, usually :code:`s3.amazonaws.com`;
- :code:`bucketMatch` : set to :code:`true` if you want your object name to be the
  same in your local bucket and your AWS S3 bucket; set to :code:`false` if you
  want your object name to be of the form :code:`{{localBucketName}}/{{objectname}}`
  in your AWS S3 hosted bucket;
- :code:`credentialsProfile` and :code:`credentials` are two ways to provide
  your AWS S3 credentials for that bucket, *use only one of them* :

  - :code:`credentialsProfile` : set to the profile name allowing you to access
    your AWS S3 bucket from your :code:`~/.aws/credentials` file;
  - :code:`credentials` : set the two fields inside the object (:code:`accessKey`
    and :code:`secretKey`) to their respective values from your AWS credentials.

.. code:: json

    (...)
    "aws-test": {
        "type": "aws_s3",
        "legacyAwsBehavior": true,
        "details": {
            "awsEndpoint": "s3.amazonaws.com",
            "bucketName": "zenkobucket",
            "bucketMatch": true,
            "credentialsProfile": "zenko"
        }
    },
    (...)

.. code:: json

    (...)
    "aws-test": {
        "type": "aws_s3",
        "legacyAwsBehavior": true,
        "details": {
            "awsEndpoint": "s3.amazonaws.com",
            "bucketName": "zenkobucket",
            "bucketMatch": true,
            "credentials": {
                "accessKey": "WHDBFKILOSDDVF78NPMQ",
                "secretKey": "87hdfGCvDS+YYzefKLnjjZEYstOIuIjs/2X72eET"
            }
        }
    },
    (...)

.. WARNING::
   If you set :code:`bucketMatch` to :code:`true`, we strongly advise that you
   only have one local bucket per AWS S3 location.
   Without :code:`bucketMatch` set to :code:`false`, your object names in your
   AWS S3 bucket will not be prefixed with your Cloud Server bucket name. This
   means that if you put an object :code:`foo` to your CloudServer bucket
   :code:`zenko1` and you then put a different :code:`foo` to your CloudServer
   bucket :code:`zenko2` and both :code:`zenko1` and :code:`zenko2` point to the
   same AWS bucket, the second :code:`foo` will overwrite the first :code:`foo`.

~/.aws/credentials
^^^^^^^^^^^^^^^^^^

.. TIP::
   If you explicitly set your :code:`accessKey` and :code:`secretKey` in the
   :code:`credentials` object of your :code:`aws_s3` location in your
   :code:`locationConfig.json` file, you may skip this section.

Make sure your :code:`~/.aws/credentials` file has a profile matching the one
defined in your :code:`locationConfig.json`. Following our previous example, it
would look like:

.. code:: shell

    [zenko]
    aws_access_key_id=WHDBFKILOSDDVF78NPMQ
    aws_secret_access_key=87hdfGCvDS+YYzefKLnjjZEYstOIuIjs/2X72eET

Start the server with the ability to write to AWS S3
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Inside the repository, once all the files have been edited, you should be able
to start the server and start writing data to AWS S3 through CloudServer.

.. code:: shell

    # Start the server locally
    $> S3DATA=multiple yarn start

Run the server as a docker container with the ability to write to AWS S3
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. TIP::
   If you set the :code:`credentials` object in your
   :code:`locationConfig.json` file, you don't need to mount your
   :code:`.aws/credentials` file.

Mount all the files that have been edited to override defaults, and do a
standard Docker run; then you can start writing data to AWS S3 through
CloudServer.

.. code:: shell

    # Start the server in a Docker container
    $> sudo docker run -d --name CloudServer \
       -v $(pwd)/data:/usr/src/app/localData \
       -v $(pwd)/metadata:/usr/src/app/localMetadata \
       -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
       -v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
       -v ~/.aws/credentials:/root/.aws/credentials \
       -e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
       scality/cloudserver

Testing: put an object to AWS S3 using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In order to start testing pushing to AWS S3, you will need to create a local
bucket in the AWS S3 location constraint - this local bucket will only store the
metadata locally, while both the data and any user metadata (:code:`x-amz-meta`
headers sent with a PUT object, and tags) will be stored on AWS S3.
This example is based on all our previous steps.

.. code:: shell

    # Create a local bucket storing data in AWS S3
    $> s3cmd --host=127.0.0.1:8000 mb s3://zenkobucket --region=aws-test
    # Put an object to AWS S3, and store the metadata locally
    $> s3cmd --host=127.0.0.1:8000 put /etc/hosts s3://zenkobucket/testput
       upload: '/etc/hosts' -> 's3://zenkobucket/testput'  [1 of 1]
        330 of 330   100% in    0s   380.87 B/s  done
    # List locally to check you have the metadata
    $> s3cmd --host=127.0.0.1:8000 ls s3://zenkobucket
       2017-10-23 10:26       330   s3://zenkobucket/testput

Then, from the AWS Console, if you go into your bucket, you should see your
newly uploaded object:

.. figure:: ../res/aws-console-successful-put.png
   :alt: AWS S3 Console upload example

Troubleshooting
~~~~~~~~~~~~~~~

Make sure your :code:`~/.s3cfg` file has credentials matching your local
CloudServer credentials defined in :code:`conf/authdata.json`. By default, the
access key is :code:`accessKey1` and the secret key is :code:`verySecretKey1`.
For more information, refer to our template `~/.s3cfg <./CLIENTS/#s3cmd>`__ .

Pre-existing objects in your AWS S3 hosted bucket can unfortunately not be
accessed by CloudServer at this time.

Make sure versioning is enabled in your remote AWS S3 hosted bucket. To check,
using the AWS Console, click on your bucket name, then on "Properties" at the
top, and then you should see something like this:

.. figure:: ../res/aws-console-versioning-enabled.png
   :alt: AWS Console showing versioning enabled

Microsoft Azure as a data backend
---------------------------------

From the MS Azure Console
~~~~~~~~~~~~~~~~~~~~~~~~~

From your Storage Account dashboard, create a container where you will host your
data for this new location constraint.

You will also need to get one of your Storage Account Access Keys, and to
provide it to CloudServer.
This can be found from your Storage Account dashboard, under "Settings", then
"Access keys".

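If you prefer the command line, the Azure CLI can list the same keys. This is
an optional alternative to the portal, assuming the ``az`` tool is installed
and you know the resource group of the storage account:

.. code:: shell

    $> az storage account keys list \
           --account-name zenkomeetups \
           --resource-group <your resource group>
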
In this example, our container will be named ``zenkontainer``, and will belong
to the ``zenkomeetups`` Storage Account.

From the CloudServer repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

locationConfig.json
^^^^^^^^^^^^^^^^^^^

Edit this file to add a new location constraint. This location constraint will
contain the information for the MS Azure container to which you will be writing
your data whenever you create a CloudServer bucket in this location.
There are a few configurable options here:

- :code:`type` : set to :code:`azure` to indicate this location constraint is
  writing data to MS Azure;
- :code:`legacyAwsBehavior` : set to :code:`true` to indicate this region should
  behave like AWS S3 :code:`us-east-1` region, set to :code:`false` to indicate
  this region should behave like any other AWS S3 region (in the case of MS Azure
  hosted data, this is mostly relevant for the format of errors);
- :code:`azureStorageEndpoint` : set to your storage account's endpoint, usually
  :code:`https://{{storageAccountName}}.blob.core.windows.net`;
- :code:`azureContainerName` : set to an *existing container* in your MS Azure
  storage account; this is the container in which your data will be stored for
  this location constraint;
- :code:`bucketMatch` : set to :code:`true` if you want your object name to be
  the same in your local bucket and your MS Azure container; set to
  :code:`false` if you want your object name to be of the form
  :code:`{{localBucketName}}/{{objectname}}` in your MS Azure container;
- :code:`azureStorageAccountName` : the MS Azure Storage Account to which your
  container belongs;
- :code:`azureStorageAccessKey` : one of the Access Keys associated to the above
  defined MS Azure Storage Account.

.. code:: json

    (...)
    "azure-test": {
        "type": "azure",
        "legacyAwsBehavior": false,
        "details": {
            "azureStorageEndpoint": "https://zenkomeetups.blob.core.windows.net/",
            "bucketMatch": true,
            "azureContainerName": "zenkontainer",
            "azureStorageAccountName": "zenkomeetups",
            "azureStorageAccessKey": "auhyDo8izbuU4aZGdhxnWh0ODKFP3IWjsN1UfFaoqFbnYzPj9bxeCVAzTIcgzdgqomDKx6QS+8ov8PYCON0Nxw=="
        }
    },
    (...)

.. WARNING::
   If you set :code:`bucketMatch` to :code:`true`, we strongly advise that you
   only have one local bucket per MS Azure location.
   Without :code:`bucketMatch` set to :code:`false`, your object names in your
   MS Azure container will not be prefixed with your Cloud Server bucket name.
   This means that if you put an object :code:`foo` to your CloudServer bucket
   :code:`zenko1` and you then put a different :code:`foo` to your CloudServer
   bucket :code:`zenko2` and both :code:`zenko1` and :code:`zenko2` point to the
   same MS Azure container, the second :code:`foo` will overwrite the first
   :code:`foo`.

.. TIP::
   You may export environment variables to **override** some of your
   :code:`locationConfig.json` variables; the syntax for them is
   :code:`{{region-name}}_{{ENV_VAR_NAME}}`; currently, the available variables
   are those shown below, with the values used in the current example:

   .. code:: shell

       $> export azure-test_AZURE_STORAGE_ACCOUNT_NAME="zenkomeetups"
       $> export azure-test_AZURE_STORAGE_ACCESS_KEY="auhyDo8izbuU4aZGdhxnWh0ODKFP3IWjsN1UfFaoqFbnYzPj9bxeCVAzTIcgzdgqomDKx6QS+8ov8PYCON0Nxw=="
       $> export azure-test_AZURE_STORAGE_ENDPOINT="https://zenkomeetups.blob.core.windows.net/"

Start the server with the ability to write to MS Azure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Inside the repository, once all the files have been edited, you should be able
to start the server and start writing data to MS Azure through CloudServer.

.. code:: shell

    # Start the server locally
    $> S3DATA=multiple yarn start

Run the server as a docker container with the ability to write to MS Azure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Mount all the files that have been edited to override defaults, and do a
standard Docker run; then you can start writing data to MS Azure through
CloudServer.

.. code:: shell

    # Start the server in a Docker container
    $> sudo docker run -d --name CloudServer \
       -v $(pwd)/data:/usr/src/app/localData \
       -v $(pwd)/metadata:/usr/src/app/localMetadata \
       -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
       -v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
       -e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
       scality/cloudserver

Testing: put an object to MS Azure using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In order to start testing pushing to MS Azure, you will need to create a local
bucket in the MS Azure region - this local bucket will only store the metadata
locally, while both the data and any user metadata (:code:`x-amz-meta` headers
sent with a PUT object, and tags) will be stored on MS Azure.
This example is based on all our previous steps.

.. code:: shell

    # Create a local bucket storing data in MS Azure
    $> s3cmd --host=127.0.0.1:8000 mb s3://zenkontainer --region=azure-test
    # Put an object to MS Azure, and store the metadata locally
    $> s3cmd --host=127.0.0.1:8000 put /etc/hosts s3://zenkontainer/testput
       upload: '/etc/hosts' -> 's3://zenkontainer/testput'  [1 of 1]
        330 of 330   100% in    0s   380.87 B/s  done
    # List locally to check you have the metadata
    $> s3cmd --host=127.0.0.1:8000 ls s3://zenkontainer
       2017-10-24 14:38       330   s3://zenkontainer/testput

Then, from the MS Azure Console, if you go into your container, you should see
your newly uploaded object:

.. figure:: ../res/azure-console-successful-put.png
   :alt: MS Azure Console upload example

Troubleshooting
~~~~~~~~~~~~~~~

Make sure your :code:`~/.s3cfg` file has credentials matching your local
CloudServer credentials defined in :code:`conf/authdata.json`. By default, the
access key is :code:`accessKey1` and the secret key is :code:`verySecretKey1`.
For more information, refer to our template `~/.s3cfg <./CLIENTS/#s3cmd>`__ .

Pre-existing objects in your MS Azure container can unfortunately not be
accessed by CloudServer at this time.

For any data backend
--------------------

From the CloudServer repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

config.json
^^^^^^^^^^^

.. IMPORTANT::
   You only need to follow this section if you want to define a given location
   as the default for a specific endpoint.

Edit the :code:`restEndpoints` section of your :code:`config.json` file to add
an endpoint definition mapping the endpoint to the location you want it to use
as its default.
In this example, we'll make :code:`custom-location` our default location for
the endpoint :code:`zenkotos3.com`:

.. code:: json

   (...)
   "restEndpoints": {
       "localhost": "us-east-1",
       "127.0.0.1": "us-east-1",
       "cloudserver-front": "us-east-1",
       "s3.docker.test": "us-east-1",
       "127.0.0.2": "us-east-1",
       "zenkotos3.com": "custom-location"
   },
   (...)
@ -0,0 +1,8 @@
name: cloudserver
title: Zenko CloudServer
version: '1.0'
start_page: ROOT:README.adoc
nav:
- modules/ROOT/nav.adoc
- modules/USERS/nav.adoc
- modules/DEVELOPERS/nav.adoc

docs/conf.py
@ -1,161 +0,0 @@
# -*- coding: utf-8 -*-
#
# Zope docs documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 20 16:22:03 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# import sys
# import os

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('.'))

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'scality-zenko-cloudserver'
copyright = u'Apache License Version 2.0, 2004 http://www.apache.org/licenses/'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.0.0'
# The full version, including alpha/beta/rc tags.
release = '7.0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for
# all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'css/default.css'

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../res/scality-cloudserver-logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_use_modindex = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
# html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'ZenkoCloudServerdoc'
@ -1,79 +0,0 @@
============================================
Add New Backend Storage To Zenko CloudServer
============================================

This set of documents aims at bootstrapping developers with Zenko's CloudServer
module, so they can then go on and contribute features.

.. toctree::
   :maxdepth: 2

   non-s3-compatible-backend
   s3-compatible-backend

We always encourage our community to offer new extensions to Zenko,
and new backend support is paramount to meeting more community needs.
If that is something you want to contribute (or just do on your own
version of the cloudserver image), this is the guide to read. Please
make sure you follow our `Contributing Guidelines`_.

If you need help with anything, please search our `forum`_ for more
information.

Add support for a new backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Currently the main public cloud protocols are supported and more can
be added. There are two main types of backend: those compatible with
Amazon's S3 protocol and those not compatible.

================= ========== ============ ===========
Backend type      Supported  Active WIP   Not started
================= ========== ============ ===========
Private disk/fs   x
AWS S3            x
Microsoft Azure   x
Backblaze B2                 x
Google Cloud                 x
Openstack Swift                           x
================= ========== ============ ===========

.. important:: Should you want to request support for a new backend,
   please do so by opening a `Github issue`_, and filling out the
   "Feature Request" section of our template.

To add support for a new backend to the official CloudServer
repository, please follow these steps:

- familiarize yourself with our `Contributing Guidelines`_
- open a `Github issue`_ and fill out the Feature Request form, and
  specify you would like to contribute it yourself;
- wait for our core team to get back to you with an answer on whether
  we are interested in taking that contribution in (and hence
  committing to maintaining it over time);
- once approved, fork the repository and start your development;
- use the `forum`_ with any question you may have during the
  development process;
- when you think it's ready, let us know so that we create a feature
  branch against which we'll compare and review your code;
- open a pull request with your changes against that dedicated feature
  branch;
- once that pull request gets merged, you're done.

.. tip::

   While we do take care of the final rebase (when we merge your feature
   branch on the latest default branch), we do ask that you keep up to date
   with our latest default branch until then.

.. important::

   If we do not approve your feature request, you may of course still
   work on supporting a new backend: all our "no" means is that we do not
   have the resources, as part of our core development team, to maintain
   this feature for the moment.

.. _GitHub issue: https://github.com/scality/S3/issues
.. _Contributing Guidelines: https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md
.. _forum: https://forum.zenko.io
@ -1,53 +0,0 @@
=================
Add A New Backend
=================

Supporting all possible public cloud storage APIs is CloudServer's
ultimate goal. As an open source project, contributions are welcome.

The first step is to get familiar with building a custom Docker image
for CloudServer.

Build a Custom Docker Image
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Clone Zenko's CloudServer, install all dependencies and start the
service:

.. code-block:: shell

   $ git clone https://github.com/scality/cloudserver
   $ cd cloudserver
   $ yarn install
   $ yarn start

.. tip::

   Some optional dependencies may fail, resulting in you seeing `yarn
   WARN` messages; these can safely be ignored. Refer to the User
   documentation for all available options.

Build the Docker image:

.. code-block:: shell

   # docker build . -t
   # {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}

Push the newly created Docker image to your own hub:

.. code-block:: shell

   # docker push
   # {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}

.. note::

   To perform this last operation, you need to be authenticated with DockerHub

There are two main types of backend you could want Zenko to support:

== link:S3_COMPATIBLE_BACKENDS.adoc[S3 compatible data backends]

== link:NON_S3_COMPATIBLE_BACKENDS.adoc[Data backends using another protocol than the S3 protocol]
@ -1,530 +0,0 @@
==========================================================
Adding support for data backends not supporting the S3 API
==========================================================

These backends abstract the complexity of multiple APIs to let users
work on a single common namespace across multiple clouds.

This document aims at introducing you to the right files in
CloudServer (the Zenko stack's subcomponent in charge of API
translation, among other things) to add support for your own backend of
choice.

General configuration
~~~~~~~~~~~~~~~~~~~~~

There are a number of constants and environment variables to define to support
a new data backend; here is a list and where to find them:

:file:`/constants.js`
---------------------

* give your backend type a name, as part of the `externalBackends` object (see
  the sketch below);
* specify whether versioning is implemented, as part of the
  `versioningNotImplemented` object;
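
For illustration, here is a minimal sketch of what those two additions could
look like, assuming the example backend type name `ztore` used throughout this
document; the surrounding entries are placeholders, not the file's actual
contents:

.. code-block:: js

   // constants.js (sketch) -- register the hypothetical 'ztore' backend type.
   const constants = {
       // ...existing constants...

       // Backends that live outside CloudServer's local data store.
       externalBackends: { aws_s3: true, azure: true, ztore: true },

       // Backends for which object versioning is not implemented yet;
       // versioning-related requests will be rejected for these types.
       versioningNotImplemented: { azure: true, ztore: true },
   };

   module.exports = constants;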

:file:`/lib/Config.js`
----------------------

* this is where you should put common utility functions, like the ones to parse
  the location object from `locationConfig.json` (see the sketch below);
* make sure you define environment variables (like `GCP_SERVICE_EMAIL`) as we'll
  use those internally for the CI to test against the real remote backend;
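
As an illustration, parsing helpers in `/lib/Config.js` might look like the
sketch below; the `ztore` names and the exact location-object fields are
assumptions carried over from this document's pseudocode, not the real file:

.. code-block:: js

   // lib/Config.js (sketch) -- utility getters for the hypothetical 'ztore' backend.
   class Config {
       // ...existing Config methods...

       getZtoreEndpoint(locationName) {
           const { details } = this.locationConstraints[locationName];
           // Allow an environment variable override, mirroring the
           // {{region-name}}_{{ENV_VAR_NAME}} convention used elsewhere.
           return process.env[`${locationName}_ZTORE_ENDPOINT`] ||
               details.ztoreEndpoint;
       }

       getZtoreCredentials(locationName) {
           const { details } = this.locationConstraints[locationName];
           return {
               accessKey: process.env[`${locationName}_ZTORE_ACCESS_KEY`] ||
                   details.credentials.accessKey,
               secretKey: process.env[`${locationName}_ZTORE_SECRET_KEY`] ||
                   details.credentials.secretKey,
           };
       }
   }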

:file:`/lib/data/external/{{backendName}}Client.js`
---------------------------------------------------

* this file is where you'll instantiate your backend client; this should be a
  class with a constructor taking the config object built in `/lib/Config.js` as
  parameter;
* over time, you may need some utility functions which we've defined in the
  folder `/api/apiUtils`, and in the file `/lib/data/external/utils`;

:file:`/lib/data/external/utils.js`
-----------------------------------

* make sure to add options for `sourceLocationConstraintType` to be equal to
  the name you gave your backend in :file:`/constants.js`;

:file:`/lib/data/external/{{BackendName}}_lib/`
-----------------------------------------------

* this folder is where you'll put the functions needed for supporting your
  backend; keep your files as atomic as possible;

:file:`/tests/locationConfig/locationConfigTests.json`
------------------------------------------------------

* this file is where you'll create location profiles to be used by your
  functional tests;

:file:`/lib/data/locationConstraintParser.js`
---------------------------------------------

* this is where you'll instantiate your client if the operation the end user
  sent effectively writes to your backend; everything happens inside the
  function `parseLC()`; you should add a condition that executes if
  `locationObj.type` is the name of your backend (that you defined in
  `constants.js`), and instantiates a client of yours. See pseudocode below,
  assuming location type name is `ztore`:

.. code-block:: js
   :linenos:
   :emphasize-lines: 10

   (...) //<1>
   const ZtoreClient = require('./external/ZtoreClient');
   const { config } = require('../Config'); //<1>

   function parseLC(){ //<1>
       (...) //<1>
       Object.keys(config.locationConstraints).forEach(location => { //<1>
           const locationObj = config.locationConstraints[location]; //<1>
           (...) //<1>
           if (locationObj.type === 'ztore') {
               const ztoreEndpoint = config.getZtoreEndpoint(location);
               const ztoreCredentials = config.getZtoreCredentials(location); //<2>
               clients[location] = new ZtoreClient({
                   ztoreEndpoint,
                   ztoreCredentials,
                   ztoreBucketname: locationObj.details.ztoreBucketName,
                   bucketMatch: locationObj.details.BucketMatch,
                   dataStoreName: location,
               }); //<3>
               clients[location].clientType = 'ztore';
           }
           (...) //<1>
       });
   }

1. Code that is already there
2. You may need more utility functions depending on your backend specs
3. You may have more fields required in your constructor object depending on
   your backend specs

Operation of type PUT
~~~~~~~~~~~~~~~~~~~~~

PUT routes are usually where people get started, as it's the easiest to check!
Simply go on your remote backend console and you'll be able to see whether your
object actually went up in the cloud...

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `put()` function is also called
  `put()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `put(stream, size, keyContext, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends (a sketch follows
  the list below):

  //TODO: generate this from jsdoc

  - `stream`: the stream of data you want to put in the cloud; if you're
    unfamiliar with node.js streams, we suggest you start training, as we use
    them a lot!

  - `size`: the size of the object you're trying to put;

  - `keyContext`: an object with metadata about the operation; common entries
    are `namespace`, `bucketName`, `owner`, `cipherBundle`, and `tagging`; if
    these are not sufficient for your integration, contact us to get
    architecture validation before adding new entries;

  - `reqUids`: the request unique ID used for logging;

  - `callback`: your function's callback (should handle errors);
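
Below is a minimal, illustrative sketch of such a `put()` for the hypothetical
`ztore` backend used in this document's pseudocode; the `ztore-sdk` module and
its `putObject()` options are placeholders (assumptions), and the exact
`keyContext` fields may differ in your integration:

.. code-block:: js

   // lib/data/external/ZtoreClient.js (sketch) -- PUT path only.
   // 'ztore-sdk' stands in for whatever client library your backend provides.
   const ztoreSdk = require('ztore-sdk');

   class ZtoreClient {
       constructor(config) {
           this._client = ztoreSdk.createClient({
               endpoint: config.ztoreEndpoint,
               credentials: config.ztoreCredentials,
           });
           this._bucketName = config.ztoreBucketname;
           this._bucketMatch = config.bucketMatch;
           this._dataStoreName = config.dataStoreName;
       }

       put(stream, size, keyContext, reqUids, callback) {
           // When bucketMatch is false, prefix the key with the CloudServer
           // bucket name so objects from different local buckets cannot collide.
           const key = this._bucketMatch ?
               keyContext.objectKey :
               `${keyContext.bucketName}/${keyContext.objectKey}`;
           return this._client.putObject({
               bucket: this._bucketName, key, body: stream, contentLength: size,
           }, err => {
               if (err) {
                   // Surface the error to the gateway, which maps it to an S3 error.
                   return callback(err);
               }
               // Return the key actually used on the remote backend.
               return callback(null, key);
           });
       }
   }

   module.exports = ZtoreClient;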

:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your PUT operation, and
  then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/put/put{{BackendName}}js`
-----------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code); a
  skeleton is sketched below;
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;
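
A bare-bones functional test skeleton might look like this; it only
illustrates the shape of such a test (the `ztore-location` profile name is an
assumption, and the real tests in `tests/functional/aws-node-sdk` rely on
shared helpers rather than a raw client):

.. code-block:: js

   // tests/functional/aws-node-sdk/test/multipleBackend/put/putZtore.js (sketch)
   const assert = require('assert');
   const AWS = require('aws-sdk');

   // 'ztore-location' is assumed to be defined in locationConfigTests.json
   // and to point at a location of type 'ztore'.
   const bucket = 'zenko-ztore-bucket';
   const s3 = new AWS.S3({
       endpoint: 'http://127.0.0.1:8000',
       s3ForcePathStyle: true,
       accessKeyId: 'accessKey1',
       secretAccessKey: 'verySecretKey1',
   });

   describe('Multiple backend PUT to ztore', () => {
       before(done => s3.createBucket({
           Bucket: bucket,
           CreateBucketConfiguration: { LocationConstraint: 'ztore-location' },
       }, done));

       after(done => s3.deleteObject({ Bucket: bucket, Key: 'testput' },
           () => s3.deleteBucket({ Bucket: bucket }, done)));

       it('should put an object to the ztore location', done => {
           s3.putObject({ Bucket: bucket, Key: 'testput', Body: 'somedata' },
               (err, data) => {
                   assert.ifError(err);
                   assert(data.ETag, 'expected an ETag in the response');
                   done();
               });
       });
   });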

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests (see the sketch below).
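
For instance, such a helper could simply remap the shared key fixtures; this is
only a sketch, and the shape of the `keys` fixture is an assumption based on
the description above:

.. code-block:: js

   // tests/functional/aws-node-sdk/test/multipleBackend/utils.js (sketch)
   // Assumed existing fixture: an array of { describe, name, body } test keys.
   const keys = require('./keys');

   // Hypothetical helper: adapt the shared keys[] to ztore's key format,
   // e.g. because ztore forbids some characters in object names.
   function getZtoreKeys() {
       return keys.map(key => Object.assign({}, key, {
           name: key.name.replace(/[^a-zA-Z0-9/_.-]/g, '_'),
       }));
   }

   module.exports = { getZtoreKeys };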

Operation of type GET
~~~~~~~~~~~~~~~~~~~~~

GET routes are easy to test after PUT routes are implemented, which is why
we're covering them second.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `get()` function is also called
  `get()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `get(objectGetInfo, range, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends (a sketch follows
  the list below):

  //TODO: generate this from jsdoc

  - `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
    data store, and `client`, the data store name;

  - `range`: the range of bytes you will get, for "get-by-range" operations (we
    recommend you do simple GETs first, and then look at this);

  - `reqUids`: the request unique ID used for logging;

  - `callback`: your function's callback (should handle errors);
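
A sketch of a `get()` following that signature is shown below; as before,
`ztore-sdk` and its `getObject()` options are placeholders, and the only firm
part is that the callback should receive a readable stream on success:

.. code-block:: js

   // lib/data/external/ZtoreClient.js (sketch) -- GET path only.
   const ztoreSdk = require('ztore-sdk'); // placeholder backend SDK

   class ZtoreClient {
       constructor(config) {
           this._client = ztoreSdk.createClient(config);
           this._bucketName = config.ztoreBucketname;
       }

       get(objectGetInfo, range, reqUids, callback) {
           const { key } = objectGetInfo;
           const params = { bucket: this._bucketName, key };
           if (range) {
               // range is [start, end]; translate it to an HTTP Range header.
               params.range = `bytes=${range[0]}-${range[1]}`;
           }
           // getObject() is assumed to hand back a readable stream of the data.
           return this._client.getObject(params, (err, stream) => {
               if (err) {
                   return callback(err);
               }
               return callback(null, stream);
           });
       }
   }

   module.exports = ZtoreClient;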

:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your GET operation, and
  then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
-----------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

.. note:: You should not need this section if you have followed the tutorial in
   order (that is, if you have covered the PUT operation already).

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests.

Operation of type DELETE
~~~~~~~~~~~~~~~~~~~~~~~~

DELETE routes are easy to test after PUT routes are implemented, and they are
similar to GET routes in our implementation, which is why we're covering them
third.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `delete()` function is also called
  `delete()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `delete(objectGetInfo, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends (a sketch follows
  the list below):

  //TODO: generate this from jsdoc

  * `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
    data store, and `client`, the data store name;
  * `reqUids`: the request unique ID used for logging;
  * `callback`: your function's callback (should handle errors);
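
The corresponding `delete()` sketch is short; again, `ztore-sdk` and its
`deleteObject()` call are placeholder names, not an existing API:

.. code-block:: js

   // lib/data/external/ZtoreClient.js (sketch) -- DELETE path only.
   const ztoreSdk = require('ztore-sdk'); // placeholder backend SDK

   class ZtoreClient {
       constructor(config) {
           this._client = ztoreSdk.createClient(config);
           this._bucketName = config.ztoreBucketname;
       }

       delete(objectGetInfo, reqUids, callback) {
           const { key } = objectGetInfo;
           return this._client.deleteObject({ bucket: this._bucketName, key },
               err => {
                   // Deleting a key that is already gone is usually not an error
                   // for CloudServer, so you may want to swallow "not found" here.
                   if (err && err.code !== 'NotFound') {
                       return callback(err);
                   }
                   return callback();
               });
       }
   }

   module.exports = ZtoreClient;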

:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your DELETE operation,
  and then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/delete/delete{{BackendName}}js`
-----------------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

.. note:: You should not need this section if you have followed the
   tutorial in order (that is, if you have covered the PUT operation
   already).

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests.

Operation of type HEAD
~~~~~~~~~~~~~~~~~~~~~~

HEAD routes are very similar to DELETE routes in our implementation, which is
why we're covering them fourth.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `head()` function is also called
  `head()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
  `head(objectGetInfo, reqUids, callback)`; this is worth exploring a
  bit more as these parameters are the same for all backends:

  // TODO: generate this from jsdoc

  * `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
    data store, and `client`, the data store name;
  * `reqUids`: the request unique ID used for logging;
  * `callback`: your function's callback (should handle errors);

:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your HEAD operation,
  and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
-----------------------------------------------------------------------------------

- every contribution should come with thorough functional tests, showing
  nominal context gives expected behaviour, and error cases are handled in a way
  that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
  subjected to network flakiness in the CI; however, we know there might not be
  mockups available for every client; if that is the case of your backend, you
  may test against the "real" endpoint of your data backend;

:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------

.. note:: You should not need this section if you have followed the tutorial in
   order (that is, if you have covered the PUT operation already).

- where you'll define a constant for your backend location matching your
  :file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
  may not work for you (if your backend's key format is different, for example);
  if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
  function returning adjusted `keys[]` to your tests.

Healthcheck
~~~~~~~~~~~

Healthchecks are used to make sure failure to write to a remote cloud is due to
a problem on that remote cloud, and not on Zenko's side.
This is usually done by trying to create a bucket that already exists, and
making sure you get the expected answer.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

- the function that is going to call your `healthcheck()` function is called
  `checkExternalBackend()` and it's defined in
  :file:`/lib/data/multipleBackendGateway.js`; you will need to add your own;
- your healthcheck function should get `location` as a parameter, which is an
  object comprising (a sketch follows the list below):

  * `reqUids`: the request unique ID used for logging;
  * `callback`: your function's callback (should handle errors);
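
As an illustration, a client healthcheck that follows the "create a bucket
that already exists" approach could look like the sketch below; the exact
signature, the `createBucket()` call, and the `BucketAlreadyOwnedByYou` error
code are assumptions standing in for your backend's "expected answer":

.. code-block:: js

   // lib/data/external/ZtoreClient.js (sketch) -- healthcheck only,
   // added to the ZtoreClient class from the earlier sketches.
   class ZtoreClient {
       // constructor as in the previous sketches...

       healthcheck(location, callback) {
           const ztoreResp = {};
           // Try to create a bucket we know already exists: a well-defined
           // "already exists" error means the remote backend is reachable.
           this._client.createBucket({ bucket: this._bucketName }, err => {
               if (err) {
                   if (err.code === 'BucketAlreadyOwnedByYou') {
                       ztoreResp[location] = { message: 'Congrats! You own the bucket' };
                       return callback(null, ztoreResp);
                   }
                   // Any other error is reported as a failing location.
                   ztoreResp[location] = { error: err.message };
                   return callback(null, ztoreResp);
               }
               ztoreResp[location] = {
                   message: 'The bucket did not exist; it was just created',
               };
               return callback(null, ztoreResp);
           });
       }
   }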

:file:`/lib/data/external/{{backendName}}_lib/{{backendName}}_create_bucket.js`
-------------------------------------------------------------------------------

- this is where you should write the function performing the actual bucket
  creation;

:file:`/lib/data/external/{{backendName}}_lib/utils.js`
-------------------------------------------------------

- add an object named per your backend's name to the `backendHealth` dictionary,
  with proper `response` and `time` entries;

:file:`lib/data/multipleBackendGateway.js`
------------------------------------------

- edit the `healthcheck` function to add your location's array, and call your
  healthcheck; see pseudocode below for a sample implementation, provided your
  backend name is `ztore`

.. code-block:: js
   :linenos:

   (...) //<1>

   healthcheck: (flightCheckOnStartUp, log, callback) => { //<1>
       (...) //<1>
       const ztoreArray = []; //<2>
       async.each(Object.keys(clients), (location, cb) => { //<1>
           (...) //<1>
           } else if (client.clientType === 'ztore') {
               ztoreArray.push(location); //<3>
               return cb();
           }
           (...) //<1>
           multBackendResp[location] = { code: 200, message: 'OK' }; //<1>
           return cb();
       }, () => { //<1>
           async.parallel([
               (...) //<1>
               next => checkExternalBackend( //<4>
                   clients, ztoreArray, 'ztore', flightCheckOnStartUp,
                   externalBackendHealthCheckInterval, next),
           ] (...) //<1>
           });
           (...) //<1>
       });
   }

1. Code that is already there
2. The array that will store all locations of type 'ztore'
3. Where you add locations of type 'ztore' to the array
4. Where you actually call the healthcheck function on all 'ztore' locations

Multipart upload (MPU)
~~~~~~~~~~~~~~~~~~~~~~

This is the final part to supporting a new backend! MPU is far from
the easiest subject, but you've come so far it shouldn't be a problem.

These are the files you'll need to edit:

:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------

You'll be creating four functions with template signatures (a sketch of one of
them follows the list below):

- `createMPU(Key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
  cacheControl, contentDisposition, contentEncoding, log, callback)` will
  initiate the multipart upload process; now, here, all parameters are
  metadata headers except for:

  * `Key`, the key id for the final object (collection of all parts);
  * `bucketName`, the name of the bucket to which we will do an MPU;
  * `log`, the logger;

- `uploadPart(request, streamingV4Params, stream, size, key, uploadId, partNumber, bucketName, log, callback)`
  will be called for each part; the parameters can be explained as follows:

  * `request`, the request object for putting the part;
  * `streamingV4Params`, parameters for auth V4 parameters against S3;
  * `stream`, the node.js readable stream used to put the part;
  * `size`, the size of the part;
  * `key`, the key of the object;
  * `uploadId`, multipart upload id string;
  * `partNumber`, the number of the part in this MPU (ordered);
  * `bucketName`, the name of the bucket to which we will do an MPU;
  * `log`, the logger;

- `completeMPU(jsonList, mdInfo, key, uploadId, bucketName, log, callback)` will
  end the MPU process once all parts are uploaded; parameters can be explained
  as follows:

  * `jsonList`, user-sent list of parts to include in final mpu object;
  * `mdInfo`, object containing 3 keys: storedParts, mpuOverviewKey, and
    splitter;
  * `key`, the key of the object;
  * `uploadId`, multipart upload id string;
  * `bucketName`, name of bucket;
  * `log`, logger instance;

- `abortMPU(key, uploadId, bucketName, log, callback)` will handle errors, and
  make sure that all parts that may have been uploaded will be deleted if the
  MPU ultimately fails; the parameters are:

  * `key`, the key of the object;
  * `uploadId`, multipart upload id string;
  * `bucketName`, name of bucket;
  * `log`, logger instance.
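
To make the shape of these concrete, here is a sketch of `uploadPart()` only,
for the hypothetical `ztore` backend; the SDK call, its parameters, and the
returned `dataStoreETag` field are assumptions, and the real functions will
also need the metadata handling described above:

.. code-block:: js

   // lib/data/external/ZtoreClient.js (sketch) -- uploadPart() only,
   // added to the ZtoreClient class from the earlier sketches.
   class ZtoreClient {
       // constructor and the other three MPU functions omitted for brevity...

       uploadPart(request, streamingV4Params, stream, size, key, uploadId,
           partNumber, bucketName, log, callback) {
           // Hypothetical SDK call: send one part of the multipart upload.
           return this._client.uploadPart({
               bucket: this._bucketName,
               key,
               uploadId,
               partNumber,
               body: stream,
               contentLength: size,
           }, (err, data) => {
               if (err) {
                   log.error('error uploading part to ztore', { error: err });
                   return callback(err);
               }
               // CloudServer needs at least the part's ETag back so the part
               // can be listed and sent in the final completeMPU call.
               return callback(null, { dataStoreETag: data.etag });
           });
       }
   }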

:file:`/lib/api/objectPutPart.js`
---------------------------------

- you'll need to add your backend type in appropriate sections (simply look for
  other backends already implemented).

:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------

- this is where you should put all utility functions for your MPU operations,
  and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;

:file:`lib/data/multipleBackendGateway.js`
------------------------------------------

- edit the `createMPU` function to add your location type, and call your
  `createMPU()`; see pseudocode below for a sample implementation, provided your
  backend name is `ztore`

.. code-block:: javascript
   :linenos:

   (...) //<1>
   createMPU: (key, metaHeaders, bucketName, websiteRedirectHeader, //<1>
       location, contentType, cacheControl, contentDisposition,
       contentEncoding, log, cb) => {
       const client = clients[location]; //<1>
       if (client.clientType === 'aws_s3') { //<1>
           return client.createMPU(key, metaHeaders, bucketName,
               websiteRedirectHeader, contentType, cacheControl,
               contentDisposition, contentEncoding, log, cb);
       } else if (client.clientType === 'ztore') { //<2>
           return client.createMPU(key, metaHeaders, bucketName,
               websiteRedirectHeader, contentType, cacheControl,
               contentDisposition, contentEncoding, log, cb);
       }
       return cb();
   };
   (...) //<1>

1. Code that is already there
2. Where the `createMPU()` of your client is actually called

Add functional tests
~~~~~~~~~~~~~~~~~~~~

* :file:`tests/functional/aws-node-sdk/test/multipleBackend/initMPU/{{BackendName}}InitMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/listParts/{{BackendName}}ListPart.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/{{BackendName}}AbortMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/{{BackendName}}CompleteMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/{{BackendName}}UploadPart.js`

Adding support in Orbit, Zenko's UI for simplified Multi Cloud Management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This can only be done by our core developers' team. Once your backend
integration is merged, you may open a feature request on the
`Zenko repository`_, and we will
get back to you after we evaluate feasibility and maintainability.

.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new

@ -1,43 +0,0 @@
======================
S3-Compatible Backends
======================

Adding Support in CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is the easiest case for backend support integration: there is nothing to do
but configuration! Follow the steps described in our
:ref:`use-public-cloud` and make sure you:

- set ``details.awsEndpoint`` to your storage provider endpoint;

- use ``details.credentials`` and *not* ``details.credentialsProfile`` to set your
  credentials for that S3-compatible backend.

For example, if you're using a Wasabi bucket as a backend, then your region
definition for that backend will look something like:

::

   "wasabi-bucket-zenkobucket": {
       "type": "aws_s3",
       "legacyAwsBehavior": true,
       "details": {
           "awsEndpoint": "s3.wasabisys.com",
           "bucketName": "zenkobucket",
           "bucketMatch": true,
           "credentials": {
               "accessKey": "\\{YOUR_WASABI_ACCESS_KEY}",
               "secretKey": "\\{YOUR_WASABI_SECRET_KEY}"
           }
       }
   },

Adding Support in Zenko Orbit
#############################

This can only be done by our core developers' team. If that's what you're
after, open a feature request on the `Zenko repository`_, and we will
get back to you after we evaluate feasibility and maintainability.

.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new
Binary file not shown. (before: 23 KiB)

@ -1,18 +0,0 @@
Scality Zenko CloudServer
=========================

.. _user-docs:

.. toctree::
   :maxdepth: 2
   :caption: Documentation
   :glob:

   CONTRIBUTING
   GETTING_STARTED
   USING_PUBLIC_CLOUDS
   CLIENTS
   DOCKER
   INTEGRATIONS
   ARCHITECTURE
   developers/*

@ -1,5 +0,0 @@
---
# http://www.mkdocs.org/user-guide/configuration/
# https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes

site_name: Scality Zenko CloudServer documentation
@ -0,0 +1,91 @@
Zenko Cloudserver for Developers
=================================
:Revision: v1.0
:Date: 2018-03-20
:Email: <zenko@scality.com>

[.lead]
This set of documents aims at bootstrapping developers with Zenko's Cloudserver
module, so they can then go on and contribute features.
In order to achieve this, we're going to cover a number of subjects:

- <<cloning-and-building,cloning, installing, and building your own image>>;
- <<support-new-public-cloud, adding support for a new public cloud backend>>;
- <<telling-story-usecase, telling the community about your story or use case>>.

== [[cloning-and-building]]
== Cloning, installing, and building your own image

To clone Zenko's Cloudserver, simply run:

 ~# git clone https://github.com/scality/S3 cloudserver
 ~# cd cloudserver

To install all dependencies (necessary to run), do:

 ~/cloudserver# npm install

TIP: Some optional dependencies may fail, resulting in you seeing `NPM WARN`
messages; these can safely be ignored.

// Add link to user doc
To run the service locally, use:

 ~/cloudserver# npm start

TIP: Refer to the User documentation for all available options

// Add link to Docker doc
To build your own Docker image, run:

 ~/cloudserver# docker build . -t {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}

To then push your Docker image to your own hub, run:

 ~/cloudserver# docker push {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}

NOTE: To perform this last operation, you will need to be authenticated with
DockerHub

== [[support-new-public-cloud]]
== Add support for a new Public Cloud backend

.Backend Support
[align="center",halign="center",valign="center",options="header"]
|=======================================================================
|Backend type |Currently supported |Active WIP |Community suggestion
|Private disk/fs |x | |
|AWS S3 |x | |
|Microsoft Azure |x | |
|Backblaze B2 | |x |
|Google Cloud | |x |
|Openstack Swift | | |x
|=======================================================================

IMPORTANT: Should you want to request support for a new backend, please do so by
opening a Github issue, and filling out the "Feature Request" section
of our template. Thanks!

We always encourage our community to offer new extensions to Zenko, and new
backend support is paramount to meeting more community needs.
If that is something you want to contribute (or just do on your own version of
the cloudserver image), go read our link:NEW_BACKEND.adoc[step-by-step guide] on
where to start to add support for a new backend.
//TODO: add link to contributing guidelines
If you wish to make this a contribution, please make sure you follow our
Contributing Guidelines.

If you need help with anything, please search our https://forum.scality.com[Forum]
for more information. If you can't find what you need, open a thread, and our
community members and core team will be right with you!

== [[telling-story-usecase]]
== Telling the community about your story or use case

The best part of being open source is learning from such a diverse crowd. At
Scality, we're always curious to learn about what you do with Zenko and Zenko
Cloudserver.
If you wish to tell us your story, if you want us to advertise your extension,
or if you want to publish a tutorial on how to replicate your setup, please
reach out either on https://forum.scality.com[the Zenko Forum], or send us an
mailto:zenko@scality.com[email].
@ -0,0 +1,54 @@
= Adding a new backend

One of Zenko's Cloudserver commitments is to simplify multicloud storage by
giving one API (the S3 API) to access all clouds. With that in mind, supporting
more and more backends is one of Zenko's Community priorities. And you, as a
developer, are welcome to join that trend!

If you're planning to add a new backend for your own usage, go ahead and read
the doc. If you have any questions during the development process, search our
https://forum.scality.com[forum] and, if there is no answer to your question
already there, open a new thread.

//TODO: Add link to contributing Guidelines
If you're planning to contribute your backend support to our official
repository, please follow these steps:
- familiarize yourself with our Contributing Guidelines;
- open a Github issue and fill out the Feature Request form, and specify you would
like to contribute it yourself;
- wait for our core team to get back to you with an answer on whether we are
interested in taking that contribution in (and hence committing to maintaining
it over time);
- once approved, fork this https://www.github.com/scality/S3[repository], and
get started!
- reach out to us on the https://forum.scality.com[forum] with any question you
may have during the development process (after reading this document, of
course!);
- when you think it's ready, let us know so that we create a feature branch
against which we'll compare and review your code;
- open a pull request with your changes against that dedicated feature branch;
//TODO: Add Hall of Fame section in the community report
- once that pull request gets merged, you're done (and you'll join our Hall of
Fame ;) );
- finally, we'll let you know when we merge this into master.

TIP: While we do take care of the final rebase (when we merge your feature
branch on master), we do ask that you keep up to date with our master until
then; find out more https://help.github.com/articles/syncing-a-fork/[here].

IMPORTANT: If we do not approve your feature request, you may of course still
work on supporting a new backend: all our "no" means is that we do
not have the resources, as part of our core development team, to
maintain this feature for the moment.
_If your code is clean and your extension works nicely, we will be_
_glad to advertise it as part of the Zenko Galaxy_

//TODO: Get approval for Zenko Galaxy as the name of our hub - sound appropriate with Orbit ;)

There are two main types of backend you could want Zenko to support:

== link:S3_COMPATIBLE_BACKENDS.adoc[S3 compatible data backends]

== link:NON_S3_COMPATIBLE_BACKENDS.adoc[Data backends using another protocol
than the S3 protocol]
@ -0,0 +1,469 @@
= Adding support for data backends not supporting the S3 API

These backends are what makes Zenko so valuable: abstracting the complexity of
multiple APIs to let users work on a single common namespace across multiple
clouds.

This document aims at introducing you to the right files in Cloudserver (the
Zenko stack's subcomponent in charge of API translation, among other things) to
add support for your own backend of choice.

As usual, should you have any question, please reach out on the
https://forum.zenko.io[Zenko forum].

== General configuration

There are a number of constants and environment variables to define to support
new data backends; here is a list and where to find them:

=== `/constants.js`

- give your backend type a name, as part of the `externalBackends` object;
- specify whether versioning is implemented, as part of the
`versioningNotImplemented` object;

=== `/lib/Config.js`

- this is where you should put common utility functions, like the ones to parse
the location object from `locationConfig.json`;
- make sure you define environment variables (like `GCP_SERVICE_EMAIL`) as we'll
use those internally for the CI to test against the real remote backend;

=== `/lib/data/external/{{backendName}}Client.js`

- this file is where you'll instantiate your backend client; this should be a
class with a constructor taking the config object built in `/lib/Config.js` as
parameter;
- over time, you may need some utility functions which we've defined in the
folder `/api/apiUtils`, and in the file `/lib/data/external/utils`;

=== `/lib/data/external/utils.js`

- make sure to add options for `sourceLocationConstraintType` to be equal to
the name you gave your backend in `/constants.js`;

=== `/lib/data/external/{{BackendName}}_lib/`

- this folder is where you'll put the functions needed for supporting your
backend; keep your files as atomic as possible;

=== [[location-config-test-json]]
=== `/tests/locationConfig/locationConfigTests.json`

- this file is where you'll create location profiles to be used by your
functional tests;

=== `/lib/data/locationConstraintParser.js`

- this is where you'll instantiate your client if the operation the end user
sent effectively writes to your backend; everything happens inside the
function `parseLC()`; you should add a condition that executes if
`locationObj.type` is the name of your backend (that you defined in
`constants.js`), and instantiates a client of yours. See pseudocode below,
assuming location type name is `ztore`:

[source,js]
----
(...) //<1>
const ZtoreClient = require('./external/ZtoreClient');
const { config } = require('../Config'); //<1>

function parseLC(){ //<1>
    (...) //<1>
    Object.keys(config.locationConstraints).forEach(location => { //<1>
        const locationObj = config.locationConstraints[location]; //<1>
        (...) //<1>
        if (locationObj.type === 'ztore') {
            const ztoreEndpoint = config.getZtoreEndpoint(location);
            const ztoreCredentials = config.getZtoreCredentials(location); //<2>
            clients[location] = new ZtoreClient({
                ztoreEndpoint,
                ztoreCredentials,
                ztoreBucketname: locationObj.details.ztoreBucketName,
                bucketMatch: locationObj.details.BucketMatch,
                dataStoreName: location,
            }); //<3>
            clients[location].clientType = 'ztore';
        }
        (...) //<1>
    });
}
----
<1> Code that is already there
<2> You may need more utility functions depending on your backend specs
<3> You may have more fields required in your constructor object depending on
your backend specs

== Operation of type PUT

PUT routes are usually where people get started, as it's the easiest to check!
Simply go on your remote backend console and you'll be able to see whether your
object actually went up in the cloud...

These are the files you'll need to edit:

=== `/lib/data/external/{{BackendName}}Client.js`

- the function that is going to call your `put()` function is also called
`put()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`put(stream, size, keyContext, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
-- `stream`: the stream of data you want to put in the cloud; if you're
unfamiliar with node.js streams, we suggest you start training, as we use them
a lot!
-- `size`: the size of the object you're trying to put;
-- `keyContext`: an object with metadata about the operation; common entries are
`namespace`, `bucketName`, `owner`, `cipherBundle`, and `tagging`; if these
are not sufficient for your integration, contact us to get architecture
validation before adding new entries;
-- `reqUids`: the request unique ID used for logging;
-- `callback`: your function's callback (should handle errors);

=== `/lib/data/external/{{backendName}}_lib/`

- this is where you should put all utility functions for your PUT operation, and
then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
your code clean;

=== `tests/functional/aws-node-sdk/test/multipleBackend/put/put{{BackendName}}js`

- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;

=== `tests/functional/aws-node-sdk/test/multipleBackend/utils.js`

- where you'll define a constant for your backend location matching your
`/tests/locationConfig/locationConfigTests.json`
<<location-config-test-json,test location name>>;
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
|
||||||
|
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
|
||||||
|
function returning ajusted `keys[]` to your tests.
|
||||||
|
|
||||||
|
== Operation of type GET
|
||||||
|
|
||||||
|
GET routes are easy to test after PUT routes are implemented, hence why we're
|
||||||
|
covering them second.
|
||||||
|
|
||||||
|
These are the files you'll need to edit:
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{BackendName}}Client.js`
|
||||||
|
|
||||||
|
- the function that is going to call your `get()` function is also called
|
||||||
|
`get()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
|
||||||
|
- define a function with signature like
|
||||||
|
`get(objectGetInfo, range, reqUids, callback)`; this is worth exploring a
|
||||||
|
bit more as these parameters are the same for all backends:
|
||||||
|
//TODO: generate this from jsdoc
|
||||||
|
-- `objectGetInfo`: a dictionnary with two entries: `key`, the object key in the
|
||||||
|
data store, and `client`, the data store name;
|
||||||
|
-- `range`: the range of bytes you will get, for "get-by-range" operations (we
|
||||||
|
recommend you do simple GETs first, and then look at this);
|
||||||
|
-- `reqUids`: the request unique ID used for logging;
|
||||||
|
-- `callback`: your function's callback (should handle errors);
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{backendName}}_lib/`
|
||||||
|
|
||||||
|
- this is where you should put all utility functions for your GET operation, and
|
||||||
|
then import then in `/lib/data/external/{{BackendName}}Client.js`, to keep
|
||||||
|
your code clean;
|
||||||
|
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
|
||||||
|
|
||||||
|
- every contribution should come with thorough functional tests, showing
|
||||||
|
nominal context gives expected behaviour, and error cases are handled in a way
|
||||||
|
that is standard with the backend (including error messages and code);
|
||||||
|
- the ideal setup is if you simulate your backend locally, so as not to be
|
||||||
|
subjected to network flakiness in the CI; however, we know there might not be
|
||||||
|
mockups available for every client; if that is the case of your backend, you
|
||||||
|
may test against the "real" endpoint of your data backend;
|
||||||
|
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
|
||||||
|
|
||||||
|
NOTE: You should need this section if you have followed the tutorial in order
|
||||||
|
(that is, if you have covered the PUT operation already)
|
||||||
|
|
||||||
|
- where you'll define a constant for your backend location matching your
|
||||||
|
`/tests/locationConfig/locationConfigTests.json`
|
||||||
|
<<location-config-test-json,test location name>>;
|
||||||
|
- depending on your backend, the sample `keys[]` and associated made up objects
|
||||||
|
may not work for you (if your backend's key format is different, for example);
|
||||||
|
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
|
||||||
|
|
||||||
|
== Operation of type DELETE
|
||||||
|
|
||||||
|
DELETE routes are easy to test after PUT routes are implemented, and they are
|
||||||
|
similar to GET routes in our implementation, hence why we're covering them
|
||||||
|
third.
|
||||||
|
|
||||||
|
These are the files you'll need to edit:
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{BackendName}}Client.js`
|
||||||
|
|
||||||
|
- the function that is going to call your `delete()` function is also called
|
||||||
|
`delete()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
|
||||||
|
- define a function with signature like
|
||||||
|
`delete(objectGetInfo, reqUids, callback)`; this is worth exploring a
|
||||||
|
bit more as these parameters are the same for all backends:
|
||||||
|
//TODO: generate this from jsdoc
|
||||||
|
-- `objectGetInfo`: a dictionnary with two entries: `key`, the object key in the
|
||||||
|
data store, and `client`, the data store name;
|
||||||
|
-- `reqUids`: the request unique ID used for logging;
|
||||||
|
-- `callback`: your function's callback (should handle errors);
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{backendName}}_lib/`
|
||||||
|
|
||||||
|
- this is where you should put all utility functions for your DELETE operation,
|
||||||
|
and then import then in `/lib/data/external/{{BackendName}}Client.js`, to keep
|
||||||
|
your code clean;
|
||||||
|
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
|
||||||
|
|
||||||
|
- every contribution should come with thorough functional tests, showing
|
||||||
|
nominal context gives expected behaviour, and error cases are handled in a way
|
||||||
|
that is standard with the backend (including error messages and code);
|
||||||
|
- the ideal setup is if you simulate your backend locally, so as not to be
|
||||||
|
subjected to network flakiness in the CI; however, we know there might not be
|
||||||
|
mockups available for every client; if that is the case of your backend, you
|
||||||
|
may test against the "real" endpoint of your data backend;
|
||||||
|
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
|
||||||
|
|
||||||
|
NOTE: You should need this section if you have followed the tutorial in order
|
||||||
|
(that is, if you have covered the PUT operation already)
|
||||||
|
|
||||||
|
- where you'll define a constant for your backend location matching your
|
||||||
|
`/tests/locationConfig/locationConfigTests.json`
|
||||||
|
<<location-config-test-json,test location name>>;
|
||||||
|
- depending on your backend, the sample `keys[]` and associated made up objects
|
||||||
|
may not work for you (if your backend's key format is different, for example);
|
||||||
|
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
|
||||||
|
|
||||||
|
== Operation of type HEAD
|
||||||
|
|
||||||
|
HEAD routes are very similar to DELETE routes in our implementation, hence why
|
||||||
|
we're covering them fourth.
|
||||||
|
|
||||||
|
These are the files you'll need to edit:
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{BackendName}}Client.js`
|
||||||
|
|
||||||
|
- the function that is going to call your `head()` function is also called
|
||||||
|
`head()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
|
||||||
|
- define a function with signature like
|
||||||
|
`head(objectGetInfo, reqUids, callback)`; this is worth exploring a
|
||||||
|
bit more as these parameters are the same for all backends:
|
||||||
|
//TODO: generate this from jsdoc
|
||||||
|
-- `objectGetInfo`: a dictionnary with two entries: `key`, the object key in the
|
||||||
|
data store, and `client`, the data store name;
|
||||||
|
-- `reqUids`: the request unique ID used for logging;
|
||||||
|
-- `callback`: your function's callback (should handle errors);
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{backendName}}_lib/`
|
||||||
|
|
||||||
|
- this is where you should put all utility functions for your HEAD operation,
|
||||||
|
and then import then in `/lib/data/external/{{BackendName}}Client.js`, to keep
|
||||||
|
your code clean;
|
||||||
|
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
|
||||||
|
|
||||||
|
- every contribution should come with thorough functional tests, showing
|
||||||
|
nominal context gives expected behaviour, and error cases are handled in a way
|
||||||
|
that is standard with the backend (including error messages and code);
|
||||||
|
- the ideal setup is if you simulate your backend locally, so as not to be
|
||||||
|
subjected to network flakiness in the CI; however, we know there might not be
|
||||||
|
mockups available for every client; if that is the case of your backend, you
|
||||||
|
may test against the "real" endpoint of your data backend;
|
||||||
|
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
|
||||||
|
|
||||||
|
NOTE: You should need this section if you have followed the tutorial in order
|
||||||
|
(that is, if you have covered the PUT operation already)
|
||||||
|
|
||||||
|
- where you'll define a constant for your backend location matching your
|
||||||
|
`/tests/locationConfig/locationConfigTests.json`
|
||||||
|
<<location-config-test-json,test location name>>;
|
||||||
|
- depending on your backend, the sample `keys[]` and associated made up objects
|
||||||
|
may not work for you (if your backend's key format is different, for example);
|
||||||
|
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
|
||||||
|
|
||||||
|
== Healthcheck
|
||||||
|
|
||||||
|
Healtchecks are used to make sure failure to write to a remote cloud is due to
|
||||||
|
a problem on that remote cloud, an not on Zenko's side.
|
||||||
|
This is usually done by trying to create a bucket that already exists, and
|
||||||
|
making sure you get the expected answer.
|
||||||
|
|
||||||
|
These are the files you'll need to edit:
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{BackendName}}Client.js`
|
||||||
|
|
||||||
|
- the function that is going to call your `healthcheck()` function is called
|
||||||
|
`checkExternalBackend()` and it's defined in
|
||||||
|
`/lib/data/multipleBackendGateway.js`; you will need to add your own;
|
||||||
|
- your healtcheck function should get `location` as a parameter, which is an
|
||||||
|
object comprising:`
|
||||||
|
-- `reqUids`: the request unique ID used for logging;
|
||||||
|
-- `callback`: your function's callback (should handle errors);
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{backendName}}_lib/{{backendName}}_create_bucket.js`
|
||||||
|
|
||||||
|
- this is where you should write the function performing the actual bucket
|
||||||
|
creation;
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{backendName}}_lib/utils.js`
|
||||||
|
|
||||||
|
- add an object named per your backend's name to the `backendHealth` dictionary,
|
||||||
|
with proper `response` and `time` entries;
|
||||||
|
|
||||||
|
=== `lib/data/multipleBackendGateway.js`
|
||||||
|
|
||||||
|
- edit the `healthcheck` function to add your location's array, and call your
|
||||||
|
healthcheck; see pseudocode below for a sample implementation, provided your
|
||||||
|
backend name is `ztore`
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
----
|
||||||
|
(...) //<1>
|
||||||
|
|
||||||
|
healthcheck: (flightCheckOnStartUp, log, callback) => { //<1>
|
||||||
|
(...) //<1>
|
||||||
|
const ztoreArray = []; //<2>
|
||||||
|
async.each(Object.keys(clients), (location, cb) => { //<1>
|
||||||
|
(...) //<1>
|
||||||
|
} else if (client.clientType === 'ztore' {
|
||||||
|
ztoreArray.push(location); //<3>
|
||||||
|
return cb();
|
||||||
|
}
|
||||||
|
(...) //<1>
|
||||||
|
multBackendResp[location] = { code: 200, message: 'OK' }; //<1>
|
||||||
|
return cb();
|
||||||
|
}, () => { //<1>
|
||||||
|
async.parallel([
|
||||||
|
(...) //<1>
|
||||||
|
next => checkExternalBackend( //<4>
|
||||||
|
clients, ztoreArray, 'ztore', flightCheckOnStartUp,
|
||||||
|
externalBackendHealthCheckInterval, next),
|
||||||
|
] (...) //<1>
|
||||||
|
});
|
||||||
|
(...) //<1>
|
||||||
|
});
|
||||||
|
}
|
||||||
|
----
|
||||||
|
<1> Code that is already there
|
||||||
|
<2> The array that will store all locations of type 'ztore'
|
||||||
|
<3> Where you add locations of type 'ztore' to the array
|
||||||
|
<4> Where you actually call the healthcheck function on all 'ztore' locations
|
||||||
|
|
||||||
|
== Multipart upload (MPU)
|
||||||
|
|
||||||
|
Congratulations! This is the final part to supporting a new backend! You're
|
||||||
|
nearly there!
|
||||||
|
Now, let's be honest: MPU is far from the easiest subject, but you've come so
|
||||||
|
far it shouldn't be a problem.
|
||||||
|
|
||||||
|
These are the files you'll need to edit:
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{BackendName}}Client.js`
|
||||||
|
|
||||||
|
You'll be creating four functions with template signatures:
|
||||||
|
|
||||||
|
- `createMPU(Key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
|
||||||
|
cacheControl, contentDisposition, contentEncoding, log, callback)` will
|
||||||
|
initiate the multi part upload process; now, here, all parameters are
|
||||||
|
metadata headers except for:
|
||||||
|
-- `Key`, the key id for the final object (collection of all parts);
|
||||||
|
-- `bucketName`, the name of the bucket to which we will do an MPU;
|
||||||
|
-- `log`, the logger;
|
||||||
|
- `uploadPart(request, streamingV4Params, stream, size, key, uploadId,
|
||||||
|
partNumber, bucketName, log, callback)` will be called for each part; the
|
||||||
|
parameters can be explicited as follow:
|
||||||
|
-- `request`, the request object for putting the part;
|
||||||
|
-- `streamingV4Params`, parameters for auth V4 parameters against S3;
|
||||||
|
-- `stream`, the node.js readable stream used to put the part;
|
||||||
|
-- `size`, the size of the part;
|
||||||
|
-- `key`, the key of the object;
|
||||||
|
-- `uploadId`, multipart upload id string;
|
||||||
|
-- `partNumber`, the number of the part in this MPU (ordered);
|
||||||
|
-- `bucketName`, the name of the bucket to which we will do an MPU;
|
||||||
|
-- `log`, the logger;
|
||||||
|
- `completeMPU(jsonList, mdInfo, key, uploadId, bucketName, log, callback)` will
|
||||||
|
end the MPU process once all parts are uploaded; parameters can be explicited
|
||||||
|
as follows:
|
||||||
|
-- `jsonList`, user-sent list of parts to include in final mpu object;
|
||||||
|
-- `mdInfo`, object containing 3 keys: storedParts, mpuOverviewKey, and
|
||||||
|
splitter;
|
||||||
|
-- `key`, the key of the object;
|
||||||
|
-- `uploadId`, multipart upload id string;
|
||||||
|
-- `bucketName`, name of bucket;
|
||||||
|
-- `log`, logger instance:
|
||||||
|
- `abortMPU(key, uploadId, bucketName, log, callback)` will handle errors, and
|
||||||
|
make sure that all parts that may have been uploaded will be deleted if the
|
||||||
|
MPU ultimately fails; the parameters are:
|
||||||
|
-- `key`, the key of the object;
|
||||||
|
-- `uploadId`, multipart upload id string;
|
||||||
|
-- `bucketName`, name of bucket;
|
||||||
|
-- `log`, logger instance.
|
||||||
|
|
||||||
|
=== `/lib/api/objectPutPart.js`
|
||||||
|
|
||||||
|
- you'll need to add your backend type in appropriate sections (simply look for
|
||||||
|
other backends already implemented).
|
||||||
|
|
||||||
|
=== `/lib/data/external/{{backendName}}_lib/`
|
||||||
|
|
||||||
|
- this is where you should put all utility functions for your MPU operations,
|
||||||
|
and then import then in `/lib/data/external/{{BackendName}}Client.js`, to keep
|
||||||
|
your code clean;
|
||||||
|
|
||||||
|
=== `lib/data/multipleBackendGateway.js`
|
||||||
|
|
||||||
|
- edit the `createLOY` function to add your location type, and call your
|
||||||
|
`©reateMPU()`; see pseudocode below for a sample implementation, provided your
|
||||||
|
backend name is `ztore`
|
||||||
|
|
||||||
|
[source,js]
|
||||||
|
----
|
||||||
|
(...) //<1>
|
||||||
|
createMPU:(key, metaHeaders, bucketName, websiteRedirectHeader, //<1>
|
||||||
|
location, contentType, cacheControl, contentDisposition,
|
||||||
|
contentEncoding, log, cb) => {
|
||||||
|
const client = clients[location]; //<1>
|
||||||
|
if (client.clientType === 'aws_s3') { //<1>
|
||||||
|
return client.createMPU(key, metaHeaders, bucketName,
|
||||||
|
websiteRedirectHeader, contentType, cacheControl,
|
||||||
|
contentDisposition, contentEncoding, log, cb);
|
||||||
|
} else if (client.clientType === 'ztore') { //<2>
|
||||||
|
return client.createMPU(key, metaHeaders, bucketName,
|
||||||
|
websiteRedirectHeader, contentType, cacheControl,
|
||||||
|
contentDisposition, contentEncoding, log, cb);
|
||||||
|
}
|
||||||
|
return cb();
|
||||||
|
};
|
||||||
|
(...) //<1>
|
||||||
|
----
|
||||||
|
<1> Code that is already there
|
||||||
|
<2> Where the `createMPU()` of your client is actually called
|
||||||
|
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/initMPU/{{BackendName}}InitMPU.js`
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/listParts/{{BackendName}}ListPart.js`
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/{{BackendName}}AbortMPU.js`
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/{{BackendName}}CompleteMPU.js`
|
||||||
|
=== `tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/{{BackendName}}UploadPart.js`
|
||||||
|
|
||||||
|
- granted, that is a lot of functional tests... but it's the last series as
|
||||||
|
well! Hurray!
|
||||||
|
|
||||||
|
== Adding support in Orbit, Zenko's UI for simplified Multi Cloud Management
|
||||||
|
|
||||||
|
This can only be done by our core developpers' team. Once your backend
|
||||||
|
integration is merged, you may open a feature request on the
|
||||||
|
https://www.github.com/scality/Zenko/issues/new[Zenko repository], and we will
|
||||||
|
get back to you after we evaluate feasability and maintainability.
|
|
@ -0,0 +1,43 @@
|
||||||
|
= S3 compatible backends
|
||||||
|
|
||||||
|
IMPORTANT: S3 compatibility claims are a bit like idols: a lot think they are,
|
||||||
|
but very few effectively meet all criteria ;) If the following steps
|
||||||
|
don't work for you, it's likely to be because the S3 compatibility
|
||||||
|
of your target backend is imperfect.
|
||||||
|
|
||||||
|
== Adding support in Zenko's Cloudserver
|
||||||
|
|
||||||
|
This is the easiest case for backend support integration: there is nothing to do
|
||||||
|
but configuration!
|
||||||
|
Follow the steps described in our link:../USING_PUBLIC_CLOUDS.rst[user guide for
|
||||||
|
using AWS S3 as a data backend], and make sure you:
|
||||||
|
|
||||||
|
- set `details.awsEndpoint` to your storage provider endpoint;
|
||||||
|
- use `details.credentials` and *not* `details.credentialsProfile` to set your
|
||||||
|
credentials for that S3-compatible backend.
|
||||||
|
|
||||||
|
For example, if you're using a Wasabi bucket as a backend, then your region
|
||||||
|
definition for that backend will look something like:
|
||||||
|
|
||||||
|
```json
|
||||||
|
"wasabi-bucket-zenkobucket": {
|
||||||
|
"type": "aws_s3",
|
||||||
|
"legacyAwsBehavior": true,
|
||||||
|
"details": {
|
||||||
|
"awsEndpoint": "s3.wasabisys.com",
|
||||||
|
"bucketName": "zenkobucket",
|
||||||
|
"bucketMatch": true,
|
||||||
|
"credentials": {
|
||||||
|
"accessKey": "\{YOUR_WASABI_ACCESS_KEY}",
|
||||||
|
"secretKey": "\{YOUR_WASABI_SECRET_KEY}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
```
|
||||||
|
|
||||||
|
== Adding support in Zenko Orbit
|
||||||
|
|
||||||
|
This can only be done by our core developpers' team. If that's what you're
|
||||||
|
after, open a feature request on the
|
||||||
|
https://www.github.com/scality/Zenko/issues/new[Zenko repository], and we will
|
||||||
|
get back to you after we evaluate feasability and maintainability.
|
|
@ -0,0 +1,4 @@
|
||||||
|
:attachmentsdir: {moduledir}/assets/attachments
|
||||||
|
:examplesdir: {moduledir}/examples
|
||||||
|
:imagesdir: {moduledir}/assets/images
|
||||||
|
:partialsdir: {moduledir}/pages/_partials
|
|
@ -0,0 +1,4 @@
|
||||||
|
* xref:GETTING_STARTED.adoc[Getting Started]
|
||||||
|
* xref:NEW_BACKEND.adoc[Adding a new backend]
|
||||||
|
** xref:S3_COMPATIBLE_BACKENDS.adoc[Backends supporting the S3 protocol]
|
||||||
|
** xref:NON_S3_COMPATIBLE_BACKENDS.adoc[Backends supporting other protocols]
|
|
@ -0,0 +1,4 @@
|
||||||
|
:attachmentsdir: {moduledir}/assets/attachments
|
||||||
|
:examplesdir: {moduledir}/examples
|
||||||
|
:imagesdir: {moduledir}/assets/images
|
||||||
|
:partialsdir: {moduledir}/pages/_partials
|
|
@ -0,0 +1,8 @@
|
||||||
|
name: cloudserver-root
|
||||||
|
title: Zenko CloudServer
|
||||||
|
version: '1.0'
|
||||||
|
start_page: ROOT:README.adoc
|
||||||
|
nav:
|
||||||
|
- modules/ROOT/nav.adoc
|
||||||
|
- modules/USERS/nav.adoc
|
||||||
|
- modules/DEVELOPERS/nav.adoc
|
|
@ -0,0 +1,3 @@
|
||||||
|
.xref:README.adoc[README]
|
||||||
|
* xref:README.adoc[README TOO]
|
||||||
|
** xref:README.adoc#docker[README DOCKER SECTION direct link]
|
|
@ -0,0 +1,172 @@
|
||||||
|
[[zenko-cloudserver]]
|
||||||
|
Zenko CloudServer
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
image:res/scality-cloudserver-logo.png[Zenko CloudServer logo]
|
||||||
|
|
||||||
|
https://circleci.com/gh/scality/S3[image:https://circleci.com/gh/scality/S3.svg?style=svg[CircleCI]]
|
||||||
|
http://ci.ironmann.io/gh/scality/S3[image:http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae[Scality
|
||||||
|
CI]]
|
||||||
|
https://hub.docker.com/r/scality/s3server/[image:https://img.shields.io/docker/pulls/scality/s3server.svg[Docker
|
||||||
|
Pulls]]
|
||||||
|
https://twitter.com/zenko[image:https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow[Docker
|
||||||
|
Pulls]]
|
||||||
|
|
||||||
|
[[overview]]
|
||||||
|
Overview
|
||||||
|
~~~~~~~~
|
||||||
|
|
||||||
|
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
|
||||||
|
object storage server that is part of https://www.zenko.io[Zenko],
|
||||||
|
Scality’s Open Source Multi-Cloud Data Controller.
|
||||||
|
|
||||||
|
CloudServer provides a single AWS S3 API interface to access multiple
|
||||||
|
backend data storage both on-premise or public in the cloud.
|
||||||
|
|
||||||
|
CloudServer is useful for Developers, either to run as part of a
|
||||||
|
continous integration test environment to emulate the AWS S3 service
|
||||||
|
locally or as an abstraction layer to develop object storage enabled
|
||||||
|
application on the go.
|
||||||
|
|
||||||
|
[[learn-more-at-www.zenko.iocloudserver]]
|
||||||
|
Learn more at
|
||||||
|
https://www.zenko.io/cloudserver/[www.zenko.io/cloudserver]
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[may-i-offer-you-some-lovely-documentation]]
|
||||||
|
http://s3-server.readthedocs.io/en/latest/[May I offer you some lovely
|
||||||
|
documentation?]
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[docker]]
|
||||||
|
Docker
|
||||||
|
~~~~~~
|
||||||
|
|
||||||
|
https://hub.docker.com/r/scality/s3server/[Run your Zenko CloudServer
|
||||||
|
with Docker]
|
||||||
|
|
||||||
|
[[contributing]]
|
||||||
|
Contributing
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
In order to contribute, please follow the
|
||||||
|
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md[Contributing
|
||||||
|
Guidelines].
|
||||||
|
|
||||||
|
[[installation]]
|
||||||
|
Installation
|
||||||
|
~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[dependencies]]
|
||||||
|
Dependencies
|
||||||
|
^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Building and running the Zenko CloudServer requires node.js 6.9.5 and
|
||||||
|
npm v3 . Up-to-date versions can be found at
|
||||||
|
https://github.com/nodesource/distributions[Nodesource].
|
||||||
|
|
||||||
|
[[clone-source-code]]
|
||||||
|
Clone source code
|
||||||
|
^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
git clone https://github.com/scality/S3.git
|
||||||
|
----
|
||||||
|
|
||||||
|
[[install-js-dependencies]]
|
||||||
|
Install js dependencies
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Go to the ./S3 folder,
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
npm install
|
||||||
|
----
|
||||||
|
|
||||||
|
If you get an error regarding installation of the diskUsage module,
|
||||||
|
please install g++.
|
||||||
|
|
||||||
|
If you get an error regarding level-down bindings, try clearing your npm
|
||||||
|
cache:
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
npm cache clear
|
||||||
|
----
|
||||||
|
|
||||||
|
[[run-it-with-a-file-backend]]
|
||||||
|
Run it with a file backend
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
npm start
|
||||||
|
----
|
||||||
|
|
||||||
|
This starts a Zenko CloudServer on port 8000. Two additional ports 9990
|
||||||
|
and 9991 are also open locally for internal transfer of metadata and
|
||||||
|
data, respectively.
|
||||||
|
|
||||||
|
The default access key is accessKey1 with a secret key of
|
||||||
|
verySecretKey1.
|
||||||
|
|
||||||
|
By default the metadata files will be saved in the localMetadata
|
||||||
|
directory and the data files will be saved in the localData directory
|
||||||
|
within the ./S3 directory on your machine. These directories have been
|
||||||
|
pre-created within the repository. If you would like to save the data or
|
||||||
|
metadata in different locations of your choice, you must specify them
|
||||||
|
with absolute paths. So, when starting the server:
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
mkdir -m 700 $(pwd)/myFavoriteDataPath
|
||||||
|
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
|
||||||
|
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
|
||||||
|
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
|
||||||
|
npm start
|
||||||
|
----
|
||||||
|
|
||||||
|
[[run-it-with-multiple-data-backends]]
|
||||||
|
Run it with multiple data backends
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
export S3DATA='multiple'
|
||||||
|
npm start
|
||||||
|
----
|
||||||
|
|
||||||
|
This starts a Zenko CloudServer on port 8000. The default access key is
|
||||||
|
accessKey1 with a secret key of verySecretKey1.
|
||||||
|
|
||||||
|
With multiple backends, you have the ability to choose where each object
|
||||||
|
will be saved by setting the following header with a locationConstraint
|
||||||
|
on a PUT request:
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
|
||||||
|
----
|
||||||
|
|
||||||
|
If no header is sent with a PUT object request, the location constraint
|
||||||
|
of the bucket will determine where the data is saved. If the bucket has
|
||||||
|
no location constraint, the endpoint of the PUT request will be used to
|
||||||
|
determine location.
|
||||||
|
|
||||||
|
See the Configuration section in our documentation
|
||||||
|
http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration[here]
|
||||||
|
to learn how to set location constraints.
|
||||||
|
|
||||||
|
[[run-it-with-an-in-memory-backend]]
|
||||||
|
Run it with an in-memory backend
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[source,shell]
|
||||||
|
----
|
||||||
|
npm run mem_backend
|
||||||
|
----
|
||||||
|
|
||||||
|
This starts a Zenko CloudServer on port 8000. The default access key is
|
||||||
|
accessKey1 with a secret key of verySecretKey1.
|
|
@ -0,0 +1,4 @@
|
||||||
|
:attachmentsdir: {moduledir}/assets/attachments
|
||||||
|
:examplesdir: {moduledir}/examples
|
||||||
|
:imagesdir: {moduledir}/assets/images
|
||||||
|
:partialsdir: {moduledir}/pages/_partials
|
Binary file not shown.
After Width: | Height: | Size: 42 KiB |
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 45 KiB |
|
@ -0,0 +1,8 @@
|
||||||
|
.Users guide
|
||||||
|
* xref:CONTRIBUTING.adoc
|
||||||
|
* xref:GETTING_STARTED.adoc
|
||||||
|
* xref:USING_PUBLIC_CLOUDS.adoc
|
||||||
|
* xref:CLIENTS.adoc
|
||||||
|
* xref:DOCKER.adoc
|
||||||
|
* xref:INTEGRATIONS.adoc
|
||||||
|
* xref:ARCHITECTURE.adoc
|
|
@ -0,0 +1,979 @@
|
||||||
|
[[architecture]]
|
||||||
|
Architecture
|
||||||
|
------------
|
||||||
|
|
||||||
|
[[versioning]]
|
||||||
|
Versioning
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
This document describes Zenko CloudServer's support for the AWS S3
|
||||||
|
Bucket Versioning feature.
|
||||||
|
|
||||||
|
[[aws-s3-bucket-versioning]]
|
||||||
|
AWS S3 Bucket Versioning
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
See AWS documentation for a description of the Bucket Versioning
|
||||||
|
feature:
|
||||||
|
|
||||||
|
* http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html[Bucket
|
||||||
|
Versioning]
|
||||||
|
* http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html[Object
|
||||||
|
Versioning]
|
||||||
|
|
||||||
|
This document assumes familiarity with the details of Bucket Versioning,
|
||||||
|
including null versions and delete markers, described in the above
|
||||||
|
links.
|
||||||
|
|
||||||
|
Implementation of Bucket Versioning in Zenko CloudServer
|
||||||
|
-----------------------------------------
|
||||||
|
|
||||||
|
[[overview-of-metadata-and-api-component-roles]]
|
||||||
|
Overview of Metadata and API Component Roles
|
||||||
|
++++++++++++++++++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
Each version of an object is stored as a separate key in metadata. The
|
||||||
|
S3 API interacts with the metadata backend to store, retrieve, and
|
||||||
|
delete version metadata.
|
||||||
|
|
||||||
|
The implementation of versioning within the metadata backend is naive.
|
||||||
|
The metadata backend does not evaluate any information about bucket or
|
||||||
|
version state (whether versioning is enabled or suspended, and whether a
|
||||||
|
version is a null version or delete marker). The S3 front-end API
|
||||||
|
manages the logic regarding versioning information, and sends
|
||||||
|
instructions to metadata to handle the basic CRUD operations for version
|
||||||
|
metadata.
|
||||||
|
|
||||||
|
The role of the S3 API can be broken down into the following:
|
||||||
|
|
||||||
|
* put and delete version data
|
||||||
|
* store extra information about a version, such as whether it is a
|
||||||
|
delete marker or null version, in the object's metadata
|
||||||
|
* send instructions to metadata backend to store, retrieve, update and
|
||||||
|
delete version metadata based on bucket versioning state and version
|
||||||
|
metadata
|
||||||
|
* encode version ID information to return in responses to requests, and
|
||||||
|
decode version IDs sent in requests
|
||||||
|
|
||||||
|
The implementation of Bucket Versioning in S3 is described in this
|
||||||
|
document in two main parts. The first section,
|
||||||
|
link:#implementation-of-bucket-versioning-in-metadata["Implementation of
|
||||||
|
Bucket Versioning in Metadata"], describes the way versions are stored
|
||||||
|
in metadata, and the metadata options for manipulating version metadata.
|
||||||
|
|
||||||
|
The second section,
|
||||||
|
link:#implementation-of-bucket-versioning-in-api["Implementation of
|
||||||
|
Bucket Versioning in API"], describes the way the metadata options are
|
||||||
|
used in the API within S3 actions to create new versions, update their
|
||||||
|
metadata, and delete them. The management of null versions and creation
|
||||||
|
of delete markers are also described in this section.
|
||||||
|
|
||||||
|
[[implementation-of-bucket-versioning-in-metadata]]
|
||||||
|
Implementation of Bucket Versioning in Metadata
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
As mentioned above, each version of an object is stored as a separate
|
||||||
|
key in metadata. We use version identifiers as the suffix for the keys
|
||||||
|
of the object versions, and a special version (the
|
||||||
|
link:#master-version["Master Version"]) to represent the latest version.
|
||||||
|
|
||||||
|
An example of what the metadata keys might look like for an object
|
||||||
|
`foo/bar` with three versions (with . representing a null character):
|
||||||
|
|
||||||
|
[width="76%",cols="100%",options="header",]
|
||||||
|
|==================================================
|
||||||
|
|key
|
||||||
|
|foo/bar
|
||||||
|
|foo/bar.098506163554375999999PARIS 0.a430a1f85c6ec
|
||||||
|
|foo/bar.098506163554373999999PARIS 0.41b510cd0fdf8
|
||||||
|
|foo/bar.098506163554373999998PARIS 0.f9b82c166f695
|
||||||
|
|==================================================
|
||||||
|
|
||||||
|
The most recent version created is represented above in the key
|
||||||
|
`foo/bar` and is the master version. This special version is described
|
||||||
|
further in the section link:#master-version["Master Version"].
|
||||||
|
|
||||||
|
[[version-id-and-metadata-key-format]]
|
||||||
|
Version ID and Metadata Key Format
|
||||||
|
++++++++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
The version ID is generated by the metadata backend, and encoded in a
|
||||||
|
hexadecimal string format by S3 before sending a response to a request.
|
||||||
|
S3 also decodes the hexadecimal string received from a request before
|
||||||
|
sending to metadata to retrieve a particular version.
|
||||||
|
|
||||||
|
The format of a `version_id` is: `ts` `rep_group_id` `seq_id` where:
|
||||||
|
|
||||||
|
* `ts`: is the combination of epoch and an increasing number
|
||||||
|
* `rep_group_id`: is the name of deployment(s) considered one unit used
|
||||||
|
for replication
|
||||||
|
* `seq_id`: is a unique value based on metadata information.
|
||||||
|
|
||||||
|
The format of a key in metadata for a version is:
|
||||||
|
|
||||||
|
`object_name separator version_id` where:
|
||||||
|
|
||||||
|
* `object_name`: is the key of the object in metadata
|
||||||
|
* `separator`: we use the `null` character (`0x00` or `\0`) as the
|
||||||
|
separator between the `object_name` and the `version_id` of a key
|
||||||
|
* `version_id`: is the version identifier; this encodes the ordering
|
||||||
|
information in the format described above as metadata orders keys
|
||||||
|
alphabetically
|
||||||
|
|
||||||
|
An example of a key in metadata:
|
||||||
|
`foo\01234567890000777PARIS 1234.123456` indicating that this specific
|
||||||
|
version of `foo` was the `000777`th entry created during the epoch
|
||||||
|
`1234567890` in the replication group `PARIS` with `1234.123456` as
|
||||||
|
`seq_id`.
|
||||||
|
|
||||||
|
[[master-version]]
|
||||||
|
Master Version
|
||||||
|
++++++++++++++
|
||||||
|
|
||||||
|
We store a copy of the latest version of an object's metadata using
|
||||||
|
`object_name` as the key; this version is called the master version. The
|
||||||
|
master version of each object facilitates the standard GET operation,
|
||||||
|
which would otherwise need to scan among the list of versions of an
|
||||||
|
object for its latest version.
|
||||||
|
|
||||||
|
The following table shows the layout of all versions of `foo` in the
|
||||||
|
first example stored in the metadata (with dot `.` representing the null
|
||||||
|
separator):
|
||||||
|
|
||||||
|
[width="30%",cols="50%,50%",options="header",]
|
||||||
|
|==========
|
||||||
|
|key |value
|
||||||
|
|foo |B
|
||||||
|
|foo.v2 |B
|
||||||
|
|foo.v1 |A
|
||||||
|
|==========
|
||||||
|
|
||||||
|
[[metadata-versioning-options]]
|
||||||
|
Metadata Versioning Options
|
||||||
|
+++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
Zenko CloudServer sends instructions to the metadata engine about
|
||||||
|
whether to create a new version or overwrite, retrieve, or delete a
|
||||||
|
specific version by sending values for special options in PUT, GET, or
|
||||||
|
DELETE calls to metadata. The metadata engine can also list versions in
|
||||||
|
the database, which is used by Zenko CloudServer to list object
|
||||||
|
versions.
|
||||||
|
|
||||||
|
These only describe the basic CRUD operations that the metadata engine
|
||||||
|
can handle. How these options are used by the S3 API to generate and
|
||||||
|
update versions is described more comprehensively in
|
||||||
|
link:#implementation-of-bucket-versioning-in-api["Implementation of
|
||||||
|
Bucket Versioning in API"].
|
||||||
|
|
||||||
|
Note: all operations (PUT and DELETE) that generate a new version of an
|
||||||
|
object will return the `version_id` of the new version to the API.
|
||||||
|
|
||||||
|
[[put]]
|
||||||
|
PUT
|
||||||
|
|
||||||
|
* no options: original PUT operation, will update the master version
|
||||||
|
* `versioning: true` create a new version of the object, then update the
|
||||||
|
master version with this version.
|
||||||
|
* `versionId: <versionId>` create or update a specific version (for
|
||||||
|
updating version's ACL or tags, or remote updates in geo-replication)
|
||||||
|
** if the version identified by `versionId` happens to be the latest
|
||||||
|
version, the master version will be updated as well
|
||||||
|
** if the master version is not as recent as the version identified by
|
||||||
|
`versionId`, as may happen with cross-region replication, the master
|
||||||
|
will be updated as well
|
||||||
|
** note that with `versionId` set to an empty string `''`, it will
|
||||||
|
overwrite the master version only (same as no options, but the master
|
||||||
|
version will have a `versionId` property set in its metadata like any
|
||||||
|
other version). The `versionId` will never be exposed to an external
|
||||||
|
user, but setting this internal-only `versionID` enables Zenko
|
||||||
|
CloudServer to find this version later if it is no longer the master.
|
||||||
|
This option of `versionId` set to `''` is used for creating null
|
||||||
|
versions once versioning has been suspended, which is discussed in
|
||||||
|
link:#null-version-management["Null Version Management"].
|
||||||
|
|
||||||
|
In general, only one option is used at a time. When `versionId` and
|
||||||
|
`versioning` are both set, only the `versionId` option will have an
|
||||||
|
effect.
|
||||||
|
|
||||||
|
[[delete]]
|
||||||
|
DELETE
|
||||||
|
|
||||||
|
* no options: original DELETE operation, will delete the master version
|
||||||
|
* `versionId: <versionId>` delete a specific version
|
||||||
|
|
||||||
|
A deletion targeting the latest version of an object has to:
|
||||||
|
|
||||||
|
* delete the specified version identified by `versionId`
|
||||||
|
* replace the master version with a version that is a placeholder for
|
||||||
|
deletion - this version contains a special keyword, 'isPHD', to indicate
|
||||||
|
the master version was deleted and needs to be updated
|
||||||
|
* initiate a repair operation to update the value of the master version:
|
||||||
|
- involves listing the versions of the object and get the latest version
|
||||||
|
to replace the placeholder delete version - if no more versions exist,
|
||||||
|
metadata deletes the master version, removing the key from metadata
|
||||||
|
|
||||||
|
Note: all of this happens in metadata before responding to the front-end
|
||||||
|
api, and only when the metadata engine is instructed by Zenko
|
||||||
|
CloudServer to delete a specific version or the master version. See
|
||||||
|
section link:#delete-markers["Delete Markers"] for a description of what
|
||||||
|
happens when a Delete Object request is sent to the S3 API.
|
||||||
|
|
||||||
|
[[get]]
|
||||||
|
GET
|
||||||
|
|
||||||
|
* no options: original GET operation, will get the master version
|
||||||
|
* `versionId: <versionId>` retrieve a specific version
|
||||||
|
|
||||||
|
The implementation of a GET operation does not change compared to the
|
||||||
|
standard version. A standard GET without versioning information would
|
||||||
|
get the master version of a key. A version-specific GET would retrieve
|
||||||
|
the specific version identified by the key for that version.
|
||||||
|
|
||||||
|
[[list]]
|
||||||
|
LIST
|
||||||
|
|
||||||
|
For a standard LIST on a bucket, metadata iterates through the keys by
|
||||||
|
using the separator (`\0`, represented by `.` in examples) as an extra
|
||||||
|
delimiter. For a listing of all versions of a bucket, there is no change
|
||||||
|
compared to the original listing function. Instead, the API component
|
||||||
|
returns all the keys in a List Objects call and filters for just the
|
||||||
|
keys of the master versions in a List Object Versions call.
|
||||||
|
|
||||||
|
For example, a standard LIST operation against the keys in a table below
|
||||||
|
would return from metadata the list of `[ foo/bar, bar, qux/quz, quz ]`.
|
||||||
|
|
||||||
|
[width="20%",cols="100%",options="header",]
|
||||||
|
|==========
|
||||||
|
|key
|
||||||
|
|foo/bar
|
||||||
|
|foo/bar.v2
|
||||||
|
|foo/bar.v1
|
||||||
|
|bar
|
||||||
|
|qux/quz
|
||||||
|
|qux/quz.v2
|
||||||
|
|qux/quz.v1
|
||||||
|
|quz
|
||||||
|
|quz.v2
|
||||||
|
|quz.v1
|
||||||
|
|==========
|
||||||
|
|
||||||
|
[[implementation-of-bucket-versioning-in-api]]
|
||||||
|
Implementation of Bucket Versioning in API
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
[[object-metadata-versioning-attributes]]
|
||||||
|
Object Metadata Versioning Attributes
|
||||||
|
+++++++++++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
To access all the information needed to properly handle all cases that
|
||||||
|
may exist in versioned operations, the API stores certain
|
||||||
|
versioning-related information in the metadata attributes of each
|
||||||
|
version's object metadata.
|
||||||
|
|
||||||
|
These are the versioning-related metadata properties:
|
||||||
|
|
||||||
|
* `isNull`: whether the version being stored is a null version.
|
||||||
|
* `nullVersionId`: the unencoded version ID of the latest null version
|
||||||
|
that existed before storing a non-null version.
|
||||||
|
* `isDeleteMarker`: whether the version being stored is a delete marker.
|
||||||
|
|
||||||
|
The metadata engine also sets one additional metadata property when
|
||||||
|
creating the version.
|
||||||
|
|
||||||
|
* `versionId`: the unencoded version ID of the version being stored.
|
||||||
|
|
||||||
|
Null versions and delete markers are described in further detail in
|
||||||
|
their own subsections.
|
||||||
|
|
||||||
|
[[creation-of-new-versions]]
|
||||||
|
Creation of New Versions
|
||||||
|
++++++++++++++++++++++++
|
||||||
|
|
||||||
|
When versioning is enabled in a bucket, APIs which normally result in
|
||||||
|
the creation of objects, such as Put Object, Complete Multipart Upload
|
||||||
|
and Copy Object, will generate new versions of objects.
|
||||||
|
|
||||||
|
Zenko CloudServer creates a new version and updates the master version
|
||||||
|
using the `versioning: true` option in PUT calls to the metadata engine.
|
||||||
|
As an example, when two consecutive Put Object requests are sent to the
|
||||||
|
Zenko CloudServer for a versioning-enabled bucket with the same key
|
||||||
|
names, there are two corresponding metadata PUT calls with the
|
||||||
|
`versioning` option set to true.
|
||||||
|
|
||||||
|
The PUT calls to metadata and resulting keys are shown below:
|
||||||
|
|
||||||
|
1. PUT foo (first put), versioning: `true`
|
||||||
|
|
||||||
|
[width="30%",cols="50%,50%",options="header",]
|
||||||
|
|==========
|
||||||
|
|key |value
|
||||||
|
|foo |A
|
||||||
|
|foo.v1 |A
|
||||||
|
|==========
|
||||||
|
|
||||||
|
1. PUT foo (second put), versioning: `true`
|
||||||
|
|
||||||
|
[width="30%",cols="50%,50%",options="header",]
|
||||||
|
|==========
|
||||||
|
|key |value
|
||||||
|
|foo |B
|
||||||
|
|foo.v2 |B
|
||||||
|
|foo.v1 |A
|
||||||
|
|==========
|
||||||
|
|
||||||
|
[[null-version-management]]
|
||||||
|
Null Version Management
|
||||||
|
|
||||||
|
In a bucket without versioning, or when versioning is suspended, putting
|
||||||
|
an object with the same name twice should result in the previous object
|
||||||
|
being overwritten. This is managed with null versions.
|
||||||
|
|
||||||
|
Only one null version should exist at any given time, and it is
|
||||||
|
identified in Zenko CloudServer requests and responses with the version
|
||||||
|
id "null".
|
||||||
|
|
||||||
|
[[case-1-putting-null-versions]]
|
||||||
|
Case 1: Putting Null Versions
|
||||||
|
|
||||||
|
With respect to metadata, since the null version is overwritten by
|
||||||
|
subsequent null versions, the null version is initially stored in the
|
||||||
|
master key alone, as opposed to being stored in the master key and a new
|
||||||
|
version. Zenko CloudServer checks if versioning is suspended or has
|
||||||
|
never been configured, and sets the `versionId` option to `''` in PUT
|
||||||
|
calls to the metadata engine when creating a new null version.
|
||||||
|
|
||||||
|
If the master version is a null version, Zenko CloudServer also sends a
|
||||||
|
DELETE call to metadata prior to the PUT, in order to clean up any
|
||||||
|
pre-existing null versions which may, in certain edge cases, have been
|
||||||
|
stored as a separate version. footnote:[Some examples of these cases
|
||||||
|
are: (1) when there is a null version that is the second-to-latest
|
||||||
|
version, and the latest version has been deleted, causing metadata to
|
||||||
|
repair the master value with the value of the null version and (2) when
|
||||||
|
putting object tag or ACL on a null version that is the master version,
|
||||||
|
as explained in link:#behavior-of-object-targeting-apis["Behavior of
|
||||||
|
Object-Targeting APIs"].]
|
||||||
|
|
||||||
|
The tables below summarize the calls to metadata and the resulting keys
|
||||||
|
if we put an object 'foo' twice, when versioning has not been enabled or
|
||||||
|
is suspended.
|
||||||
|
|
||||||
|
1. PUT foo (first put), versionId: `''`
|
||||||
|
|
||||||
|
[width="34%",cols="60%,40%",options="header",]
|
||||||
|
|=============
|
||||||
|
|key |value
|
||||||
|
|foo (null) |A
|
||||||
|
|=============
|
||||||
|
|
||||||
|
(2A) DELETE foo (clean-up delete before second put), versionId:
|
||||||
|
`<version id of master version>`
|
||||||
|
|
||||||
|
[width="34%",cols="60%,40%",options="header",]
|
||||||
|
|==========
|
||||||
|
|key |value
|
||||||
|
| |
|
||||||
|
|==========
|
||||||
|
|
||||||
|
(2B) PUT foo (second put), versionId: `''`
|
||||||
|
|
||||||
|
[width="34%",cols="60%,40%",options="header",]
|
||||||
|
|=============
|
||||||
|
|key |value
|
||||||
|
|foo (null) |B
|
||||||
|
|=============
|
||||||
|
|
||||||
|
The S3 API also sets the `isNull` attribute to `true` in the version
|
||||||
|
metadata before storing the metadata for these null versions.
|
||||||
|
|
||||||
|
[[case-2-preserving-existing-null-versions-in-versioning-enabled-bucket]]
|
||||||
|
Case 2: Preserving Existing Null Versions in Versioning-Enabled Bucket
|
||||||
|
|
||||||
|
Null versions are preserved when new non-null versions are created after
|
||||||
|
versioning has been enabled or re-enabled.
|
||||||
|
|
||||||
|
If the master version is the null version, the S3 API preserves the
|
||||||
|
current null version by storing it as a new key `(3A)` in a separate PUT
|
||||||
|
call to metadata, prior to overwriting the master version `(3B)`. This
|
||||||
|
implies the null version may not necessarily be the latest or master
|
||||||
|
version.
|
||||||
|
|
||||||
|
To determine whether the master version is a null version, the S3 API
|
||||||
|
checks if the master version's `isNull` property is set to `true`, or if
|
||||||
|
the `versionId` attribute of the master version is undefined (indicating
|
||||||
|
it is a null version that was put before bucket versioning was
|
||||||
|
configured).
|
||||||
|
|
||||||
|
Continuing the example from Case 1, if we enabled versioning and put
|
||||||
|
another object, the calls to metadata and resulting keys would resemble
|
||||||
|
the following:
|
||||||
|
|
||||||
|
(3A) PUT foo, versionId: `<versionId of master version>` if defined or
|
||||||
|
`<non-versioned object id>`
|
||||||
|
|
||||||
|
[width="38%",cols="65%,35%",options="header",]
|
||||||
|
|================
|
||||||
|
|key |value
|
||||||
|
|foo |B
|
||||||
|
|foo.v1 (null) |B
|
||||||
|
|================
|
||||||
|
|
||||||
|
(3B) PUT foo, versioning: `true`
|
||||||
|
|
||||||
|
[width="38%",cols="65%,35%",options="header",]
|
||||||
|
|================
|
||||||
|
|key |value
|
||||||
|
|foo |C
|
||||||
|
|foo.v2 |C
|
||||||
|
|foo.v1 (null) |B
|
||||||
|
|================
|
||||||
|
|
||||||
|
To prevent issues with concurrent requests, Zenko CloudServer ensures
|
||||||
|
the null version is stored with the same version ID by using `versionId`
|
||||||
|
option. Zenko CloudServer sets the `versionId` option to the master
|
||||||
|
version's `versionId` metadata attribute value during the PUT. This
|
||||||
|
creates a new version with the same version ID of the existing null
|
||||||
|
master version.
|
||||||
|
|
||||||
|
The null version's `versionId` attribute may be undefined because it was
|
||||||
|
generated before the bucket versioning was configured. In that case, a
|
||||||
|
version ID is generated using the max epoch and sequence values possible
|
||||||
|
so that the null version will be properly ordered as the last entry in a
|
||||||
|
metadata listing. This value ("non-versioned object id") is used in the
|
||||||
|
PUT call with the `versionId` option.
|
||||||
|
|
||||||
|
[[case-3-overwriting-a-null-version-that-is-not-latest-version]]
|
||||||
|
Case 3: Overwriting a Null Version That is Not Latest Version
|
||||||
|
|
||||||
|
Normally when versioning is suspended, Zenko CloudServer uses the
|
||||||
|
`versionId: ''` option in a PUT to metadata to create a null version.
|
||||||
|
This also overwrites an existing null version if it is the master
|
||||||
|
version.
|
||||||
|
|
||||||
|
However, if there is a null version that is not the latest version,
|
||||||
|
Zenko CloudServer cannot rely on the `versionId: ''` option will not
|
||||||
|
overwrite the existing null version. Instead, before creating a new null
|
||||||
|
version, the Zenko CloudServer API must send a separate DELETE call to
|
||||||
|
metadata specifying the version id of the current null version for
|
||||||
|
delete.
|
||||||
|
|
||||||
|
To do this, when storing a null version (3A above) before storing a new
|
||||||
|
non-null version, Zenko CloudServer records the version's ID in the
|
||||||
|
`nullVersionId` attribute of the non-null version. For steps 3A and 3B
|
||||||
|
above, these are the values stored in the `nullVersionId` of each
|
||||||
|
version's metadata:
|
||||||
|
|
||||||
|
(3A) PUT foo, versioning: `true`
|
||||||
|
|
||||||
|
[width="72%",cols="35%,19%,46%",options="header",]
|
||||||
|
|===============================
|
||||||
|
|key |value |value.nullVersionId
|
||||||
|
|foo |B |undefined
|
||||||
|
|foo.v1 (null) |B |undefined
|
||||||
|
|===============================
|
||||||
|
|
||||||
|
(3B) PUT foo, versioning: `true`
|
||||||
|
|
||||||
|
[width="72%",cols="35%,19%,46%",options="header",]
|
||||||
|
|===============================
|
||||||
|
|key |value |value.nullVersionId
|
||||||
|
|foo |C |v1
|
||||||
|
|foo.v2 |C |v1
|
||||||
|
|foo.v1 (null) |B |undefined
|
||||||
|
|===============================
|
||||||
|
|
||||||
|
If defined, the `nullVersionId` of the master version is used with the
|
||||||
|
`versionId` option in a DELETE call to metadata if a Put Object request
|
||||||
|
is received when versioning is suspended in a bucket.
|
||||||
|
|
||||||
|
(4A) DELETE foo, versionId: `<nullVersionId of master version>` (v1)
|
||||||
|
|
||||||
|
[width="30%",cols="50%,50%",options="header",]
|
||||||
|
|==========
|
||||||
|
|key |value
|
||||||
|
|foo |C
|
||||||
|
|foo.v2 |C
|
||||||
|
|==========
|
||||||
|
|
||||||
|
Then the master version is overwritten with the new null version:
|
||||||
|
|
||||||
|
(4B) PUT foo, versionId: `''`
|
||||||
|
|
||||||
|
[width="34%",cols="60%,40%",options="header",]
|
||||||
|
|=============
|
||||||
|
|key |value
|
||||||
|
|foo (null) |D
|
||||||
|
|foo.v2 |C
|
||||||
|
|=============
|
||||||
|
|
||||||
|
The `nullVersionId` attribute is also used to retrieve the correct
|
||||||
|
version when the version ID "null" is specified in certain object-level
|
||||||
|
APIs, described further in the section link:#null-version-mapping["Null
|
||||||
|
Version Mapping"].
|
||||||
|
|
||||||
|
[[specifying-versions-in-apis-for-putting-versions]]
|
||||||
|
Specifying Versions in APIs for Putting Versions
|
||||||
|
|
||||||
|
Since Zenko CloudServer does not allow an overwrite of existing version
|
||||||
|
data, Put Object, Complete Multipart Upload and Copy Object return
|
||||||
|
`400 InvalidArgument` if a specific version ID is specified in the
|
||||||
|
request query, e.g. for a `PUT /foo?versionId=v1` request.
|
||||||
|
|
||||||
|
[[put-example]]
|
||||||
|
PUT Example
|
||||||
|
+++++++++++
|
||||||
|
|
||||||
|
When Zenko CloudServer receives a request to PUT an object:
|
||||||
|
|
||||||
|
* It checks first if versioning has been configured
|
||||||
|
* If it has not been configured, Zenko CloudServer proceeds to puts the
|
||||||
|
new data, puts the metadata by overwriting the master version, and
|
||||||
|
proceeds to delete any pre-existing data
|
||||||
|
|
||||||
|
If versioning has been configured, Zenko CloudServer checks the
|
||||||
|
following:
|
||||||
|
|
||||||
|
[[versioning-enabled]]
|
||||||
|
Versioning Enabled
|
||||||
|
|
||||||
|
If versioning is enabled and there is existing object metadata:
|
||||||
|
|
||||||
|
* If the master version is a null version (`isNull: true`) or has no
|
||||||
|
version ID (put before versioning was configured):
|
||||||
|
** store the null version metadata as a new version
|
||||||
|
** create a new version and overwrite the master version
|
||||||
|
*** set `nullVersionId`: version ID of the null version that was stored
|
||||||
|
|
||||||
|
If versioning is enabled and the master version is not null; or there is
|
||||||
|
no existing object metadata:
|
||||||
|
|
||||||
|
* create a new version and store it, and overwrite the master version
|
||||||
|
|
||||||
|
[[versioning-suspended]]
|
||||||
|
Versioning Suspended
|
||||||
|
|
||||||
|
If versioning is suspended and there is existing object metadata:
|
||||||
|
|
||||||
|
* If the master version has no version ID:
|
||||||
|
** overwrite the master version with the new metadata (PUT
|
||||||
|
`versionId: ''`)
|
||||||
|
** delete previous object data
|
||||||
|
* If the master version is a null version:
** delete the null version using the versionId metadata attribute of the
master version (PUT `versionId: <versionId of master object MD>`)
** put a new null version (PUT `versionId: ''`)
|
||||||
|
* If master is not a null version and `nullVersionId` is defined in the
|
||||||
|
object’s metadata:
|
||||||
|
** delete the current null version metadata and data
|
||||||
|
** overwrite the master version with the new metadata
|
||||||
|
|
||||||
|
If there is no existing object metadata, create the new null version as
|
||||||
|
the master version.
|
||||||
|
|
||||||
|
In each of the above cases, set `isNull` metadata attribute to true when
|
||||||
|
creating the new null version.
|
||||||
|
|
||||||
|
[[behavior-of-object-targeting-apis]]
|
||||||
|
Behavior of Object-Targeting APIs
|
||||||
|
+++++++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
API methods which can target existing objects or versions, such as Get
|
||||||
|
Object, Head Object, Get Object ACL, Put Object ACL, Copy Object and
|
||||||
|
Copy Part, will perform the action on the latest version of an object if
|
||||||
|
no version ID is specified in the request query or relevant request
|
||||||
|
header (`x-amz-copy-source-version-id` for Copy Object and Copy Part
|
||||||
|
APIs).
|
||||||
|
|
||||||
|
Two exceptions are the Delete Object and Multi-Object Delete APIs, which
|
||||||
|
will instead attempt to create delete markers, described in the
|
||||||
|
following section, if no version ID is specified.
|
||||||
|
|
||||||
|
No versioning options are necessary to retrieve the latest version from
|
||||||
|
metadata, since the master version is stored in a key with the name of
|
||||||
|
the object. However, when updating the latest version, such as with the
|
||||||
|
Put Object ACL API, Zenko CloudServer sets the `versionId` option in the
|
||||||
|
PUT call to metadata to the value stored in the object metadata's
|
||||||
|
`versionId` attribute. This is done in order to update the metadata both
|
||||||
|
in the master version and the version itself, if it is not a null
|
||||||
|
version. footnote:[If it is a null version, this call will overwrite the
|
||||||
|
null version if it is stored in its own key (`foo\0<versionId>`). If the
|
||||||
|
null version is stored only in the master version, this call will both
|
||||||
|
overwrite the master version _and_ create a new key
|
||||||
|
(`foo\0<versionId>`), resulting in the edge case referred to by the
|
||||||
|
previous footnote.]
|
||||||
|
|
||||||
|
When a version id is specified in the request query for these APIs, e.g.
|
||||||
|
`GET /foo?versionId=v1`, Zenko CloudServer will attempt to decode the
|
||||||
|
version ID and perform the action on the appropriate version. To do so,
|
||||||
|
the API sets the value of the `versionId` option to the decoded version
|
||||||
|
ID in the metadata call.
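
For illustration, such a version-specific request can be issued with
aws-cli against a local CloudServer (a minimal sketch; the endpoint,
bucket, key, output path and version ID are placeholder assumptions):

....
aws s3api get-object --bucket mybucket --key foo --version-id 'v1' \
    --endpoint-url http://localhost:8000 /tmp/foo.out
....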
|
||||||
|
|
||||||
|
[[delete-markers]]
|
||||||
|
Delete Markers
|
||||||
|
|
||||||
|
If versioning has not been configured for a bucket, the Delete Object
|
||||||
|
and Multi-Object Delete APIs behave as their standard APIs.
|
||||||
|
|
||||||
|
If versioning has been configured, Zenko CloudServer deletes object or
|
||||||
|
version data only if a specific version ID is provided in the request
|
||||||
|
query, e.g. `DELETE /foo?versionId=v1`.
|
||||||
|
|
||||||
|
If no version ID is provided, S3 creates a delete marker by creating a
|
||||||
|
0-byte version with the metadata attribute `isDeleteMarker: true`. The
|
||||||
|
S3 API will return a `404 NoSuchKey` error in response to requests
|
||||||
|
getting or heading an object whose latest version is a delete marker.
|
||||||
|
|
||||||
|
To restore a previous version as the latest version of an object, the
|
||||||
|
delete marker must be deleted, by the same process as deleting any other
|
||||||
|
version.
|
||||||
|
|
||||||
|
The response varies when targeting an object whose latest version is a
|
||||||
|
delete marker for other object-level APIs that can target existing
|
||||||
|
objects and versions, without specifying the version ID.
|
||||||
|
|
||||||
|
* Get Object, Head Object, Get Object ACL, Object Copy and Copy Part
|
||||||
|
return `404 NoSuchKey`.
|
||||||
|
* Put Object ACL and Put Object Tagging return `405 MethodNotAllowed`.
|
||||||
|
|
||||||
|
These APIs respond to requests specifying the version ID of a delete
|
||||||
|
marker with the error `405 MethodNotAllowed`, in general. Copy Part and
|
||||||
|
Copy Object respond with `400 Invalid Request`.
|
||||||
|
|
||||||
|
See section link:#delete-example["Delete Example"] for a summary.
|
||||||
|
|
||||||
|
[[null-version-mapping]]
|
||||||
|
Null Version Mapping
|
||||||
|
|
||||||
|
When the null version is specified in a request with the version ID
|
||||||
|
"null", the S3 API must use the `nullVersionId` stored in the latest
|
||||||
|
version to retrieve the current null version, if the null version is not
|
||||||
|
the latest version.
|
||||||
|
|
||||||
|
Thus, getting the null version is a two step process:
|
||||||
|
|
||||||
|
1. Get the latest version of the object from metadata. If the latest
|
||||||
|
version's `isNull` property is `true`, then use the latest version's
|
||||||
|
metadata. Otherwise,
|
||||||
|
2. Get the null version of the object from metadata, using the internal
|
||||||
|
version ID of the current null version stored in the latest version's
|
||||||
|
`nullVersionId` metadata attribute.
|
||||||
|
|
||||||
|
[[delete-example]]
|
||||||
|
DELETE Example
|
||||||
|
++++++++++++++
|
||||||
|
|
||||||
|
The following steps are used in the delete logic for delete marker
|
||||||
|
creation:
|
||||||
|
|
||||||
|
* If versioning has not been configured: attempt to delete the object
|
||||||
|
* If the request is a version-specific delete request: attempt to delete the
|
||||||
|
version
|
||||||
|
* otherwise, if not a version-specific delete request and versioning has
|
||||||
|
been configured:
|
||||||
|
** create a new 0-byte content-length version
|
||||||
|
** in the version's metadata, set the `isDeleteMarker` property to true
|
||||||
|
* Return the version ID of any version deleted or any delete marker
|
||||||
|
created
|
||||||
|
* Set response header `x-amz-delete-marker` to true if a delete marker
|
||||||
|
was deleted or created
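
For illustration, the delete logic above can be exercised with aws-cli
(a minimal sketch; it assumes versioning has been configured on the
bucket, and the endpoint, bucket, key and version ID are placeholders):

....
# No version ID: a delete marker is created; its version ID is returned
# and DeleteMarker (x-amz-delete-marker) is true in the response.
aws s3api delete-object --bucket mybucket --key foo \
    --endpoint-url http://localhost:8000

# Version-specific request: the named version itself is deleted.
aws s3api delete-object --bucket mybucket --key foo --version-id 'v1' \
    --endpoint-url http://localhost:8000
....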
|
||||||
|
|
||||||
|
The Multi-Object Delete API follows the same logic for each of the
|
||||||
|
objects or versions listed in an xml request. Note that a delete request
|
||||||
|
can result in the creation of a delete marker even if the object
requested for deletion does not exist in the first place.
|
||||||
|
|
||||||
|
Object-level APIs which can target existing objects and versions perform
|
||||||
|
the following checks regarding delete markers:
|
||||||
|
|
||||||
|
* If not a version-specific request and versioning has been configured,
|
||||||
|
check the metadata of the latest version
|
||||||
|
* If the `isDeleteMarker` property is set to true, return
|
||||||
|
`404 NoSuchKey` or `405 MethodNotAllowed`
|
||||||
|
* If it is a version-specific request, check the object metadata of the
|
||||||
|
requested version
|
||||||
|
* If the `isDeleteMarker` property is set to true, return
|
||||||
|
`405 MethodNotAllowed` or `400 InvalidRequest`
|
||||||
|
|
||||||
|
[[data-metadata-daemon-architecture-and-operational-guide]]
|
||||||
|
Data-metadata daemon Architecture and Operational guide
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
This document presents the architecture of the data-metadata daemon
|
||||||
|
(dmd) used for the community edition of Zenko CloudServer. It also
|
||||||
|
provides a guide on how to operate it.
|
||||||
|
|
||||||
|
The dmd is responsible for storing and retrieving Zenko CloudServer data
|
||||||
|
and metadata, and is accessed by Zenko CloudServer connectors through
|
||||||
|
socket.io (metadata) and REST (data) APIs.
|
||||||
|
|
||||||
|
It has been designed such that more than one Zenko CloudServer connector
|
||||||
|
can access the same buckets by communicating with the dmd. It also means
|
||||||
|
that the dmd can be hosted on a separate container or machine.
|
||||||
|
|
||||||
|
[[operation]]
|
||||||
|
Operation
|
||||||
|
^^^^^^^^^
|
||||||
|
|
||||||
|
[[startup]]
|
||||||
|
Startup
|
||||||
|
+++++++
|
||||||
|
|
||||||
|
The simplest deployment is still to launch with `npm start`; this will
start one instance of the Zenko CloudServer connector and will listen on
the locally bound dmd ports 9990 and 9991 (by default, see below).
|
||||||
|
|
||||||
|
The dmd can be started independently from the Zenko CloudServer by
|
||||||
|
running this command in the Zenko CloudServer directory:
|
||||||
|
|
||||||
|
....
|
||||||
|
npm run start_dmd
|
||||||
|
....
|
||||||
|
|
||||||
|
This will open two ports:
|
||||||
|
|
||||||
|
- one is based on socket.io and is used for metadata transfers (9990
  by default)
- the other is a REST interface used for data transfers (9991 by
  default)
|
||||||
|
|
||||||
|
Then, one or more instances of Zenko CloudServer without the dmd can be
|
||||||
|
started elsewhere with:
|
||||||
|
|
||||||
|
....
|
||||||
|
npm run start_s3server
|
||||||
|
....
|
||||||
|
|
||||||
|
[[configuration]]
|
||||||
|
Configuration
|
||||||
|
+++++++++++++
|
||||||
|
|
||||||
|
Most configuration happens in `config.json` for Zenko CloudServer; local
storage paths can be changed where the dmd is started using the same
environment variables as before: `S3DATAPATH` and `S3METADATAPATH`.
|
||||||
|
|
||||||
|
In `config.json`, the following sections are used to configure access to
|
||||||
|
the dmd through separate configuration of the data and metadata access:
|
||||||
|
|
||||||
|
....
|
||||||
|
"metadataClient": {
|
||||||
|
"host": "localhost",
|
||||||
|
"port": 9990
|
||||||
|
},
|
||||||
|
"dataClient": {
|
||||||
|
"host": "localhost",
|
||||||
|
"port": 9991
|
||||||
|
},
|
||||||
|
....
|
||||||
|
|
||||||
|
To run a remote dmd, you have to do the following:
|
||||||
|
|
||||||
|
- change both `"host"` attributes to the IP or host name where the
  dmd is run.
- modify the `"bindAddress"` attributes in the `"metadataDaemon"` and
  `"dataDaemon"` sections where the dmd is run to accept remote
  connections (e.g. `"::"`)
|
||||||
|
|
||||||
|
[[architecture-1]]
|
||||||
|
Architecture
|
||||||
|
^^^^^^^^^^^^
|
||||||
|
|
||||||
|
This section gives a bit more insight on how it works internally.
|
||||||
|
|
||||||
|
image:./images/data_metadata_daemon_arch.png[Architecture diagram]
|
||||||
|
|
||||||
|
[[metadata-on-socket.io]]
|
||||||
|
Metadata on socket.io
|
||||||
|
+++++++++++++++++++++
|
||||||
|
|
||||||
|
This communication is based on an RPC system built on socket.io events
sent by Zenko CloudServer connectors, received by the dmd and
acknowledged back to the Zenko CloudServer connector.
|
||||||
|
|
||||||
|
The actual payload sent through socket.io is a JSON-serialized form of
|
||||||
|
the RPC call name and parameters, along with some additional information
|
||||||
|
like the request UIDs, and the sub-level information, sent as object
|
||||||
|
attributes in the JSON request.
|
||||||
|
|
||||||
|
With the introduction of versioning support, updates are now gathered in
the dmd for a few milliseconds at most, before being batched as a
single write to the database. This is done server-side, so the API is
still meant to send individual updates.
|
||||||
|
|
||||||
|
Four RPC commands are available to clients: `put`, `get`, `del` and
|
||||||
|
`createReadStream`. They more or less map the parameters accepted by the
|
||||||
|
corresponding calls in the LevelUp implementation of LevelDB. They
|
||||||
|
differ in the following:
|
||||||
|
|
||||||
|
- The `sync` option is ignored (under the hood, puts are gathered
  into batches which have their `sync` property enforced when they are
  committed to the storage)
- Some additional versioning-specific options are supported
- `createReadStream` becomes asynchronous, takes an additional
  callback argument and returns the stream in the second callback
  parameter
|
||||||
|
|
||||||
|
Debugging the socket.io exchanges can be achieved by running the daemon
|
||||||
|
with the `DEBUG='socket.io*'` environment variable set.
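
For example, to start the dmd with socket.io debugging enabled (assuming
you are in the Zenko CloudServer directory):

....
DEBUG='socket.io*' npm run start_dmd
....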
|
||||||
|
|
||||||
|
One parameter controls the timeout after which sent RPC commands end
with a timeout error. It can be changed either:
|
||||||
|
|
||||||
|
- via the `DEFAULT_CALL_TIMEOUT_MS` option in
  `lib/network/rpc/rpc.js`
- or in the constructor call of the `MetadataFileClient` object (in
  `lib/metadata/bucketfile/backend.js`) as `callTimeoutMs`.

The default value is 30000 milliseconds (30 seconds).
|
||||||
|
|
||||||
|
A specific implementation deals with streams, currently used for listing
|
||||||
|
a bucket. Streams emit `"stream-data"` events that pack one or more
|
||||||
|
items in the listing, and a special `"stream-end"` event when done. Flow
control is achieved by allowing a certain number of "in flight" packets
that have not received an ack yet (5 by default). Two options can tune
the behavior (for better throughput or to make it more robust on weak
networks); they have to be set in the `mdserver.js` file directly, as
there is no support in `config.json` for those options yet:
|
||||||
|
|
||||||
|
- `streamMaxPendingAck`: max number of pending ack events not yet
  received (default is 5)
- `streamAckTimeoutMs`: timeout for receiving an ack after an output
  stream packet is sent to the client (default is 5000)
|
||||||
|
|
||||||
|
[[data-exchange-through-the-rest-data-port]]
|
||||||
|
Data exchange through the REST data port
|
||||||
|
++++++++++++++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
Data is read and written with REST semantics.
|
||||||
|
|
||||||
|
The web server recognizes a base path in the URL of `/DataFile` to be a
|
||||||
|
request to the data storage service.
|
||||||
|
|
||||||
|
[[put-1]]
|
||||||
|
PUT
|
||||||
|
|
||||||
|
A PUT on `/DataFile` URL and contents passed in the request body will
|
||||||
|
write a new object to the storage.
|
||||||
|
|
||||||
|
On success, a `201 Created` response is returned and the new URL to the
|
||||||
|
object is returned via the `Location` header (e.g.
|
||||||
|
`Location: /DataFile/50165db76eecea293abfd31103746dadb73a2074`). The raw
|
||||||
|
key can then be extracted simply by removing the leading `/DataFile`
|
||||||
|
service information from the returned URL.
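
For example, a new object can be written with curl against a locally
running dmd (a minimal sketch; the default data port 9991 and the input
file name are assumptions):

....
curl -i -X PUT --data-binary @myfile.bin http://localhost:9991/DataFile
....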
|
||||||
|
|
||||||
|
[[get-1]]
|
||||||
|
GET
|
||||||
|
|
||||||
|
A GET is simply issued with REST semantics, e.g.:
|
||||||
|
|
||||||
|
....
|
||||||
|
GET /DataFile/50165db76eecea293abfd31103746dadb73a2074 HTTP/1.1
|
||||||
|
....
|
||||||
|
|
||||||
|
A GET request can ask for a specific range. Range support is complete
|
||||||
|
except for multiple byte ranges.
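
For example, a ranged read of an object (a minimal sketch; the port and
key reuse the assumptions of the PUT example above):

....
curl -H 'Range: bytes=0-99' \
    http://localhost:9991/DataFile/50165db76eecea293abfd31103746dadb73a2074
....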
|
||||||
|
|
||||||
|
[[delete-1]]
|
||||||
|
DELETE
|
||||||
|
|
||||||
|
DELETE is similar to GET, except that a `204 No Content` response is
|
||||||
|
returned on success.
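
For example (same assumptions as the GET example above):

....
curl -i -X DELETE \
    http://localhost:9991/DataFile/50165db76eecea293abfd31103746dadb73a2074
....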
|
||||||
|
|
||||||
|
[[listing]]
|
||||||
|
Listing
|
||||||
|
~~~~~~~
|
||||||
|
|
||||||
|
[[listing-types]]
|
||||||
|
Listing Types
|
||||||
|
^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
We use three different types of metadata listing for various operations.
|
||||||
|
Here are the scenarios we use each for:
|
||||||
|
|
||||||
|
- 'Delimiter' - when no versions are possible in the bucket since it is
  an internally-used only bucket which is not exposed to a user. Namely,
  1. to list objects in the "user's bucket" to respond to a GET SERVICE
     request and
  2. to do internal listings on an MPU shadow bucket to complete
     multipart upload operations.
- 'DelimiterVersion' - to list all versions in a bucket
- 'DelimiterMaster' - to list just the master versions of objects in a
  bucket
|
||||||
|
|
||||||
|
[[algorithms]]
|
||||||
|
Algorithms
|
||||||
|
^^^^^^^^^^
|
||||||
|
|
||||||
|
The algorithms for each listing type can be found in the open-source
|
||||||
|
https://github.com/scality/Arsenal[scality/Arsenal] repository, in
|
||||||
|
https://github.com/scality/Arsenal/tree/master/lib/algos/list[lib/algos/list].
|
||||||
|
|
||||||
|
[[encryption]]
|
||||||
|
Encryption
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
With CloudServer, there are two possible methods of at-rest encryption:
(1) bucket-level encryption, where Scality CloudServer itself handles
at-rest encryption for any object that is in an 'encrypted' bucket,
regardless of what the location-constraint for the data is, and
(2) AWS server-side encryption, which you can choose to use if the
location-constraint specified for the data is of type AWS.
|
||||||
|
|
||||||
|
Note: bucket level encryption is not available on the standard AWS S3
|
||||||
|
protocol, so normal AWS S3 clients will not provide the option to send a
|
||||||
|
header when creating a bucket. We have created a simple tool to enable
|
||||||
|
you to easily create an encrypted bucket.
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
Example:
|
||||||
|
^^^^^^^^
|
||||||
|
|
||||||
|
Create an encrypted bucket using our encrypted bucket tool in the bin
directory:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
./create_encrypted_bucket.js -a accessKey1 -k verySecretKey1 -b bucketname -h localhost -p 8000
|
||||||
|
----
|
||||||
|
|
||||||
|
[[aws-backend]]
|
||||||
|
AWS backend
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
With real AWS S3 as a location-constraint, you have to configure the
|
||||||
|
location-constraint as follows
|
||||||
|
|
||||||
|
[source,sourceCode,json]
|
||||||
|
----
|
||||||
|
"awsbackend": {
|
||||||
|
"type": "aws_s3",
|
||||||
|
"legacyAwsBehavior": true,
|
||||||
|
"details": {
|
||||||
|
"serverSideEncryption": true,
|
||||||
|
...
|
||||||
|
}
|
||||||
|
},
|
||||||
|
----
|
||||||
|
|
||||||
|
Then, every time an object is put to that data location, we pass the
|
||||||
|
following header to AWS: `x-amz-server-side-encryption: AES256`
|
||||||
|
|
||||||
|
Note: due to these options, it is possible to configure encryption by
|
||||||
|
both CloudServer and AWS S3 (if you put an object to a CloudServer
|
||||||
|
bucket which has the encryption flag AND the location-constraint for the
|
||||||
|
data is AWS S3 with serverSideEncryption set to true).
|
|
@ -0,0 +1,316 @@
|
||||||
|
[[clients]]
|
||||||
|
Clients
|
||||||
|
-------
|
||||||
|
|
||||||
|
List of applications that have been tested with Zenko CloudServer.
|
||||||
|
|
||||||
|
GUI
~~~
|
||||||
|
|
||||||
|
`Cyberduck <https://cyberduck.io/?l=en>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
* https://www.youtube.com/watch?v=-n2MCt4ukUg
|
||||||
|
* https://www.youtube.com/watch?v=IyXHcu4uqgU
|
||||||
|
|
||||||
|
`Cloud Explorer <https://www.linux-toys.com/?p=945>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
* https://www.youtube.com/watch?v=2hhtBtmBSxE
|
||||||
|
|
||||||
|
`CloudBerry Lab <http://www.cloudberrylab.com>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
* https://youtu.be/IjIx8g_o0gY
|
||||||
|
|
||||||
|
Command Line Tools
~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
`s3curl <https://github.com/rtdp/s3curl>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
https://github.com/scality/S3/blob/master/tests/functional/s3curl/s3curl.pl
|
||||||
|
|
||||||
|
`aws-cli <http://docs.aws.amazon.com/cli/latest/reference/>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
`~/.aws/credentials` on Linux, OS X, or Unix or
|
||||||
|
`C:\Users\USERNAME\.aws\credentials` on Windows
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
[default]
|
||||||
|
aws_access_key_id = accessKey1
|
||||||
|
aws_secret_access_key = verySecretKey1
|
||||||
|
....
|
||||||
|
|
||||||
|
`~/.aws/config` on Linux, OS X, or Unix or
|
||||||
|
`C:\Users\USERNAME\.aws\config` on Windows
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
[default]
|
||||||
|
region = us-east-1
|
||||||
|
....
|
||||||
|
|
||||||
|
Note: `us-east-1` is the default region, but you can specify any region.
|
||||||
|
|
||||||
|
See all buckets:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
aws s3 ls --endpoint-url=http://localhost:8000
|
||||||
|
....
|
||||||
|
|
||||||
|
Create bucket:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
aws --endpoint-url=http://localhost:8000 s3 mb s3://mybucket
|
||||||
|
....
|
||||||
|
|
||||||
|
`s3cmd <http://s3tools.org/s3cmd>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
If using s3cmd as a client to S3, be aware that the v4 signature format
is buggy in s3cmd versions < 1.6.1.
|
||||||
|
|
||||||
|
`~/.s3cfg` on Linux, OS X, or Unix or `C:\Users\USERNAME\.s3cfg` on
|
||||||
|
Windows
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
[default]
|
||||||
|
access_key = accessKey1
|
||||||
|
secret_key = verySecretKey1
|
||||||
|
host_base = localhost:8000
|
||||||
|
host_bucket = %(bucket).localhost:8000
|
||||||
|
signature_v2 = False
|
||||||
|
use_https = False
|
||||||
|
....
|
||||||
|
|
||||||
|
See all buckets:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
s3cmd ls
|
||||||
|
....
|
||||||
|
|
||||||
|
`rclone <http://rclone.org/s3/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
`~/.rclone.conf` on Linux, OS X, or Unix or
|
||||||
|
`C:\Users\USERNAME\.rclone.conf` on Windows
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
[remote]
|
||||||
|
type = s3
|
||||||
|
env_auth = false
|
||||||
|
access_key_id = accessKey1
|
||||||
|
secret_access_key = verySecretKey1
|
||||||
|
region = other-v2-signature
|
||||||
|
endpoint = http://localhost:8000
|
||||||
|
location_constraint =
|
||||||
|
acl = private
|
||||||
|
server_side_encryption =
|
||||||
|
storage_class =
|
||||||
|
....
|
||||||
|
|
||||||
|
See all buckets:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
....
|
||||||
|
rclone lsd remote:
|
||||||
|
....
|
||||||
|
|
||||||
|
JavaScript
~~~~~~~~~~
|
||||||
|
|
||||||
|
`AWS JavaScript SDK <http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code:: javascript
|
||||||
|
|
||||||
|
....
|
||||||
|
const AWS = require('aws-sdk');
|
||||||
|
|
||||||
|
const s3 = new AWS.S3({
|
||||||
|
accessKeyId: 'accessKey1',
|
||||||
|
secretAccessKey: 'verySecretKey1',
|
||||||
|
endpoint: 'localhost:8000',
|
||||||
|
sslEnabled: false,
|
||||||
|
s3ForcePathStyle: true,
|
||||||
|
});
|
||||||
|
....
|
||||||
|
|
||||||
|
JAVA
~~~~
|
||||||
|
|
||||||
|
`AWS JAVA SDK <http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Client.html>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code:: java
|
||||||
|
|
||||||
|
....
|
||||||
|
import com.amazonaws.auth.AWSCredentials;
|
||||||
|
import com.amazonaws.auth.BasicAWSCredentials;
|
||||||
|
import com.amazonaws.services.s3.AmazonS3;
|
||||||
|
import com.amazonaws.services.s3.AmazonS3Client;
|
||||||
|
import com.amazonaws.services.s3.S3ClientOptions;
|
||||||
|
import com.amazonaws.services.s3.model.Bucket;
|
||||||
|
|
||||||
|
public class S3 {
|
||||||
|
|
||||||
|
public static void main(String[] args) {
|
||||||
|
|
||||||
|
AWSCredentials credentials = new BasicAWSCredentials("accessKey1",
|
||||||
|
"verySecretKey1");
|
||||||
|
|
||||||
|
// Create a client connection based on credentials
|
||||||
|
AmazonS3 s3client = new AmazonS3Client(credentials);
|
||||||
|
s3client.setEndpoint("http://localhost:8000");
|
||||||
|
// Using path-style requests
|
||||||
|
// (deprecated) s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
|
||||||
|
s3client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
|
||||||
|
|
||||||
|
// Create bucket
|
||||||
|
String bucketName = "javabucket";
|
||||||
|
s3client.createBucket(bucketName);
|
||||||
|
|
||||||
|
// List off all buckets
|
||||||
|
for (Bucket bucket : s3client.listBuckets()) {
|
||||||
|
System.out.println(" - " + bucket.getName());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
....
|
||||||
|
|
||||||
|
Ruby
~~~~
|
||||||
|
|
||||||
|
`AWS SDK for Ruby - Version 2 <http://docs.aws.amazon.com/sdkforruby/api/>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code:: ruby
|
||||||
|
|
||||||
|
....
|
||||||
|
require 'aws-sdk'
|
||||||
|
|
||||||
|
s3 = Aws::S3::Client.new(
|
||||||
|
:access_key_id => 'accessKey1',
|
||||||
|
:secret_access_key => 'verySecretKey1',
|
||||||
|
:endpoint => 'http://localhost:8000',
|
||||||
|
:force_path_style => true
|
||||||
|
)
|
||||||
|
|
||||||
|
resp = s3.list_buckets
|
||||||
|
....
|
||||||
|
|
||||||
|
`fog <http://fog.io/storage/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code:: ruby
|
||||||
|
|
||||||
|
....
|
||||||
|
require "fog"
|
||||||
|
|
||||||
|
connection = Fog::Storage.new(
|
||||||
|
{
|
||||||
|
:provider => "AWS",
|
||||||
|
:aws_access_key_id => 'accessKey1',
|
||||||
|
:aws_secret_access_key => 'verySecretKey1',
|
||||||
|
:endpoint => 'http://localhost:8000',
|
||||||
|
:path_style => true,
|
||||||
|
:scheme => 'http',
|
||||||
|
})
|
||||||
|
....
|
||||||
|
|
||||||
|
Python
~~~~~~
|
||||||
|
|
||||||
|
`boto2 <http://boto.cloudhackers.com/en/latest/ref/s3.html>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
....
|
||||||
|
import boto
|
||||||
|
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
|
||||||
|
|
||||||
|
|
||||||
|
connection = S3Connection(
|
||||||
|
aws_access_key_id='accessKey1',
|
||||||
|
aws_secret_access_key='verySecretKey1',
|
||||||
|
is_secure=False,
|
||||||
|
port=8000,
|
||||||
|
calling_format=OrdinaryCallingFormat(),
|
||||||
|
host='localhost'
|
||||||
|
)
|
||||||
|
|
||||||
|
connection.create_bucket('mybucket')
|
||||||
|
....
|
||||||
|
|
||||||
|
`boto3 <http://boto3.readthedocs.io/en/latest/index.html>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Client integration
|
||||||
|
|
||||||
|
.. code:: python

....
import boto3

client = boto3.client(
|
||||||
|
's3',
|
||||||
|
aws_access_key_id='accessKey1',
|
||||||
|
aws_secret_access_key='verySecretKey1',
|
||||||
|
endpoint_url='http://localhost:8000'
|
||||||
|
)
|
||||||
|
|
||||||
|
lists = client.list_buckets()
|
||||||
|
....
|
||||||
|
|
||||||
|
Full integration (with object mapping)
|
||||||
|
|
||||||
|
.. code:: python

....
import os

from botocore.utils import fix_s3_host
|
||||||
|
import boto3
|
||||||
|
|
||||||
|
os.environ['AWS_ACCESS_KEY_ID'] = "accessKey1"
|
||||||
|
os.environ['AWS_SECRET_ACCESS_KEY'] = "verySecretKey1"
|
||||||
|
|
||||||
|
s3 = boto3.resource(service_name='s3', endpoint_url='http://localhost:8000')
|
||||||
|
s3.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)
|
||||||
|
|
||||||
|
for bucket in s3.buckets.all():
|
||||||
|
print(bucket.name)
|
||||||
|
....
|
||||||
|
|
||||||
|
PHP
~~~
|
||||||
|
|
||||||
|
You should force path-style requests even though the v3 SDK advertises
that it does so by default.
|
||||||
|
|
||||||
|
`AWS PHP SDK v3 <https://docs.aws.amazon.com/aws-sdk-php/v3/guide>`__
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
.. code:: php
|
||||||
|
|
||||||
|
....
|
||||||
|
use Aws\S3\S3Client;
|
||||||
|
|
||||||
|
$client = S3Client::factory([
|
||||||
|
'region' => 'us-east-1',
|
||||||
|
'version' => 'latest',
|
||||||
|
'endpoint' => 'http://localhost:8000',
|
||||||
|
'use_path_style_endpoint' => true,
|
||||||
|
'credentials' => [
|
||||||
|
'key' => 'accessKey1',
|
||||||
|
'secret' => 'verySecretKey1'
|
||||||
|
]
|
||||||
|
]);
|
||||||
|
|
||||||
|
$client->createBucket(array(
|
||||||
|
'Bucket' => 'bucketphp',
|
||||||
|
));
|
||||||
|
....
|
|
@ -0,0 +1,395 @@
|
||||||
|
Docker
|
||||||
|
======
|
||||||
|
|
||||||
|
* link:#environment-variables[Environment Variables]
|
||||||
|
* link:#tunables-and-setup-tips[Tunables and setup tips]
|
||||||
|
* link:#continuous-integration-with-docker-hosted-cloudserver[Examples
for continuous integration with Docker]
* link:#in-production-with-docker-hosted-cloudserver[Examples for
going in production with Docker]
|
||||||
|
|
||||||
|
[[environment-variables]]
|
||||||
|
Environment Variables
|
||||||
|
---------------------
|
||||||
|
|
||||||
|
[[s3data]]
|
||||||
|
S3DATA
|
||||||
|
~~~~~~
|
||||||
|
|
||||||
|
[[s3datamultiple]]
|
||||||
|
S3DATA=multiple
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Allows you to run Scality Zenko CloudServer with multiple data backends,
|
||||||
|
defined as regions. When using multiple data backends, a custom
|
||||||
|
`locationConfig.json` file is mandatory. It will allow you to set custom
|
||||||
|
regions. You will then need to provide associated rest_endpoints for
|
||||||
|
each custom region in your `config.json` file.
|
||||||
|
link:../GETTING_STARTED/#location-configuration[Learn more about
|
||||||
|
multiple backends configuration]
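
For example, here is a minimal sketch of such a run, mounting a custom
`locationConfig.json` (the local file path is an assumption; mounting
files this way is shown in the sections below):

[source,sourceCode,shell]
----
docker run -d --name s3server -p 8000:8000 -e S3DATA=multiple \
    -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
    scality/s3server
----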
|
||||||
|
|
||||||
|
If you are using Scality RING endpoints, please refer to your customer
|
||||||
|
documentation.
|
||||||
|
|
||||||
|
[[running-it-with-an-aws-s3-hosted-backend]]
|
||||||
|
Running it with an AWS S3 hosted backend
|
||||||
|
++++++++++++++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
To run CloudServer with an S3 AWS backend, you will have to add a new
|
||||||
|
section to your `locationConfig.json` file with the `aws_s3` location
|
||||||
|
type:
|
||||||
|
|
||||||
|
[source,sourceCode,json]
----
(...)
"awsbackend": {
    "type": "aws_s3",
    "details": {
        "awsEndpoint": "s3.amazonaws.com",
        "bucketName": "yourawss3bucket",
        "bucketMatch": true,
        "credentialsProfile": "aws_hosted_profile"
    }
}
(...)
----
|
||||||
|
|
||||||
|
You will also have to edit your AWS credentials file to be able to use
|
||||||
|
your command line tool of choice. This file should mention credentials
|
||||||
|
for all the backends you're using. You can define several profiles
when using multiple backends.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
----
[default]
aws_access_key_id=accessKey1
aws_secret_access_key=verySecretKey1

[aws_hosted_profile]
aws_access_key_id={{YOUR_ACCESS_KEY}}
aws_secret_access_key={{YOUR_SECRET_KEY}}
----
|
||||||
|
|
||||||
|
Just as you need to mount your locationConfig.json, you will need to
|
||||||
|
mount your AWS credentials file at run time:
|
||||||
|
`-v ~/.aws/credentials:/root/.aws/credentials` on Linux, OS X, or Unix
|
||||||
|
or `-v C:\Users\USERNAME\.aws\credentials:/root/.aws/credentials` on
|
||||||
|
Windows
|
||||||
|
|
||||||
|
NOTE: One account can't copy to another account with a source and
|
||||||
|
destination on real AWS unless the account associated with the access
|
||||||
|
key/secret key pair used for the destination bucket has rights to get
in the source bucket. ACLs would have to be updated on AWS directly to
|
||||||
|
enable this.
|
||||||
|
|
||||||
|
S3BACKEND
~~~~~~~~~
|
||||||
|
|
||||||
|
S3BACKEND=file
^^^^^^^^^^^^^^

When storing file data, for it to be persistent you must mount docker
volumes for both data and metadata. See
link:#using-docker-volumes[this section].

S3BACKEND=mem
^^^^^^^^^^^^^

This is ideal for testing - no data will remain after the container is
shut down.
|
||||||
|
|
||||||
|
[[endpoint]]
|
||||||
|
ENDPOINT
|
||||||
|
~~~~~~~~
|
||||||
|
|
||||||
|
This variable specifies your endpoint. If you have a domain such as
|
||||||
|
new.host.com, by specifying that here, you and your users can direct s3
|
||||||
|
server requests to new.host.com.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server -p 8000:8000 -e ENDPOINT=new.host.com scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
Note: In your `/etc/hosts` file on Linux, OS X, or Unix with root
|
||||||
|
permissions, make sure to associate 127.0.0.1 with `new.host.com`
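
For example (a minimal sketch; run with root permissions and adjust the
host name to the ENDPOINT value you chose):

[source,sourceCode,shell]
----
echo "127.0.0.1 new.host.com" >> /etc/hosts
----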
|
||||||
|
|
||||||
|
[[scality_access_key_id-and-scality_secret_access_key]]
|
||||||
|
SCALITY_ACCESS_KEY_ID and SCALITY_SECRET_ACCESS_KEY
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
These variables specify authentication credentials for an account named
|
||||||
|
"CustomAccount".
|
||||||
|
|
||||||
|
You can set credentials for many accounts by editing
|
||||||
|
`conf/authdata.json` (see below for further info), but if you just want
|
||||||
|
to specify one set of your own, you can use these environment variables.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey
|
||||||
|
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
Note: Anything in the `authdata.json` file will be ignored.

Note: The old `ACCESS_KEY` and `SECRET_KEY` environment variables are
now deprecated.
|
||||||
|
|
||||||
|
[[log_level]]
|
||||||
|
LOG_LEVEL
|
||||||
|
~~~~~~~~~
|
||||||
|
|
||||||
|
This variable allows you to change the log level: info, debug or trace.
|
||||||
|
The default is info. Debug will give you more detailed logs and trace
|
||||||
|
will give you the most detailed.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[ssl]]
|
||||||
|
SSL
|
||||||
|
~~~
|
||||||
|
|
||||||
|
Setting this variable to true allows you to run S3 with SSL:
|
||||||
|
|
||||||
|
**Note1**: You also need to specify the ENDPOINT environment variable.
|
||||||
|
**Note2**: In your `/etc/hosts` file on Linux, OS X, or Unix with root
|
||||||
|
permissions, make sure to associate 127.0.0.1 with `<YOUR_ENDPOINT>`
|
||||||
|
|
||||||
|
**Warning**: These certs, being self-signed (and the CA being generated
inside the container), will be untrusted by any clients, and could
disappear on a container upgrade. That's ok as long as it's for quick
testing. For anything beyond testing, best security practice is to use
an extra container for SSL/TLS termination, such as
haproxy/nginx/stunnel, to limit what an exploit on either component
could expose, and to keep certificates in a mounted volume.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT>
|
||||||
|
scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
More information about how to use S3 server with SSL
|
||||||
|
https://s3.scality.com/v1.0/page/scality-with-ssl[here]
|
||||||
|
|
||||||
|
[[listen_addr]]
|
||||||
|
LISTEN_ADDR
|
||||||
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
This variable instructs the Zenko CloudServer, and its data and metadata
|
||||||
|
components to listen on the specified address. This allows starting the
|
||||||
|
data or metadata servers as standalone services, for example.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0
|
||||||
|
scality/s3server npm run start_dataserver
|
||||||
|
----
|
||||||
|
|
||||||
|
[[data_host-and-metadata_host]]
|
||||||
|
DATA_HOST and METADATA_HOST
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
These variables configure the data and metadata servers to use, usually
|
||||||
|
when they are running on another host and only starting the stateless
|
||||||
|
Zenko CloudServer.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server -e DATA_HOST=s3server-data
|
||||||
|
-e METADATA_HOST=s3server-metadata scality/s3server npm run start_s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[redis_host]]
|
||||||
|
REDIS_HOST
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
Use this variable to connect to the redis cache server on another host
|
||||||
|
than localhost.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server -p 8000:8000
|
||||||
|
-e REDIS_HOST=my-redis-server.example.com scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[redis_port]]
|
||||||
|
REDIS_PORT
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
Use this variable to connect to the redis cache server on another port
|
||||||
|
than the default 6379.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name s3server -p 8000:8000
|
||||||
|
-e REDIS_PORT=6379 scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[tunables-and-setup-tips]]
|
||||||
|
Tunables and Setup Tips
|
||||||
|
-----------------------
|
||||||
|
|
||||||
|
[[using-docker-volumes]]
|
||||||
|
Using Docker Volumes
|
||||||
|
~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Zenko CloudServer runs with a file backend by default.
|
||||||
|
|
||||||
|
So, by default, the data is stored inside your Zenko CloudServer Docker
|
||||||
|
container.
|
||||||
|
|
||||||
|
However, if you want your data and metadata to persist, you *MUST* use
|
||||||
|
Docker volumes to host your data and metadata outside your Zenko
|
||||||
|
CloudServer Docker container. Otherwise, the data and metadata will be
|
||||||
|
destroyed when you erase the container.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
|
||||||
|
-p 8000:8000 -d scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
This command mounts the host directory, `./data`, into the container at
|
||||||
|
`/usr/src/app/localData` and the host directory, `./metadata`, into the
|
||||||
|
container at `/usr/src/app/localMetadata`. It can also be any host mount
|
||||||
|
point, like `/mnt/data` and `/mnt/metadata`.
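
For instance, here is a sketch of the same run using the `/mnt` mount
points mentioned above (assuming those directories exist on the host):

[source,sourceCode,shell]
----
docker run -v /mnt/data:/usr/src/app/localData \
    -v /mnt/metadata:/usr/src/app/localMetadata \
    -p 8000:8000 -d scality/s3server
----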
|
||||||
|
|
||||||
|
[[adding-modifying-or-deleting-accounts-or-users-credentials]]
|
||||||
|
Adding, modifying or deleting accounts or users credentials
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
1. Create locally a customized `authdata.json` based on our
|
||||||
|
`/conf/authdata.json`.
|
||||||
|
|
||||||
|
2. Use a https://docs.docker.com/engine/tutorials/dockervolumes/[Docker
volume] to override the default `authdata.json` through a docker file
mapping.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
|
||||||
|
scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[specifying-your-own-host-name]]
|
||||||
|
Specifying your own host name
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To specify a host name (e.g. s3.domain.name), you can provide your own
|
||||||
|
https://github.com/scality/S3/blob/master/config.json[config.json] using
|
||||||
|
https://docs.docker.com/engine/tutorials/dockervolumes/[Docker Volume].
|
||||||
|
|
||||||
|
First add a new key-value pair in the restEndpoints section of your
|
||||||
|
config.json. The key in the key-value pair should be the host name you
|
||||||
|
would like to add and the value is the default location_constraint for
|
||||||
|
this endpoint.
|
||||||
|
|
||||||
|
For example, `s3.example.com` is mapped to `us-east-1` which is one of
|
||||||
|
the `location_constraints` listed in your locationConfig.json file
|
||||||
|
https://github.com/scality/S3/blob/master/locationConfig.json[here].
|
||||||
|
|
||||||
|
More information about location configuration
|
||||||
|
https://github.com/scality/S3/blob/master/README.md#location-configuration[here]
|
||||||
|
|
||||||
|
[source,sourceCode,json]
|
||||||
|
----
|
||||||
|
"restEndpoints": {
|
||||||
|
"localhost": "file",
|
||||||
|
"127.0.0.1": "file",
|
||||||
|
...
|
||||||
|
"s3.example.com": "us-east-1"
|
||||||
|
},
|
||||||
|
----
|
||||||
|
|
||||||
|
Then, run your Scality S3 Server using
|
||||||
|
https://docs.docker.com/engine/tutorials/dockervolumes/[Docker Volume]:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
Your local `config.json` file will override the default one through a
|
||||||
|
docker file mapping.
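
You can then check the new endpoint, for instance with aws-cli (a
minimal sketch; it assumes `s3.example.com` resolves to the host running
the container, e.g. through `/etc/hosts`):

[source,sourceCode,shell]
----
aws s3 ls --endpoint-url=http://s3.example.com:8000
----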
|
||||||
|
|
||||||
|
[[running-as-an-unprivileged-user]]
|
||||||
|
Running as an unprivileged user
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Zenko CloudServer runs as root by default.
|
||||||
|
|
||||||
|
You can change that by modifying the dockerfile and specifying a user
|
||||||
|
before the entrypoint.
|
||||||
|
|
||||||
|
The user needs to exist within the container, and own the folder
|
||||||
|
*/usr/src/app* for Scality Zenko CloudServer to run properly.
|
||||||
|
|
||||||
|
For instance, you can modify these lines in the dockerfile:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
...
|
||||||
|
&& groupadd -r -g 1001 scality \
|
||||||
|
&& useradd -u 1001 -g 1001 -d /usr/src/app -r scality \
|
||||||
|
&& chown -R scality:scality /usr/src/app
|
||||||
|
|
||||||
|
...
|
||||||
|
|
||||||
|
USER scality
|
||||||
|
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
|
||||||
|
----
|
||||||
|
|
||||||
|
[[continuous-integration-with-docker-hosted-cloudserver]]
|
||||||
|
Continuous integration with Docker hosted CloudServer
|
||||||
|
-----------------------------------------------------
|
||||||
|
|
||||||
|
When you start the Docker Scality Zenko CloudServer image, you can
|
||||||
|
adjust the configuration of the Scality Zenko CloudServer instance by
|
||||||
|
passing one or more environment variables on the docker run command
|
||||||
|
line.
|
||||||
|
|
||||||
|
Sample ways to run it for CI are:
|
||||||
|
|
||||||
|
* With custom locations (one in-memory, one hosted on AWS), and custom
|
||||||
|
credentials mounted:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run --name CloudServer -p 8000:8000
|
||||||
|
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
|
||||||
|
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json
|
||||||
|
-v ~/.aws/credentials:/root/.aws/credentials
|
||||||
|
-e S3DATA=multiple -e S3BACKEND=mem scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
* With custom locations, (one in-memory, one hosted on AWS, one file),
|
||||||
|
and custom credentials set as environment variables (see
|
||||||
|
link:#scality-access-key-id-and-scality-secret-access-key[this
|
||||||
|
section]):
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run --name CloudServer -p 8000:8000
|
||||||
|
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
|
||||||
|
-v ~/.aws/credentials:/root/.aws/credentials
|
||||||
|
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
|
||||||
|
-e SCALITY_ACCESS_KEY_ID=accessKey1
|
||||||
|
-e SCALITY_SECRET_ACCESS_KEY=verySecretKey1
|
||||||
|
-e S3DATA=multiple -e S3BACKEND=mem scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[in-production-with-docker-hosted-cloudserver]]
|
||||||
|
In production with Docker hosted CloudServer
|
||||||
|
--------------------------------------------
|
||||||
|
|
||||||
|
In production, we expect that data will be persistent, that you will use
|
||||||
|
the multiple backends capabilities of Zenko CloudServer, and that you
|
||||||
|
will have a custom endpoint for your local storage, and custom
|
||||||
|
credentials for your local storage:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
docker run -d --name CloudServer
|
||||||
|
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
|
||||||
|
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
|
||||||
|
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json
|
||||||
|
-v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple
|
||||||
|
-e ENDPOINT=custom.endpoint.com
|
||||||
|
-p 8000:8000 -d scality/s3server
|
||||||
|
----
|
|
@ -0,0 +1,714 @@
|
||||||
|
Integrations
|
||||||
|
============
|
||||||
|
|
||||||
|
[[high-availability]]
|
||||||
|
High Availability
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
https://docs.docker.com/engine/swarm/[Docker swarm] is a clustering tool
|
||||||
|
developed by Docker and ready to use with its containers. It allows you
to start a service, which we define and use as a means to ensure Zenko
|
||||||
|
CloudServer's continuous availability to the end user. Indeed, a swarm
|
||||||
|
defines a manager and n workers among n+1 servers. We will do a basic
|
||||||
|
setup in this tutorial, with just 3 servers, which already provides a
|
||||||
|
strong service resiliency, whilst remaining easy to do as an individual.
|
||||||
|
We will use NFS through docker to share data and metadata between the
|
||||||
|
different servers.
|
||||||
|
|
||||||
|
You will see that the steps of this tutorial are defined as **On
|
||||||
|
Server**, **On Clients**, **On All Machines**. This refers respectively
|
||||||
|
to NFS Server, NFS Clients, or NFS Server and Clients. In our example,
|
||||||
|
the IP of the Server will be **10.200.15.113**, while the IPs of the
|
||||||
|
Clients will be *10.200.15.96 and 10.200.15.97*
|
||||||
|
|
||||||
|
[[installing-docker]]
|
||||||
|
Installing docker
|
||||||
|
~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Any version from docker 1.12.6 onwards should work; we used Docker
|
||||||
|
17.03.0-ce for this tutorial.
|
||||||
|
|
||||||
|
[[on-all-machines]]
|
||||||
|
On All Machines
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
[[on-ubuntu-14.04]]
|
||||||
|
On Ubuntu 14.04
|
||||||
|
+++++++++++++++
|
||||||
|
|
||||||
|
The docker website has
|
||||||
|
https://docs.docker.com/engine/installation/linux/ubuntu/[solid
|
||||||
|
documentation]. We have chosen to install the aufs dependency, as
|
||||||
|
recommended by Docker. Here are the required commands:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo apt-get update
|
||||||
|
$> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
|
||||||
|
$> sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
|
||||||
|
$> curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||||
|
$> sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||||
|
$> sudo apt-get update
|
||||||
|
$> sudo apt-get install docker-ce
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-centos-7]]
|
||||||
|
On CentOS 7
|
||||||
|
+++++++++++
|
||||||
|
|
||||||
|
The docker website has
|
||||||
|
https://docs.docker.com/engine/installation/linux/centos/[solid
|
||||||
|
documentation]. Here are the required commands:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo yum install -y yum-utils
|
||||||
|
$> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
|
||||||
|
$> sudo yum makecache fast
|
||||||
|
$> sudo yum install docker-ce
|
||||||
|
$> sudo systemctl start docker
|
||||||
|
----
|
||||||
|
|
||||||
|
[[configure-nfs]]
|
||||||
|
Configure NFS
|
||||||
|
~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[on-clients]]
|
||||||
|
On Clients
|
||||||
|
^^^^^^^^^^
|
||||||
|
|
||||||
|
Your NFS Clients will mount Docker volumes over your NFS Server's shared
|
||||||
|
folders. Hence, you don't have to mount anything manually; you just have
|
||||||
|
to install the NFS commons:
|
||||||
|
|
||||||
|
[[on-ubuntu-14.04-1]]
|
||||||
|
On Ubuntu 14.04
|
||||||
|
+++++++++++++++
|
||||||
|
|
||||||
|
Simply install the NFS commons:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo apt-get install nfs-common
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-centos-7-1]]
|
||||||
|
On CentOS 7
|
||||||
|
+++++++++++
|
||||||
|
|
||||||
|
Install the NFS utils, and then start the required services:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> yum install nfs-utils
|
||||||
|
$> sudo systemctl enable rpcbind
|
||||||
|
$> sudo systemctl enable nfs-server
|
||||||
|
$> sudo systemctl enable nfs-lock
|
||||||
|
$> sudo systemctl enable nfs-idmap
|
||||||
|
$> sudo systemctl start rpcbind
|
||||||
|
$> sudo systemctl start nfs-server
|
||||||
|
$> sudo systemctl start nfs-lock
|
||||||
|
$> sudo systemctl start nfs-idmap
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-server]]
|
||||||
|
On Server
|
||||||
|
^^^^^^^^^
|
||||||
|
|
||||||
|
Your NFS Server will be the machine to physically host the data and
|
||||||
|
metadata. The packages we will install on it are slightly different
from the ones we installed on the clients.
|
||||||
|
|
||||||
|
[[on-ubuntu-14.04-2]]
|
||||||
|
On Ubuntu 14.04
|
||||||
|
+++++++++++++++
|
||||||
|
|
||||||
|
Install the NFS server specific package and the NFS commons:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo apt-get install nfs-kernel-server nfs-common
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-centos-7-2]]
|
||||||
|
On CentOS 7
|
||||||
|
+++++++++++
|
||||||
|
|
||||||
|
Same steps as with the client: install the NFS utils and start the
|
||||||
|
required services:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> yum install nfs-utils
|
||||||
|
$> sudo systemctl enable rpcbind
|
||||||
|
$> sudo systemctl enable nfs-server
|
||||||
|
$> sudo systemctl enable nfs-lock
|
||||||
|
$> sudo systemctl enable nfs-idmap
|
||||||
|
$> sudo systemctl start rpcbind
|
||||||
|
$> sudo systemctl start nfs-server
|
||||||
|
$> sudo systemctl start nfs-lock
|
||||||
|
$> sudo systemctl start nfs-idmap
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-ubuntu-14.04-and-centos-7]]
|
||||||
|
On Ubuntu 14.04 and CentOS 7
|
||||||
|
++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
Choose where your shared data and metadata from your local
|
||||||
|
http://www.zenko.io/cloudserver/[Zenko CloudServer] will be stored. We
|
||||||
|
chose to go with /var/nfs/data and /var/nfs/metadata. You also need to
|
||||||
|
set proper sharing permissions for these folders as they'll be shared
|
||||||
|
over NFS:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> mkdir -p /var/nfs/data /var/nfs/metadata
|
||||||
|
$> chmod -R 777 /var/nfs/
|
||||||
|
----
|
||||||
|
|
||||||
|
Now you need to update your */etc/exports* file. This is the file that
|
||||||
|
configures network permissions and rwx permissions for NFS access. By
|
||||||
|
default, Ubuntu applies the no_subtree_check option, so we declared both
|
||||||
|
folders with the same permissions, even though they're in the same tree:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo vim /etc/exports
|
||||||
|
----
|
||||||
|
|
||||||
|
In this file, add the following lines:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
/var/nfs/data 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
|
||||||
|
/var/nfs/metadata 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
|
||||||
|
----
|
||||||
|
|
||||||
|
Export this new NFS table:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo exportfs -a
|
||||||
|
----
|
||||||
|
|
||||||
|
Finally, you need to allow NFS mounts from Docker volumes on other
|
||||||
|
machines. You need to change the Docker config in
|
||||||
|
**/lib/systemd/system/docker.service**:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo vim /lib/systemd/system/docker.service
|
||||||
|
----
|
||||||
|
|
||||||
|
In this file, change the *MountFlags* option:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
MountFlags=shared
|
||||||
|
----
|
||||||
|
|
||||||
|
Now you just need to restart the NFS server and docker daemons so your
|
||||||
|
changes apply.
|
||||||
|
|
||||||
|
[[on-ubuntu-14.04-3]]
|
||||||
|
On Ubuntu 14.04
|
||||||
|
+++++++++++++++
|
||||||
|
|
||||||
|
Restart your NFS Server and docker services:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo service nfs-kernel-server restart
|
||||||
|
$> sudo service docker restart
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-centos-7-3]]
|
||||||
|
On CentOS 7
|
||||||
|
+++++++++++
|
||||||
|
|
||||||
|
Restart your NFS Server and docker daemons:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo systemctl restart nfs-server
|
||||||
|
$> sudo systemctl daemon-reload
|
||||||
|
$> sudo systemctl restart docker
|
||||||
|
----
|
||||||
|
|
||||||
|
[[set-up-your-docker-swarm-service]]
|
||||||
|
Set up your Docker Swarm service
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[on-all-machines-1]]
|
||||||
|
On All Machines
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
[[on-ubuntu-14.04-and-centos-7-1]]
|
||||||
|
On Ubuntu 14.04 and CentOS 7
|
||||||
|
++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
We will now set up the Docker volumes that will be mounted to the NFS
|
||||||
|
Server and serve as data and metadata storage for Zenko CloudServer.
|
||||||
|
These two commands have to be replicated on all machines:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
|
||||||
|
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata
|
||||||
|
----
|
||||||
|
|
||||||
|
There is no need to "docker exec" these volumes to mount them: the
Docker Swarm manager will do so when the Docker service is started.
|
||||||
|
|
||||||
|
[[on-server-1]]
|
||||||
|
On Server
|
||||||
|
+++++++++
|
||||||
|
|
||||||
|
To start a Docker service on a Docker Swarm cluster, you first have to
|
||||||
|
initialize that cluster (i.e., define a manager), then have the
|
||||||
|
workers/nodes join in, and then start the service. Initialize the swarm
|
||||||
|
cluster, and look at the response:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> docker swarm init --advertise-addr 10.200.15.113
|
||||||
|
|
||||||
|
Swarm initialized: current node (db2aqfu3bzfzzs9b1kfeaglmq) is now a manager.
|
||||||
|
|
||||||
|
To add a worker to this swarm, run the following command:
|
||||||
|
|
||||||
|
docker swarm join \
|
||||||
|
--token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka \
|
||||||
|
10.200.15.113:2377
|
||||||
|
|
||||||
|
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-clients-1]]
|
||||||
|
On Clients
|
||||||
|
++++++++++
|
||||||
|
|
||||||
|
Simply copy/paste the command provided by your `docker swarm init` output. When
|
||||||
|
all goes well, you'll get something like this:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377
|
||||||
|
|
||||||
|
This node joined a swarm as a worker.
|
||||||
|
----
|
||||||
|
|
||||||
|
[[on-server-2]]
|
||||||
|
On Server
|
||||||
|
+++++++++
|
||||||
|
|
||||||
|
Start the service on your swarm cluster!
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
If you run `docker service ls`, you should see the following output:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> docker service ls
|
||||||
|
ID NAME MODE REPLICAS IMAGE
|
||||||
|
ocmggza412ft s3 replicated 1/1 scality/s3server:latest
|
||||||
|
----
|
||||||
|
|
||||||
|
If your service won't start, consider disabling AppArmor/SELinux.
|
||||||
|
|
||||||
|
[[testing-your-high-availability-s3server]]
|
||||||
|
Testing your High Availability S3Server
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[on-all-machines-2]]
|
||||||
|
On All Machines
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
[[on-ubuntu-14.04-and-centos-7-2]]
|
||||||
|
On Ubuntu 14.04 and CentOS 7
|
||||||
|
++++++++++++++++++++++++++++
|
||||||
|
|
||||||
|
Find out where your Scality Zenko CloudServer is actually running
using the *docker ps* command. It can be on any node of the swarm
cluster, manager or worker. When you find it, stop it with
*docker stop <container id>* and you'll see it respawn on a different
node of the swarm cluster. This way, if one of your servers fails, or
if Docker stops unexpectedly, your end users will still be able to access
your local Zenko CloudServer.
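One possible sequence, assuming you run it on the node currently hosting
the container (the container id is whatever *docker ps* reports there):

[source,sourceCode,sh]
----
# Locate the running CloudServer container on this node
$> docker ps
# Stop it and let Swarm reschedule the task
$> docker stop <container id>
# From the manager, watch the task respawn on another node
$> docker service ps s3
----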
|
||||||
|
|
||||||
|
[[troubleshooting]]
|
||||||
|
Troubleshooting
|
||||||
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
To troubleshoot the service you can run:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> docker service ps s3
|
||||||
|
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
|
||||||
|
0ar81cw4lvv8chafm8pw48wbc s3.1 scality/s3server localhost.localdomain.localdomain Running Running 7 days ago
|
||||||
|
cvmf3j3bz8w6r4h0lf3pxo6eu \_ s3.1 scality/s3server localhost.localdomain.localdomain Shutdown Failed 7 days ago "task: non-zero exit (137)"
|
||||||
|
----
|
||||||
|
|
||||||
|
If the error is truncated, it is possible to have a more detailed view of
|
||||||
|
the error by inspecting the docker task ID:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> docker inspect cvmf3j3bz8w6r4h0lf3pxo6eu
|
||||||
|
----
|
||||||
|
|
||||||
|
[[off-you-go]]
|
||||||
|
Off you go!
|
||||||
|
~~~~~~~~~~~
|
||||||
|
|
||||||
|
Let us know what you use this functionality for, and if you'd like any
|
||||||
|
specific developments around it. Or, even better: come and contribute to
|
||||||
|
our https://github.com/scality/s3/[GitHub repository]! We look forward
|
||||||
|
to meeting you!
|
||||||
|
|
||||||
|
[[s3fs]]
|
||||||
|
S3FS
|
||||||
|
----
|
||||||
|
|
||||||
|
Export your buckets as a filesystem with s3fs on top of Zenko
|
||||||
|
CloudServer
|
||||||
|
|
||||||
|
https://github.com/s3fs-fuse/s3fs-fuse[s3fs] is an open source tool that
|
||||||
|
allows you to mount an S3 bucket on a filesystem-like backend. It is
|
||||||
|
available both on Debian and RedHat distributions. For this tutorial, we
|
||||||
|
used an Ubuntu 14.04 host to deploy and use s3fs over Scality's Zenko
|
||||||
|
CloudServer.
|
||||||
|
|
||||||
|
[[deploying-zenko-cloudserver-with-ssl]]
Deploying Zenko CloudServer with SSL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
First, you need to deploy **Zenko CloudServer**. This can be done very
|
||||||
|
easily via https://hub.docker.com/r/scality/s3server/[our DockerHub
|
||||||
|
page] (you want to run it with a file backend).
|
||||||
|
|
||||||
|
___________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
_Note:_ _- If you don't have docker installed on your machine, here are
|
||||||
|
the https://docs.docker.com/engine/installation/[instructions to install
|
||||||
|
it for your distribution]_
|
||||||
|
___________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
|
||||||
|
You must also set up SSL with Zenko CloudServer to use
|
||||||
|
s3fs. We have a nice
|
||||||
|
https://s3.scality.com/v1.0/page/scality-with-ssl[tutorial] to help you
|
||||||
|
do it.
|
||||||
|
|
||||||
|
[[s3fs-setup]]
|
||||||
|
s3fs setup
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
[[installing-s3fs]]
|
||||||
|
Installing s3fs
|
||||||
|
^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
s3fs has quite a few dependencies. As explained in their
|
||||||
|
https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation[README],
|
||||||
|
the following commands should install everything for Ubuntu 14.04:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev
|
||||||
|
$> sudo apt-get install libfuse-dev libssl-dev libxml2-dev make pkg-config
|
||||||
|
----
|
||||||
|
|
||||||
|
Now you want to install s3fs per se:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> git clone https://github.com/s3fs-fuse/s3fs-fuse.git
|
||||||
|
$> cd s3fs-fuse
|
||||||
|
$> ./autogen.sh
|
||||||
|
$> ./configure
|
||||||
|
$> make
|
||||||
|
$> sudo make install
|
||||||
|
----
|
||||||
|
|
||||||
|
Check that s3fs is properly installed by checking its version. It should
|
||||||
|
answer as below:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> s3fs --version
|
||||||
|
----
|
||||||
|
|
||||||
|
____________________________________________________________________________
|
||||||
|
Amazon Simple Storage Service File System V1.80(commit:d40da2c) with
|
||||||
|
OpenSSL
|
||||||
|
____________________________________________________________________________
|
||||||
|
|
||||||
|
[[configuring-s3fs]]
|
||||||
|
Configuring s3fs
|
||||||
|
^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
s3fs expects you to provide it with a password file. Our file is
|
||||||
|
`/etc/passwd-s3fs`. The structure for this file is
|
||||||
|
`ACCESSKEYID:SECRETKEYID`, so, for S3Server, you can run:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> echo 'accessKey1:verySecretKey1' > /etc/passwd-s3fs
|
||||||
|
$> chmod 600 /etc/passwd-s3fs
|
||||||
|
----
|
||||||
|
|
||||||
|
[[using-zenko-cloudserver-with-s3fs]]
Using Zenko CloudServer with s3fs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
First, you're going to need a mountpoint; we chose `/mnt/tests3fs`:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> mkdir /mnt/tests3fs
|
||||||
|
----
|
||||||
|
|
||||||
|
Then, you want to create a bucket on your local Zenko CloudServer; we
|
||||||
|
named it `tests3fs`:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> s3cmd mb s3://tests3fs
----
|
||||||
|
|
||||||
|
*Note:* If you've never used s3cmd with our Zenko CloudServer, our README
provides you with a
https://github.com/scality/S3/blob/master/README.md#s3cmd[recommended config].
|
||||||
|
|
||||||
|
|
||||||
|
Now you can mount your bucket to your mountpoint with s3fs:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> s3fs tests3fs /mnt/tests3fs -o passwd_file=/etc/passwd-s3fs -o url="https://s3.scality.test:8000/" -o use_path_request_style
----
|
||||||
|
|
||||||
|
If you're curious, the structure of this command is
`s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS`, and the
options are mandatory and serve the following purposes:

* `passwd_file`: specify the path to your password file;
* `url`: specify the hostname used by your SSL provider;
* `use_path_request_style`: force path style (by default, s3fs
uses subdomains (DNS style)).
|
||||||
|
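To quickly confirm the mount succeeded, you can check that the
mountpoint now shows up as a mounted filesystem (a simple sanity check,
not part of the original steps):

[source,sourceCode,sh]
----
$> df -h /mnt/tests3fs
----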
|
||||||
|
|
||||||
|
From now on, you can either add files to your mountpoint, or add objects
|
||||||
|
to your bucket, and they'll show in the other. +
|
||||||
|
For example, let's create two files, and then a directory with a file
|
||||||
|
in our mountpoint:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> touch /mnt/tests3fs/file1 /mnt/tests3fs/file2
|
||||||
|
$> mkdir /mnt/tests3fs/dir1
|
||||||
|
$> touch /mnt/tests3fs/dir1/file3
|
||||||
|
----
|
||||||
|
|
||||||
|
Now, I can use s3cmd to show me what is actually in S3Server:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> s3cmd ls -r s3://tests3fs
|
||||||
|
|
||||||
|
2017-02-28 17:28 0 s3://tests3fs/dir1/
|
||||||
|
2017-02-28 17:29 0 s3://tests3fs/dir1/file3
|
||||||
|
2017-02-28 17:28 0 s3://tests3fs/file1
|
||||||
|
2017-02-28 17:28 0 s3://tests3fs/file2
|
||||||
|
----
|
||||||
|
|
||||||
|
Now you can enjoy a filesystem view on your local Zenko CloudServer!
|
||||||
|
|
||||||
|
[[duplicity]]
|
||||||
|
Duplicity
|
||||||
|
---------
|
||||||
|
|
||||||
|
How to back up your files with Zenko CloudServer.
|
||||||
|
|
||||||
|
[[installing]]
|
||||||
|
Installing
|
||||||
|
~~~~~~~~~~
|
||||||
|
|
||||||
|
[[installing-duplicity-and-its-dependencies]]
|
||||||
|
Installing Duplicity and its dependencies
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Second, you want to install
|
||||||
|
http://duplicity.nongnu.org/index.html[Duplicity]. You have to download
|
||||||
|
https://code.launchpad.net/duplicity/0.7-series/0.7.11/+download/duplicity-0.7.11.tar.gz[this
|
||||||
|
tarball], decompress it, and then check out the README inside, which will
|
||||||
|
give you a list of dependencies to install. If you're using Ubuntu
|
||||||
|
14.04, this is your lucky day: here is a lazy step-by-step install.
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> apt-get install librsync-dev gnupg
|
||||||
|
$> apt-get install python-dev python-pip python-lockfile
|
||||||
|
$> pip install -U boto
|
||||||
|
----
|
||||||
|
|
||||||
|
Then you want to actually install Duplicity:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> tar zxvf duplicity-0.7.11.tar.gz
|
||||||
|
$> cd duplicity-0.7.11
|
||||||
|
$> python setup.py install
|
||||||
|
----
|
||||||
|
|
||||||
|
[[using]]
|
||||||
|
Using
|
||||||
|
~~~~~
|
||||||
|
|
||||||
|
[[testing-your-installation]]
|
||||||
|
Testing your installation
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
First, we're just going to quickly check that Zenko CloudServer is
|
||||||
|
actually running. To do so, simply run `$> docker ps`. You should see
|
||||||
|
one container named `scality/s3server`. If that is not the case, try
|
||||||
|
`$> docker start s3server`, and check again.
|
||||||
|
|
||||||
|
Secondly, as you probably know, Duplicity uses a module called *Boto* to
|
||||||
|
send requests to S3. Boto requires a configuration file located in
|
||||||
|
*`/etc/boto.cfg`* to have your credentials and preferences. Here is a
|
||||||
|
minimalistic config
|
||||||
|
http://boto.cloudhackers.com/en/latest/getting_started.html[that you can
|
||||||
|
fine-tune following these instructions].
|
||||||
|
|
||||||
|
....
|
||||||
|
[Credentials]
|
||||||
|
aws_access_key_id = accessKey1
|
||||||
|
aws_secret_access_key = verySecretKey1
|
||||||
|
|
||||||
|
[Boto]
|
||||||
|
# If using SSL, set to True
|
||||||
|
is_secure = False
|
||||||
|
# If using SSL, unmute and provide absolute path to local CA certificate
|
||||||
|
# ca_certificates_file = /absolute/path/to/ca.crt
....
|
||||||
|
|
||||||
|
*Note:* If you want to set up SSL with Zenko CloudServer, check out our
http://link/to/SSL/tutorial[tutorial].
|
||||||
|
|
||||||
|
|
||||||
|
At this point, we've met all the requirements to start running Zenko
|
||||||
|
CloudServer as a backend to Duplicity. So we should be able to back up a
|
||||||
|
local folder/file to local S3. Let's try with the decompressed Duplicity
|
||||||
|
folder:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
$> duplicity duplicity-0.7.11 "s3://127.0.0.1:8000/testbucket/"
----
|
||||||
|
|
||||||
|
*Note:* *Duplicity will prompt you for a symmetric encryption
|
||||||
|
passphrase. Save it somewhere as you will need it to recover your
|
||||||
|
data. Alternatively, you can also add the ``--no-encryption`` flag
|
||||||
|
and the data will be stored plain.*
|
||||||
|
|
||||||
|
|
||||||
|
If this command is successful, you will get an output looking like this:
|
||||||
|
|
||||||
|
....
|
||||||
|
--------------[ Backup Statistics ]--------------
|
||||||
|
StartTime 1486486547.13 (Tue Feb 7 16:55:47 2017)
|
||||||
|
EndTime 1486486547.40 (Tue Feb 7 16:55:47 2017)
|
||||||
|
ElapsedTime 0.27 (0.27 seconds)
|
||||||
|
SourceFiles 388
|
||||||
|
SourceFileSize 6634529 (6.33 MB)
|
||||||
|
NewFiles 388
|
||||||
|
NewFileSize 6634529 (6.33 MB)
|
||||||
|
DeletedFiles 0
|
||||||
|
ChangedFiles 0
|
||||||
|
ChangedFileSize 0 (0 bytes)
|
||||||
|
ChangedDeltaSize 0 (0 bytes)
|
||||||
|
DeltaEntries 388
|
||||||
|
RawDeltaSize 6392865 (6.10 MB)
|
||||||
|
TotalDestinationSizeChange 2003677 (1.91 MB)
|
||||||
|
Errors 0
|
||||||
|
-------------------------------------------------
|
||||||
|
....
|
||||||
|
|
||||||
|
Congratulations! You can now back up to your local S3 through duplicity
|
||||||
|
:)
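To check that a backup is actually recoverable, you can restore it to a
scratch directory; the target path below is only an example, and
Duplicity will prompt for the passphrase you chose earlier:

[source,sourceCode,sh]
----
$> duplicity restore "s3://127.0.0.1:8000/testbucket/" /tmp/duplicity-restore
----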
|
||||||
|
|
||||||
|
[[automating-backups]]
|
||||||
|
Automating backups
|
||||||
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Now you probably want to back up your files periodically. The easiest
|
||||||
|
way to do this is to write a bash script and add it to your crontab.
|
||||||
|
Here is my suggestion for such a file:
|
||||||
|
|
||||||
|
[source,sourceCode,sh]
|
||||||
|
----
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Export your passphrase so you don't have to type anything
|
||||||
|
export PASSPHRASE="mypassphrase"
|
||||||
|
|
||||||
|
# If you want to use a GPG Key, put it here and unmute the line below
|
||||||
|
#GPG_KEY=
|
||||||
|
|
||||||
|
# Define your backup bucket, with localhost specified
|
||||||
|
DEST="s3://127.0.0.1:8000/testbuckets3server/"
|
||||||
|
|
||||||
|
# Define the absolute path to the folder you want to backup
|
||||||
|
SOURCE=/root/testfolder
|
||||||
|
|
||||||
|
# Set to "full" for full backups, and "incremental" for incremental backups
|
||||||
|
# Warning: you have to perform one full backup before you can perform
|
||||||
|
# incremental ones on top of it
|
||||||
|
FULL=incremental
|
||||||
|
|
||||||
|
# How long to keep backups for; if you don't want to delete old
|
||||||
|
# backups, keep empty; otherwise, syntax is "1Y" for one year, "1M"
|
||||||
|
# for one month, "1D" for one day
|
||||||
|
OLDER_THAN="1Y"
|
||||||
|
|
||||||
|
# is_running checks whether duplicity is currently completing a task
|
||||||
|
is_running=$(ps -ef | grep duplicity | grep python | wc -l)
|
||||||
|
|
||||||
|
# If duplicity is already completing a task, this will simply not run
|
||||||
|
if [ $is_running -eq 0 ]; then
|
||||||
|
echo "Backup for ${SOURCE} started"
|
||||||
|
|
||||||
|
# If you want to delete backups older than a certain time, we do it here
|
||||||
|
if [ "$OLDER_THAN" != "" ]; then
|
||||||
|
echo "Removing backups older than ${OLDER_THAN}"
|
||||||
|
duplicity remove-older-than ${OLDER_THAN} ${DEST}
|
||||||
|
fi
|
||||||
|
|
||||||
|
# This is where the actual backup takes place
|
||||||
|
echo "Backing up ${SOURCE}..."
|
||||||
|
duplicity ${FULL} \
|
||||||
|
${SOURCE} ${DEST}
|
||||||
|
# If you're using GPG, paste this in the command above
|
||||||
|
# --encrypt-key=${GPG_KEY} --sign-key=${GPG_KEY} \
|
||||||
|
# If you want to exclude a subfolder/file, put it below and
|
||||||
|
# paste this
|
||||||
|
# in the command above
|
||||||
|
# --exclude=/${SOURCE}/path_to_exclude \
|
||||||
|
|
||||||
|
echo "Backup for ${SOURCE} complete"
|
||||||
|
echo "------------------------------------"
|
||||||
|
fi
|
||||||
|
# Forget the passphrase...
|
||||||
|
unset PASSPHRASE
|
||||||
|
----
|
||||||
|
|
||||||
|
So let's say you put this file in `/usr/local/sbin/backup.sh`. Next you
want to run `crontab -e` and paste your configuration in the file that
opens. If you're unfamiliar with Cron, here is a good
https://help.ubuntu.com/community/CronHowto[How To]. The folder I'm
backing up is a folder I modify constantly during my workday, so I want
incremental backups every 5 minutes from 8AM to 9PM, Monday to Friday.
Here is the line I will paste in my crontab:
|
||||||
|
|
||||||
|
[source,sourceCode,cron]
|
||||||
|
----
|
||||||
|
*/5 8-20 * * 1-5 /usr/local/sbin/backup.sh
|
||||||
|
----
|
||||||
|
|
||||||
|
Now I can try and add / remove files from the folder I'm backing up, and
|
||||||
|
I will see incremental backups in my bucket.
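If you want to verify what Duplicity has stored, `collection-status`
lists the full and incremental backup chains for a given target (here,
the bucket used by the script above):

[source,sourceCode,sh]
----
$> duplicity collection-status "s3://127.0.0.1:8000/testbuckets3server/"
----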
|
|
@ -0,0 +1,444 @@
|
||||||
|
Using Public Clouds as data backends
|
||||||
|
====================================
|
||||||
|
|
||||||
|
[[introduction]]
|
||||||
|
Introduction
|
||||||
|
------------
|
||||||
|
|
||||||
|
As stated in our link:../GETTING_STARTED/#location-configuration[GETTING
|
||||||
|
STARTED guide], new data backends can be added by creating a region
|
||||||
|
(also called location constraint) with the right endpoint and
|
||||||
|
credentials. This section of the documentation shows you how to set up
|
||||||
|
our currently supported public cloud backends:
|
||||||
|
|
||||||
|
* link:#aws-s3-as-a-data-backend[Amazon S3] ;
|
||||||
|
* link:#microsoft-azure-as-a-data-backend[Microsoft Azure] .
|
||||||
|
|
||||||
|
For each public cloud backend, you will have to edit your CloudServer
|
||||||
|
`locationConfig.json` and do a few setup steps on the applicable public
|
||||||
|
cloud backend.
|
||||||
|
|
||||||
|
[[aws-s3-as-a-data-backend]]
|
||||||
|
AWS S3 as a data backend
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
[[from-the-aws-s3-console-or-any-aws-s3-cli-tool]]
|
||||||
|
From the AWS S3 Console (or any AWS S3 CLI tool)
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Create a bucket where you will host your data for this new location
|
||||||
|
constraint. This bucket must have versioning enabled:
|
||||||
|
|
||||||
|
* This is an option you may choose to activate at step 2 of Bucket
|
||||||
|
Creation in the Console;
|
||||||
|
* With AWS CLI, use `put-bucket-versioning` from the `s3api` commands on
|
||||||
|
your bucket of choice;
|
||||||
|
* Using other tools, please refer to your tool's documentation.
|
||||||
|
|
||||||
|
In this example, our bucket is named `zenkobucket` and has
|
||||||
|
versioning enabled.
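If you prefer the command line over the Console, and assuming the AWS
CLI is installed and configured with credentials allowed to manage this
bucket, the equivalent setup looks like this:

[source,sourceCode,shell]
----
# Create the bucket and enable versioning on it
$> aws s3api create-bucket --bucket zenkobucket
$> aws s3api put-bucket-versioning --bucket zenkobucket \
       --versioning-configuration Status=Enabled
----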
|
||||||
|
|
||||||
|
[[from-the-cloudserver-repository]]
|
||||||
|
From the CloudServer repository
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[locationconfig.json]]
|
||||||
|
locationConfig.json
|
||||||
|
^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Edit this file to add a new location constraint. This location
|
||||||
|
constraint will contain the information for the AWS S3 bucket to which
|
||||||
|
you will be writing your data whenever you create a CloudServer bucket
|
||||||
|
in this location. There are a few configurable options here:
|
||||||
|
|
||||||
|
* `type` : set to `aws_s3` to indicate this location constraint is
|
||||||
|
writing data to AWS S3;
|
||||||
|
* `legacyAwsBehavior` : set to `true` to indicate this region should
|
||||||
|
behave like AWS S3 `us-east-1` region, set to `false` to indicate this
|
||||||
|
region should behave like any other AWS S3 region;
|
||||||
|
* `bucketName` : set to an _existing bucket_ in your AWS S3 Account;
|
||||||
|
this is the bucket in which your data will be stored for this location
|
||||||
|
constraint;
|
||||||
|
* `awsEndpoint` : set to your bucket's endpoint, usually
|
||||||
|
`s3.amazonaws.com`;
|
||||||
|
* `bucketMatch` : set to `true` if you want your object name to be the
|
||||||
|
same in your local bucket and your AWS S3 bucket; set to `false` if you
|
||||||
|
want your object name to be of the form
|
||||||
|
`{{localBucketName}}/{{objectname}}` in your AWS S3 hosted bucket;
|
||||||
|
* `credentialsProfile` and `credentials` are two ways to provide your
|
||||||
|
AWS S3 credentials for that bucket, _use only one of them_ :
|
||||||
|
** `credentialsProfile` : set to the profile name allowing you to access
|
||||||
|
your AWS S3 bucket from your `~/.aws/credentials` file;
|
||||||
|
** `credentials` : set the two fields inside the object (`accessKey` and
|
||||||
|
`secretKey`) to their respective values from your AWS credentials.
|
||||||
|
|
||||||
|
[source,sourceCode,json]
|
||||||
|
----
|
||||||
|
(...)
|
||||||
|
"aws-test": {
|
||||||
|
"type": "aws_s3",
|
||||||
|
"legacyAwsBehavior": true,
|
||||||
|
"details": {
|
||||||
|
"awsEndpoint": "s3.amazonaws.com",
|
||||||
|
"bucketName": "zenkobucket",
|
||||||
|
"bucketMatch": true,
|
||||||
|
"credentialsProfile": "zenko"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
(...)
|
||||||
|
----
|
||||||
|
|
||||||
|
[source,sourceCode,json]
|
||||||
|
----
|
||||||
|
(...)
|
||||||
|
"aws-test": {
|
||||||
|
"type": "aws_s3",
|
||||||
|
"legacyAwsBehavior": true,
|
||||||
|
"details": {
|
||||||
|
"awsEndpoint": "s3.amazonaws.com",
|
||||||
|
"bucketName": "zenkobucket",
|
||||||
|
"bucketMatch": true,
|
||||||
|
"credentials": {
|
||||||
|
"accessKey": "WHDBFKILOSDDVF78NPMQ",
|
||||||
|
"secretKey": "87hdfGCvDS+YYzefKLnjjZEYstOIuIjs/2X72eET"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
(...)
|
||||||
|
----
|
||||||
|
|
||||||
|
__________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
*warning*
|
||||||
|
|
||||||
|
If you set `bucketMatch` to `true`, we strongly advise that you only
|
||||||
|
have one local bucket per AWS S3 location. With `bucketMatch` set to
`true`, your object names in your AWS S3 bucket will not be prefixed
with your CloudServer bucket name. This means that if you put an object
|
||||||
|
`foo` to your CloudServer bucket `zenko1` and you then put a different
|
||||||
|
`foo` to your CloudServer bucket `zenko2` and both `zenko1` and `zenko2`
|
||||||
|
point to the same AWS bucket, the second `foo` will overwrite the first
|
||||||
|
`foo`.
|
||||||
|
__________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
|
||||||
|
[[awscredentials]]
|
||||||
|
~/.aws/credentials
|
||||||
|
^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
__________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
*tip*
|
||||||
|
|
||||||
|
If you explicitly set your `accessKey` and `secretKey` in the
|
||||||
|
`credentials` object of your `aws_s3` location in your
|
||||||
|
`locationConfig.json` file, you may skip this section
|
||||||
|
__________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
|
||||||
|
Make sure your `~/.aws/credentials` file has a profile matching the one
|
||||||
|
defined in your `locationConfig.json`. Following our previous example,
|
||||||
|
it would look like:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
[zenko]
|
||||||
|
aws_access_key_id=WHDBFKILOSDDVF78NPMQ
|
||||||
|
aws_secret_access_key=87hdfGCvDS+YYzefKLnjjZEYstOIuIjs/2X72eET
|
||||||
|
----
|
||||||
|
|
||||||
|
[[start-the-server-with-the-ability-to-write-to-aws-s3]]
|
||||||
|
Start the server with the ability to write to AWS S3
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Inside the repository, once all the files have been edited, you should
|
||||||
|
be able to start the server and start writing data to AWS S3 through
|
||||||
|
CloudServer.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
# Start the server locally
|
||||||
|
$> S3DATA=multiple npm start
|
||||||
|
----
|
||||||
|
|
||||||
|
[[run-the-server-as-a-docker-container-with-the-ability-to-write-to-aws-s3]]
|
||||||
|
Run the server as a docker container with the ability to write to AWS S3
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
____________________________________________________________________________________________________________________________
|
||||||
|
*tip*
|
||||||
|
|
||||||
|
If you set the `credentials` object in your `locationConfig.json` file,
|
||||||
|
you don't need to mount your `.aws/credentials` file
|
||||||
|
____________________________________________________________________________________________________________________________
|
||||||
|
|
||||||
|
Mount all the files that have been edited to override defaults, and do a
|
||||||
|
standard Docker run; then you can start writing data to AWS S3 through
|
||||||
|
CloudServer.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
# Start the server in a Docker container
|
||||||
|
$> sudo docker run -d --name CloudServer \
|
||||||
|
-v $(pwd)/data:/usr/src/app/localData \
|
||||||
|
-v $(pwd)/metadata:/usr/src/app/localMetadata \
|
||||||
|
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
|
||||||
|
-v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
|
||||||
|
-v ~/.aws/credentials:/root/.aws/credentials \
|
||||||
|
-e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[testing-put-an-object-to-aws-s3-using-cloudserver]]
|
||||||
|
Testing: put an object to AWS S3 using CloudServer
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
In order to start testing pushing to AWS S3, you will need to create a
|
||||||
|
local bucket in the AWS S3 location constraint - this local bucket will
|
||||||
|
only store the metadata locally, while both the data and any user
|
||||||
|
metadata (`x-amz-meta` headers sent with a PUT object, and tags) will be
|
||||||
|
stored on AWS S3. This example is based on all our previous steps.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
# Create a local bucket storing data in AWS S3
|
||||||
|
$> s3cmd --host=127.0.0.1:8000 mb s3://zenkobucket --region=aws-test
|
||||||
|
# Put an object to AWS S3, and store the metadata locally
|
||||||
|
$> s3cmd --host=127.0.0.1:8000 put /etc/hosts s3://zenkobucket/testput
|
||||||
|
upload: '/etc/hosts' -> 's3://zenkobucket/testput' [1 of 1]
|
||||||
|
330 of 330 100% in 0s 380.87 B/s done
|
||||||
|
# List locally to check you have the metadata
|
||||||
|
$> s3cmd --host=127.0.0.1:8000 ls s3://zenkobucket
|
||||||
|
2017-10-23 10:26 330 s3://zenkobucket/testput
|
||||||
|
----
|
||||||
|
|
||||||
|
Then, from the AWS Console, if you go into your bucket, you should see
|
||||||
|
your newly uploaded object:
|
||||||
|
|
||||||
|
image:../res/aws-console-successful-put.png[image]
|
||||||
|
|
||||||
|
[[troubleshooting]]
|
||||||
|
Troubleshooting
|
||||||
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Make sure your `~/.s3cfg` file has credentials matching your local
|
||||||
|
CloudServer credentials defined in `conf/authdata.json`. By default, the
|
||||||
|
access key is `accessKey1` and the secret key is `verySecretKey1`. For
|
||||||
|
more information, refer to our template link:./CLIENTS/#s3cmd[~/.s3cfg].
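As a reference, a minimal `~/.s3cfg` pointing s3cmd at a local
CloudServer could look like the sketch below; adapt it to your own
setup:

[source,sourceCode,ini]
----
[default]
access_key = accessKey1
secret_key = verySecretKey1
host_base = 127.0.0.1:8000
host_bucket = 127.0.0.1:8000
use_https = False
----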
|
||||||
|
|
||||||
|
Pre-existing objects in your AWS S3 hosted bucket can unfortunately not
|
||||||
|
be accessed by CloudServer at this time.
|
||||||
|
|
||||||
|
Make sure versioning is enabled in your remote AWS S3 hosted bucket. To
|
||||||
|
check, using the AWS Console, click on your bucket name, then on
|
||||||
|
"Properties" at the top, and then you should see something like this:
|
||||||
|
|
||||||
|
image:../res/aws-console-versioning-enabled.png[image]
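Alternatively, assuming the AWS CLI is configured for this account, you
can check the versioning status from the command line; it should report
`"Status": "Enabled"`:

[source,sourceCode,shell]
----
$> aws s3api get-bucket-versioning --bucket zenkobucket
----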
|
||||||
|
|
||||||
|
[[microsoft-azure-as-a-data-backend]]
|
||||||
|
Microsoft Azure as a data backend
|
||||||
|
---------------------------------
|
||||||
|
|
||||||
|
[[from-the-ms-azure-console]]
|
||||||
|
From the MS Azure Console
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
From your Storage Account dashboard, create a container where you will
|
||||||
|
host your data for this new location constraint.
|
||||||
|
|
||||||
|
You will also need to get one of your Storage Account Access Keys, and
|
||||||
|
to provide it to CloudServer. This can be found from your Storage
|
||||||
|
Account dashboard, under "Settings", then "Access keys".
|
||||||
|
|
||||||
|
In this example, our container will be named `zenkontainer`, and will
|
||||||
|
belong to the `zenkomeetups` Storage Account.
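If you prefer the command line over the MS Azure Console, and assuming
the Azure CLI (`az`) is installed, the container can be created like
this (replace the placeholder with one of your Storage Account Access
Keys):

[source,sourceCode,shell]
----
$> az storage container create --name zenkontainer \
       --account-name zenkomeetups \
       --account-key "<your storage account access key>"
----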
|
||||||
|
|
||||||
|
[[from-the-cloudserver-repository-1]]
|
||||||
|
From the CloudServer repository
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[locationconfig.json-1]]
|
||||||
|
locationConfig.json
|
||||||
|
^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
Edit this file to add a new location constraint. This location
|
||||||
|
constraint will contain the information for the MS Azure container to
|
||||||
|
which you will be writing your data whenever you create a CloudServer
|
||||||
|
bucket in this location. There are a few configurable options here:
|
||||||
|
|
||||||
|
* `type` : set to `azure` to indicate this location constraint is
|
||||||
|
writing data to MS Azure;
|
||||||
|
* `legacyAwsBehavior` : set to `true` to indicate this region should
|
||||||
|
behave like AWS S3 `us-east-1` region, set to `false` to indicate this
|
||||||
|
region should behave like any other AWS S3 region (in the case of MS
|
||||||
|
Azure hosted data, this is mostly relevant for the format of errors);
|
||||||
|
* `azureStorageEndpoint` : set to your storage account's endpoint,
|
||||||
|
usually `https://{{storageAccountName}}.blob.core.windows.net`;
|
||||||
|
* `azureContainerName` : set to an _existing container_ in your MS Azure
|
||||||
|
storage account; this is the container in which your data will be stored
|
||||||
|
for this location constraint;
|
||||||
|
* `bucketMatch` : set to `true` if you want your object name to be the
|
||||||
|
same in your local bucket and your MS Azure container; set to `false` if
|
||||||
|
you want your object name to be of the form
|
||||||
|
`{{localBucketName}}/{{objectname}}` in your MS Azure container ;
|
||||||
|
* `azureStorageAccountName` : the MS Azure Storage Account to which your
|
||||||
|
container belongs;
|
||||||
|
* `azureStorageAccessKey` : one of the Access Keys associated to the
|
||||||
|
above defined MS Azure Storage Account.
|
||||||
|
|
||||||
|
[source,sourceCode,json]
|
||||||
|
----
|
||||||
|
(...)
|
||||||
|
"azure-test": {
|
||||||
|
"type": "azure",
|
||||||
|
"legacyAwsBehavior": false,
|
||||||
|
"details": {
|
||||||
|
"azureStorageEndpoint": "https://zenkomeetups.blob.core.windows.net/",
|
||||||
|
"bucketMatch": true,
|
||||||
|
"azureContainerName": "zenkontainer",
|
||||||
|
"azureStorageAccountName": "zenkomeetups",
|
||||||
|
"azureStorageAccessKey": "auhyDo8izbuU4aZGdhxnWh0ODKFP3IWjsN1UfFaoqFbnYzPj9bxeCVAzTIcgzdgqomDKx6QS+8ov8PYCON0Nxw=="
|
||||||
|
}
|
||||||
|
},
|
||||||
|
(...)
|
||||||
|
----
|
||||||
|
|
||||||
|
_________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
*warning*
|
||||||
|
|
||||||
|
If you set `bucketMatch` to `true`, we strongly advise that you only
|
||||||
|
have one local bucket per MS Azure location. With `bucketMatch` set
to `true`, your object names in your MS Azure container will not be
prefixed with your CloudServer bucket name. This means that if you put
|
||||||
|
an object `foo` to your CloudServer bucket `zenko1` and you then put a
|
||||||
|
different `foo` to your CloudServer bucket `zenko2` and both `zenko1`
|
||||||
|
and `zenko2` point to the same MS Azure container, the second `foo` will
|
||||||
|
overwrite the first `foo`.
|
||||||
|
_________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
|
||||||
|
__________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
*tip*
|
||||||
|
|
||||||
|
You may export environment variables to *override* some of your
|
||||||
|
`locationConfig.json` variables; the syntax for them is
|
||||||
|
`{{region-name}}_{{ENV_VAR_NAME}}`; currently, the available variables
|
||||||
|
are those shown below, with the values used in the current example:
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
$> export azure-test_AZURE_STORAGE_ACCOUNT_NAME="zenkomeetups"
|
||||||
|
$> export azure-test_AZURE_STORAGE_ACCESS_KEY="auhyDo8izbuU4aZGdhxnWh0ODKFP3IWjsN1UfFaoqFbnYzPj9bxeCVAzTIcgzdgqomDKx6QS+8ov8PYCON0Nxw=="
|
||||||
|
$> export azure-test_AZURE_STORAGE_ENDPOINT="https://zenkomeetups.blob.core.windows.net/"
|
||||||
|
----
|
||||||
|
__________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________________
|
||||||
|
|
||||||
|
[[start-the-server-with-the-ability-to-write-to-ms-azure]]
|
||||||
|
Start the server with the ability to write to MS Azure
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Inside the repository, once all the files have been edited, you should
|
||||||
|
be able to start the server and start writing data to MS Azure through
|
||||||
|
CloudServer.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
# Start the server locally
|
||||||
|
$> S3DATA=multiple npm start
|
||||||
|
----
|
||||||
|
|
||||||
|
[[run-the-server-as-a-docker-container-with-the-ability-to-write-to-ms-azure]]
|
||||||
|
Run the server as a docker container with the ability to write to MS Azure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Mount all the files that have been edited to override defaults, and do a
|
||||||
|
standard Docker run; then you can start writing data to MS Azure through
|
||||||
|
CloudServer.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
# Start the server in a Docker container
|
||||||
|
$> sudo docker run -d --name CloudServer \
|
||||||
|
-v $(pwd)/data:/usr/src/app/localData \
|
||||||
|
-v $(pwd)/metadata:/usr/src/app/localMetadata \
|
||||||
|
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
|
||||||
|
-v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
|
||||||
|
-e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000
|
||||||
|
-d scality/s3server
|
||||||
|
----
|
||||||
|
|
||||||
|
[[testing-put-an-object-to-ms-azure-using-cloudserver]]
|
||||||
|
Testing: put an object to MS Azure using CloudServer
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
In order to start testing pushing to MS Azure, you will need to create a
|
||||||
|
local bucket in the MS Azure region - this local bucket will only store
|
||||||
|
the metadata locally, while both the data and any user metadata
|
||||||
|
(`x-amz-meta` headers sent with a PUT object, and tags) will be stored
|
||||||
|
on MS Azure. This example is based on all our previous steps.
|
||||||
|
|
||||||
|
[source,sourceCode,shell]
|
||||||
|
----
|
||||||
|
# Create a local bucket storing data in MS Azure
|
||||||
|
$> s3cmd --host=127.0.0.1:8000 mb s3://zenkontainer --region=azure-test
|
||||||
|
# Put an object to MS Azure, and store the metadata locally
|
||||||
|
$> s3cmd --host=127.0.0.1:8000 put /etc/hosts s3://zenkontainer/testput
|
||||||
|
upload: '/etc/hosts' -> 's3://zenkontainer/testput' [1 of 1]
|
||||||
|
330 of 330 100% in 0s 380.87 B/s done
|
||||||
|
# List locally to check you have the metadata
|
||||||
|
$> s3cmd --host=127.0.0.1:8000 ls s3://zenkontainer
|
||||||
|
2017-10-24 14:38 330 s3://zenkontainer/testput
|
||||||
|
----
|
||||||
|
|
||||||
|
Then, from the MS Azure Console, if you go into your container, you
|
||||||
|
should see your newly uploaded object:
|
||||||
|
|
||||||
|
image:../res/azure-console-successful-put.png[image]
|
||||||
|
|
||||||
|
[[troubleshooting-1]]
|
||||||
|
Troubleshooting
|
||||||
|
~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
Make sure your `~/.s3cfg` file has credentials matching your local
|
||||||
|
CloudServer credentials defined in `conf/authdata.json`. By default, the
|
||||||
|
access key is `accessKey1` and the secret key is `verySecretKey1`. For
|
||||||
|
more information, refer to our template link:./CLIENTS/#s3cmd[~/.s3cfg].
|
||||||
|
|
||||||
|
Pre-existing objects in your MS Azure container can unfortunately not be
|
||||||
|
accessed by CloudServer at this time.
|
||||||
|
|
||||||
|
[[for-any-data-backend]]
|
||||||
|
For any data backend
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
[[from-the-cloudserver-repository-2]]
|
||||||
|
From the CloudServer repository
|
||||||
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
|
[[config.json]]
|
||||||
|
config.json
|
||||||
|
^^^^^^^^^^^
|
||||||
|
|
||||||
|
__________________________________________________________________________________________________________________
|
||||||
|
*important*
|
||||||
|
|
||||||
|
You only need to follow this section if you want to define a given
|
||||||
|
location as the default for a specific endpoint
|
||||||
|
__________________________________________________________________________________________________________________
|
||||||
|
|
||||||
|
Edit the `restEndpoints` section of your `config.json` file to add an
endpoint definition, mapping that endpoint to the location you want to
use as its default. In this example, we'll make
|
||||||
|
`custom-location` our default location for the endpoint `zenkotos3.com`:
|
||||||
|
|
||||||
|
[source,sourceCode,json]
|
||||||
|
----
|
||||||
|
(...)
|
||||||
|
"restEndpoints": {
|
||||||
|
"localhost": "us-east-1",
|
||||||
|
"127.0.0.1": "us-east-1",
|
||||||
|
"cloudserver-front": "us-east-1",
|
||||||
|
"s3.docker.test": "us-east-1",
|
||||||
|
"127.0.0.2": "us-east-1",
|
||||||
|
"zenkotos3.com": "custom-location"
|
||||||
|
},
|
||||||
|
(...)
|
||||||
|
----
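With this mapping in place, a bucket created through the
`zenkotos3.com` endpoint without an explicit region ends up in
`custom-location`. As a sketch, assuming `zenkotos3.com` resolves to
your CloudServer and s3cmd is configured as before (the bucket name is
only illustrative):

[source,sourceCode,shell]
----
$> s3cmd --host=zenkotos3.com:8000 mb s3://mydefaultbucket
----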
|
|
@ -1,2 +0,0 @@
|
||||||
Sphinx >= 1.7.5
|
|
||||||
recommonmark >= 0.4.0
|
|
|
@ -1,119 +0,0 @@
|
||||||
#
|
|
||||||
# This file is autogenerated by pip-compile
|
|
||||||
# To update, run:
|
|
||||||
#
|
|
||||||
# tox -e pip-compile
|
|
||||||
#
|
|
||||||
alabaster==0.7.12 \
|
|
||||||
--hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
|
|
||||||
--hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 \
|
|
||||||
# via sphinx
|
|
||||||
babel==2.6.0 \
|
|
||||||
--hash=sha256:6778d85147d5d85345c14a26aada5e478ab04e39b078b0745ee6870c2b5cf669 \
|
|
||||||
--hash=sha256:8cba50f48c529ca3fa18cf81fa9403be176d374ac4d60738b839122dfaaa3d23 \
|
|
||||||
# via sphinx
|
|
||||||
certifi==2018.10.15 \
|
|
||||||
--hash=sha256:339dc09518b07e2fa7eda5450740925974815557727d6bd35d319c1524a04a4c \
|
|
||||||
--hash=sha256:6d58c986d22b038c8c0df30d639f23a3e6d172a05c3583e766f4c0b785c0986a \
|
|
||||||
# via requests
|
|
||||||
chardet==3.0.4 \
|
|
||||||
--hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
|
|
||||||
--hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
|
|
||||||
# via requests
|
|
||||||
commonmark==0.5.4 \
|
|
||||||
--hash=sha256:34d73ec8085923c023930dfc0bcd1c4286e28a2a82de094bb72fabcc0281cbe5 \
|
|
||||||
# via recommonmark
|
|
||||||
docutils==0.14 \
|
|
||||||
--hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
|
|
||||||
--hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
|
|
||||||
--hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 \
|
|
||||||
# via recommonmark, sphinx
|
|
||||||
idna==2.7 \
|
|
||||||
--hash=sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e \
|
|
||||||
--hash=sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16 \
|
|
||||||
# via requests
|
|
||||||
imagesize==1.1.0 \
|
|
||||||
--hash=sha256:3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8 \
|
|
||||||
--hash=sha256:f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5 \
|
|
||||||
# via sphinx
|
|
||||||
jinja2==2.10 \
|
|
||||||
--hash=sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd \
|
|
||||||
--hash=sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4 \
|
|
||||||
# via sphinx
|
|
||||||
markupsafe==1.1.0 \
|
|
||||||
--hash=sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432 \
|
|
||||||
--hash=sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b \
|
|
||||||
--hash=sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9 \
|
|
||||||
--hash=sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af \
|
|
||||||
--hash=sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834 \
|
|
||||||
--hash=sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd \
|
|
||||||
--hash=sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d \
|
|
||||||
--hash=sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7 \
|
|
||||||
--hash=sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b \
|
|
||||||
--hash=sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3 \
|
|
||||||
--hash=sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c \
|
|
||||||
--hash=sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2 \
|
|
||||||
--hash=sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7 \
|
|
||||||
--hash=sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36 \
|
|
||||||
--hash=sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1 \
|
|
||||||
--hash=sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e \
|
|
||||||
--hash=sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1 \
|
|
||||||
--hash=sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c \
|
|
||||||
--hash=sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856 \
|
|
||||||
--hash=sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550 \
|
|
||||||
--hash=sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492 \
|
|
||||||
--hash=sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672 \
|
|
||||||
--hash=sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401 \
|
|
||||||
--hash=sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6 \
|
|
||||||
--hash=sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6 \
|
|
||||||
--hash=sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c \
|
|
||||||
--hash=sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd \
|
|
||||||
--hash=sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1 \
|
|
||||||
# via jinja2
|
|
||||||
packaging==18.0 \
|
|
||||||
--hash=sha256:0886227f54515e592aaa2e5a553332c73962917f2831f1b0f9b9f4380a4b9807 \
|
|
||||||
--hash=sha256:f95a1e147590f204328170981833854229bb2912ac3d5f89e2a8ccd2834800c9 \
|
|
||||||
# via sphinx
|
|
||||||
pygments==2.2.0 \
|
|
||||||
--hash=sha256:78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d \
|
|
||||||
--hash=sha256:dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc \
|
|
||||||
# via sphinx
|
|
||||||
pyparsing==2.3.0 \
|
|
||||||
--hash=sha256:40856e74d4987de5d01761a22d1621ae1c7f8774585acae358aa5c5936c6c90b \
|
|
||||||
--hash=sha256:f353aab21fd474459d97b709e527b5571314ee5f067441dc9f88e33eecd96592 \
|
|
||||||
# via packaging
|
|
||||||
pytz==2018.7 \
|
|
||||||
--hash=sha256:31cb35c89bd7d333cd32c5f278fca91b523b0834369e757f4c5641ea252236ca \
|
|
||||||
--hash=sha256:8e0f8568c118d3077b46be7d654cc8167fa916092e28320cde048e54bfc9f1e6 \
|
|
||||||
# via babel
|
|
||||||
recommonmark==0.4.0 \
|
|
||||||
--hash=sha256:6e29c723abcf5533842376d87c4589e62923ecb6002a8e059eb608345ddaff9d \
|
|
||||||
--hash=sha256:cd8bf902e469dae94d00367a8197fb7b81fcabc9cfb79d520e0d22d0fbeaa8b7
|
|
||||||
requests==2.20.1 \
|
|
||||||
--hash=sha256:65b3a120e4329e33c9889db89c80976c5272f56ea92d3e74da8a463992e3ff54 \
|
|
||||||
--hash=sha256:ea881206e59f41dbd0bd445437d792e43906703fff75ca8ff43ccdb11f33f263 \
|
|
||||||
# via sphinx
|
|
||||||
six==1.11.0 \
|
|
||||||
--hash=sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9 \
|
|
||||||
--hash=sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb \
|
|
||||||
# via packaging, sphinx
|
|
||||||
snowballstemmer==1.2.1 \
|
|
||||||
--hash=sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128 \
|
|
||||||
--hash=sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89 \
|
|
||||||
# via sphinx
|
|
||||||
sphinx==1.8.2 \
|
|
||||||
--hash=sha256:120732cbddb1b2364471c3d9f8bfd4b0c5b550862f99a65736c77f970b142aea \
|
|
||||||
--hash=sha256:b348790776490894e0424101af9c8413f2a86831524bd55c5f379d3e3e12ca64
|
|
||||||
sphinxcontrib-websupport==1.1.0 \
|
|
||||||
--hash=sha256:68ca7ff70785cbe1e7bccc71a48b5b6d965d79ca50629606c7861a21b206d9dd \
|
|
||||||
--hash=sha256:9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9 \
|
|
||||||
# via sphinx
|
|
||||||
typing==3.6.6 \
|
|
||||||
--hash=sha256:4027c5f6127a6267a435201981ba156de91ad0d1d98e9ddc2aa173453453492d \
|
|
||||||
--hash=sha256:57dcf675a99b74d64dacf6fba08fb17cf7e3d5fdff53d4a30ea2a5e7e52543d4 \
|
|
||||||
--hash=sha256:a4c8473ce11a65999c8f59cb093e70686b6c84c98df58c1dae9b3b196089858a \
|
|
||||||
# via sphinx
|
|
||||||
urllib3==1.24.1 \
|
|
||||||
--hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
|
|
||||||
--hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
|
|
||||||
# via requests
|
|
|
@ -1,46 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
"bytes"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"io/ioutil"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
|
||||||
"github.com/aws/aws-sdk-go/aws/signer/v4"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// Input AWS access key, secret key
|
|
||||||
aws_access_key_id := "accessKey1"
|
|
||||||
aws_secret_access_key := "verySecretKey1"
|
|
||||||
endpoint := "http://localhost:8000"
|
|
||||||
bucket_name := "bucketname"
|
|
||||||
searchQuery := url.QueryEscape("x-amz-meta-color=blue")
|
|
||||||
buf := bytes.NewBuffer([]byte{})
|
|
||||||
|
|
||||||
requestUrl := fmt.Sprintf("%s/%s?search=%s",
|
|
||||||
endpoint, bucket_name, searchQuery)
|
|
||||||
|
|
||||||
request, err := http.NewRequest("GET", requestUrl, buf)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
reader := bytes.NewReader(buf.Bytes())
|
|
||||||
credentials := credentials.NewStaticCredentials(aws_access_key_id,
|
|
||||||
aws_secret_access_key, "")
|
|
||||||
signer := v4.NewSigner(credentials)
|
|
||||||
signer.Sign(request, reader, "s3", "us-east-1", time.Now())
|
|
||||||
client := &http.Client{}
|
|
||||||
resp, err := client.Do(request)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
body, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
fmt.Println(string(body))
|
|
||||||
}
|
|
|
@ -1,28 +0,0 @@
|
||||||
const { S3 } = require('aws-sdk');
|
|
||||||
const config = {
|
|
||||||
sslEnabled: false,
|
|
||||||
endpoint: 'http://127.0.0.1:8000',
|
|
||||||
signatureCache: false,
|
|
||||||
signatureVersion: 'v4',
|
|
||||||
region: 'us-east-1',
|
|
||||||
s3ForcePathStyle: true,
|
|
||||||
accessKeyId: 'accessKey1',
|
|
||||||
secretAccessKey: 'verySecretKey1',
|
|
||||||
};
|
|
||||||
const s3Client = new S3(config);
|
|
||||||
|
|
||||||
const encodedSearch =
|
|
||||||
encodeURIComponent('x-amz-meta-color="blue"');
|
|
||||||
const req = s3Client.listObjects({ Bucket: 'bucketname' });
|
|
||||||
|
|
||||||
// the build event
|
|
||||||
req.on('build', () => {
|
|
||||||
req.httpRequest.path = `${req.httpRequest.path}?search=${encodedSearch}`;
|
|
||||||
});
|
|
||||||
req.on('success', res => {
|
|
||||||
process.stdout.write(`Result ${res.data}`);
|
|
||||||
});
|
|
||||||
req.on('error', err => {
|
|
||||||
process.stdout.write(`Error ${err}`);
|
|
||||||
});
|
|
||||||
req.send();
|
|
|
@ -1,79 +0,0 @@
|
||||||
import datetime
|
|
||||||
import hashlib
|
|
||||||
import hmac
|
|
||||||
import urllib
|
|
||||||
# pip install requests
|
|
||||||
import requests
|
|
||||||
|
|
||||||
access_key = 'accessKey1'
|
|
||||||
secret_key = 'verySecretKey1'
|
|
||||||
|
|
||||||
method = 'GET'
|
|
||||||
service = 's3'
|
|
||||||
host = 'localhost:8000'
|
|
||||||
region = 'us-east-1'
|
|
||||||
canonical_uri = '/bucketname'
|
|
||||||
query = 'x-amz-meta-color=blue'
|
|
||||||
canonical_querystring = 'search=%s' % (urllib.quote(query))
|
|
||||||
algorithm = 'AWS4-HMAC-SHA256'
|
|
||||||
|
|
||||||
t = datetime.datetime.utcnow()
|
|
||||||
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
|
|
||||||
date_stamp = t.strftime('%Y%m%d')
|
|
||||||
|
|
||||||
# Key derivation functions. See:
|
|
||||||
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
|
|
||||||
|
|
||||||
|
|
||||||
def sign(key, msg):
|
|
||||||
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
|
|
||||||
|
|
||||||
|
|
||||||
def getSignatureKey(key, date_stamp, regionName, serviceName):
|
|
||||||
kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
|
|
||||||
kRegion = sign(kDate, regionName)
|
|
||||||
kService = sign(kRegion, serviceName)
|
|
||||||
kSigning = sign(kService, 'aws4_request')
|
|
||||||
return kSigning
|
|
||||||
|
|
||||||
|
|
||||||
payload_hash = hashlib.sha256('').hexdigest()
|
|
||||||
|
|
||||||
canonical_headers = \
|
|
||||||
'host:{0}\nx-amz-content-sha256:{1}\nx-amz-date:{2}\n' \
|
|
||||||
.format(host, payload_hash, amz_date)
|
|
||||||
|
|
||||||
signed_headers = 'host;x-amz-content-sha256;x-amz-date'
|
|
||||||
|
|
||||||
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
|
|
||||||
.format(method, canonical_uri, canonical_querystring, canonical_headers,
|
|
||||||
signed_headers, payload_hash)
|
|
||||||
print(canonical_request)
|
|
||||||
|
|
||||||
credential_scope = '{0}/{1}/{2}/aws4_request' \
|
|
||||||
.format(date_stamp, region, service)
|
|
||||||
|
|
||||||
string_to_sign = '{0}\n{1}\n{2}\n{3}' \
|
|
||||||
.format(algorithm, amz_date, credential_scope,
|
|
||||||
hashlib.sha256(canonical_request).hexdigest())
|
|
||||||
|
|
||||||
signing_key = getSignatureKey(secret_key, date_stamp, region, service)
|
|
||||||
|
|
||||||
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
|
|
||||||
hashlib.sha256).hexdigest()
|
|
||||||
|
|
||||||
authorization_header = \
|
|
||||||
'{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
|
|
||||||
.format(algorithm, access_key, credential_scope, signed_headers, signature)
|
|
||||||
|
|
||||||
# The 'host' header is added automatically by the Python 'requests' library.
|
|
||||||
headers = {
|
|
||||||
'X-Amz-Content-Sha256': payload_hash,
|
|
||||||
'X-Amz-Date': amz_date,
|
|
||||||
'Authorization': authorization_header
|
|
||||||
}
|
|
||||||
|
|
||||||
endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring
|
|
||||||
|
|
||||||
r = requests.get(endpoint, headers=headers)
|
|
||||||
print(r.text)
|
|
|
@ -1,28 +0,0 @@
|
||||||
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
|
|
||||||
|
|
||||||
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
|
|
||||||
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
|
|
||||||
|
|
||||||
COPY . ${HOME_DIR}/s3
|
|
||||||
RUN chown -R ${USER} ${HOME_DIR}
|
|
||||||
RUN pip3 install redis===3.5.3 requests==2.27.1 && \
|
|
||||||
apt-get install -y git-lfs
|
|
||||||
|
|
||||||
USER ${USER}
|
|
||||||
WORKDIR ${HOME_DIR}/s3
|
|
||||||
RUN rm -f ~/.gitconfig && \
|
|
||||||
git config --global --add safe.directory . && \
|
|
||||||
git lfs install && \
|
|
||||||
GIT_LFS_SKIP_SMUDGE=1 && \
|
|
||||||
yarn global add typescript && \
|
|
||||||
yarn install --frozen-lockfile --production --network-concurrency 1 && \
|
|
||||||
yarn cache clean --all && \
|
|
||||||
yarn global remove typescript
|
|
||||||
|
|
||||||
# run symlinking separately to avoid yarn installation errors
|
|
||||||
# we might have to check if the symlinking is really needed!
|
|
||||||
RUN ln -sf /scality-kms node_modules
|
|
||||||
|
|
||||||
EXPOSE 8000
|
|
||||||
|
|
||||||
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/supervisord.conf"
|
|
7
index.js
7
index.js
|
@ -1,10 +1,3 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
'use strict'; // eslint-disable-line strict
|
||||||
|
|
||||||
require('werelogs').stderrUtils.catchAndTimestampStderr(
|
|
||||||
undefined,
|
|
||||||
// Do not exit as workers have their own listener that will exit
|
|
||||||
// But primary don't have another listener
|
|
||||||
require('cluster').isPrimary ? 1 : null,
|
|
||||||
);
|
|
||||||
|
|
||||||
require('./lib/server.js')();
|
require('./lib/server.js')();
|
||||||
|
|
1440
lib/Config.js
1440
lib/Config.js
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.