Compare commits


1 Commit

30 changed files with 381 additions and 265 deletions

.gitignore

@@ -1,9 +1,4 @@
node_modules
**/node_modules
build
*/build
package-lock.json
Dockerfile
localData/*
localMetadata/*
# Keep the .git/HEAD file in order to properly report version

.gitmodules (vendored)

@@ -1,26 +0,0 @@
[submodule "arsenal"]
	path = arsenal
	url = ../zenko-arsenal
	branch = master
[submodule "scubaclient"]
	path = scubaclient
	url = ../zenko-scubaclient
	branch = development/1.0
[submodule "utapi"]
	path = utapi
	url = ../zenko-utapi
	branch = development/8.1
[submodule "werelogs"]
	path = werelogs
	url = ../zenko-werelogs
	branch = development/8.1
[submodule "fcntl"]
	path = fcntl
	url = ../zenko-fcntl
[submodule "httpagent"]
	path = httpagent
	url = ../zenko-httpagent
	branch = development/1.0
[submodule "eslint-config-scality"]
	path = eslint-config-scality
	url = ../zenko-eslint-config-scality

Dockerfile

@@ -1,115 +1,60 @@
# NOTE: Put Vitastor Debian packages in ./vitastor-bookworm/ if you want them
# to be used instead of the public package repository (e.g., a development version)
ARG NODE_VERSION=16.20.2-bookworm-slim
FROM debian:bookworm AS builder
FROM node:${NODE_VERSION} AS builder
ADD https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key /etc/apt/trusted.gpg.d/
WORKDIR /usr/src/app
RUN set -e ; \
apt-get update ; \
apt-get install -y --no-install-recommends ca-certificates ; \
echo deb https://deb.nodesource.com/node_22.x nodistro main > /etc/apt/sources.list.d/nodesource.list ; \
(grep -vP '^----|^=' /etc/apt/trusted.gpg.d/nodesource-repo.gpg.key | base64 -d) > /etc/apt/trusted.gpg.d/nodesource-repo.gpg ; \
apt-get update ; \
apt-get install -y --no-install-recommends build-essential ca-certificates curl git nodejs jq ; \
apt-get clean ; \
rm -rf /var/lib/apt/lists/*
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
curl \
git \
gnupg2 \
jq \
python3 \
ssh \
wget \
libffi-dev \
zlib1g-dev \
&& apt-get clean \
&& mkdir -p /root/ssh \
&& ssh-keyscan -H github.com > /root/ssh/known_hosts
ADD package.json /app/package.json
ADD arsenal/package.json /app/arsenal/package.json
ADD eslint-config-scality/package.json /app/eslint-config-scality/package.json
ADD fcntl/package.json /app/fcntl/package.json
ADD httpagent/package.json /app/httpagent/package.json
ADD scubaclient/package.json /app/scubaclient/package.json
ADD utapi/package.json /app/utapi/package.json
ADD werelogs/package.json /app/werelogs/package.json
ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript@4.9.5 -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
RUN cd /app/ && \
for i in arsenal eslint-config-scality fcntl httpagent scubaclient utapi werelogs; do \
jq 'del(.scripts)' < $i/package.json > t; \
mv t $i/package.json; \
done && \
npm install --verbose
################################################################################
FROM node:${NODE_VERSION}
ADD https://git.yourcmc.ru/vitalif/vitastor/raw/branch/master/debian/changelog /app/vitastor-changelog
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
&& rm -rf /var/lib/apt/lists/*
ADD . /app/
ENV NO_PROXY localhost,127.0.0.1
ENV no_proxy localhost,127.0.0.1
RUN cd /app/ && \
npm install --verbose
RUN set -e ; \
mkdir -p /deb/ /app/vitastor-bookworm/ ; \
(mv /app/vitastor-bookworm/vitastor-client*.deb /deb/ || true) ; \
echo deb http://vitastor.io/debian bookworm main > /etc/apt/sources.list.d/vitastor.list ; \
echo deb-src http://vitastor.io/debian bookworm main >> /etc/apt/sources.list.d/vitastor.list ; \
curl -o /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg ; \
apt-get update ; \
cd /app/vitastor-bookworm/ ; \
(if ls /deb/*.deb; then \
apt-get install -y --no-install-recommends pkg-config /deb/*.deb ; \
dpkg-source -x *.dsc ; \
else \
apt-get install -y --no-install-recommends pkg-config vitastor-client ; \
apt-get source vitastor ; \
fi) ; \
mv vitastor-*/ vitastor ; \
cd vitastor/node-binding ; \
npm install
RUN cd /app/ && \
cp -r /app/vitastor-bookworm/vitastor/node-binding node_modules/vitastor && \
perl -i -pe "s!require\('bindings'\)\('!require('./build/Release/!" \
node_modules/vitastor/index.js node_modules/ioctl/index.js node_modules/fcntl/index.js && \
perl -i -pe "s!require\('node-gyp-build'\)\(__dirname\)!require('./prebuilds/linux-x64/node.napi.node')!" \
node_modules/utf-8-validate/index.js node_modules/bufferutil/index.js && \
perl -i -pe "s!require\('node-gyp-build'\)\(__dirname\)!require('./prebuilds/linux-x64/classic-level.node')!" \
node_modules/classic-level/binding.js && \
npm exec webpack -- --mode=development
####################
FROM debian:bookworm
ADD https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key /etc/apt/trusted.gpg.d/
RUN set -e ; \
apt-get update ; \
apt-get install -y --no-install-recommends ca-certificates ; \
echo deb https://deb.nodesource.com/node_22.x nodistro main > /etc/apt/sources.list.d/nodesource.list ; \
(grep -vP '^----|^=' /etc/apt/trusted.gpg.d/nodesource-repo.gpg.key | base64 -d) > /etc/apt/trusted.gpg.d/nodesource-repo.gpg ; \
apt-get update ; \
apt-get install -y --no-install-recommends curl ca-certificates nodejs jq node-bindings libibverbs1 librdmacm1 libtcmalloc-minimal4 liburing2 ; \
apt-get clean ; \
rm -rf /var/lib/apt/lists/*
ADD https://vitastor.io/debian/pubkey.gpg /etc/apt/trusted.gpg.d/vitastor.gpg
ADD https://vitastor.io/debian/dists/bookworm/Release /app/debian-release
COPY --from=builder /deb/ /deb/
RUN set -e ; \
echo deb http://vitastor.io/debian bookworm main > /etc/apt/sources.list.d/vitastor.list ; \
chmod 644 /etc/apt/trusted.gpg.d/vitastor.gpg ; \
apt-get update ; \
(if ls /deb/*.deb; then \
apt-get install -y --no-install-recommends /deb/*.deb ; \
else \
apt-get install -y --no-install-recommends vitastor-client ; \
fi) ; \
apt-get clean ; \
rm -rf /var/lib/apt/lists/*
COPY --from=builder /app/node_modules/vitastor /app/node_modules/vitastor
COPY --from=builder /app/dist /app/
COPY --from=builder /app/authdata.json.example /app/
COPY --from=builder /app/config.json.vitastor /app/config.json.example
COPY --from=builder /app/locationConfig.json.vitastor /app/locationConfig.json.example
COPY bin/configure.sh /usr/bin/
WORKDIR /app
EXPOSE 8000
EXPOSE 8002
CMD [ "nodejs", "/app/zenko-vitastor.js" ]
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
tini \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app
# Keep the .git directory in order to properly report version
COPY . /usr/src/app
COPY --from=builder /usr/src/app/node_modules ./node_modules/
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
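
Either image builds with plain Docker; a usage sketch (the tag and port mapping are illustrative, not from this diff):

```
docker build -t cloudserver:local .
docker run --rm -p 8000:8000 cloudserver:local
```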

DockerfileMem (new file)

@@ -0,0 +1,22 @@
FROM node:6-slim
MAINTAINER Giorgio Regni <gr@scality.com>
WORKDIR /usr/src/app
COPY . /usr/src/app
RUN apt-get update \
&& apt-get install -y jq python git build-essential --no-install-recommends \
&& yarn install --production \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-*
ENV S3BACKEND mem
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
EXPOSE 8000

README.md

@@ -16,80 +16,66 @@ backend support.
## Quick Start with Vitastor
Vitastor S3 Backend is now released. Installation instructions:
The Vitastor backend is experimental, but you can already run it, write and read objects,
and even mount buckets with [GeeseFS](https://github.com/yandex-cloud/geesefs) 😊.
Installation instructions:
### Install Vitastor
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
Then create a separate pool for S3 object data in your Vitastor cluster, for example:
### Install Zenko with Vitastor Backend
`vitastor-cli create-pool --ec 2+1 -n 512 s3-data`
Retrieve the ID of your pool with `vitastor-cli ls-pools s3-data --detail`.
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
### Install and Configure MongoDB
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
### Install Zenko with Vitastor Backend
### Setup Zenko
1. Download Docker image: `docker pull vitalif/vitastor-zenko`
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
in this file.
2. Extract configuration file examples from the Docker image:
```
docker run --rm -it -v /etc/vitastor:/etc/vitastor -v /etc/vitastor/s3:/conf vitalif/vitastor-zenko configure.sh
```
3. Edit configuration files in `/etc/vitastor/s3/`:
- `config.json` - common settings.
- `authdata.json` - user accounts and access keys.
- `locationConfig.json` - S3 storage class list with placement settings.
Note: it actually contains storage classes (like STANDARD, COLD, etc)
instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer.
- Put your MongoDB connection data into `config.json` and `locationConfig.json`.
- Put your Vitastor pool ID into `locationConfig.json`.
- For now, the complete list of Vitastor backend settings is only available [in the code](https://git.yourcmc.ru/vitalif/zenko-arsenal/src/branch/master/lib/storage/data/vitastor/VitastorBackend.ts#L94).
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
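
For reference, a complete storage-class entry in `locationConfig.json` would look roughly like the sketch below. It is assembled from the `locationConfig.json.vitastor` hunk later in this diff; the class name (`STANDARD`) and the `"type"` field are assumptions, since the top of that file falls outside the hunk:

```
{
    "STANDARD": {
        "type": "vitastor",
        "legacyAwsBehavior": true,
        "details": {
            "config_path": "/etc/vitastor/vitastor.conf",
            "pool_id": 3,
            "metadata_image": "s3-volume-meta"
        }
    }
}
```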
### Start Zenko
Start the S3 server with:
```
docker run --restart always --security-opt seccomp:unconfined --ulimit memlock=-1 --network=host \
-v /etc/vitastor:/etc/vitastor -v /etc/vitastor/s3:/conf --name zenko vitalif/vitastor-zenko
```
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with, for example, [s3cmd](https://s3tools.org/s3cmd):
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```
Or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs):
```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```
## Building from source
- Clone this repository: `git clone --recurse-submodules https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
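
Taken together, the steps above amount to roughly the following shell session (a sketch assuming both repositories are cloned side by side):

```
git clone --recurse-submodules https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor
(cd zenko-cloudserver-vitastor && npm install)
git clone https://git.yourcmc.ru/vitalif/vitastor
(cd vitastor/node-binding && npm install)  # needs node-gyp and vitastor-client-dev
ln -s "$PWD/vitastor/node-binding" zenko-cloudserver-vitastor/node_modules/vitastor
```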
# Author & License
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality,
licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is
Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)

@@ -1 +0,0 @@
Subproject commit ab7861b3b001132b64ec7027fcbe1300149d017c

bin/configure.sh

@@ -1,6 +0,0 @@
#!/bin/bash
set -e
[[ -f /conf/config.json ]] || cp /app/config.json.example /conf/config.json
[[ -f /conf/locationConfig.json ]] || cp /app/locationConfig.json.example /conf/locationConfig.json
[[ -f /conf/authdata.json ]] || cp /app/authdata.json.example /conf/authdata.json


@@ -59,6 +59,7 @@
"host": "localhost",
"port": 8500
},
"clusters": 1,
"log": {
"logLevel": "info",
"dumpLevel": "error"


@@ -1,6 +1,5 @@
{
"port": 8000,
"workers": 4,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],

docker-entrypoint.sh (new executable file)

@@ -0,0 +1,220 @@
#!/bin/bash
# set -e stops the execution of a script if a command or pipeline has an error
set -e
# modifying config.json
JQ_FILTERS_CONFIG="."
# ENDPOINT var can accept comma separated values
# for multiple endpoint locations
if [[ "$ENDPOINT" ]]; then
IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
for host in "${HOST_NAMES[@]}"; do
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
done
echo "Host name has been modified to ${HOST_NAMES[@]}"
echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with ${HOST_NAMES[@]}"
fi
if [[ "$LOG_LEVEL" ]]; then
if [[ "$LOG_LEVEL" == "info" || "$LOG_LEVEL" == "debug" || "$LOG_LEVEL" == "trace" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .log.logLevel=\"$LOG_LEVEL\""
echo "Log level has been modified to $LOG_LEVEL"
else
echo "The log level you provided is incorrect (info/debug/trace)"
fi
fi
if [[ "$SSL" && "$HOST_NAMES" ]]; then
# This condition makes sure that the certificates are not generated twice. (for docker restart)
if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
# Compute config for utapi tests
cat >>req.cfg <<EOF
[req]
distinguished_name = req_distinguished_name
prompt = no
req_extensions = s3_req
[req_distinguished_name]
CN = ${HOST_NAMES[0]}
[s3_req]
subjectAltName = @alt_names
extendedKeyUsage = serverAuth, clientAuth
[alt_names]
DNS.1 = *.${HOST_NAMES[0]}
DNS.2 = ${HOST_NAMES[0]}
EOF
## Generate SSL key and certificates
# Generate a private key for your CSR
openssl genrsa -out ca.key 2048
# Generate a self signed certificate for your local Certificate Authority
openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=S3 CA Server"
# Generate a key for S3 Server
openssl genrsa -out server.key 2048
# Generate a Certificate Signing Request for S3 Server
openssl req -new -key server.key -out server.csr -config req.cfg
# Generate a local-CA-signed certificate for S3 Server
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 99999 -sha256 -extfile req.cfg -extensions s3_req
fi
## Update S3Server config.json
# This condition makes sure that certFilePaths section is not added twice. (for docker restart)
if ! grep -q "certFilePaths" ./config.json; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .certFilePaths= { \"key\": \".\/server.key\", \"cert\": \".\/server.crt\", \"ca\": \".\/ca.crt\" }"
fi
fi
if [[ "$LISTEN_ADDR" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .listenOn=[\"$LISTEN_ADDR:8000\"]"
fi
if [[ "$REPLICATION_GROUP_ID" ]] ; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .replicationGroupId=\"$REPLICATION_GROUP_ID\""
fi
if [[ "$DATA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataClient.host=\"$DATA_HOST\""
fi
if [[ "$METADATA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
fi
if [[ "$PFSD_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsClient.host=\"$PFSD_HOST\""
fi
if [[ "$MONGODB_HOSTS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
fi
if [[ "$MONGODB_RS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSet=\"$MONGODB_RS\""
fi
if [[ "$MONGODB_DATABASE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.database=\"$MONGODB_DATABASE\""
fi
if [ -z "$REDIS_HA_NAME" ]; then
REDIS_HA_NAME='mymaster'
fi
if [[ "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.name=\"$REDIS_HA_NAME\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.sentinels=\"$REDIS_SENTINELS\""
elif [[ "$REDIS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.host=\"$REDIS_HOST\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.port=6379"
fi
if [[ "$REDIS_PORT" ]] && [[ ! "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.port=$REDIS_PORT"
fi
if [[ "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.name=\"$REDIS_HA_NAME\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.sentinels=\"$REDIS_SENTINELS\""
elif [[ "$REDIS_HA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.host=\"$REDIS_HA_HOST\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=6379"
fi
if [[ "$REDIS_HA_PORT" ]] && [[ ! "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=$REDIS_HA_PORT"
fi
if [[ "$RECORDLOG_ENABLED" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .recordLog.enabled=true"
fi
if [[ "$STORAGE_LIMIT_ENABLED" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.metrics[.utapi.metrics | length]=\"location\""
fi
if [[ "$CRR_METRICS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.host=\"$CRR_METRICS_HOST\""
fi
if [[ "$CRR_METRICS_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
fi
if [[ "$WE_OPERATOR_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.host=\"$WE_OPERATOR_HOST\""
fi
if [[ "$WE_OPERATOR_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.port=$WE_OPERATOR_PORT"
fi
if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi
# external backends http(s) agent config
# AWS
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAlive=$AWS_S3_HTTPAGENT_KEEPALIVE"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAliveMsecs=$AWS_S3_HTTPAGENT_KEEPALIVE_MS"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxFreeSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi
#GCP
if [[ "$GCP_HTTPAGENT_KEEPALIVE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAlive=$GCP_HTTPAGENT_KEEPALIVE"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAliveMsecs=$GCP_HTTPAGENT_KEEPALIVE_MS"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi
if [[ -n "$BUCKET_DENY_FILTER" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
fi
if [[ "$TESTING_MODE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
fi
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json
fi
if test -v INITIAL_INSTANCE_ID && test -v S3METADATAPATH && ! test -f ${S3METADATAPATH}/uuid ; then
echo -n ${INITIAL_INSTANCE_ID} > ${S3METADATAPATH}/uuid
fi
# s3 secret credentials for Zenko
if [ -r /run/secrets/s3-credentials ] ; then
. /run/secrets/s3-credentials
fi
exec "$@"

@@ -1 +0,0 @@
Subproject commit d383e47d8ec650b547de3d0b5fcb48e97bd48ba1

fcntl (submodule)

@@ -1 +0,0 @@
Subproject commit 89449bbe583f8ad7531b68cff6cb67ce7682cf18

@@ -1 +0,0 @@
Subproject commit 045e9c8282390e92d372f0577d5814fe0ccbeba3


@@ -121,7 +121,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
}
// The locations were sent to metadata as an array
// under partLocations. Pull the partLocations.
const locations = storedParts.flatMap(item => item.value.location||item.value.partLocations);
const locations = storedParts.flatMap(item => item.value.partLocations);
if (locations.length === 0) {
return next(null, mpuBucket, storedParts, destBucket);
}


@@ -12,6 +12,12 @@ const versionIdUtils = versioning.VersionID;
const nonVersionedObjId =
versionIdUtils.getInfVid(config.replicationGroupId);
let uidCounter = 0;
function generateVersionId() {
// generate a unique number for each member of the nodejs cluster
return versioning.VersionID.generateVersionId(`${process.pid}.${uidCounter++}`, config.replicationGroupId);
}
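// Usage sketch (an assumption mirroring initiateMultipartUpload below): for a
// versioned bucket, a version id is minted up front so that uploadPart can
// store data against the final object version, e.g.:
//   metadataStoreParams.versionId = isVersionedObj ? generateVersionId() : '';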
/** decodeVID - decode the version id
* @param {string} versionId - version ID
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
@@ -556,6 +562,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
}
module.exports = {
generateVersionId,
decodeVersionId,
getVersionIdResHeader,
checkQueryVersionId,


@@ -93,19 +93,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
let oldByteLength = null;
const responseHeaders = {};
let versionId;
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
if (putVersionId) {
const decodedVidResult = decodeVID(putVersionId);
if (decodedVidResult instanceof Error) {
log.trace('invalid x-scal-s3-version-id header', {
versionId: putVersionId,
error: decodedVidResult,
});
return process.nextTick(() => callback(decodedVidResult));
}
versionId = decodedVidResult;
if (request.headers['x-scal-s3-version-id']) {
return callback(new Error('x-scal-s3-version-id is not supported in completeMultipartUpload'));
}
const queryContainsVersionId = checkQueryVersionId(request.query);
@@ -133,7 +122,6 @@ function completeMultipartUpload(authInfo, request, log, callback) {
// Required permissions for this action
// at the destinationBucket level are same as objectPut
requestType: request.apiMethods || 'completeMultipartUpload',
versionId,
request,
};
standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next);
@@ -143,13 +131,6 @@ function completeMultipartUpload(authInfo, request, log, callback) {
oldByteLength = objMD['content-length'];
}
if (isPutVersion) {
const error = validatePutVersionId(objMD, putVersionId, log);
if (error) {
return next(error, destBucket);
}
}
return services.metadataValidateMultipart(metadataValParams,
(err, mpuBucket, mpuOverview, storedMetadata) => {
if (err) {
@@ -314,7 +295,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
extraPartLocations, completeObjData, totalMPUSize, next) {
const metaHeaders = {};
const keysNotNeeded =
['initiator', 'location', 'partLocations', 'key',
['initiator', 'partLocations', 'key',
'initiated', 'uploadId', 'content-type', 'expires',
'eventualStorageBucket', 'dataStoreName'];
const metadataKeysToPull =
@@ -416,8 +397,9 @@ function completeMultipartUpload(authInfo, request, log, callback) {
extraPartLocations, pseudoCipherBundle,
completeObjData, options, droppedMPUSize, next) {
const dataToDelete = options.dataToDelete;
// take versionId from where initiateMultipartUpload saves it originally
/* eslint-disable no-param-reassign */
metaStoreParams.versionId = options.versionId;
metaStoreParams.versionId = storedMetadata.versionId;
metaStoreParams.versioning = options.versioning;
metaStoreParams.isNull = options.isNull;
metaStoreParams.deleteNullKey = options.deleteNullKey;


@@ -22,6 +22,7 @@ const { validateHeaders, compareObjectLockInformation } =
require('./apiUtils/object/objectLockHelpers');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { generateVersionId } = require('./apiUtils/object/versioning');
/*
Sample xml response:
@@ -163,6 +164,11 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
if (MPUbucket.getMdBucketModelVersion() < 2) {
metadataStoreParams.splitter = constants.oldSplitter;
}
// Generate version id before creating multipart upload so uploadPart
// can store data with a reference to the correct object version
const vcfg = destinationBucket.getVersioningConfiguration();
const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
metadataStoreParams.versionId = isVersionedObj ? generateVersionId() : '';
return services.metadataStoreMPObject(MPUbucket.getName(),
cipherBundle, metadataStoreParams,
log, (err, mpuMD) => {


@@ -318,7 +318,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
let oldLocations;
let prevObjectSize = null;
if (result) {
oldLocations = result.location||result.partLocations;
oldLocations = result.partLocations;
prevObjectSize = result['content-length'];
// Pull locations to clean up any potential orphans
// in data if object put is an overwrite of


@@ -264,8 +264,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
// Pull locations to clean up any potential orphans in
// data if object put is an overwrite of a pre-existing
// object with the same key and part number.
oldLocations = Array.isArray(res.location||res.partLocations) ?
(res.location||res.partLocations) : [(res.location||res.partLocations)];
oldLocations = Array.isArray(res.partLocations) ?
res.partLocations : [res.partLocations];
}
return next(null, destinationBucket,
objectLocationConstraint, cipherBundle,
@@ -321,9 +321,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
partLocations[0].sseCipheredDataKey = cipheredDataKey;
}
const omVal = {
// Version 6 changes 'partLocations' to 'location'
'md-model-version': 6,
'location': partLocations,
// back to Version 3 since number-subparts is not needed
'md-model-version': 3,
partLocations,
'key': partKey,
'last-modified': new Date().toJSON(),
'content-md5': hexDigest,


@@ -28,7 +28,7 @@ if (config.backends.data === 'mem') {
client = new DataFileInterface(config);
implName = 'file';
} else if (config.backends.data === 'multiple') {
const clients = parseLC(config, vault, metadata);
const clients = parseLC(config, vault);
client = new MultipleBackendGateway(
clients, metadata, locationStorageCheck);
implName = 'multipleBackends';


@@ -175,6 +175,7 @@ const services = {
options.versioning = versioning;
}
if (versionId || versionId === '') {
// FIXME Move to md.versionId
options.versionId = versionId;
}
if (needOplogUpdate) {
@@ -471,6 +472,7 @@ const services = {
};
multipartObjectMD.key = params.objectKey;
multipartObjectMD.uploadId = params.uploadId;
multipartObjectMD.versionId = params.versionId;
multipartObjectMD['cache-control'] = params.headers['cache-control'];
multipartObjectMD['content-disposition'] =
params.headers['content-disposition'];
@@ -772,9 +774,10 @@ const services = {
assert.strictEqual(typeof splitter, 'string');
const partKey = `${uploadId}${splitter}${partNumber}`;
const omVal = {
// Version 6 changes 'partLocations' to 'location'
'md-model-version': 6,
'location': partLocations,
// Version 3 changes the format of partLocations
// from an object to an array
'md-model-version': 3,
partLocations,
'key': partKey,
'last-modified': dateModified,
'content-md5': contentMD5,
@@ -890,7 +893,6 @@ const services = {
marker: undefined,
delimiter: undefined,
maxKeys: 10000,
withLocation: true,
};
metadata.listObject(mpuBucketName, searchArgs, log, cb);
},
@@ -906,7 +908,6 @@ const services = {
marker: `${uploadId}${params.splitter}${paddedPartNumber}`,
delimiter: undefined,
maxKeys: maxParts,
withLocation: true,
};
metadata.listObject(mpuBucketName, searchArgs, log, cb);
},

locationConfig.json.vitastor

@@ -5,12 +5,8 @@
"legacyAwsBehavior": true,
"details": {
"config_path": "/etc/vitastor/vitastor.conf",
"pool_id": 10,
"metadata_mongodb": {
"url": "mongodb://USERNAME:PASSWORD@10.10.10.1:27017,10.10.10.2:27017,10.10.10.3:27017/?w=majority&readPreference=primary&replicaSet=rs0",
"dbname": "vitastor",
"collection": "volume_metadata"
}
"pool_id": 3,
"metadata_image": "s3-volume-meta"
}
}
}

package.json

@@ -6,10 +6,7 @@
"engines": {
"node": ">=16"
},
"repository": {
"type": "git",
"url": "https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor"
},
"repository": "scality/S3",
"keywords": [
"s3",
"cloud",
@@ -18,13 +15,13 @@
"author": "Scality Inc.",
"license": "Apache-2.0",
"bugs": {
"url": "https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor/issues"
"url": "https://github.com/scality/S3/issues"
},
"homepage": "https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor#zenko-cloudserver-with-vitastor-backend",
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"@azure/storage-blob": "^12.12.0",
"@hapi/joi": "^17.1.0",
"arsenal": "./arsenal",
"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
"async": "^2.5.0",
"aws-sdk": "^2.905.0",
"bufferutil": "^4.0.6",
@@ -35,24 +32,23 @@
"http-proxy": "^1.17.0",
"http-proxy-agent": "^4.0.1",
"https-proxy-agent": "^2.2.0",
"level-mem": "^5.0.1",
"moment": "^2.26.0",
"mongodb": "^5.2.0",
"node-fetch": "^2.6.0",
"node-forge": "^0.7.1",
"npm-run-all": "^4.1.5",
"prom-client": "^14.2.0",
"prom-client": "14.2.0",
"request": "^2.81.0",
"scubaclient": "./scubaclient",
"scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
"sql-where-parser": "^2.2.1",
"utapi": "./utapi",
"utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
"utf-8-validate": "^5.0.8",
"utf8": "^2.1.1",
"uuid": "^8.3.2",
"werelogs": "./werelogs",
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
"ws": "^5.1.0",
"xml2js": "^0.4.16",
"fcntl": "./fcntl",
"httpagent": "./httpagent"
"xml2js": "^0.4.16"
},
"overrides": {
"ltgt": "^2.2.0"
@@ -64,7 +60,7 @@
"bluebird": "^3.3.1",
"eslint": "^8.14.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-scality": "./eslint-config-scality",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
"eslint-plugin-import": "^2.14.0",
"eslint-plugin-mocha": "^10.1.0",
"express": "^4.17.1",
@@ -75,7 +71,6 @@
"mocha": ">=3.1.2",
"mocha-junit-reporter": "^1.23.1",
"mocha-multi-reporters": "^1.1.7",
"node-loader": "^2.1.0",
"node-mocks-http": "^1.5.2",
"sinon": "^13.0.1",
"tv4": "^1.2.7",

@@ -1 +0,0 @@
Subproject commit 619aca73fc95af9594f8113f700111c479d96b9f


@@ -71,35 +71,35 @@ describe('List Parts API', () => {
'last-modified': '2015-11-30T22:41:18.658Z',
'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
'content-length': '6000000',
'location': ['068db6a6745a79d54c1b29ff99f9f131'],
'partLocations': ['068db6a6745a79d54c1b29ff99f9f131'],
});
inMemMetadata.keyMaps.get(mpuBucket).set(partTwoKey, {
'key': partTwoKey,
'last-modified': '2015-11-30T22:41:40.207Z',
'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
'content-length': '6000000',
'location': ['ff22f316b16956ff5118c93abce7d62d'],
'partLocations': ['ff22f316b16956ff5118c93abce7d62d'],
});
inMemMetadata.keyMaps.get(mpuBucket).set(partThreeKey, {
'key': partThreeKey,
'last-modified': '2015-11-30T22:41:52.102Z',
'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
'content-length': '6000000',
'location': ['dea282f70edb6fc5f9433cd6f525d4a6'],
'partLocations': ['dea282f70edb6fc5f9433cd6f525d4a6'],
});
inMemMetadata.keyMaps.get(mpuBucket).set(partFourKey, {
'key': partFourKey,
'last-modified': '2015-11-30T22:42:03.493Z',
'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
'content-length': '6000000',
'location': ['afe24bc40153982e1f7f28066f7af6a4'],
'partLocations': ['afe24bc40153982e1f7f28066f7af6a4'],
});
inMemMetadata.keyMaps.get(mpuBucket).set(partFiveKey, {
'key': partFiveKey,
'last-modified': '2015-11-30T22:42:22.876Z',
'content-md5': '555e4cd2f9eff38109d7a3ab13995a32',
'content-length': '18',
'location': ['85bc16f5769687070fb13cfe66b5e41f'],
'partLocations': ['85bc16f5769687070fb13cfe66b5e41f'],
});
done();
});


@@ -159,7 +159,7 @@ function putMPU(key, body, cb) {
const calculatedHash = md5Hash.digest('hex');
const partKey = `${uploadId}${constants.splitter}00001`;
const obj = {
location: [{
partLocations: [{
key: 1,
dataStoreName: 'scality-internal-mem',
dataStoreETag: `1:${calculatedHash}`,


@@ -46,6 +46,7 @@
"host": "localhost",
"port": 8500
},
"clusters": 10,
"log": {
"logLevel": "info",
"dumpLevel": "error"

utapi (submodule)

@@ -1 +0,0 @@
Subproject commit 84953ff86820bfa2bb6a24bdc79b1ae94ce6ff68

webpack.config.js

@@ -1,8 +1,5 @@
module.exports = {
entry: './index.js',
resolve: {
extensions: ["...", ".node"],
},
target: 'node',
devtool: 'source-map',
output: {
@@ -15,15 +12,10 @@ module.exports = {
use: {
loader: 'babel-loader',
options: {
presets: [ [ "@babel/preset-env", { "targets": { "node": "18.0" } } ] ]
//presets: [ [ "@babel/preset-env", { "targets": { "node": "4.0" } } ] ],
presets: [ [ "@babel/preset-env", { "targets": { "node": "12.0" }, "exclude": [ "transform-regenerator" ] } ] ],
}
}
},
{
test: /\.node$/,
loader: "node-loader",
},
{
test: /.json$/,
type: 'json'
@@ -31,6 +23,12 @@ module.exports = {
]
},
externals: {
'leveldown': 'commonjs leveldown',
'bufferutil': 'commonjs bufferutil',
'diskusage': 'commonjs diskusage',
'utf-8-validate': 'commonjs utf-8-validate',
'fcntl': 'commonjs fcntl',
'ioctl': 'commonjs ioctl',
'vitastor': 'commonjs vitastor',
'vaultclient': 'commonjs vaultclient',
'bucketclient': 'commonjs bucketclient',
@@ -41,6 +39,7 @@ module.exports = {
'kerberos': 'commonjs kerberos',
'@mongodb-js/zstd': 'commonjs @mongodb-js/zstd',
'@aws-sdk/credential-providers': 'commonjs @aws-sdk/credential-providers',
'snappy': 'commonjs snappy',
'mongodb-client-encryption': 'commonjs mongodb-client-encryption'
},
node: {

@@ -1 +0,0 @@
Subproject commit b57d072e51f39a3112b0719bbd1974c0e292e233