Compare commits


19 Commits

Author SHA1 Message Date
Vitaliy Filippov 740cb42df1 Fix filename 2025-03-18 14:31:47 +03:00
Vitaliy Filippov 5008866629 Update README 2025-03-16 01:10:59 +03:00
Vitaliy Filippov d009b9f0a7 Add configure.sh 2025-03-15 23:53:42 +03:00
Vitaliy Filippov 29021ae22b Dedup types/node 2025-03-14 20:34:07 +03:00
Vitaliy Filippov b564dc1b02 Update arsenal 2025-03-14 20:15:28 +03:00
Vitaliy Filippov 100a991c16 Store MPU part locations also in "location", not "partLocations" 2025-03-14 20:15:16 +03:00
Vitaliy Filippov c8fd6eb20f Remove clusters, add workers to config example 2025-03-11 01:41:40 +03:00
Vitaliy Filippov d74b26787b Update arsenal 2025-03-11 01:22:53 +03:00
Vitaliy Filippov 3f0da690ee More dockerignore 2025-03-11 01:16:39 +03:00
Vitaliy Filippov 6d88042b86 Use nodesource repo 2025-03-10 01:51:06 +03:00
Vitaliy Filippov feade986ab Cache Vitastor dependencies 2025-03-09 16:22:17 +03:00
Vitaliy Filippov f8cd54e80b Support building with local packages instead of repository 2025-03-09 00:48:23 +03:00
Vitaliy Filippov 877189fc0e Include all dependencies as git submodules 2025-03-09 00:16:59 +03:00
Vitaliy Filippov 86626079e4 Use system node-vitastor package 2025-03-02 16:10:21 +03:00
Vitaliy Filippov f11f179147 Plug metadata backend into parseLC wrapper 2025-03-01 14:05:12 +03:00
Vitaliy Filippov 212098e28f Use node-loader to load native extensions 2025-03-01 03:00:20 +03:00
Vitaliy Filippov b89574ea53 Fix build with newer level 2025-03-01 02:13:47 +03:00
Vitaliy Filippov fe902ad08c Add Dockerfile for Zenko-Vitastor 2025-03-01 02:12:09 +03:00
Vitaliy Filippov 7bf1e3d975 Remove level-mem 2025-03-01 02:12:09 +03:00
28 changed files with 243 additions and 362 deletions

.dockerignore

@@ -1,4 +1,9 @@
-node_modules
+**/node_modules
-build
+*/build
+package-lock.json
+Dockerfile
 localData/*
 localMetadata/*
+# Keep the .git/HEAD file in order to properly report version

.gitmodules vendored Normal file

@@ -0,0 +1,26 @@
[submodule "arsenal"]
path = arsenal
url = ../zenko-arsenal
branch = master
[submodule "scubaclient"]
path = scubaclient
url = ../zenko-scubaclient
branch = development/1.0
[submodule "utapi"]
path = utapi
url = ../zenko-utapi
branch = development/8.1
[submodule "werelogs"]
path = werelogs
url = ../zenko-werelogs
branch = development/8.1
[submodule "fcntl"]
path = fcntl
url = ../zenko-fcntl
[submodule "httpagent"]
path = httpagent
url = ../zenko-httpagent
branch = development/1.0
[submodule "eslint-config-scality"]
path = eslint-config-scality
url = ../zenko-eslint-config-scality

Dockerfile

@@ -1,60 +1,115 @@
-ARG NODE_VERSION=16.20.2-bookworm-slim
+# NOTE: Put Vitastor Debian packages in ./vitastor-bookworm/ if you want them
+# to be used instead of the public package repository (e.g., a development version)
-FROM node:${NODE_VERSION} AS builder
+FROM debian:bookworm AS builder
-WORKDIR /usr/src/app
+ADD https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key /etc/apt/trusted.gpg.d/
-RUN apt-get update \
-&& apt-get install -y --no-install-recommends \
-build-essential \
-ca-certificates \
-curl \
-git \
-gnupg2 \
-jq \
-python3 \
-ssh \
-wget \
-libffi-dev \
-zlib1g-dev \
-&& apt-get clean \
-&& mkdir -p /root/ssh \
-&& ssh-keyscan -H github.com > /root/ssh/known_hosts
+RUN set -e ; \
+apt-get update ; \
+apt-get install -y --no-install-recommends ca-certificates ; \
+echo deb https://deb.nodesource.com/node_22.x nodistro main > /etc/apt/sources.list.d/nodesource.list ; \
+(grep -vP '^----|^=' /etc/apt/trusted.gpg.d/nodesource-repo.gpg.key | base64 -d) > /etc/apt/trusted.gpg.d/nodesource-repo.gpg ; \
+apt-get update ; \
+apt-get install -y --no-install-recommends build-essential ca-certificates curl git nodejs jq ; \
+apt-get clean ; \
+rm -rf /var/lib/apt/lists/*
 ENV PYTHON=python3
-COPY package.json yarn.lock /usr/src/app/
-RUN npm install typescript@4.9.5 -g
-RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
+ADD package.json /app/package.json
+ADD arsenal/package.json /app/arsenal/package.json
+ADD eslint-config-scality/package.json /app/eslint-config-scality/package.json
+ADD fcntl/package.json /app/fcntl/package.json
+ADD httpagent/package.json /app/httpagent/package.json
+ADD scubaclient/package.json /app/scubaclient/package.json
+ADD utapi/package.json /app/utapi/package.json
+ADD werelogs/package.json /app/werelogs/package.json
-################################################################################
-FROM node:${NODE_VERSION}
+RUN cd /app/ && \
+for i in arsenal eslint-config-scality fcntl httpagent scubaclient utapi werelogs; do \
+jq 'del(.scripts)' < $i/package.json > t; \
+mv t $i/package.json; \
+done && \
+npm install --verbose
-RUN apt-get update && \
-apt-get install -y --no-install-recommends \
-jq \
-&& rm -rf /var/lib/apt/lists/*
+ADD https://git.yourcmc.ru/vitalif/vitastor/raw/branch/master/debian/changelog /app/vitastor-changelog
-ENV NO_PROXY localhost,127.0.0.1
-ENV no_proxy localhost,127.0.0.1
+ADD . /app/
+RUN cd /app/ && \
+npm install --verbose
+RUN set -e ; \
+mkdir -p /deb/ /app/vitastor-bookworm/ ; \
+(mv /app/vitastor-bookworm/vitastor-client*.deb /deb/ || true) ; \
+echo deb http://vitastor.io/debian bookworm main > /etc/apt/sources.list.d/vitastor.list ; \
+echo deb-src http://vitastor.io/debian bookworm main >> /etc/apt/sources.list.d/vitastor.list ; \
+curl -o /etc/apt/trusted.gpg.d/vitastor.gpg https://vitastor.io/debian/pubkey.gpg ; \
+apt-get update ; \
+cd /app/vitastor-bookworm/ ; \
+(if ls /deb/*.deb; then \
+apt-get install -y --no-install-recommends pkg-config /deb/*.deb ; \
+dpkg-source -x *.dsc ; \
+else \
+apt-get install -y --no-install-recommends pkg-config vitastor-client ; \
+apt-get source vitastor ; \
+fi) ; \
+mv vitastor-*/ vitastor ; \
+cd vitastor/node-binding ; \
+npm install
+RUN cd /app/ && \
+cp -r /app/vitastor-bookworm/vitastor/node-binding node_modules/vitastor && \
+perl -i -pe "s!require\('bindings'\)\('!require('./build/Release/!" \
+node_modules/vitastor/index.js node_modules/ioctl/index.js node_modules/fcntl/index.js && \
+perl -i -pe "s!require\('node-gyp-build'\)\(__dirname\)!require('./prebuilds/linux-x64/node.napi.node')!" \
+node_modules/utf-8-validate/index.js node_modules/bufferutil/index.js && \
+perl -i -pe "s!require\('node-gyp-build'\)\(__dirname\)!require('./prebuilds/linux-x64/classic-level.node')!" \
+node_modules/classic-level/binding.js && \
+npm exec webpack -- --mode=development
+####################
+FROM debian:bookworm
+ADD https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key /etc/apt/trusted.gpg.d/
+RUN set -e ; \
+apt-get update ; \
+apt-get install -y --no-install-recommends ca-certificates ; \
+echo deb https://deb.nodesource.com/node_22.x nodistro main > /etc/apt/sources.list.d/nodesource.list ; \
+(grep -vP '^----|^=' /etc/apt/trusted.gpg.d/nodesource-repo.gpg.key | base64 -d) > /etc/apt/trusted.gpg.d/nodesource-repo.gpg ; \
+apt-get update ; \
+apt-get install -y --no-install-recommends curl ca-certificates nodejs jq node-bindings libibverbs1 librdmacm1 libtcmalloc-minimal4 liburing2 ; \
+apt-get clean ; \
+rm -rf /var/lib/apt/lists/*
+ADD https://vitastor.io/debian/pubkey.gpg /etc/apt/trusted.gpg.d/vitastor.gpg
+ADD https://vitastor.io/debian/dists/bookworm/Release /app/debian-release
+COPY --from=builder /deb/ /deb/
+RUN set -e ; \
+echo deb http://vitastor.io/debian bookworm main > /etc/apt/sources.list.d/vitastor.list ; \
+chmod 644 /etc/apt/trusted.gpg.d/vitastor.gpg ; \
+apt-get update ; \
+(if ls /deb/*.deb; then \
+apt-get install -y --no-install-recommends /deb/*.deb ; \
+else \
+apt-get install -y --no-install-recommends vitastor-client ; \
+fi) ; \
+apt-get clean ; \
+rm -rf /var/lib/apt/lists/*
+COPY --from=builder /app/node_modules/vitastor /app/node_modules/vitastor
+COPY --from=builder /app/dist /app/
+COPY --from=builder /app/authdata.json.example /app/
+COPY --from=builder /app/config.json.vitastor /app/config.json.example
+COPY --from=builder /app/locationConfig.json.vitastor /app/locationConfig.json.example
+COPY bin/configure.sh /usr/bin/
+WORKDIR /app
 EXPOSE 8000
 EXPOSE 8002
-RUN apt-get update && \
-apt-get install -y --no-install-recommends \
-jq \
-tini \
-&& rm -rf /var/lib/apt/lists/*
-WORKDIR /usr/src/app
-# Keep the .git directory in order to properly report version
-COPY . /usr/src/app
-COPY --from=builder /usr/src/app/node_modules ./node_modules/
-VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
-ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
-CMD [ "yarn", "start" ]
+CMD [ "nodejs", "/app/zenko-vitastor.js" ]


@@ -1,22 +0,0 @@
FROM node:6-slim
MAINTAINER Giorgio Regni <gr@scality.com>
WORKDIR /usr/src/app
COPY . /usr/src/app
RUN apt-get update \
&& apt-get install -y jq python git build-essential --no-install-recommends \
&& yarn install --production \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-*
ENV S3BACKEND mem
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
EXPOSE 8000

README.md

@@ -16,66 +16,80 @@ backend support.
 ## Quick Start with Vitastor
-Vitastor Backend is in experimental status, however you can already try to
-run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
-it works too 😊.
-Installation instructions:
+Vitastor S3 Backend is now released. Installation instructions:
 ### Install Vitastor
 Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
-### Install Zenko with Vitastor Backend
+Then create a separate pool for S3 object data in your Vitastor cluster, for example:
-- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
-- Install dependencies: `npm install --omit dev` or just `npm install`
-- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
-- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
-You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
-- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
+`vitastor-cli create-pool --ec 2+1 -n 512 s3-data`
+Retrieve the ID of your pool with `vitastor-cli ls-pools s3-data --detail`.
 ### Install and Configure MongoDB
 Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
-### Setup Zenko
+### Install Zenko with Vitastor Backend
-- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
-- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
-- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
-- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
-- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
-and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
-access keys, but it's not published, so let's use a file for now.
-- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
-You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
-in this file.
+1. Download Docker image: `docker pull vitalif/vitastor-zenko`
-Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
-instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
+2. Extract configuration file examples from the Docker image:
+```
+docker run --rm -it -v /etc/vitastor:/etc/vitastor -v /etc/vitastor/s3:/conf vitalif/vitastor-zenko configure.sh
+```
+3. Edit configuration files in `/etc/vitastor/s3/`:
+- `config.json` - common settings.
+- `authdata.json` - user accounts and access keys.
+- `locationConfig.json` - S3 storage class list with placement settings.
+Note: it actually contains storage classes (like STANDARD, COLD, etc)
+instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer.
+- Put your MongoDB connection data into `config.json` and `locationConfig.json`.
+- Put your Vitastor pool ID into `locationConfig.json`.
 - For now, the complete list of Vitastor backend settings is only available [in the code](https://git.yourcmc.ru/vitalif/zenko-arsenal/src/branch/master/lib/storage/data/vitastor/VitastorBackend.ts#L94).
 ### Start Zenko
-Start the S3 server with: `node index.js`
+Start the S3 server with:
+```
+docker run --restart always --security-opt seccomp:unconfined --ulimit memlock=-1 --network=host \
+-v /etc/vitastor:/etc/vitastor -v /etc/vitastor/s3:/conf --name zenko vitalif/vitastor-zenko
+```
 If you use default settings, Zenko CloudServer starts on port 8000.
 The default access key is `accessKey1` with a secret key of `verySecretKey1`.
-Now you can access your S3 with `s3cmd` or `geesefs`:
+Now you can access your S3 with, for example, [s3cmd](https://s3tools.org/s3cmd):
 ```
 s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
 ```
+Or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs):
 ```
 AWS_ACCESS_KEY_ID=accessKey1 \
 AWS_SECRET_ACCESS_KEY=verySecretKey1 \
 geesefs --endpoint http://localhost:8000 testbucket mountdir
 ```
+## Building from source
+- Clone this repository: `git clone --recurse-submodules https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
+- Install dependencies: `npm install --omit dev` or just `npm install`
+- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
+- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
+You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
+- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
 # Author & License
-- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
-- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
+- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality,
+licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is
+Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
+(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)

arsenal Submodule

@@ -0,0 +1 @@
Subproject commit ab7861b3b001132b64ec7027fcbe1300149d017c

bin/configure.sh Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
set -e
[[ -f /conf/config.json ]] || cp /app/config.json.example /conf/config.json
[[ -f /conf/locationConfig.json ]] || cp /app/locationConfig.json.example /conf/locationConfig.json
[[ -f /conf/authdata.json ]] || cp /app/authdata.json.example /conf/authdata.json


@@ -59,7 +59,6 @@
 "host": "localhost",
 "port": 8500
 },
-"clusters": 1,
 "log": {
 "logLevel": "info",
 "dumpLevel": "error"


@@ -1,5 +1,6 @@
 {
 "port": 8000,
+"workers": 4,
 "listenOn": [],
 "metricsPort": 8002,
 "metricsListenOn": [],

docker-entrypoint.sh

@@ -1,220 +0,0 @@
#!/bin/bash
# set -e stops the execution of a script if a command or pipeline has an error
set -e
# modifying config.json
JQ_FILTERS_CONFIG="."
# ENDPOINT var can accept comma separated values
# for multiple endpoint locations
if [[ "$ENDPOINT" ]]; then
IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
for host in "${HOST_NAMES[@]}"; do
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
done
echo "Host name has been modified to ${HOST_NAMES[@]}"
echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with ${HOST_NAMES[@]}"
fi
if [[ "$LOG_LEVEL" ]]; then
if [[ "$LOG_LEVEL" == "info" || "$LOG_LEVEL" == "debug" || "$LOG_LEVEL" == "trace" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .log.logLevel=\"$LOG_LEVEL\""
echo "Log level has been modified to $LOG_LEVEL"
else
echo "The log level you provided is incorrect (info/debug/trace)"
fi
fi
if [[ "$SSL" && "$HOST_NAMES" ]]; then
# This condition makes sure that the certificates are not generated twice. (for docker restart)
if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
# Compute config for utapi tests
cat >>req.cfg <<EOF
[req]
distinguished_name = req_distinguished_name
prompt = no
req_extensions = s3_req
[req_distinguished_name]
CN = ${HOST_NAMES[0]}
[s3_req]
subjectAltName = @alt_names
extendedKeyUsage = serverAuth, clientAuth
[alt_names]
DNS.1 = *.${HOST_NAMES[0]}
DNS.2 = ${HOST_NAMES[0]}
EOF
## Generate SSL key and certificates
# Generate a private key for your CSR
openssl genrsa -out ca.key 2048
# Generate a self signed certificate for your local Certificate Authority
openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=S3 CA Server"
# Generate a key for S3 Server
openssl genrsa -out server.key 2048
# Generate a Certificate Signing Request for S3 Server
openssl req -new -key server.key -out server.csr -config req.cfg
# Generate a local-CA-signed certificate for S3 Server
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 99999 -sha256 -extfile req.cfg -extensions s3_req
fi
## Update S3Server config.json
# This condition makes sure that certFilePaths section is not added twice. (for docker restart)
if ! grep -q "certFilePaths" ./config.json; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .certFilePaths= { \"key\": \".\/server.key\", \"cert\": \".\/server.crt\", \"ca\": \".\/ca.crt\" }"
fi
fi
if [[ "$LISTEN_ADDR" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .listenOn=[\"$LISTEN_ADDR:8000\"]"
fi
if [[ "$REPLICATION_GROUP_ID" ]] ; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .replicationGroupId=\"$REPLICATION_GROUP_ID\""
fi
if [[ "$DATA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataClient.host=\"$DATA_HOST\""
fi
if [[ "$METADATA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
fi
if [[ "$PFSD_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsClient.host=\"$PFSD_HOST\""
fi
if [[ "$MONGODB_HOSTS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
fi
if [[ "$MONGODB_RS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSet=\"$MONGODB_RS\""
fi
if [[ "$MONGODB_DATABASE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.database=\"$MONGODB_DATABASE\""
fi
if [ -z "$REDIS_HA_NAME" ]; then
REDIS_HA_NAME='mymaster'
fi
if [[ "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.name=\"$REDIS_HA_NAME\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.sentinels=\"$REDIS_SENTINELS\""
elif [[ "$REDIS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.host=\"$REDIS_HOST\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.port=6379"
fi
if [[ "$REDIS_PORT" ]] && [[ ! "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .localCache.port=$REDIS_PORT"
fi
if [[ "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.name=\"$REDIS_HA_NAME\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.sentinels=\"$REDIS_SENTINELS\""
elif [[ "$REDIS_HA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.host=\"$REDIS_HA_HOST\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=6379"
fi
if [[ "$REDIS_HA_PORT" ]] && [[ ! "$REDIS_SENTINELS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=$REDIS_HA_PORT"
fi
if [[ "$RECORDLOG_ENABLED" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .recordLog.enabled=true"
fi
if [[ "$STORAGE_LIMIT_ENABLED" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.metrics[.utapi.metrics | length]=\"location\""
fi
if [[ "$CRR_METRICS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.host=\"$CRR_METRICS_HOST\""
fi
if [[ "$CRR_METRICS_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
fi
if [[ "$WE_OPERATOR_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.host=\"$WE_OPERATOR_HOST\""
fi
if [[ "$WE_OPERATOR_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.port=$WE_OPERATOR_PORT"
fi
if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi
# external backends http(s) agent config
# AWS
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAlive=$AWS_S3_HTTPAGENT_KEEPALIVE"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAliveMsecs=$AWS_S3_HTTPAGENT_KEEPALIVE_MS"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxFreeSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi
#GCP
if [[ "$GCP_HTTPAGENT_KEEPALIVE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAlive=$GCP_HTTPAGENT_KEEPALIVE"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAliveMsecs=$GCP_HTTPAGENT_KEEPALIVE_MS"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi
if [[ -n "$BUCKET_DENY_FILTER" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
fi
if [[ "$TESTING_MODE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
fi
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json
fi
if test -v INITIAL_INSTANCE_ID && test -v S3METADATAPATH && ! test -f ${S3METADATAPATH}/uuid ; then
echo -n ${INITIAL_INSTANCE_ID} > ${S3METADATAPATH}/uuid
fi
# s3 secret credentials for Zenko
if [ -r /run/secrets/s3-credentials ] ; then
. /run/secrets/s3-credentials
fi
exec "$@"

eslint-config-scality Submodule

@@ -0,0 +1 @@
Subproject commit d383e47d8ec650b547de3d0b5fcb48e97bd48ba1

fcntl Submodule

@@ -0,0 +1 @@
Subproject commit 89449bbe583f8ad7531b68cff6cb67ce7682cf18

httpagent Submodule

@@ -0,0 +1 @@
Subproject commit 045e9c8282390e92d372f0577d5814fe0ccbeba3


@@ -121,7 +121,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
 }
 // The locations were sent to metadata as an array
 // under partLocations. Pull the partLocations.
-const locations = storedParts.flatMap(item => item.value.partLocations);
+const locations = storedParts.flatMap(item => item.value.location||item.value.partLocations);
 if (locations.length === 0) {
 return next(null, mpuBucket, storedParts, destBucket);
 }
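This `location || partLocations` fallback repeats across the MPU code paths below: metadata written under the new model version 6 stores part locations under `location`, while records from older versions still carry `partLocations`. A condensed sketch of the pattern:

```js
// Sketch: read MPU part locations regardless of metadata model version.
// v6 records use 'location'; pre-v6 records use 'partLocations', which
// may even be a single object rather than an array in very old entries.
function getPartLocations(mdValue) {
    const loc = mdValue.location || mdValue.partLocations;
    if (loc === undefined) {
        return [];
    }
    return Array.isArray(loc) ? loc : [loc];
}
```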


@@ -314,7 +314,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 extraPartLocations, completeObjData, totalMPUSize, next) {
 const metaHeaders = {};
 const keysNotNeeded =
-['initiator', 'partLocations', 'key',
+['initiator', 'location', 'partLocations', 'key',
 'initiated', 'uploadId', 'content-type', 'expires',
 'eventualStorageBucket', 'dataStoreName'];
 const metadataKeysToPull =


@@ -318,7 +318,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
 let oldLocations;
 let prevObjectSize = null;
 if (result) {
-oldLocations = result.partLocations;
+oldLocations = result.location||result.partLocations;
 prevObjectSize = result['content-length'];
 // Pull locations to clean up any potential orphans
 // in data if object put is an overwrite of


@@ -264,8 +264,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
 // Pull locations to clean up any potential orphans in
 // data if object put is an overwrite of a pre-existing
 // object with the same key and part number.
-oldLocations = Array.isArray(res.partLocations) ?
-res.partLocations : [res.partLocations];
+oldLocations = Array.isArray(res.location||res.partLocations) ?
+(res.location||res.partLocations) : [(res.location||res.partLocations)];
 }
 return next(null, destinationBucket,
 objectLocationConstraint, cipherBundle,
@@ -321,9 +321,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
 partLocations[0].sseCipheredDataKey = cipheredDataKey;
 }
 const omVal = {
-// back to Version 3 since number-subparts is not needed
-'md-model-version': 3,
-partLocations,
+// Version 6 changes 'partLocations' to 'location'
+'md-model-version': 6,
+'location': partLocations,
 'key': partKey,
 'last-modified': new Date().toJSON(),
 'content-md5': hexDigest,


@@ -28,7 +28,7 @@ if (config.backends.data === 'mem') {
 client = new DataFileInterface(config);
 implName = 'file';
 } else if (config.backends.data === 'multiple') {
-const clients = parseLC(config, vault);
+const clients = parseLC(config, vault, metadata);
 client = new MultipleBackendGateway(
 clients, metadata, locationStorageCheck);
 implName = 'multipleBackends';
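Context for this one-line change ("Plug metadata backend into parseLC wrapper"): with Vitastor volume metadata now stored through the metadata layer (see `metadata_mongodb` in `locationConfig.json.vitastor` below), the location-constraint parser needs access to it. A hypothetical sketch of the shape, not the actual arsenal code:

```js
// Hypothetical stand-in for arsenal's VitastorBackend (the real class
// lives in zenko-arsenal, lib/storage/data/vitastor/VitastorBackend.ts).
class VitastorBackend {
    constructor(name, details, metadata) {
        this.name = name;
        this.details = details;
        this.metadata = metadata; // used to persist volume metadata
    }
}

// Hypothetical: parseLC builds one data client per configured location
// and can now hand the metadata wrapper to backends that need it.
function parseLC(config, vault, metadata) {
    const clients = {};
    Object.entries(config.locationConstraints).forEach(([name, lc]) => {
        if (lc.type === 'vitastor') {
            clients[name] = new VitastorBackend(name, lc.details, metadata);
        }
        // ... other backend types elided in this sketch
    });
    return clients;
}
```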


@@ -772,10 +772,9 @@ const services = {
 assert.strictEqual(typeof splitter, 'string');
 const partKey = `${uploadId}${splitter}${partNumber}`;
 const omVal = {
-// Version 3 changes the format of partLocations
-// from an object to an array
-'md-model-version': 3,
-partLocations,
+// Version 6 changes 'partLocations' to 'location'
+'md-model-version': 6,
+'location': partLocations,
 'key': partKey,
 'last-modified': dateModified,
 'content-md5': contentMD5,
@@ -891,6 +890,7 @@
 marker: undefined,
 delimiter: undefined,
 maxKeys: 10000,
+withLocation: true,
 };
 metadata.listObject(mpuBucketName, searchArgs, log, cb);
 },
@@ -906,6 +906,7 @@
 marker: `${uploadId}${params.splitter}${paddedPartNumber}`,
 delimiter: undefined,
 maxKeys: maxParts,
+withLocation: true,
 };
 metadata.listObject(mpuBucketName, searchArgs, log, cb);
 },

locationConfig.json.vitastor

@@ -5,8 +5,12 @@
 "legacyAwsBehavior": true,
 "details": {
 "config_path": "/etc/vitastor/vitastor.conf",
-"pool_id": 3,
-"metadata_image": "s3-volume-meta"
+"pool_id": 10,
+"metadata_mongodb": {
+"url": "mongodb://USERNAME:PASSWORD@10.10.10.1:27017,10.10.10.2:27017,10.10.10.3:27017/?w=majority&readPreference=primary&replicaSet=rs0",
+"dbname": "vitastor",
+"collection": "volume_metadata"
+}
 }
 }
 }
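Since volume metadata now lives in MongoDB rather than in a dedicated Vitastor image, a quick connectivity check against the `metadata_mongodb` settings above can be done with the `mongodb` driver already in this repo's dependencies (URL, database, and collection names copied from the example; substitute real credentials):

```js
const { MongoClient } = require('mongodb');

async function checkVolumeMetadata() {
    const client = new MongoClient(
        'mongodb://USERNAME:PASSWORD@10.10.10.1:27017,10.10.10.2:27017,10.10.10.3:27017'
        + '/?w=majority&readPreference=primary&replicaSet=rs0');
    await client.connect();
    // dbname and collection as configured in locationConfig.json.vitastor
    const n = await client.db('vitastor').collection('volume_metadata').countDocuments();
    console.log(`volume_metadata documents: ${n}`);
    await client.close();
}

checkVolumeMetadata().catch(err => { console.error(err); process.exit(1); });
```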

package.json

@@ -6,7 +6,10 @@
 "engines": {
 "node": ">=16"
 },
-"repository": "scality/S3",
+"repository": {
+"type": "git",
+"url": "https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor"
+},
 "keywords": [
 "s3",
 "cloud",
@@ -15,13 +18,13 @@
 "author": "Scality Inc.",
 "license": "Apache-2.0",
 "bugs": {
-"url": "https://github.com/scality/S3/issues"
+"url": "https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor/issues"
 },
-"homepage": "https://github.com/scality/S3#readme",
+"homepage": "https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor#zenko-cloudserver-with-vitastor-backend",
 "dependencies": {
 "@azure/storage-blob": "^12.12.0",
 "@hapi/joi": "^17.1.0",
-"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
+"arsenal": "./arsenal",
 "async": "^2.5.0",
 "aws-sdk": "^2.905.0",
 "bufferutil": "^4.0.6",
@@ -32,23 +35,24 @@
 "http-proxy": "^1.17.0",
 "http-proxy-agent": "^4.0.1",
 "https-proxy-agent": "^2.2.0",
-"level-mem": "^5.0.1",
 "moment": "^2.26.0",
 "mongodb": "^5.2.0",
 "node-fetch": "^2.6.0",
 "node-forge": "^0.7.1",
 "npm-run-all": "^4.1.5",
-"prom-client": "14.2.0",
+"prom-client": "^14.2.0",
 "request": "^2.81.0",
-"scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
+"scubaclient": "./scubaclient",
 "sql-where-parser": "^2.2.1",
-"utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
+"utapi": "./utapi",
 "utf-8-validate": "^5.0.8",
 "utf8": "^2.1.1",
 "uuid": "^8.3.2",
-"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
+"werelogs": "./werelogs",
 "ws": "^5.1.0",
-"xml2js": "^0.4.16"
+"xml2js": "^0.4.16",
+"fcntl": "./fcntl",
+"httpagent": "./httpagent"
 },
 "overrides": {
 "ltgt": "^2.2.0"
@@ -60,7 +64,7 @@
 "bluebird": "^3.3.1",
 "eslint": "^8.14.0",
 "eslint-config-airbnb-base": "^15.0.0",
-"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
+"eslint-config-scality": "./eslint-config-scality",
 "eslint-plugin-import": "^2.14.0",
 "eslint-plugin-mocha": "^10.1.0",
 "express": "^4.17.1",
@@ -71,6 +75,7 @@
 "mocha": ">=3.1.2",
 "mocha-junit-reporter": "^1.23.1",
 "mocha-multi-reporters": "^1.1.7",
+"node-loader": "^2.1.0",
 "node-mocks-http": "^1.5.2",
 "sinon": "^13.0.1",
 "tv4": "^1.2.7",

scubaclient Submodule

@@ -0,0 +1 @@
Subproject commit 619aca73fc95af9594f8113f700111c479d96b9f


@@ -71,35 +71,35 @@ describe('List Parts API', () => {
 'last-modified': '2015-11-30T22:41:18.658Z',
 'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
 'content-length': '6000000',
-'partLocations': ['068db6a6745a79d54c1b29ff99f9f131'],
+'location': ['068db6a6745a79d54c1b29ff99f9f131'],
 });
 inMemMetadata.keyMaps.get(mpuBucket).set(partTwoKey, {
 'key': partTwoKey,
 'last-modified': '2015-11-30T22:41:40.207Z',
 'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
 'content-length': '6000000',
-'partLocations': ['ff22f316b16956ff5118c93abce7d62d'],
+'location': ['ff22f316b16956ff5118c93abce7d62d'],
 });
 inMemMetadata.keyMaps.get(mpuBucket).set(partThreeKey, {
 'key': partThreeKey,
 'last-modified': '2015-11-30T22:41:52.102Z',
 'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
 'content-length': '6000000',
-'partLocations': ['dea282f70edb6fc5f9433cd6f525d4a6'],
+'location': ['dea282f70edb6fc5f9433cd6f525d4a6'],
 });
 inMemMetadata.keyMaps.get(mpuBucket).set(partFourKey, {
 'key': partFourKey,
 'last-modified': '2015-11-30T22:42:03.493Z',
 'content-md5': 'f3a9fb2071d3503b703938a74eb99846',
 'content-length': '6000000',
-'partLocations': ['afe24bc40153982e1f7f28066f7af6a4'],
+'location': ['afe24bc40153982e1f7f28066f7af6a4'],
 });
 inMemMetadata.keyMaps.get(mpuBucket).set(partFiveKey, {
 'key': partFiveKey,
 'last-modified': '2015-11-30T22:42:22.876Z',
 'content-md5': '555e4cd2f9eff38109d7a3ab13995a32',
 'content-length': '18',
-'partLocations': ['85bc16f5769687070fb13cfe66b5e41f'],
+'location': ['85bc16f5769687070fb13cfe66b5e41f'],
 });
 done();
 });


@@ -159,7 +159,7 @@ function putMPU(key, body, cb) {
 const calculatedHash = md5Hash.digest('hex');
 const partKey = `${uploadId}${constants.splitter}00001`;
 const obj = {
-partLocations: [{
+location: [{
 key: 1,
 dataStoreName: 'scality-internal-mem',
 dataStoreETag: `1:${calculatedHash}`,


@@ -46,7 +46,6 @@
 "host": "localhost",
 "port": 8500
 },
-"clusters": 10,
 "log": {
 "logLevel": "info",
 "dumpLevel": "error"

utapi Submodule

@@ -0,0 +1 @@
Subproject commit 84953ff86820bfa2bb6a24bdc79b1ae94ce6ff68

webpack.config.js

@@ -1,5 +1,8 @@
 module.exports = {
 entry: './index.js',
+resolve: {
+extensions: ["...", ".node"],
+},
 target: 'node',
 devtool: 'source-map',
 output: {
@@ -12,10 +15,15 @@ module.exports = {
 use: {
 loader: 'babel-loader',
 options: {
-presets: [ [ "@babel/preset-env", { "targets": { "node": "12.0" }, "exclude": [ "transform-regenerator" ] } ] ],
+presets: [ [ "@babel/preset-env", { "targets": { "node": "18.0" } } ] ]
+//presets: [ [ "@babel/preset-env", { "targets": { "node": "4.0" } } ] ],
 }
 }
 },
+{
+test: /\.node$/,
+loader: "node-loader",
+},
 {
 test: /.json$/,
 type: 'json'
@@ -23,12 +31,6 @@ module.exports = {
 ]
 },
 externals: {
-'leveldown': 'commonjs leveldown',
-'bufferutil': 'commonjs bufferutil',
-'diskusage': 'commonjs diskusage',
-'utf-8-validate': 'commonjs utf-8-validate',
-'fcntl': 'commonjs fcntl',
-'ioctl': 'commonjs ioctl',
 'vitastor': 'commonjs vitastor',
 'vaultclient': 'commonjs vaultclient',
 'bucketclient': 'commonjs bucketclient',
@@ -39,7 +41,6 @@ module.exports = {
 'kerberos': 'commonjs kerberos',
 '@mongodb-js/zstd': 'commonjs @mongodb-js/zstd',
 '@aws-sdk/credential-providers': 'commonjs @aws-sdk/credential-providers',
-'snappy': 'commonjs snappy',
 'mongodb-client-encryption': 'commonjs mongodb-client-encryption'
 },
 node: {

werelogs Submodule

@@ -0,0 +1 @@
Subproject commit b57d072e51f39a3112b0719bbd1974c0e292e233