Compare commits


1 Commit

Author: Alexander Chan · SHA1: 84c7b336c0 · Message: UTAPI-94: add test workflow_dispatch · Date: 2023-06-05 11:50:08 -07:00
30 changed files with 5817 additions and 523 deletions

View File

@@ -1,7 +1,8 @@
-FROM ghcr.io/scality/vault:c2607856
+FROM registry.scality.com/vault-dev/vault:c2607856
 
 ENV VAULT_DB_BACKEND LEVELDB
 
 RUN chmod 400 tests/utils/keyfile
 
 ENTRYPOINT yarn start

View File

@@ -2,13 +2,18 @@ name: build-ci-images
 on:
   workflow_call:
+    secrets:
+      REGISTRY_LOGIN:
+        required: true
+      REGISTRY_PASSWORD:
+        required: true
 
 jobs:
   warp10-ci:
-    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
     secrets:
       REGISTRY_LOGIN: ${{ github.repository_owner }}
-      REGISTRY_PASSWORD: ${{ github.token }}
+      REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
     with:
       name: warp10-ci
       context: .
@@ -16,22 +21,22 @@ jobs:
       lfs: true
   redis-ci:
-    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
     secrets:
       REGISTRY_LOGIN: ${{ github.repository_owner }}
-      REGISTRY_PASSWORD: ${{ github.token }}
+      REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
     with:
       name: redis-ci
       context: .
       file: images/redis/Dockerfile
   redis-replica-ci:
-    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
     needs:
       - redis-ci
     secrets:
       REGISTRY_LOGIN: ${{ github.repository_owner }}
-      REGISTRY_PASSWORD: ${{ github.token }}
+      REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
     with:
       name: redis-replica-ci
      context: .github/docker/redis-replica
@@ -42,21 +47,28 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2.3.4
         with:
           lfs: true
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1.6.0
       - name: Login to GitHub Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1.10.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
-          password: ${{ github.token }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Login to Scality Registry
+        uses: docker/login-action@v1.10.0
+        with:
+          registry: registry.scality.com
+          username: ${{ secrets.REGISTRY_LOGIN }}
+          password: ${{ secrets.REGISTRY_PASSWORD }}
       - name: Build and push vault Image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v2.7.0
         with:
           push: true
           context: .github/docker/vault

View File

@@ -7,10 +7,9 @@ on:
 jobs:
   build-dev:
-    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
-    secrets:
-      REGISTRY_LOGIN: ${{ github.repository_owner }}
-      REGISTRY_PASSWORD: ${{ github.token }}
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+    secrets: inherit
     with:
-      namespace: ${{ github.repository_owner }}
-      name: ${{ github.event.repository.name }}
+      registry: registry.scality.com
+      namespace: utapi-dev
+      name: utapi

View File

@@ -15,9 +15,11 @@ on:
 jobs:
   build:
-    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
     secrets: inherit
     with:
+      registry: registry.scality.com
+      namespace: utapi
       name: warp10
       context: .
       file: images/warp10/Dockerfile
@@ -29,11 +31,11 @@ jobs:
     runs-on: ubuntu-latest
     needs: build
     steps:
-      - uses: softprops/action-gh-release@v2
+      - uses: softprops/action-gh-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
-          name: Release utapi/warp10:${{ github.event.inputs.tag }}-warp10
+          name: Release registry.scality.com/utapi/warp10:${{ github.event.inputs.tag }}-warp10
          tag_name: ${{ github.event.inputs.tag }}-warp10
          generate_release_notes: false
          target_commitish: ${{ github.sha }}

View File

@@ -22,10 +22,12 @@ on:
 jobs:
   build:
-    uses: scality/workflows/.github/workflows/docker-build.yaml@v2
+    uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+    secrets: inherit
     with:
-      namespace: ${{ github.repository_owner }}
-      name: ${{ github.event.repository.name }}
+      registry: registry.scality.com
+      namespace: utapi
+      name: utapi
       context: .
       file: ${{ github.event.inputs.dockerfile}}
       tag: ${{ github.event.inputs.tag }}
@@ -35,9 +37,9 @@ jobs:
     runs-on: ubuntu-latest
     needs: build
     steps:
-      - uses: softprops/action-gh-release@v2
+      - uses: softprops/action-gh-release@v1
        env:
-          GITHUB_TOKEN: ${{ github.token }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          name: Release ${{ github.event.inputs.tag }}
          tag_name: ${{ github.event.inputs.tag }}

View File

@@ -2,34 +2,27 @@
 name: tests
 on:
+  workflow_dispatch:
   push:
     branches-ignore:
     - 'development/**'
-  workflow_dispatch:
-    inputs:
-      debug:
-        description: Debug (enable the ability to SSH to runners)
-        type: boolean
-        required: false
-        default: 'false'
-      connection-timeout-m:
-        type: number
-        required: false
-        description: Timeout for ssh connection to worker (minutes)
-        default: 30
 
 jobs:
   build-ci:
     uses: ./.github/workflows/build-ci.yaml
+    secrets:
+      REGISTRY_LOGIN: ${{ secrets.REGISTRY_LOGIN }}
+      REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
   lint:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           lfs: true
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v2
        with:
          node-version: '16.13.2'
          cache: yarn
@@ -88,7 +81,7 @@ jobs:
           --health-timeout 5s
           --health-retries 5
       redis-sentinel:
-        image: bitnami/redis-sentinel:7.2.4
+        image: bitnami/redis-sentinel:6.2
         env:
           REDIS_MASTER_SET: scality-s3
           REDIS_SENTINEL_PORT_NUMBER: '16379'
@@ -119,19 +112,24 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           lfs: true
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v2
        with:
          node-version: '16.13.2'
          cache: yarn
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v2
        with:
          python-version: '3.9'
-          cache: pip
+      - uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip
       - name: Install python deps
-        run: pip install -r requirements.txt
+        run: |
+          pip install requests
+          pip install redis
       - name: install dependencies
         run: yarn install --frozen-lockfile --network-concurrency 1
       - name: ${{ matrix.test.name }}
@@ -165,7 +163,7 @@ jobs:
           --health-timeout 5s
           --health-retries 5
       redis-sentinel:
-        image: bitnami/redis-sentinel:7.2.4
+        image: bitnami/redis-sentinel:6.2
         env:
           REDIS_MASTER_SET: scality-s3
           REDIS_SENTINEL_PORT_NUMBER: '16379'
@@ -208,19 +206,24 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           lfs: true
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v2
        with:
          node-version: '16.13.2'
          cache: yarn
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v2
        with:
          python-version: '3.9'
-          cache: pip
+      - uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip
       - name: Install python deps
-        run: pip install -r requirements.txt
+        run: |
+          pip install requests
+          pip install redis
       - name: install dependencies
         run: yarn install --frozen-lockfile --network-concurrency 1
       - name: Wait for warp10 for 60 seconds
@@ -232,16 +235,9 @@ jobs:
           UTAPI_SERVICE_USER_ENABLED: 'true'
           UTAPI_LOG_LEVEL: trace
           SETUP_CMD: "run start_v2:server"
-      - name: 'Debug: SSH to runner'
-        uses: scality/actions/action-ssh-to-runner@1.7.0
-        timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
-        continue-on-error: true
-        with:
-          tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
-          tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
-          tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
-          tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
-        if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}
+      - name: Setup tmate session
+        uses: mxschmitt/action-tmate@v3
+        if: failure()
 
   tests-v2-without-sensision:
     needs:
@@ -287,7 +283,7 @@ jobs:
           --health-timeout 5s
           --health-retries 5
       redis-sentinel:
-        image: bitnami/redis-sentinel:7.2.4
+        image: bitnami/redis-sentinel:6.2
         env:
           REDIS_MASTER_SET: scality-s3
           REDIS_SENTINEL_PORT_NUMBER: '16379'
@@ -329,19 +325,24 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
           lfs: true
-      - uses: actions/setup-node@v4
+      - uses: actions/setup-node@v2
        with:
          node-version: '16.13.2'
          cache: yarn
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v2
        with:
          python-version: '3.9'
-          cache: pip
+      - uses: actions/cache@v2
+        with:
+          path: ~/.cache/pip
+          key: ${{ runner.os }}-pip
       - name: Install python deps
-        run: pip install -r requirements.txt
+        run: |
+          pip install requests
+          pip install redis
       - name: install dependencies
         run: yarn install --frozen-lockfile --network-concurrency 1
       - name: Wait for warp10 a little bit
@@ -349,13 +350,6 @@ jobs:
       - name: ${{ matrix.test.name }}
         run: ${{ matrix.test.command }}
         env: ${{ matrix.test.env }}
-      - name: 'Debug: SSH to runner'
-        uses: scality/actions/action-ssh-to-runner@1.7.0
-        timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
-        continue-on-error: true
-        with:
-          tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
-          tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
-          tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
-          tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
-        if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}
+      - name: Setup tmate session
+        uses: mxschmitt/action-tmate@v3
+        if: failure()

View File

@@ -27,7 +27,7 @@ x-models:
 services:
   redis-0:
-    image: redis:7.2.4
+    image: redis:6
     command: redis-server --port 6379 --slave-announce-ip "${EXTERNAL_HOST}"
     ports:
       - 6379:6379
@@ -35,7 +35,7 @@ services:
       - HOST_IP="${EXTERNAL_HOST}"
   redis-1:
-    image: redis:7.2.4
+    image: redis:6
     command: redis-server --port 6380 --slaveof "${EXTERNAL_HOST}" 6379 --slave-announce-ip "${EXTERNAL_HOST}"
     ports:
       - 6380:6380
@@ -43,7 +43,7 @@ services:
       - HOST_IP="${EXTERNAL_HOST}"
   redis-sentinel-0:
-    image: redis:7.2.4
+    image: redis:6
     command: |-
       bash -c 'cat > /tmp/sentinel.conf <<EOF
       port 16379

View File

@@ -2,10 +2,11 @@
 ## Docker Image Generation
 
-Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
-Utapi has one namespace there:
+Docker images are hosted on [registry.scality.com](registry.scality.com).
+Utapi has two namespaces there:
 
-* Namespace: ghcr.io/scality/utapi
+* Production Namespace: registry.scality.com/utapi
+* Dev Namespace: registry.scality.com/utapi-dev
 
 With every CI build, the CI will push images, tagging the
 content with the developer branch's short SHA-1 commit hash.
@@ -17,8 +18,8 @@ Tagged versions of utapi will be stored in the production namespace.
 ## How to Pull Docker Images
 
 ```sh
-docker pull ghcr.io/scality/utapi:<commit hash>
-docker pull ghcr.io/scality/utapi:<tag>
+docker pull registry.scality.com/utapi-dev/utapi:<commit hash>
+docker pull registry.scality.com/utapi/utapi:<tag>
 ```
 
 ## Release Process

View File

@@ -1,4 +1,4 @@
-FROM ghcr.io/scality/federation/nodesvc-base:7.10.5.0
+FROM registry.scality.com/federation/nodesvc-base:7.10.5.0
 
 ENV UTAPI_CONFIG_FILE=${CONF_DIR}/config.json

images/warp10/.gitattributes (vendored, new file, 1 line)
View File

@@ -0,0 +1 @@
+*.jar filter=lfs diff=lfs merge=lfs -text

View File

@@ -13,7 +13,7 @@ RUN apk add zip unzip build-base \
     && cd .. \
     && go build -a -o /usr/local/go/warp10_sensision_exporter
 
-FROM ghcr.io/scality/utapi/warp10:2.8.1-95-g73e7de80
+FROM registry.scality.com/utapi/warp10:2.8.1-95-g73e7de80
 
 # Override baked in version
 # Remove when updating to a numbered release

View File

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:389d2135867c399a389901460c5f2cc09f4857d0c6d08632c2638c25fb150c46
+size 15468553

View File

@@ -1,13 +1,35 @@
 /* eslint-disable no-bitwise */
 const assert = require('assert');
 const fs = require('fs');
+const path = require('path');
 
 /**
  * Reads from a config file and returns the content as a config object
  */
 class Config {
-    constructor(config) {
-        this.component = config.component;
+    constructor() {
+        /*
+         * By default, the config file is "config.json" at the root.
+         * It can be overridden using the UTAPI_CONFIG_FILE environment var.
+         */
+        this._basePath = path.resolve(__dirname, '..');
+        this.path = `${this._basePath}/config.json`;
+        if (process.env.UTAPI_CONFIG_FILE !== undefined) {
+            this.path = process.env.UTAPI_CONFIG_FILE;
+        }
+
+        // Read config automatically
+        this._getConfig();
+    }
+
+    _getConfig() {
+        let config;
+        try {
+            const data = fs.readFileSync(this.path, { encoding: 'utf-8' });
+            config = JSON.parse(data);
+        } catch (err) {
+            throw new Error(`could not parse config file: ${err.message}`);
+        }
 
         this.port = 9500;
         if (config.port !== undefined) {
@@ -93,26 +115,18 @@ class Config {
             }
         }
 
-        if (config.vaultclient) {
-            // Instance passed from outside
-            this.vaultclient = config.vaultclient;
-            this.vaultd = null;
-        } else {
-            // Connection data
-            this.vaultclient = null;
-            this.vaultd = {};
-            if (config.vaultd) {
-                if (config.vaultd.port !== undefined) {
-                    assert(Number.isInteger(config.vaultd.port)
-                        && config.vaultd.port > 0,
-                        'bad config: vaultd port must be a positive integer');
-                    this.vaultd.port = config.vaultd.port;
-                }
-                if (config.vaultd.host !== undefined) {
-                    assert.strictEqual(typeof config.vaultd.host, 'string',
-                        'bad config: vaultd host must be a string');
-                    this.vaultd.host = config.vaultd.host;
-                }
+        this.vaultd = {};
+        if (config.vaultd) {
+            if (config.vaultd.port !== undefined) {
+                assert(Number.isInteger(config.vaultd.port)
+                    && config.vaultd.port > 0,
+                    'bad config: vaultd port must be a positive integer');
+                this.vaultd.port = config.vaultd.port;
+            }
+            if (config.vaultd.host !== undefined) {
+                assert.strictEqual(typeof config.vaultd.host, 'string',
+                    'bad config: vaultd host must be a string');
+                this.vaultd.host = config.vaultd.host;
             }
         }
@@ -127,11 +141,12 @@ class Config {
         const { key, cert, ca } = config.certFilePaths
             ? config.certFilePaths : {};
         if (key && cert) {
-            const keypath = key;
-            const certpath = cert;
+            const keypath = (key[0] === '/') ? key : `${this._basePath}/${key}`;
+            const certpath = (cert[0] === '/')
+                ? cert : `${this._basePath}/${cert}`;
             let capath;
             if (ca) {
-                capath = ca;
+                capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`;
                 assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK),
                     `File not found or unreachable: ${capath}`);
             }
@@ -157,13 +172,8 @@ class Config {
                 + 'expireMetrics must be a boolean');
             this.expireMetrics = config.expireMetrics;
         }
-
-        if (config.onlyCountLatestWhenObjectLocked !== undefined) {
-            assert(typeof config.onlyCountLatestWhenObjectLocked === 'boolean',
-                'bad config: onlyCountLatestWhenObjectLocked must be a boolean');
-            this.onlyCountLatestWhenObjectLocked = config.onlyCountLatestWhenObjectLocked;
-        }
+        return config;
     }
 }
 
-module.exports = Config;
+module.exports = new Config();

View File

@@ -6,6 +6,8 @@ const async = require('async');
 const { errors } = require('arsenal');
 const { getMetricFromKey, getKeys, generateStateKey } = require('./schema');
 const s3metricResponseJSON = require('../models/s3metricResponse');
+const config = require('./Config');
+const Vault = require('./Vault');
 
 const MAX_RANGE_MS = (((1000 * 60) * 60) * 24) * 30; // One month.
 
@@ -21,6 +23,7 @@ class ListMetrics {
     constructor(metric, component) {
         this.metric = metric;
         this.service = component;
+        this.vault = new Vault(config);
     }
 
     /**
@@ -80,10 +83,9 @@ class ListMetrics {
         const resources = validator.get(this.metric);
         const timeRange = validator.get('timeRange');
         const datastore = utapiRequest.getDatastore();
-        const vault = utapiRequest.getVault();
         // map account ids to canonical ids
         if (this.metric === 'accounts') {
-            return vault.getCanonicalIds(resources, log, (err, list) => {
+            return this.vault.getCanonicalIds(resources, log, (err, list) => {
                 if (err) {
                     return cb(err);
                 }
@@ -122,11 +124,10 @@ class ListMetrics {
         const fifteenMinutes = 15 * 60 * 1000; // In milliseconds
         const timeRange = [start - fifteenMinutes, end];
         const datastore = utapiRequest.getDatastore();
-        const vault = utapiRequest.getVault();
 
         // map account ids to canonical ids
         if (this.metric === 'accounts') {
-            return vault.getCanonicalIds(resources, log, (err, list) => {
+            return this.vault.getCanonicalIds(resources, log, (err, list) => {
                 if (err) {
                     return cb(err);
                 }

View File

@@ -16,19 +16,15 @@ const REINDEX_PYTHON_INTERPRETER = process.env.REINDEX_PYTHON_INTERPRETER !== un
     ? process.env.REINDEX_PYTHON_INTERPRETER
     : 'python3.7';
 
-const EXIT_CODE_SENTINEL_CONNECTION = 100;
-
 class UtapiReindex {
     constructor(config) {
         this._enabled = false;
         this._schedule = REINDEX_SCHEDULE;
-        this._redis = {
+        this._sentinel = {
+            host: '127.0.0.1',
+            port: 16379,
             name: 'scality-s3',
             sentinelPassword: '',
-            sentinels: [{
-                host: '127.0.0.1',
-                port: 16379,
-            }],
         };
         this._bucketd = {
             host: '127.0.0.1',
@@ -46,13 +42,14 @@ class UtapiReindex {
         if (config && config.password) {
             this._password = config.password;
         }
-        if (config && config.redis) {
+        if (config && config.sentinel) {
             const {
-                name, sentinelPassword, sentinels,
-            } = config.redis;
-            this._redis.name = name || this._redis.name;
-            this._redis.sentinelPassword = sentinelPassword || this._redis.sentinelPassword;
-            this._redis.sentinels = sentinels || this._redis.sentinels;
+                host, port, name, sentinelPassword,
+            } = config.sentinel;
+            this._sentinel.host = host || this._sentinel.host;
+            this._sentinel.port = port || this._sentinel.port;
+            this._sentinel.name = name || this._sentinel.name;
+            this._sentinel.sentinelPassword = sentinelPassword || this._sentinel.sentinelPassword;
         }
         if (config && config.bucketd) {
             const { host, port } = config.bucketd;
@@ -64,16 +61,17 @@ class UtapiReindex {
             this._log = new werelogs.Logger('UtapiReindex', { level, dump });
         }
-        this._onlyCountLatestWhenObjectLocked = (config && config.onlyCountLatestWhenObjectLocked === true);
         this._requestLogger = this._log.newRequestLogger();
     }
 
     _getRedisClient() {
         const client = new RedisClient({
-            sentinels: this._redis.sentinels,
-            name: this._redis.name,
-            sentinelPassword: this._redis.sentinelPassword,
+            sentinels: [{
+                host: this._sentinel.host,
+                port: this._sentinel.port,
+            }],
+            name: this._sentinel.name,
+            sentinelPassword: this._sentinel.sentinelPassword,
             password: this._password,
         });
         client.connect();
@@ -88,18 +86,17 @@ class UtapiReindex {
         return this.ds.del(REINDEX_LOCK_KEY);
     }
 
-    _buildFlags(sentinel) {
+    _buildFlags() {
         const flags = {
             /* eslint-disable camelcase */
-            sentinel_ip: sentinel.host,
-            sentinel_port: sentinel.port,
-            sentinel_cluster_name: this._redis.name,
+            sentinel_ip: this._sentinel.host,
+            sentinel_port: this._sentinel.port,
+            sentinel_cluster_name: this._sentinel.name,
             bucketd_addr: `http://${this._bucketd.host}:${this._bucketd.port}`,
         };
-        if (this._redis.sentinelPassword) {
-            flags.redis_password = this._redis.sentinelPassword;
+        if (this._sentinel.sentinelPassword) {
+            flags.redis_password = this._sentinel.sentinelPassword;
         }
 
         /* eslint-enable camelcase */
         const opts = [];
         Object.keys(flags)
@@ -108,15 +105,11 @@ class UtapiReindex {
                 opts.push(name);
                 opts.push(flags[flag]);
             });
-        if (this._onlyCountLatestWhenObjectLocked) {
-            opts.push('--only-latest-when-locked');
-        }
         return opts;
     }
 
-    _runScriptWithSentinels(path, remainingSentinels, done) {
-        const flags = this._buildFlags(remainingSentinels.shift());
+    _runScript(path, done) {
+        const flags = this._buildFlags();
         this._requestLogger.debug(`launching subprocess ${path} with flags: ${flags}`);
         const process = childProcess.spawn(REINDEX_PYTHON_INTERPRETER, [path, ...flags]);
         process.stdout.on('data', data => {
@@ -143,17 +136,6 @@ class UtapiReindex {
                     statusCode: code,
                     script: path,
                 });
-                if (code === EXIT_CODE_SENTINEL_CONNECTION) {
-                    if (remainingSentinels.length > 0) {
-                        this._requestLogger.info('retrying with next sentinel host', {
-                            script: path,
-                        });
-                        return this._runScriptWithSentinels(path, remainingSentinels, done);
-                    }
-                    this._requestLogger.error('no more sentinel host to try', {
-                        script: path,
-                    });
-                }
             } else {
                 this._requestLogger.info('script exited successfully', {
                     statusCode: code,
@@ -164,11 +146,6 @@ class UtapiReindex {
         });
     }
 
-    _runScript(path, done) {
-        const remainingSentinels = [...this._redis.sentinels];
-        this._runScriptWithSentinels(path, remainingSentinels, done);
-    }
-
     _attemptLock(job) {
         this._requestLogger.info('attempting to acquire the lock to begin job');
         this._lock()
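The removed retry logic above encodes a small contract between this scheduler and the Python reindex scripts: a script exits with code 100 when it cannot reach the given sentinel, and the scheduler relaunches it against the next configured sentinel. A minimal sketch of that contract, written in Python for illustration only (the repository's scheduler is JavaScript; `run_with_sentinels` and `build_flags` are hypothetical names):

```python
import subprocess

# Exit code the reindex scripts use to signal "could not reach this sentinel"
EXIT_CODE_SENTINEL_CONNECTION = 100

def run_with_sentinels(script, sentinels, build_flags):
    # Try each configured sentinel until the script exits with anything
    # other than the sentinel-connection error code.
    for sentinel in sentinels:
        code = subprocess.call(['python3.7', script, *build_flags(sentinel)])
        if code != EXIT_CODE_SENTINEL_CONNECTION:
            return code
    raise RuntimeError('no more sentinel host to try')
```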

View File

@@ -14,15 +14,6 @@ class UtapiRequest {
         this._datastore = null;
         this._requestQuery = null;
         this._requestPath = null;
-        this._vault = null;
-    }
-
-    getVault() {
-        return this._vault;
-    }
-
-    setVault() {
-        return this._vault;
     }
 
     /**

View File

@@ -1,21 +1,16 @@
-import argparse
-import ast
-from concurrent.futures import ThreadPoolExecutor
-import json
-import logging
-import re
-import redis
 import requests
+import redis
+import json
+import ast
 import sys
-from threading import Thread
 import time
 import urllib
+import re
+import sys
+from threading import Thread
+from concurrent.futures import ThreadPoolExecutor
 
-logging.basicConfig(level=logging.INFO)
-_log = logging.getLogger('utapi-reindex:reporting')
-
-SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
-EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
+import argparse
 
 def get_options():
     parser = argparse.ArgumentParser()
@@ -34,19 +29,8 @@ class askRedis():
 
     def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
         self._password = password
-        r = redis.Redis(
-            host=ip,
-            port=port,
-            db=0,
-            password=password,
-            socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
-        )
-        try:
-            self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
-        except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
-            _log.error(f'Failed to connect to redis sentinel at {ip}:{port}: {e}')
-            # use a specific error code to hint on retrying with another sentinel node
-            sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
+        r = redis.Redis(host=ip, port=port, db=0, password=password)
+        self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
 
     def read(self, resource, name):
         r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)
@@ -114,4 +98,4 @@ if __name__ == '__main__':
             data = U.read('accounts', userid)
             content = "Account:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
                 userid, data["files"], data["total_size"])
             executor.submit(safe_print, content)
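The left-hand side's sentinel handling, removed above, reduces to one pattern: bound the connection attempt with a timeout, and exit with a dedicated status code on failure so the parent process can retry another sentinel. A minimal sketch using redis-py (`resolve_master` is a hypothetical helper name):

```python
import sys

import redis

SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100

def resolve_master(ip, port, cluster_name, password=None):
    r = redis.Redis(
        host=ip,
        port=port,
        db=0,
        password=password,
        socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS,
    )
    try:
        # Ask the sentinel which node currently holds the master role
        return r.sentinel_get_master_addr_by_name(cluster_name)
    except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):
        # Dedicated exit code hints the caller to retry with another sentinel
        sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
```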

View File

@@ -1,6 +1,5 @@
 import argparse
 import concurrent.futures as futures
-import functools
 import itertools
 import json
 import logging
@@ -9,9 +8,9 @@ import re
 import sys
 import time
 import urllib
-from pathlib import Path
 from collections import defaultdict, namedtuple
 from concurrent.futures import ThreadPoolExecutor
+from pprint import pprint
 
 import redis
 import requests
@@ -25,9 +24,6 @@ MPU_SHADOW_BUCKET_PREFIX = 'mpuShadowBucket'
 
 ACCOUNT_UPDATE_CHUNKSIZE = 100
 
-SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
-EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
-
 def get_options():
     parser = argparse.ArgumentParser()
     parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
@@ -36,38 +32,9 @@ def get_options():
     parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
     parser.add_argument("-s", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
     parser.add_argument("-w", "--worker", default=10, type=int, help="Number of workers")
+    parser.add_argument("-b", "--bucket", default=None, help="Bucket to be processed")
     parser.add_argument("-r", "--max-retries", default=2, type=int, help="Max retries before failing a bucketd request")
-    parser.add_argument("--only-latest-when-locked", action='store_true', help="Only index the latest version of a key when the bucket has a default object lock policy")
-    parser.add_argument("--debug", action='store_true', help="Enable debug logging")
-    parser.add_argument("--dry-run", action="store_true", help="Do not update redis")
-    group = parser.add_mutually_exclusive_group()
-    group.add_argument("-a", "--account", default=[], help="account canonical ID (all account buckets will be processed)", action="append", type=nonempty_string('account'))
-    group.add_argument("--account-file", default=None, help="file containing account canonical IDs, one ID per line", type=existing_file)
-    group.add_argument("-b", "--bucket", default=[], help="bucket name", action="append", type=nonempty_string('bucket'))
-    group.add_argument("--bucket-file", default=None, help="file containing bucket names, one bucket name per line", type=existing_file)
-    options = parser.parse_args()
-    if options.bucket_file:
-        with open(options.bucket_file) as f:
-            options.bucket = [line.strip() for line in f if line.strip()]
-    elif options.account_file:
-        with open(options.account_file) as f:
-            options.account = [line.strip() for line in f if line.strip()]
-    return options
-
-def nonempty_string(flag):
-    def inner(value):
-        if not value.strip():
-            raise argparse.ArgumentTypeError("%s: value must not be empty"%flag)
-        return value
-    return inner
-
-def existing_file(path):
-    path = Path(path).resolve()
-    if not path.exists():
-        raise argparse.ArgumentTypeError("File does not exist: %s"%path)
-    return path
+    return parser.parse_args()
 
 def chunks(iterable, size):
     it = iter(iterable)
@@ -82,7 +49,7 @@ def _encoded(func):
         return urllib.parse.quote(val.encode('utf-8'))
     return inner
 
-Bucket = namedtuple('Bucket', ['userid', 'name', 'object_lock_enabled'])
+Bucket = namedtuple('Bucket', ['userid', 'name'])
 MPU = namedtuple('MPU', ['bucket', 'key', 'upload_id'])
 BucketContents = namedtuple('BucketContents', ['bucket', 'obj_count', 'total_size'])
@@ -94,21 +61,15 @@ class InvalidListing(Exception):
     def __init__(self, bucket):
         super().__init__('Invalid contents found while listing bucket %s'%bucket)
 
-class BucketNotFound(Exception):
-    def __init__(self, bucket):
-        super().__init__('Bucket %s not found'%bucket)
-
 class BucketDClient:
 
     '''Performs Listing calls against bucketd'''
-    __url_attribute_format = '{addr}/default/attributes/{bucket}'
-    __url_bucket_format = '{addr}/default/bucket/{bucket}'
+    __url_format = '{addr}/default/bucket/{bucket}'
     __headers = {"x-scal-request-uids": "utapi-reindex-list-buckets"}
 
-    def __init__(self, bucketd_addr=None, max_retries=2, only_latest_when_locked=False):
+    def __init__(self, bucketd_addr=None, max_retries=2):
         self._bucketd_addr = bucketd_addr
         self._max_retries = max_retries
-        self._only_latest_when_locked = only_latest_when_locked
         self._session = requests.Session()
 
     def _do_req(self, url, check_500=True, **kwargs):
@@ -140,7 +101,7 @@ class BucketDClient:
         parameters value. On the first request the function will be called with
         `None` and should return its initial value. Return `None` for the param to be excluded.
         '''
-        url = self.__url_bucket_format.format(addr=self._bucketd_addr, bucket=bucket)
+        url = self.__url_format.format(addr=self._bucketd_addr, bucket=bucket)
         static_params = {k: v for k, v in kwargs.items() if not callable(v)}
         dynamic_params = {k: v for k, v in kwargs.items() if callable(v)}
         is_truncated = True  # Set to True for first loop
@@ -153,9 +114,6 @@ class BucketDClient:
                 _log.debug('listing bucket bucket: %s params: %s'%(
                     bucket, ', '.join('%s=%s'%p for p in params.items())))
                 resp = self._do_req(url, params=params)
-                if resp.status_code == 404:
-                    _log.debug('Bucket not found bucket: %s'%bucket)
-                    return
                 if resp.status_code == 200:
                     payload = resp.json()
             except ValueError as e:
@@ -177,37 +135,7 @@ class BucketDClient:
             else:
                 is_truncated = len(payload) > 0
 
-    @functools.lru_cache(maxsize=16)
-    def _get_bucket_attributes(self, name):
-        url = self.__url_attribute_format.format(addr=self._bucketd_addr, bucket=name)
-        try:
-            resp = self._do_req(url)
-            if resp.status_code == 200:
-                return resp.json()
-            else:
-                _log.error('Error getting bucket attributes bucket:%s status_code:%s'%(name, resp.status_code))
-                raise BucketNotFound(name)
-        except ValueError as e:
-            _log.exception(e)
-            _log.error('Invalid attributes response body! bucket:%s'%name)
-            raise
-        except MaxRetriesReached:
-            _log.error('Max retries reached getting bucket attributes bucket:%s'%name)
-            raise
-        except Exception as e:
-            _log.exception(e)
-            _log.error('Unhandled exception getting bucket attributes bucket:%s'%name)
-            raise
-
-    def get_bucket_md(self, name):
-        md = self._get_bucket_attributes(name)
-        canonId = md.get('owner')
-        if canonId is None:
-            _log.error('No owner found for bucket %s'%name)
-            raise InvalidListing(name)
-        return Bucket(canonId, name, md.get('objectLockEnabled', False))
-
-    def list_buckets(self, account=None):
+    def list_buckets(self, name = None):
         def get_next_marker(p):
if p is None: if p is None:
@@ -219,24 +147,19 @@ class BucketDClient:
             'maxKeys': 1000,
             'marker': get_next_marker
         }
-        if account is not None:
-            params['prefix'] = '%s..|..' % account
-
         for _, payload in self._list_bucket(USERS_BUCKET, **params):
             buckets = []
-            for result in payload.get('Contents', []):
+            for result in payload['Contents']:
                 match = re.match("(\w+)..\|..(\w+.*)", result['key'])
-                bucket = Bucket(*match.groups(), False)
-                # We need to get the attributes for each bucket to determine if it is locked
-                if self._only_latest_when_locked:
-                    bucket_attrs = self._get_bucket_attributes(bucket.name)
-                    object_lock_enabled = bucket_attrs.get('objectLockEnabled', False)
-                    bucket = bucket._replace(object_lock_enabled=object_lock_enabled)
-                buckets.append(bucket)
+                bucket = Bucket(*match.groups())
+                if name is None or bucket.name == name:
+                    buckets.append(bucket)
 
             if buckets:
                 yield buckets
+                if name is not None:
+                    # Break on the first matching bucket if a name is given
+                    break
 
     def list_mpus(self, bucket):
         _bucket = MPU_SHADOW_BUCKET_PREFIX + bucket.name
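Both sides of the hunk above rely on the same key convention in the users bucket: each entry is named `<canonicalID>..|..<bucketName>`, which is why the left side can restrict a listing to a single account with a key prefix and both sides split the key with the same regex. A small sketch of the parsing (`parse_users_bucket_key` is a hypothetical helper name):

```python
import re

def parse_users_bucket_key(key):
    # Keys in the users bucket look like '<canonicalID>..|..<bucketName>'
    match = re.match(r"(\w+)..\|..(\w+.*)", key)
    userid, bucket_name = match.groups()
    return userid, bucket_name

# For example, 'abc123..|..my-bucket' maps to ('abc123', 'my-bucket').
```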
@@ -273,12 +196,18 @@ class BucketDClient:
                         upload_id=key['value']['UploadId']))
         return keys
 
-    def _sum_objects(self, bucket, listing, only_latest_when_locked = False):
+    def _sum_objects(self, bucket, listing):
         count = 0
         total_size = 0
-        last_key = None
-        try:
-            for obj in listing:
+        last_master = None
+        last_size = None
+        for status_code, payload in listing:
+            contents = payload['Contents'] if isinstance(payload, dict) else payload
+            if contents is None:
+                _log.error('Invalid contents in listing. bucket:%s status_code:%s'%(bucket, status_code))
+                raise InvalidListing(bucket)
+            for obj in contents:
+                count += 1
                 if isinstance(obj['value'], dict):
                     # bucketd v6 returns a dict:
                     data = obj.get('value', {})
@@ -287,51 +216,39 @@ class BucketDClient:
                     # bucketd v7 returns an encoded string
                     data = json.loads(obj['value'])
                 size = data.get('content-length', 0)
-                is_latest = obj['key'] != last_key
-                last_key = obj['key']
-                if only_latest_when_locked and bucket.object_lock_enabled and not is_latest:
-                    _log.debug('Skipping versioned key: %s'%obj['key'])
-                    continue
-                count += 1
                 total_size += size
-        except InvalidListing:
-            _log.error('Invalid contents in listing. bucket:%s'%bucket.name)
-            raise InvalidListing(bucket.name)
-        return count, total_size
 
-    def _extract_listing(self, key, listing):
-        for status_code, payload in listing:
-            contents = payload[key] if isinstance(payload, dict) else payload
-            if contents is None:
-                raise InvalidListing('')
-            for obj in contents:
-                yield obj
+                # If versioned, subtract the size of the master to avoid double counting
+                if last_master is not None and obj['key'].startswith(last_master + '\x00'):
+                    _log.debug('Detected versioned key: %s - subtracting master size: %i'% (
+                        obj['key'],
+                        last_size,
+                    ))
+                    total_size -= last_size
+                    count -= 1
+                    last_master = None
+
+                # Only save master versions
+                elif '\x00' not in obj['key']:
+                    last_master = obj['key']
+                    last_size = size
+
+        return count, total_size
 
     def count_bucket_contents(self, bucket):
 
-        def get_key_marker(p):
-            if p is None:
+        def get_next_marker(p):
+            if p is None or len(p) == 0:
                 return ''
-            return p.get('NextKeyMarker', '')
-
-        def get_vid_marker(p):
-            if p is None:
-                return ''
-            return p.get('NextVersionIdMarker', '')
+            return p[-1].get('key', '')
 
         params = {
-            'listingType': 'DelimiterVersions',
+            'listingType': 'Basic',
             'maxKeys': 1000,
-            'keyMarker': get_key_marker,
-            'versionIdMarker': get_vid_marker,
+            'gt': get_next_marker,
         }
-        listing = self._list_bucket(bucket.name, **params)
-        count, total_size = self._sum_objects(bucket, self._extract_listing('Versions', listing), self._only_latest_when_locked)
+        count, total_size = self._sum_objects(bucket.name, self._list_bucket(bucket.name, **params))
 
         return BucketContents(
             bucket=bucket,
             obj_count=count,
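The right-hand side's `_sum_objects` depends on how bucketd flattens versioned listings: the master (current) version of a key contains no null byte, and each noncurrent version follows it as `<masterKey>\x00<versionId>`. A short sketch of the test it applies (`is_version_of` is a hypothetical helper name):

```python
def is_version_of(last_master, key):
    # A noncurrent version is listed as '<masterKey>\x00<versionId>',
    # immediately after its master; master keys contain no '\x00'.
    return last_master is not None and key.startswith(last_master + '\x00')
```

When this matches, that code subtracts the master's size and count once, so a versioned object is only reported for its current version.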
@@ -339,8 +256,7 @@ class BucketDClient:
         )
 
     def count_mpu_parts(self, mpu):
-        shadow_bucket_name = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name
-        shadow_bucket = mpu.bucket._replace(name=shadow_bucket_name)
+        _bucket = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name
 
         def get_prefix(p):
             if p is None:
@@ -360,31 +276,13 @@ class BucketDClient:
             'listingType': 'Delimiter',
         }
 
-        listing = self._list_bucket(shadow_bucket_name, **params)
-        count, total_size = self._sum_objects(shadow_bucket, self._extract_listing('Contents', listing))
+        count, total_size = self._sum_objects(_bucket, self._list_bucket(_bucket, **params))
 
         return BucketContents(
-            bucket=shadow_bucket,
+            bucket=mpu.bucket._replace(name=_bucket),
             obj_count=0, # MPU parts are not counted towards numberOfObjects
             total_size=total_size
         )
 
-def list_all_buckets(bucket_client):
-    return bucket_client.list_buckets()
-
-def list_specific_accounts(bucket_client, accounts):
-    for account in accounts:
-        yield from bucket_client.list_buckets(account=account)
-
-def list_specific_buckets(bucket_client, buckets):
-    batch = []
-    for bucket in buckets:
-        try:
-            batch.append(bucket_client.get_bucket_md(bucket))
-        except BucketNotFound:
-            _log.error('Failed to list bucket %s. Removing from results.'%bucket)
-            continue
-    yield batch
-
 def index_bucket(client, bucket):
     '''
@@ -424,16 +322,9 @@ def get_redis_client(options):
         host=options.sentinel_ip,
         port=options.sentinel_port,
         db=0,
-        password=options.redis_password,
-        socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
+        password=options.redis_password
     )
-    try:
-        ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
-    except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
-        _log.error(f'Failed to connect to redis sentinel at {options.sentinel_ip}:{options.sentinel_port}: {e}')
-        # use a specific error code to hint on retrying with another sentinel node
-        sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
+    ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
     return redis.Redis(
         host=ip,
         port=port,
@@ -467,24 +358,16 @@ def log_report(resource, name, obj_count, total_size):
 
 if __name__ == '__main__':
     options = get_options()
-    if options.debug:
-        _log.setLevel(logging.DEBUG)
-
-    bucket_client = BucketDClient(options.bucketd_addr, options.max_retries, options.only_latest_when_locked)
+    if options.bucket is not None and not options.bucket.strip():
+        print('You must provide a bucket name with the --bucket flag')
+        sys.exit(1)
+    bucket_client = BucketDClient(options.bucketd_addr, options.max_retries)
     redis_client = get_redis_client(options)
     account_reports = {}
     observed_buckets = set()
     failed_accounts = set()
-
-    if options.account:
-        batch_generator = list_specific_accounts(bucket_client, options.account)
-    elif options.bucket:
-        batch_generator = list_specific_buckets(bucket_client, options.bucket)
-    else:
-        batch_generator = list_all_buckets(bucket_client)
-
     with ThreadPoolExecutor(max_workers=options.worker) as executor:
-        for batch in batch_generator:
+        for batch in bucket_client.list_buckets(options.bucket):
             bucket_reports = {}
             jobs = { executor.submit(index_bucket, bucket_client, b): b for b in batch }
             for job in futures.as_completed(jobs.keys()):
@@ -503,84 +386,51 @@ if __name__ == '__main__':
                 update_report(account_reports, total.bucket.userid, total.obj_count, total.total_size)
 
             # Bucket reports can be updated as we get them
-            if options.dry_run:
-                for bucket, report in bucket_reports.items():
-                    _log.info(
-                        "DryRun: resource buckets [%s] would be updated with obj_count %i and total_size %i" % (
-                            bucket, report['obj_count'], report['total_size']
-                        )
-                    )
-            else:
-                pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
-                for bucket, report in bucket_reports.items():
-                    update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size'])
-                    log_report('buckets', bucket, report['obj_count'], report['total_size'])
-                pipeline.execute()
+            pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
+            for bucket, report in bucket_reports.items():
+                update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size'])
+                log_report('buckets', bucket, report['obj_count'], report['total_size'])
+            pipeline.execute()
 
-    stale_buckets = set()
-    recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
-    if options.bucket:
-        stale_buckets = { b for b in options.bucket if b not in observed_buckets }
-    elif options.account:
-        _log.warning('Stale buckets will not be cleared when using the --account or --account-file flags')
-    else:
-        stale_buckets = recorded_buckets.difference(observed_buckets)
-
-    _log.info('Found %s stale buckets' % len(stale_buckets))
-    if options.dry_run:
-        _log.info("DryRun: not updating stale buckets")
-    else:
-        for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
-            pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
-            for bucket in chunk:
-                update_redis(pipeline, 'buckets', bucket, 0, 0)
-                log_report('buckets', bucket, 0, 0)
-            pipeline.execute()
-
-    # Account metrics are not updated if a bucket is specified
-    if options.bucket:
-        _log.warning('Account metrics will not be updated when using the --bucket or --bucket-file flags')
-    else:
+    recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
+    if options.bucket is None:
+        stale_buckets = recorded_buckets.difference(observed_buckets)
+    elif observed_buckets and options.bucket not in recorded_buckets:
+        # The provided bucket does not exist, so clean up any metrics
+        stale_buckets = { options.bucket }
+    else:
+        stale_buckets = set()
+
+    _log.info('Found %s stale buckets' % len(stale_buckets))
+    for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
+        pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
+        for bucket in chunk:
+            update_redis(pipeline, 'buckets', bucket, 0, 0)
+            log_report('buckets', bucket, 0, 0)
+        pipeline.execute()
+
+    # Account metrics are not updated if a bucket is specified
+    if options.bucket is None:
         # Don't update any accounts with failed listings
         without_failed = filter(lambda x: x[0] not in failed_accounts, account_reports.items())
-        if options.dry_run:
-            for userid, report in account_reports.items():
-                _log.info(
-                    "DryRun: resource account [%s] would be updated with obj_count %i and total_size %i" % (
-                        userid, report['obj_count'], report['total_size']
-                    )
-                )
-        else:
-            # Update total account reports in chunks
-            for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE):
-                pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
-                for userid, report in chunk:
-                    update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
-                    log_report('accounts', userid, report['obj_count'], report['total_size'])
-                pipeline.execute()
-
-    if options.account:
-        for account in options.account:
-            if account in failed_accounts:
-                _log.error("No metrics updated for account %s, one or more buckets failed" % account)
+        # Update total account reports in chunks
+        for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE):
+            pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
+            for userid, report in chunk:
+                update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
+                log_report('accounts', userid, report['obj_count'], report['total_size'])
+            pipeline.execute()
 
     # Include failed_accounts in observed_accounts to avoid clearing metrics
     observed_accounts = failed_accounts.union(set(account_reports.keys()))
     recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts'))
 
-    if options.account:
-        stale_accounts = { a for a in options.account if a not in observed_accounts }
-    else:
-        # Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
-        stale_accounts = recorded_accounts.difference(observed_accounts)
+    # Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
+    stale_accounts = recorded_accounts.difference(observed_accounts)
 
     _log.info('Found %s stale accounts' % len(stale_accounts))
-    if options.dry_run:
-        _log.info("DryRun: not updating stale accounts")
-    else:
-        for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
-            pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
-            for account in chunk:
-                update_redis(pipeline, 'accounts', account, 0, 0)
-                log_report('accounts', account, 0, 0)
-            pipeline.execute()
+    for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
+        pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
+        for account in chunk:
+            update_redis(pipeline, 'accounts', account, 0, 0)
+            log_report('accounts', account, 0, 0)
        pipeline.execute()
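For contrast with the right-hand side above, the left side's version-aware summation (shown earlier in the `_sum_objects` hunk) can be reduced to the sketch below: with a `DelimiterVersions` listing, the first entry for a key is its latest version, so older versions are skipped when the bucket is object-locked and `--only-latest-when-locked` is set. This is a simplified illustration; `listing` is assumed to yield dicts with a `key` and a pre-parsed `size`:

```python
def sum_objects(listing, object_lock_enabled, only_latest_when_locked=False):
    count = 0
    total_size = 0
    last_key = None
    for obj in listing:
        # In a DelimiterVersions listing the first entry for a key is its
        # latest version; repeats of the same key are older versions.
        is_latest = obj['key'] != last_key
        last_key = obj['key']
        if only_latest_when_locked and object_lock_enabled and not is_latest:
            continue  # skip noncurrent versions of locked buckets
        count += 1
        total_size += obj['size']
    return count, total_size
```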

View File

@@ -7,6 +7,7 @@ const { Clustering, errors, ipCheck } = require('arsenal');
 const arsenalHttps = require('arsenal').https;
 const { Logger } = require('werelogs');
 
+const config = require('./Config');
 const routes = require('../router/routes');
 const Route = require('../router/Route');
 const Router = require('../router/Router');
@@ -27,12 +28,7 @@ class UtapiServer {
     constructor(worker, port, datastore, logger, config) {
         this.worker = worker;
         this.port = port;
-        this.vault = config.vaultclient;
-        if (!this.vault) {
-            const Vault = require('./Vault');
-            this.vault = new Vault(config);
-        }
-        this.router = new Router(config, this.vault);
+        this.router = new Router(config);
         this.logger = logger;
         this.datastore = datastore;
         this.server = null;
@@ -75,7 +71,6 @@ class UtapiServer {
         req.socket.setNoDelay();
         const { query, path, pathname } = url.parse(req.url, true);
         const utapiRequest = new UtapiRequest()
-            .setVault(this.vault)
             .setRequest(req)
             .setLog(this.logger.newRequestLogger())
             .setResponse(res)
@@ -219,7 +214,8 @@ class UtapiServer {
  * @property {object} params.log - logger configuration
  * @return {undefined}
  */
-function spawn(config) {
+function spawn(params) {
+    Object.assign(config, params);
     const {
         workers, redis, log, port,
     } = config;

View File

@@ -23,6 +23,10 @@
     "healthChecks": {
         "allowFrom": ["127.0.0.1/8", "::1"]
     },
+    "vaultd": {
+        "host": "127.0.0.1",
+        "port": 8500
+    },
     "cacheBackend": "memory",
     "development": false,
     "nodeId": "single_node",

View File

@@ -2,8 +2,6 @@ const fs = require('fs');
 const path = require('path');
 const Joi = require('@hapi/joi');
 const assert = require('assert');
-const defaults = require('./defaults.json');
-const werelogs = require('werelogs');
 
 const {
     truthy, envNamespace, allowedFilterFields, allowedFilterStates,
@@ -73,6 +71,7 @@ class Config {
     constructor(overrides) {
         this._basePath = path.join(__dirname, '../../');
         this._configPath = _loadFromEnv('CONFIG_FILE', defaultConfigPath);
+        this._defaultsPath = path.join(__dirname, 'defaults.json');
 
         this.host = undefined;
         this.port = undefined;
@@ -90,11 +89,6 @@ class Config {
             parsedConfig = this._recursiveUpdate(parsedConfig, overrides);
         }
         Object.assign(this, parsedConfig);
-
-        werelogs.configure({
-            level: Config.logging.level,
-            dump: Config.logging.dumpLevel,
-        });
     }
 
     static _readFile(path, encoding = 'utf-8') {
@@ -119,7 +113,7 @@ class Config {
     }
 
     _loadDefaults() {
-        return defaults;
+        return Config._readJSON(this._defaultsPath);
     }
 
     _loadUserConfig() {

View File

@@ -6,8 +6,7 @@ const BackOff = require('backo');
 const { whilst } = require('async');
 
 const errors = require('./errors');
-const { LoggerContext } = require('./utils/log');
-const { asyncOrCallback } = require('./utils/func');
+const { LoggerContext, asyncOrCallback } = require('./utils');
 
 const moduleLogger = new LoggerContext({
     module: 'redis',

View File

@@ -1,6 +1,14 @@
 const werelogs = require('werelogs');
 
+const config = require('../config');
 const { comprehend } = require('./func');
 
+const loggerConfig = {
+    level: config.logging.level,
+    dump: config.logging.dumpLevel,
+};
+
+werelogs.configure(loggerConfig);
+
 const rootLogger = new werelogs.Logger('Utapi');
 
 class LoggerContext {
@@ -70,6 +78,8 @@ class LoggerContext {
     }
 }
 
+rootLogger.debug('logger initialized', { loggerConfig });
+
 function buildRequestLogger(req) {
     let reqUids = [];
     if (req.headers['x-scal-request-uids'] !== undefined) {

View File

@@ -1,5 +1,6 @@
 const assert = require('assert');
 const { auth, policies } = require('arsenal');
+const vaultclient = require('vaultclient');
 
 const config = require('../config');
 const errors = require('../errors');
 
 /**
@@ -8,17 +9,9 @@ const errors = require('../errors');
  */
 class VaultWrapper extends auth.Vault {
-    create(config) {
-        if (config.vaultd.host) {
-            return new VaultWrapper(config);
-        }
-        return null;
-    }
-
     constructor(options) {
         let client;
         const { host, port } = options.vaultd;
-        const vaultclient = require('vaultclient');
         if (options.tls) {
             const { key, cert, ca } = options.tls;
             client = new vaultclient.Client(host, port, true, key, cert,
@@ -126,7 +119,7 @@ class VaultWrapper extends auth.Vault {
     }
 }
 
-const vault = VaultWrapper.create(config);
+const vault = new VaultWrapper(config);
 auth.setHandler(vault);
 
 module.exports = {

View File

@@ -3,7 +3,7 @@
   "engines": {
     "node": ">=16"
   },
-  "version": "8.1.15",
+  "version": "8.1.12",
   "description": "API for tracking resource utilization and reporting metrics",
   "main": "index.js",
   "repository": {
@@ -19,12 +19,13 @@
   "dependencies": {
     "@hapi/joi": "^17.1.1",
     "@senx/warp10": "^1.0.14",
-    "arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
+    "arsenal": "git+https://github.com/scality/Arsenal#8.1.87",
     "async": "^3.2.0",
     "aws-sdk": "^2.1005.0",
     "aws4": "^1.8.0",
     "backo": "^1.1.0",
     "body-parser": "^1.19.0",
+    "bucketclient": "scality/bucketclient#8.1.9",
     "byte-size": "^7.0.0",
     "commander": "^5.1.0",
     "cron-parser": "^2.15.0",
@@ -37,16 +38,17 @@
     "needle": "^2.5.0",
     "node-schedule": "^1.3.2",
     "oas-tools": "^2.2.2",
-    "prom-client": "14.2.0",
+    "prom-client": "^13.1.0",
     "uuid": "^3.3.2",
-    "werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1"
+    "vaultclient": "scality/vaultclient#8.2.8",
+    "werelogs": "scality/werelogs#8.1.0"
   },
   "devDependencies": {
-    "eslint": "^8.14.0",
-    "eslint-config-airbnb-base": "^15.0.0",
-    "eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
+    "eslint": "6.0.1",
+    "eslint-config-airbnb": "17.1.0",
+    "eslint-config-scality": "scality/Guidelines#8.2.0",
     "eslint-plugin-import": "^2.18.0",
-    "mocha": ">=3.1.2",
+    "mocha": "^3.0.2",
     "nodemon": "^2.0.4",
     "protobufjs": "^6.10.1",
     "sinon": "^9.0.2"

View File

@@ -1,2 +0,0 @@
-redis==5.0.3
-requests==2.31.0

View File

@@ -3,16 +3,17 @@ const assert = require('assert');
 const url = require('url');
 const { auth, errors, policies } = require('arsenal');
 const safeJsonParse = require('../utils/safeJsonParse');
+const Vault = require('../lib/Vault');
 
 class Router {
     /**
      * @constructor
      * @param {Config} config - Config instance
      */
-    constructor(config, vault) {
+    constructor(config) {
         this._service = config.component;
         this._routes = {};
-        this._vault = vault;
+        this._vault = new Vault(config);
     }
 
     /**
/** /**

View File

@@ -1,21 +1,4 @@
-const fs = require('fs');
-const path = require('path');
-const Config = require('./lib/Config');
+const config = require('./lib/Config');
 const server = require('./lib/server');
 
-/*
- * By default, the config file is "config.json" at the root.
- * It can be overridden using the UTAPI_CONFIG_FILE environment var.
- */
-const cfgpath = process.env.UTAPI_CONFIG_FILE || (__dirname+'/config.json');
-let cfg;
-try {
-    cfg = JSON.parse(fs.readFileSync(cfgpath, { encoding: 'utf-8' }));
-} catch (err) {
-    throw new Error(`could not parse config file: ${err.message}`);
-}
-cfg.component = 's3';
-server(new Config(cfg));
+server(Object.assign({}, config, { component: 's3' }));

View File

@@ -112,17 +112,6 @@ class BucketD {
         return body;
     }
 
-    _getBucketVersionResponse(bucketName) {
-        const body = {
-            CommonPrefixes: [],
-            IsTruncated: false,
-            Versions: (this._bucketContent[bucketName] || [])
-                // patch in a versionId to more closely match the real response
-                .map(entry => ({ ...entry, versionId: 'null' })),
-        };
-        return body;
-    }
-
     _getShadowBucketOverviewResponse(bucketName) {
         const mpus = (this._bucketContent[bucketName] || []).map(o => ({
             key: o.key,
@@ -148,8 +137,6 @@ class BucketD {
             || req.query.listingType === 'Delimiter'
         ) {
             req.body = this._getBucketResponse(bucketName);
-        } else if (req.query.listingType === 'DelimiterVersions') {
-            req.body = this._getBucketVersionResponse(bucketName);
         }
 
         // v2 reindex uses `Basic` listing type for everything

yarn.lock (new file, 5499 lines)
File diff suppressed because it is too large.