Compare commits

...

46 Commits

Author SHA1 Message Date
Vitaliy Filippov facc276e8b move away require libv2/config from libv2/redis 2024-08-13 02:17:04 +03:00
Vitaliy Filippov c8e3999fb3 Require defaults.json instead of fs.readFileSync 2024-08-13 01:14:02 +03:00
Vitaliy Filippov 9fa777cdba Split require utils to help webpack remove libV2 2024-08-13 01:10:22 +03:00
Vitaliy Filippov e6d48f3b47 Make vault client optional / support receiving its instance from outside 2024-07-23 19:22:54 +03:00
Vitaliy Filippov 0050625f81 Change git dependency URLs 2024-07-21 18:12:40 +03:00
Vitaliy Filippov 0a66c57a0a Remove yarn lock 2024-07-21 17:34:07 +03:00
Vitaliy Filippov 6711c4241a Forget LFS object 2024-07-21 17:34:07 +03:00
Jonathan Gramain 3800e4b185 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-105-useListOfSentinelNodes' into w/8.1/bugfix/UTAPI-105-useListOfSentinelNodes 2024-06-27 10:09:15 -07:00
Jonathan Gramain 20667ff741 Merge remote-tracking branch 'origin/bugfix/UTAPI-105-useListOfSentinelNodes' into w/7.70/bugfix/UTAPI-105-useListOfSentinelNodes 2024-06-27 10:06:43 -07:00
Jonathan Gramain 88d18f3eb6 UTAPI-105 bump version 2024-06-25 15:10:02 -07:00
Jonathan Gramain 426dfd0860 bf: UTAPI-105 UtapiReindex: use list of redis sentinels
Use a list of Redis sentinels that are running on stateful nodes only,
instead of localhost.

Previously, a stateless-only node wouldn't have a local sentinel node
running, causing UtapiReindex to fail.

Added a failover mechanism: on a connection error with the current
sentinel, each of the other sentinels is tried in turn.
2024-06-25 15:10:02 -07:00
bert-e ac4fd2c5f5 Merge branch 'improvement/UTAPI-103/support_reindex_by_account' into tmp/octopus/w/8.1/improvement/UTAPI-103/support_reindex_by_account 2024-06-12 18:28:11 +00:00
Taylor McKinnon 69b94c57aa impr(UTAPI-103): Remove undeclared variable from log message 2024-06-12 11:27:16 -07:00
Taylor McKinnon f5262b7875 impr(UTAPI-103): Support reindexing by account 2024-06-12 11:27:16 -07:00
Taylor McKinnon ee1c0fcd1b impr(UTAPI-103): Support multiple specified buckets and prep for account support 2024-06-12 11:27:16 -07:00
Taylor McKinnon 5efb70dc63 impr(UTAPI-103): Add --dry-run option 2024-06-12 11:27:16 -07:00
Taylor McKinnon 210ba2fd82 impr(UTAPI-103): Add BucketDClient.get_bucket_md() 2024-06-06 12:10:40 -07:00
Taylor McKinnon 34af848b93 impr(UTAPI-103): Add BucketNotFound Exception for _get_bucket_attributes 2024-06-06 12:08:40 -07:00
Taylor McKinnon 402fd406e3 impr(UTAPI-103): Add small LRU cache to BucketDClient._get_bucket_attributes 2024-06-06 12:06:46 -07:00
bert-e f9ae694c0c Merge branch 'w/7.70/bugfix/UTAPI-101/fix_release_workflow' into tmp/octopus/w/8.1/bugfix/UTAPI-101/fix_release_workflow 2024-05-16 17:16:03 +00:00
bert-e 960d990e89 Merge branch 'bugfix/UTAPI-101/fix_release_workflow' into tmp/octopus/w/7.70/bugfix/UTAPI-101/fix_release_workflow 2024-05-16 17:16:03 +00:00
Taylor McKinnon 7fde3488b9 impr(UTAPI-101): Remove secrets: inherit from release workflow 2024-05-15 10:32:38 -07:00
Taylor McKinnon 79c2ff0c72 Merge remote-tracking branch 'origin/w/7.70/bugfix/UTAPI-100/utapi_python_version_fix' into w/8.1/bugfix/UTAPI-100/utapi_python_version_fix 2024-05-07 10:56:37 -07:00
Taylor McKinnon ae904b89bf Merge remote-tracking branch 'origin/bugfix/UTAPI-100/utapi_python_version_fix' into w/7.70/bugfix/UTAPI-100/utapi_python_version_fix 2024-05-07 10:55:23 -07:00
Taylor McKinnon 60db367054 bf(UTAPI-100): Bump version 2024-05-06 11:20:17 -07:00
Taylor McKinnon c9ba521b6d bf(UTAPI-100): Remove use of 3.7+ only parameter 2024-05-06 11:16:58 -07:00
Francois Ferrand ce89418788
Update Release.md for ghcr migration
Issue: UTAPI-99
2024-04-18 15:55:13 +02:00
Francois Ferrand 5faaf493a5
Merge branch 'w/7.70/improvement/VAULT-567' into w/8.1/improvement/VAULT-567 2024-04-18 15:54:58 +02:00
Francois Ferrand da143dba67
Merge branch 'w/7.10/improvement/VAULT-567' into w/7.70/improvement/VAULT-567 2024-04-18 15:54:35 +02:00
Francois Ferrand 6e0ec16f00
Fix caching of python packages
Issue: UTAPI-99
2024-04-18 15:54:04 +02:00
Francois Ferrand 4449f44c9a
Bump github actions
- docker-build@v2
- checkout@v4
- setup-buildx@v3
- setup-node@v4
- setup-python@v5
- login@v3
- build-push@v5
- gh-release@v2
- ssh-to-runner@1.7.0

Issue: UTAPI-99
2024-04-18 15:53:26 +02:00
Francois Ferrand c4e786d6cd
Migrate to ghcr
Issue: UTAPI-99
2024-04-18 15:53:20 +02:00
Francois Ferrand bdb483e6b4
Merge branch 'improvement/UTAPI-99' into w/7.10/improvement/VAULT-567 2024-04-18 15:52:47 +02:00
Francois Ferrand 20916c6f0e
Fix caching of python packages
Issue: UTAPI-99
2024-04-18 15:47:05 +02:00
Francois Ferrand 5976018d0e
Bump github actions
- checkout@v4
- setup-qemu@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5
- login@v3
- build-push@v5
- gh-release@v2

Issue: UTAPI-99
2024-04-17 15:02:44 +02:00
Francois Ferrand 9e1f14ed17
Migrate to ghcr
Issue: UTAPI-99
2024-04-17 14:42:58 +02:00
bert-e 34699432ee Merge branch 'w/7.70/improvement/UTAPI-98/bump-redis' into tmp/octopus/w/8.1/improvement/UTAPI-98/bump-redis 2024-01-22 15:39:38 +00:00
bert-e 438a25982d Merge branch 'improvement/UTAPI-98/bump-redis' into tmp/octopus/w/7.70/improvement/UTAPI-98/bump-redis 2024-01-22 15:39:37 +00:00
Nicolas Humbert 8804e9ff69 UTAPI-98 Bump Redis version 2024-01-22 16:36:01 +01:00
Taylor McKinnon 27e1c44829 Merge remote-tracking branch 'origin/w/7.70/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option' into w/8.1/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option 2023-12-11 09:38:41 -08:00
Taylor McKinnon e8882a28cc Merge remote-tracking branch 'origin/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option' into w/7.70/improvement/UTAPI-97/reindex_only_latest_for_olock_buckets_option 2023-12-11 09:37:25 -08:00
Taylor McKinnon b93998118c impr(UTAPI-97): Bump version 2023-12-11 09:25:01 -08:00
Taylor McKinnon 9195835f70 impr(UTAPI-97): Add config option to reindex only latest version in object locked buckets 2023-12-11 09:25:01 -08:00
bert-e 8dfb06cdbc Merge branch 'w/7.70/improvement/UTAPI-96/switch_to_scality_ssh_action' into tmp/octopus/w/8.1/improvement/UTAPI-96/switch_to_scality_ssh_action 2023-10-09 16:32:55 +00:00
bert-e 934136635e Merge branch 'improvement/UTAPI-96/switch_to_scality_ssh_action' into tmp/octopus/w/7.70/improvement/UTAPI-96/switch_to_scality_ssh_action 2023-10-09 16:32:55 +00:00
Taylor McKinnon 9f36624799 impr(UTAPI-96): Switch to scality/actions/action-ssh-to-runner 2023-10-09 09:30:34 -07:00
30 changed files with 519 additions and 5811 deletions

View File

@ -1,8 +1,7 @@
FROM registry.scality.com/vault-dev/vault:c2607856 FROM ghcr.io/scality/vault:c2607856
ENV VAULT_DB_BACKEND LEVELDB ENV VAULT_DB_BACKEND LEVELDB
RUN chmod 400 tests/utils/keyfile RUN chmod 400 tests/utils/keyfile
ENTRYPOINT yarn start ENTRYPOINT yarn start

View File

@ -2,18 +2,13 @@ name: build-ci-images
on: on:
workflow_call: workflow_call:
secrets:
REGISTRY_LOGIN:
required: true
REGISTRY_PASSWORD:
required: true
jobs: jobs:
warp10-ci: warp10-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1 uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets: secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }} REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} REGISTRY_PASSWORD: ${{ github.token }}
with: with:
name: warp10-ci name: warp10-ci
context: . context: .
@ -21,22 +16,22 @@ jobs:
lfs: true lfs: true
redis-ci: redis-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1 uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets: secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }} REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} REGISTRY_PASSWORD: ${{ github.token }}
with: with:
name: redis-ci name: redis-ci
context: . context: .
file: images/redis/Dockerfile file: images/redis/Dockerfile
redis-replica-ci: redis-replica-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1 uses: scality/workflows/.github/workflows/docker-build.yaml@v2
needs: needs:
- redis-ci - redis-ci
secrets: secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }} REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} REGISTRY_PASSWORD: ${{ github.token }}
with: with:
name: redis-replica-ci name: redis-replica-ci
context: .github/docker/redis-replica context: .github/docker/redis-replica
@ -47,28 +42,21 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2.3.4 uses: actions/checkout@v4
with: with:
lfs: true lfs: true
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0 uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry - name: Login to GitHub Registry
uses: docker/login-action@v1.10.0 uses: docker/login-action@v3
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ github.token }}
- name: Login to Scality Registry
uses: docker/login-action@v1.10.0
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Build and push vault Image - name: Build and push vault Image
uses: docker/build-push-action@v2.7.0 uses: docker/build-push-action@v5
with: with:
push: true push: true
context: .github/docker/vault context: .github/docker/vault

View File

@ -7,9 +7,10 @@ on:
jobs: jobs:
build-dev: build-dev:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1 uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets: inherit secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with: with:
registry: registry.scality.com namespace: ${{ github.repository_owner }}
namespace: utapi-dev name: ${{ github.event.repository.name }}
name: utapi

View File

@ -15,11 +15,9 @@ on:
jobs: jobs:
build: build:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1 uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets: inherit secrets: inherit
with: with:
registry: registry.scality.com
namespace: utapi
name: warp10 name: warp10
context: . context: .
file: images/warp10/Dockerfile file: images/warp10/Dockerfile
@ -31,11 +29,11 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build needs: build
steps: steps:
- uses: softprops/action-gh-release@v1 - uses: softprops/action-gh-release@v2
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with: with:
name: Release registry.scality.com/utapi/warp10:${{ github.event.inputs.tag }}-warp10 name: Release utapi/warp10:${{ github.event.inputs.tag }}-warp10
tag_name: ${{ github.event.inputs.tag }}-warp10 tag_name: ${{ github.event.inputs.tag }}-warp10
generate_release_notes: false generate_release_notes: false
target_commitish: ${{ github.sha }} target_commitish: ${{ github.sha }}

View File

@ -22,12 +22,10 @@ on:
jobs: jobs:
build: build:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1 uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets: inherit
with: with:
registry: registry.scality.com namespace: ${{ github.repository_owner }}
namespace: utapi name: ${{ github.event.repository.name }}
name: utapi
context: . context: .
file: ${{ github.event.inputs.dockerfile}} file: ${{ github.event.inputs.dockerfile}}
tag: ${{ github.event.inputs.tag }} tag: ${{ github.event.inputs.tag }}
@ -37,9 +35,9 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build needs: build
steps: steps:
- uses: softprops/action-gh-release@v1 - uses: softprops/action-gh-release@v2
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ github.token }}
with: with:
name: Release ${{ github.event.inputs.tag }} name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }} tag_name: ${{ github.event.inputs.tag }}

View File

@ -6,21 +6,30 @@ on:
branches-ignore: branches-ignore:
- 'development/**' - 'development/**'
workflow_dispatch:
inputs:
debug:
description: Debug (enable the ability to SSH to runners)
type: boolean
required: false
default: 'false'
connection-timeout-m:
type: number
required: false
description: Timeout for ssh connection to worker (minutes)
default: 30
jobs: jobs:
build-ci: build-ci:
uses: ./.github/workflows/build-ci.yaml uses: ./.github/workflows/build-ci.yaml
secrets:
REGISTRY_LOGIN: ${{ secrets.REGISTRY_LOGIN }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
lint: lint:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
with: with:
lfs: true lfs: true
- uses: actions/setup-node@v2 - uses: actions/setup-node@v4
with: with:
node-version: '16.13.2' node-version: '16.13.2'
cache: yarn cache: yarn
@ -79,7 +88,7 @@ jobs:
--health-timeout 5s --health-timeout 5s
--health-retries 5 --health-retries 5
redis-sentinel: redis-sentinel:
image: bitnami/redis-sentinel:6.2 image: bitnami/redis-sentinel:7.2.4
env: env:
REDIS_MASTER_SET: scality-s3 REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379' REDIS_SENTINEL_PORT_NUMBER: '16379'
@ -110,24 +119,19 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
with: with:
lfs: true lfs: true
- uses: actions/setup-node@v2 - uses: actions/setup-node@v4
with: with:
node-version: '16.13.2' node-version: '16.13.2'
cache: yarn cache: yarn
- uses: actions/setup-python@v2 - uses: actions/setup-python@v5
with: with:
python-version: '3.9' python-version: '3.9'
- uses: actions/cache@v2 cache: pip
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- name: Install python deps - name: Install python deps
run: | run: pip install -r requirements.txt
pip install requests
pip install redis
- name: install dependencies - name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1 run: yarn install --frozen-lockfile --network-concurrency 1
- name: ${{ matrix.test.name }} - name: ${{ matrix.test.name }}
@ -161,7 +165,7 @@ jobs:
--health-timeout 5s --health-timeout 5s
--health-retries 5 --health-retries 5
redis-sentinel: redis-sentinel:
image: bitnami/redis-sentinel:6.2 image: bitnami/redis-sentinel:7.2.4
env: env:
REDIS_MASTER_SET: scality-s3 REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379' REDIS_SENTINEL_PORT_NUMBER: '16379'
@ -204,24 +208,19 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
with: with:
lfs: true lfs: true
- uses: actions/setup-node@v2 - uses: actions/setup-node@v4
with: with:
node-version: '16.13.2' node-version: '16.13.2'
cache: yarn cache: yarn
- uses: actions/setup-python@v2 - uses: actions/setup-python@v5
with: with:
python-version: '3.9' python-version: '3.9'
- uses: actions/cache@v2 cache: pip
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- name: Install python deps - name: Install python deps
run: | run: pip install -r requirements.txt
pip install requests
pip install redis
- name: install dependencies - name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1 run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 for 60 seconds - name: Wait for warp10 for 60 seconds
@ -233,9 +232,16 @@ jobs:
UTAPI_SERVICE_USER_ENABLED: 'true' UTAPI_SERVICE_USER_ENABLED: 'true'
UTAPI_LOG_LEVEL: trace UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server" SETUP_CMD: "run start_v2:server"
- name: Setup tmate session - name: 'Debug: SSH to runner'
uses: mxschmitt/action-tmate@v3 uses: scality/actions/action-ssh-to-runner@1.7.0
if: failure() timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}
tests-v2-without-sensision: tests-v2-without-sensision:
needs: needs:
@ -281,7 +287,7 @@ jobs:
--health-timeout 5s --health-timeout 5s
--health-retries 5 --health-retries 5
redis-sentinel: redis-sentinel:
image: bitnami/redis-sentinel:6.2 image: bitnami/redis-sentinel:7.2.4
env: env:
REDIS_MASTER_SET: scality-s3 REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379' REDIS_SENTINEL_PORT_NUMBER: '16379'
@ -323,24 +329,19 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
with: with:
lfs: true lfs: true
- uses: actions/setup-node@v2 - uses: actions/setup-node@v4
with: with:
node-version: '16.13.2' node-version: '16.13.2'
cache: yarn cache: yarn
- uses: actions/setup-python@v2 - uses: actions/setup-python@v5
with: with:
python-version: '3.9' python-version: '3.9'
- uses: actions/cache@v2 cache: pip
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- name: Install python deps - name: Install python deps
run: | run: pip install -r requirements.txt
pip install requests
pip install redis
- name: install dependencies - name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1 run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 a little bit - name: Wait for warp10 a little bit
@ -348,6 +349,13 @@ jobs:
- name: ${{ matrix.test.name }} - name: ${{ matrix.test.name }}
run: ${{ matrix.test.command }} run: ${{ matrix.test.command }}
env: ${{ matrix.test.env }} env: ${{ matrix.test.env }}
- name: Setup tmate session - name: 'Debug: SSH to runner'
uses: mxschmitt/action-tmate@v3 uses: scality/actions/action-ssh-to-runner@1.7.0
if: failure() timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}

View File

@ -27,7 +27,7 @@ x-models:
services: services:
redis-0: redis-0:
image: redis:6 image: redis:7.2.4
command: redis-server --port 6379 --slave-announce-ip "${EXTERNAL_HOST}" command: redis-server --port 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports: ports:
- 6379:6379 - 6379:6379
@ -35,7 +35,7 @@ services:
- HOST_IP="${EXTERNAL_HOST}" - HOST_IP="${EXTERNAL_HOST}"
redis-1: redis-1:
image: redis:6 image: redis:7.2.4
command: redis-server --port 6380 --slaveof "${EXTERNAL_HOST}" 6379 --slave-announce-ip "${EXTERNAL_HOST}" command: redis-server --port 6380 --slaveof "${EXTERNAL_HOST}" 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports: ports:
- 6380:6380 - 6380:6380
@ -43,7 +43,7 @@ services:
- HOST_IP="${EXTERNAL_HOST}" - HOST_IP="${EXTERNAL_HOST}"
redis-sentinel-0: redis-sentinel-0:
image: redis:6 image: redis:7.2.4
command: |- command: |-
bash -c 'cat > /tmp/sentinel.conf <<EOF bash -c 'cat > /tmp/sentinel.conf <<EOF
port 16379 port 16379

View File

@ -2,11 +2,10 @@
## Docker Image Generation ## Docker Image Generation
Docker images are hosted on [registry.scality.com](registry.scality.com). Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
Utapi has two namespaces there: Utapi has one namespace there:
* Production Namespace: registry.scality.com/utapi * Namespace: ghcr.io/scality/utapi
* Dev Namespace: registry.scality.com/utapi-dev
With every CI build, the CI will push images, tagging the With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash. content with the developer branch's short SHA-1 commit hash.
@ -18,8 +17,8 @@ Tagged versions of utapi will be stored in the production namespace.
## How to Pull Docker Images ## How to Pull Docker Images
```sh ```sh
docker pull registry.scality.com/utapi-dev/utapi:<commit hash> docker pull ghcr.io/scality/utapi:<commit hash>
docker pull registry.scality.com/utapi/utapi:<tag> docker pull ghcr.io/scality/utapi:<tag>
``` ```
## Release Process ## Release Process

View File

@ -1,4 +1,4 @@
FROM registry.scality.com/federation/nodesvc-base:7.10.5.0 FROM ghcr.io/scality/federation/nodesvc-base:7.10.5.0
ENV UTAPI_CONFIG_FILE=${CONF_DIR}/config.json ENV UTAPI_CONFIG_FILE=${CONF_DIR}/config.json

View File

@ -1 +0,0 @@
*.jar filter=lfs diff=lfs merge=lfs -text

View File

@ -13,7 +13,7 @@ RUN apk add zip unzip build-base \
&& cd .. \ && cd .. \
&& go build -a -o /usr/local/go/warp10_sensision_exporter && go build -a -o /usr/local/go/warp10_sensision_exporter
FROM registry.scality.com/utapi/warp10:2.8.1-95-g73e7de80 FROM ghcr.io/scality/utapi/warp10:2.8.1-95-g73e7de80
# Override baked in version # Override baked in version
# Remove when updating to a numbered release # Remove when updating to a numbered release

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:389d2135867c399a389901460c5f2cc09f4857d0c6d08632c2638c25fb150c46
size 15468553

View File

@ -1,35 +1,13 @@
/* eslint-disable no-bitwise */ /* eslint-disable no-bitwise */
const assert = require('assert'); const assert = require('assert');
const fs = require('fs'); const fs = require('fs');
const path = require('path');
/** /**
* Reads from a config file and returns the content as a config object * Reads from a config file and returns the content as a config object
*/ */
class Config { class Config {
constructor() { constructor(config) {
/* this.component = config.component;
* By default, the config file is "config.json" at the root.
* It can be overridden using the UTAPI_CONFIG_FILE environment var.
*/
this._basePath = path.resolve(__dirname, '..');
this.path = `${this._basePath}/config.json`;
if (process.env.UTAPI_CONFIG_FILE !== undefined) {
this.path = process.env.UTAPI_CONFIG_FILE;
}
// Read config automatically
this._getConfig();
}
_getConfig() {
let config;
try {
const data = fs.readFileSync(this.path, { encoding: 'utf-8' });
config = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse config file: ${err.message}`);
}
this.port = 9500; this.port = 9500;
if (config.port !== undefined) { if (config.port !== undefined) {
@ -115,6 +93,13 @@ class Config {
} }
} }
if (config.vaultclient) {
// Instance passed from outside
this.vaultclient = config.vaultclient;
this.vaultd = null;
} else {
// Connection data
this.vaultclient = null;
this.vaultd = {}; this.vaultd = {};
if (config.vaultd) { if (config.vaultd) {
if (config.vaultd.port !== undefined) { if (config.vaultd.port !== undefined) {
@ -129,6 +114,7 @@ class Config {
this.vaultd.host = config.vaultd.host; this.vaultd.host = config.vaultd.host;
} }
} }
}
if (config.certFilePaths) { if (config.certFilePaths) {
assert(typeof config.certFilePaths === 'object' assert(typeof config.certFilePaths === 'object'
@ -141,12 +127,11 @@ class Config {
const { key, cert, ca } = config.certFilePaths const { key, cert, ca } = config.certFilePaths
? config.certFilePaths : {}; ? config.certFilePaths : {};
if (key && cert) { if (key && cert) {
const keypath = (key[0] === '/') ? key : `${this._basePath}/${key}`; const keypath = key;
const certpath = (cert[0] === '/') const certpath = cert;
? cert : `${this._basePath}/${cert}`;
let capath; let capath;
if (ca) { if (ca) {
capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`; capath = ca;
assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK), assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`); `File not found or unreachable: ${capath}`);
} }
@ -172,8 +157,13 @@ class Config {
+ 'expireMetrics must be a boolean'); + 'expireMetrics must be a boolean');
this.expireMetrics = config.expireMetrics; this.expireMetrics = config.expireMetrics;
} }
return config;
if (config.onlyCountLatestWhenObjectLocked !== undefined) {
assert(typeof config.onlyCountLatestWhenObjectLocked === 'boolean',
'bad config: onlyCountLatestWhenObjectLocked must be a boolean');
this.onlyCountLatestWhenObjectLocked = config.onlyCountLatestWhenObjectLocked;
}
} }
} }
module.exports = new Config(); module.exports = Config;

View File

@ -6,8 +6,6 @@ const async = require('async');
const { errors } = require('arsenal'); const { errors } = require('arsenal');
const { getMetricFromKey, getKeys, generateStateKey } = require('./schema'); const { getMetricFromKey, getKeys, generateStateKey } = require('./schema');
const s3metricResponseJSON = require('../models/s3metricResponse'); const s3metricResponseJSON = require('../models/s3metricResponse');
const config = require('./Config');
const Vault = require('./Vault');
const MAX_RANGE_MS = (((1000 * 60) * 60) * 24) * 30; // One month. const MAX_RANGE_MS = (((1000 * 60) * 60) * 24) * 30; // One month.
@ -23,7 +21,6 @@ class ListMetrics {
constructor(metric, component) { constructor(metric, component) {
this.metric = metric; this.metric = metric;
this.service = component; this.service = component;
this.vault = new Vault(config);
} }
/** /**
@ -83,9 +80,10 @@ class ListMetrics {
const resources = validator.get(this.metric); const resources = validator.get(this.metric);
const timeRange = validator.get('timeRange'); const timeRange = validator.get('timeRange');
const datastore = utapiRequest.getDatastore(); const datastore = utapiRequest.getDatastore();
const vault = utapiRequest.getVault();
// map account ids to canonical ids // map account ids to canonical ids
if (this.metric === 'accounts') { if (this.metric === 'accounts') {
return this.vault.getCanonicalIds(resources, log, (err, list) => { return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) { if (err) {
return cb(err); return cb(err);
} }
@ -124,10 +122,11 @@ class ListMetrics {
const fifteenMinutes = 15 * 60 * 1000; // In milliseconds const fifteenMinutes = 15 * 60 * 1000; // In milliseconds
const timeRange = [start - fifteenMinutes, end]; const timeRange = [start - fifteenMinutes, end];
const datastore = utapiRequest.getDatastore(); const datastore = utapiRequest.getDatastore();
const vault = utapiRequest.getVault();
// map account ids to canonical ids // map account ids to canonical ids
if (this.metric === 'accounts') { if (this.metric === 'accounts') {
return this.vault.getCanonicalIds(resources, log, (err, list) => { return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) { if (err) {
return cb(err); return cb(err);
} }

View File

@ -16,15 +16,19 @@ const REINDEX_PYTHON_INTERPRETER = process.env.REINDEX_PYTHON_INTERPRETER !== un
? process.env.REINDEX_PYTHON_INTERPRETER ? process.env.REINDEX_PYTHON_INTERPRETER
: 'python3.7'; : 'python3.7';
const EXIT_CODE_SENTINEL_CONNECTION = 100;
class UtapiReindex { class UtapiReindex {
constructor(config) { constructor(config) {
this._enabled = false; this._enabled = false;
this._schedule = REINDEX_SCHEDULE; this._schedule = REINDEX_SCHEDULE;
this._sentinel = { this._redis = {
host: '127.0.0.1',
port: 16379,
name: 'scality-s3', name: 'scality-s3',
sentinelPassword: '', sentinelPassword: '',
sentinels: [{
host: '127.0.0.1',
port: 16379,
}],
}; };
this._bucketd = { this._bucketd = {
host: '127.0.0.1', host: '127.0.0.1',
@ -42,14 +46,13 @@ class UtapiReindex {
if (config && config.password) { if (config && config.password) {
this._password = config.password; this._password = config.password;
} }
if (config && config.sentinel) { if (config && config.redis) {
const { const {
host, port, name, sentinelPassword, name, sentinelPassword, sentinels,
} = config.sentinel; } = config.redis;
this._sentinel.host = host || this._sentinel.host; this._redis.name = name || this._redis.name;
this._sentinel.port = port || this._sentinel.port; this._redis.sentinelPassword = sentinelPassword || this._redis.sentinelPassword;
this._sentinel.name = name || this._sentinel.name; this._redis.sentinels = sentinels || this._redis.sentinels;
this._sentinel.sentinelPassword = sentinelPassword || this._sentinel.sentinelPassword;
} }
if (config && config.bucketd) { if (config && config.bucketd) {
const { host, port } = config.bucketd; const { host, port } = config.bucketd;
@ -61,17 +64,16 @@ class UtapiReindex {
this._log = new werelogs.Logger('UtapiReindex', { level, dump }); this._log = new werelogs.Logger('UtapiReindex', { level, dump });
} }
this._onlyCountLatestWhenObjectLocked = (config && config.onlyCountLatestWhenObjectLocked === true);
this._requestLogger = this._log.newRequestLogger(); this._requestLogger = this._log.newRequestLogger();
} }
_getRedisClient() { _getRedisClient() {
const client = new RedisClient({ const client = new RedisClient({
sentinels: [{ sentinels: this._redis.sentinels,
host: this._sentinel.host, name: this._redis.name,
port: this._sentinel.port, sentinelPassword: this._redis.sentinelPassword,
}],
name: this._sentinel.name,
sentinelPassword: this._sentinel.sentinelPassword,
password: this._password, password: this._password,
}); });
client.connect(); client.connect();
@ -86,17 +88,18 @@ class UtapiReindex {
return this.ds.del(REINDEX_LOCK_KEY); return this.ds.del(REINDEX_LOCK_KEY);
} }
_buildFlags() { _buildFlags(sentinel) {
const flags = { const flags = {
/* eslint-disable camelcase */ /* eslint-disable camelcase */
sentinel_ip: this._sentinel.host, sentinel_ip: sentinel.host,
sentinel_port: this._sentinel.port, sentinel_port: sentinel.port,
sentinel_cluster_name: this._sentinel.name, sentinel_cluster_name: this._redis.name,
bucketd_addr: `http://${this._bucketd.host}:${this._bucketd.port}`, bucketd_addr: `http://${this._bucketd.host}:${this._bucketd.port}`,
}; };
if (this._sentinel.sentinelPassword) { if (this._redis.sentinelPassword) {
flags.redis_password = this._sentinel.sentinelPassword; flags.redis_password = this._redis.sentinelPassword;
} }
/* eslint-enable camelcase */ /* eslint-enable camelcase */
const opts = []; const opts = [];
Object.keys(flags) Object.keys(flags)
@ -105,11 +108,15 @@ class UtapiReindex {
opts.push(name); opts.push(name);
opts.push(flags[flag]); opts.push(flags[flag]);
}); });
if (this._onlyCountLatestWhenObjectLocked) {
opts.push('--only-latest-when-locked');
}
return opts; return opts;
} }
_runScript(path, done) { _runScriptWithSentinels(path, remainingSentinels, done) {
const flags = this._buildFlags(); const flags = this._buildFlags(remainingSentinels.shift());
this._requestLogger.debug(`launching subprocess ${path} with flags: ${flags}`); this._requestLogger.debug(`launching subprocess ${path} with flags: ${flags}`);
const process = childProcess.spawn(REINDEX_PYTHON_INTERPRETER, [path, ...flags]); const process = childProcess.spawn(REINDEX_PYTHON_INTERPRETER, [path, ...flags]);
process.stdout.on('data', data => { process.stdout.on('data', data => {
@ -136,6 +143,17 @@ class UtapiReindex {
statusCode: code, statusCode: code,
script: path, script: path,
}); });
if (code === EXIT_CODE_SENTINEL_CONNECTION) {
if (remainingSentinels.length > 0) {
this._requestLogger.info('retrying with next sentinel host', {
script: path,
});
return this._runScriptWithSentinels(path, remainingSentinels, done);
}
this._requestLogger.error('no more sentinel host to try', {
script: path,
});
}
} else { } else {
this._requestLogger.info('script exited successfully', { this._requestLogger.info('script exited successfully', {
statusCode: code, statusCode: code,
@ -146,6 +164,11 @@ class UtapiReindex {
}); });
} }
_runScript(path, done) {
const remainingSentinels = [...this._redis.sentinels];
this._runScriptWithSentinels(path, remainingSentinels, done);
}
_attemptLock(job) { _attemptLock(job) {
this._requestLogger.info('attempting to acquire the lock to begin job'); this._requestLogger.info('attempting to acquire the lock to begin job');
this._lock() this._lock()
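
The retry added above (UTAPI-105) spawns the reindex script against one sentinel and, when the child exits with the dedicated code 100, tries the next sentinel in the list. Below is a minimal standalone sketch of that loop in Python rather than the Node.js `_runScriptWithSentinels` shown in the diff; the `run_with_sentinel_failover` helper and the exact flag names passed to the script are illustrative assumptions.

```python
import subprocess

# Exit code the reindex scripts use to signal a sentinel connection error
# (EXIT_CODE_SENTINEL_CONNECTION in the diff above).
EXIT_CODE_SENTINEL_CONNECTION = 100

def run_with_sentinel_failover(script_path, sentinels, interpreter='python3.7'):
    """Run the script against each sentinel in turn until it stops failing with code 100."""
    code = EXIT_CODE_SENTINEL_CONNECTION
    for sentinel in sentinels:
        cmd = [
            interpreter, script_path,
            # flag names assumed from the script's argparse options
            '--sentinel-ip', str(sentinel['host']),
            '--sentinel-port', str(sentinel['port']),
        ]
        code = subprocess.call(cmd)
        if code != EXIT_CODE_SENTINEL_CONNECTION:
            break  # success, or a failure unrelated to the sentinel connection
        # connection error on this sentinel: fall through and try the next one
    return code

# Defaults mirroring UtapiReindex's built-in sentinel list
sentinels = [{'host': '127.0.0.1', 'port': 16379}]
```

Any exit code other than 100 ends the loop, so genuine script failures are not retried against the remaining sentinels.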

View File

@ -14,6 +14,15 @@ class UtapiRequest {
this._datastore = null; this._datastore = null;
this._requestQuery = null; this._requestQuery = null;
this._requestPath = null; this._requestPath = null;
this._vault = null;
}
getVault() {
return this._vault;
}
setVault() {
return this._vault;
} }
/** /**

View File

@ -1,16 +1,21 @@
import requests import argparse
import redis
import json
import ast import ast
import sys from concurrent.futures import ThreadPoolExecutor
import time import json
import urllib import logging
import re import re
import redis
import requests
import sys import sys
from threading import Thread from threading import Thread
from concurrent.futures import ThreadPoolExecutor import time
import urllib
import argparse logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('utapi-reindex:reporting')
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options(): def get_options():
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
@ -29,8 +34,19 @@ class askRedis():
def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None): def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
self._password = password self._password = password
r = redis.Redis(host=ip, port=port, db=0, password=password) r = redis.Redis(
host=ip,
port=port,
db=0,
password=password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
)
try:
self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name) self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {ip}:{port}: {e}')
# use a specific error code to hint on retrying with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
def read(self, resource, name): def read(self, resource, name):
r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password) r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)
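
The change above gives the sentinel lookup a bounded connect timeout and a dedicated exit code, so the Node.js caller can fail over to another sentinel. The same pattern, distilled from the class method into a standalone helper (a sketch assuming the `redis` Python package; the `resolve_master` name is hypothetical, and the defaults mirror the script's):

```python
import sys

import redis  # redis-py, as used by the reporting script above

SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100

def resolve_master(ip='127.0.0.1', port=16379, cluster_name='scality-s3', password=None):
    """Ask one sentinel for the current master address; exit with code 100 if it is unreachable."""
    sentinel = redis.Redis(host=ip, port=port, db=0, password=password,
                           socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS)
    try:
        return sentinel.sentinel_get_master_addr_by_name(cluster_name)
    except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as exc:
        print(f'Failed to connect to redis sentinel at {ip}:{port}: {exc}', file=sys.stderr)
        # dedicated exit code so the caller can retry with another sentinel node
        sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
```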

View File

@ -1,5 +1,6 @@
import argparse import argparse
import concurrent.futures as futures import concurrent.futures as futures
import functools
import itertools import itertools
import json import json
import logging import logging
@ -8,9 +9,9 @@ import re
import sys import sys
import time import time
import urllib import urllib
from pathlib import Path
from collections import defaultdict, namedtuple from collections import defaultdict, namedtuple
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
from pprint import pprint
import redis import redis
import requests import requests
@ -24,6 +25,9 @@ MPU_SHADOW_BUCKET_PREFIX = 'mpuShadowBucket'
ACCOUNT_UPDATE_CHUNKSIZE = 100 ACCOUNT_UPDATE_CHUNKSIZE = 100
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options(): def get_options():
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP") parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
@ -32,9 +36,38 @@ def get_options():
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name") parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-s", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server") parser.add_argument("-s", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
parser.add_argument("-w", "--worker", default=10, type=int, help="Number of workers") parser.add_argument("-w", "--worker", default=10, type=int, help="Number of workers")
parser.add_argument("-b", "--bucket", default=None, help="Bucket to be processed")
parser.add_argument("-r", "--max-retries", default=2, type=int, help="Max retries before failing a bucketd request") parser.add_argument("-r", "--max-retries", default=2, type=int, help="Max retries before failing a bucketd request")
return parser.parse_args() parser.add_argument("--only-latest-when-locked", action='store_true', help="Only index the latest version of a key when the bucket has a default object lock policy")
parser.add_argument("--debug", action='store_true', help="Enable debug logging")
parser.add_argument("--dry-run", action="store_true", help="Do not update redis")
group = parser.add_mutually_exclusive_group()
group.add_argument("-a", "--account", default=[], help="account canonical ID (all account buckets will be processed)", action="append", type=nonempty_string('account'))
group.add_argument("--account-file", default=None, help="file containing account canonical IDs, one ID per line", type=existing_file)
group.add_argument("-b", "--bucket", default=[], help="bucket name", action="append", type=nonempty_string('bucket'))
group.add_argument("--bucket-file", default=None, help="file containing bucket names, one bucket name per line", type=existing_file)
options = parser.parse_args()
if options.bucket_file:
with open(options.bucket_file) as f:
options.bucket = [line.strip() for line in f if line.strip()]
elif options.account_file:
with open(options.account_file) as f:
options.account = [line.strip() for line in f if line.strip()]
return options
def nonempty_string(flag):
def inner(value):
if not value.strip():
raise argparse.ArgumentTypeError("%s: value must not be empty"%flag)
return value
return inner
def existing_file(path):
path = Path(path).resolve()
if not path.exists():
raise argparse.ArgumentTypeError("File does not exist: %s"%path)
return path
def chunks(iterable, size): def chunks(iterable, size):
it = iter(iterable) it = iter(iterable)
@ -49,7 +82,7 @@ def _encoded(func):
return urllib.parse.quote(val.encode('utf-8')) return urllib.parse.quote(val.encode('utf-8'))
return inner return inner
Bucket = namedtuple('Bucket', ['userid', 'name']) Bucket = namedtuple('Bucket', ['userid', 'name', 'object_lock_enabled'])
MPU = namedtuple('MPU', ['bucket', 'key', 'upload_id']) MPU = namedtuple('MPU', ['bucket', 'key', 'upload_id'])
BucketContents = namedtuple('BucketContents', ['bucket', 'obj_count', 'total_size']) BucketContents = namedtuple('BucketContents', ['bucket', 'obj_count', 'total_size'])
@ -61,15 +94,21 @@ class InvalidListing(Exception):
def __init__(self, bucket): def __init__(self, bucket):
super().__init__('Invalid contents found while listing bucket %s'%bucket) super().__init__('Invalid contents found while listing bucket %s'%bucket)
class BucketNotFound(Exception):
def __init__(self, bucket):
super().__init__('Bucket %s not found'%bucket)
class BucketDClient: class BucketDClient:
'''Performs Listing calls against bucketd''' '''Performs Listing calls against bucketd'''
__url_format = '{addr}/default/bucket/{bucket}' __url_attribute_format = '{addr}/default/attributes/{bucket}'
__url_bucket_format = '{addr}/default/bucket/{bucket}'
__headers = {"x-scal-request-uids": "utapi-reindex-list-buckets"} __headers = {"x-scal-request-uids": "utapi-reindex-list-buckets"}
def __init__(self, bucketd_addr=None, max_retries=2): def __init__(self, bucketd_addr=None, max_retries=2, only_latest_when_locked=False):
self._bucketd_addr = bucketd_addr self._bucketd_addr = bucketd_addr
self._max_retries = max_retries self._max_retries = max_retries
self._only_latest_when_locked = only_latest_when_locked
self._session = requests.Session() self._session = requests.Session()
def _do_req(self, url, check_500=True, **kwargs): def _do_req(self, url, check_500=True, **kwargs):
@ -101,7 +140,7 @@ class BucketDClient:
parameters value. On the first request the function will be called with parameters value. On the first request the function will be called with
`None` and should return its initial value. Return `None` for the param to be excluded. `None` and should return its initial value. Return `None` for the param to be excluded.
''' '''
url = self.__url_format.format(addr=self._bucketd_addr, bucket=bucket) url = self.__url_bucket_format.format(addr=self._bucketd_addr, bucket=bucket)
static_params = {k: v for k, v in kwargs.items() if not callable(v)} static_params = {k: v for k, v in kwargs.items() if not callable(v)}
dynamic_params = {k: v for k, v in kwargs.items() if callable(v)} dynamic_params = {k: v for k, v in kwargs.items() if callable(v)}
is_truncated = True # Set to True for first loop is_truncated = True # Set to True for first loop
@ -114,6 +153,9 @@ class BucketDClient:
_log.debug('listing bucket bucket: %s params: %s'%( _log.debug('listing bucket bucket: %s params: %s'%(
bucket, ', '.join('%s=%s'%p for p in params.items()))) bucket, ', '.join('%s=%s'%p for p in params.items())))
resp = self._do_req(url, params=params) resp = self._do_req(url, params=params)
if resp.status_code == 404:
_log.debug('Bucket not found bucket: %s'%bucket)
return
if resp.status_code == 200: if resp.status_code == 200:
payload = resp.json() payload = resp.json()
except ValueError as e: except ValueError as e:
@ -135,7 +177,37 @@ class BucketDClient:
else: else:
is_truncated = len(payload) > 0 is_truncated = len(payload) > 0
def list_buckets(self, name = None): @functools.lru_cache(maxsize=16)
def _get_bucket_attributes(self, name):
url = self.__url_attribute_format.format(addr=self._bucketd_addr, bucket=name)
try:
resp = self._do_req(url)
if resp.status_code == 200:
return resp.json()
else:
_log.error('Error getting bucket attributes bucket:%s status_code:%s'%(name, resp.status_code))
raise BucketNotFound(name)
except ValueError as e:
_log.exception(e)
_log.error('Invalid attributes response body! bucket:%s'%name)
raise
except MaxRetriesReached:
_log.error('Max retries reached getting bucket attributes bucket:%s'%name)
raise
except Exception as e:
_log.exception(e)
_log.error('Unhandled exception getting bucket attributes bucket:%s'%name)
raise
def get_bucket_md(self, name):
md = self._get_bucket_attributes(name)
canonId = md.get('owner')
if canonId is None:
_log.error('No owner found for bucket %s'%name)
raise InvalidListing(name)
return Bucket(canonId, name, md.get('objectLockEnabled', False))
def list_buckets(self, account=None):
def get_next_marker(p): def get_next_marker(p):
if p is None: if p is None:
@ -147,19 +219,24 @@ class BucketDClient:
'maxKeys': 1000, 'maxKeys': 1000,
'marker': get_next_marker 'marker': get_next_marker
} }
if account is not None:
params['prefix'] = '%s..|..' % account
for _, payload in self._list_bucket(USERS_BUCKET, **params): for _, payload in self._list_bucket(USERS_BUCKET, **params):
buckets = [] buckets = []
for result in payload['Contents']: for result in payload.get('Contents', []):
match = re.match("(\w+)..\|..(\w+.*)", result['key']) match = re.match("(\w+)..\|..(\w+.*)", result['key'])
bucket = Bucket(*match.groups()) bucket = Bucket(*match.groups(), False)
if name is None or bucket.name == name: # We need to get the attributes for each bucket to determine if it is locked
if self._only_latest_when_locked:
bucket_attrs = self._get_bucket_attributes(bucket.name)
object_lock_enabled = bucket_attrs.get('objectLockEnabled', False)
bucket = bucket._replace(object_lock_enabled=object_lock_enabled)
buckets.append(bucket) buckets.append(bucket)
if buckets: if buckets:
yield buckets yield buckets
if name is not None:
# Break on the first matching bucket if a name is given
break
def list_mpus(self, bucket): def list_mpus(self, bucket):
_bucket = MPU_SHADOW_BUCKET_PREFIX + bucket.name _bucket = MPU_SHADOW_BUCKET_PREFIX + bucket.name
@ -196,18 +273,12 @@ class BucketDClient:
upload_id=key['value']['UploadId'])) upload_id=key['value']['UploadId']))
return keys return keys
def _sum_objects(self, bucket, listing): def _sum_objects(self, bucket, listing, only_latest_when_locked = False):
count = 0 count = 0
total_size = 0 total_size = 0
last_master = None last_key = None
last_size = None try:
for status_code, payload in listing: for obj in listing:
contents = payload['Contents'] if isinstance(payload, dict) else payload
if contents is None:
_log.error('Invalid contents in listing. bucket:%s status_code:%s'%(bucket, status_code))
raise InvalidListing(bucket)
for obj in contents:
count += 1
if isinstance(obj['value'], dict): if isinstance(obj['value'], dict):
# bucketd v6 returns a dict: # bucketd v6 returns a dict:
data = obj.get('value', {}) data = obj.get('value', {})
@ -216,39 +287,51 @@ class BucketDClient:
# bucketd v7 returns an encoded string # bucketd v7 returns an encoded string
data = json.loads(obj['value']) data = json.loads(obj['value'])
size = data.get('content-length', 0) size = data.get('content-length', 0)
is_latest = obj['key'] != last_key
last_key = obj['key']
if only_latest_when_locked and bucket.object_lock_enabled and not is_latest:
_log.debug('Skipping versioned key: %s'%obj['key'])
continue
count += 1
total_size += size total_size += size
# If versioned, subtract the size of the master to avoid double counting except InvalidListing:
if last_master is not None and obj['key'].startswith(last_master + '\x00'): _log.error('Invalid contents in listing. bucket:%s'%bucket.name)
_log.debug('Detected versioned key: %s - subtracting master size: %i'% ( raise InvalidListing(bucket.name)
obj['key'],
last_size,
))
total_size -= last_size
count -= 1
last_master = None
# Only save master versions
elif '\x00' not in obj['key']:
last_master = obj['key']
last_size = size
return count, total_size return count, total_size
def _extract_listing(self, key, listing):
for status_code, payload in listing:
contents = payload[key] if isinstance(payload, dict) else payload
if contents is None:
raise InvalidListing('')
for obj in contents:
yield obj
def count_bucket_contents(self, bucket): def count_bucket_contents(self, bucket):
def get_next_marker(p): def get_key_marker(p):
if p is None or len(p) == 0: if p is None:
return '' return ''
return p[-1].get('key', '') return p.get('NextKeyMarker', '')
def get_vid_marker(p):
if p is None:
return ''
return p.get('NextVersionIdMarker', '')
params = { params = {
'listingType': 'Basic', 'listingType': 'DelimiterVersions',
'maxKeys': 1000, 'maxKeys': 1000,
'gt': get_next_marker, 'keyMarker': get_key_marker,
'versionIdMarker': get_vid_marker,
} }
count, total_size = self._sum_objects(bucket.name, self._list_bucket(bucket.name, **params)) listing = self._list_bucket(bucket.name, **params)
count, total_size = self._sum_objects(bucket, self._extract_listing('Versions', listing), self._only_latest_when_locked)
return BucketContents( return BucketContents(
bucket=bucket, bucket=bucket,
obj_count=count, obj_count=count,
@ -256,7 +339,8 @@ class BucketDClient:
) )
def count_mpu_parts(self, mpu): def count_mpu_parts(self, mpu):
_bucket = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name shadow_bucket_name = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name
shadow_bucket = mpu.bucket._replace(name=shadow_bucket_name)
def get_prefix(p): def get_prefix(p):
if p is None: if p is None:
@ -276,13 +360,31 @@ class BucketDClient:
'listingType': 'Delimiter', 'listingType': 'Delimiter',
} }
count, total_size = self._sum_objects(_bucket, self._list_bucket(_bucket, **params)) listing = self._list_bucket(shadow_bucket_name, **params)
count, total_size = self._sum_objects(shadow_bucket, self._extract_listing('Contents', listing))
return BucketContents( return BucketContents(
bucket=mpu.bucket._replace(name=_bucket), bucket=shadow_bucket,
obj_count=0, # MPU parts are not counted towards numberOfObjects obj_count=0, # MPU parts are not counted towards numberOfObjects
total_size=total_size total_size=total_size
) )
def list_all_buckets(bucket_client):
return bucket_client.list_buckets()
def list_specific_accounts(bucket_client, accounts):
for account in accounts:
yield from bucket_client.list_buckets(account=account)
def list_specific_buckets(bucket_client, buckets):
batch = []
for bucket in buckets:
try:
batch.append(bucket_client.get_bucket_md(bucket))
except BucketNotFound:
_log.error('Failed to list bucket %s. Removing from results.'%bucket)
continue
yield batch
def index_bucket(client, bucket): def index_bucket(client, bucket):
''' '''
@ -322,9 +424,16 @@ def get_redis_client(options):
host=options.sentinel_ip, host=options.sentinel_ip,
port=options.sentinel_port, port=options.sentinel_port,
db=0, db=0,
password=options.redis_password password=options.redis_password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
) )
try:
ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name) ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {options.sentinel_ip}:{options.sentinel_port}: {e}')
# use a specific error code to hint on retrying with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
return redis.Redis( return redis.Redis(
host=ip, host=ip,
port=port, port=port,
@ -358,16 +467,24 @@ def log_report(resource, name, obj_count, total_size):
if __name__ == '__main__': if __name__ == '__main__':
options = get_options() options = get_options()
if options.bucket is not None and not options.bucket.strip(): if options.debug:
print('You must provide a bucket name with the --bucket flag') _log.setLevel(logging.DEBUG)
sys.exit(1)
bucket_client = BucketDClient(options.bucketd_addr, options.max_retries) bucket_client = BucketDClient(options.bucketd_addr, options.max_retries, options.only_latest_when_locked)
redis_client = get_redis_client(options) redis_client = get_redis_client(options)
account_reports = {} account_reports = {}
observed_buckets = set() observed_buckets = set()
failed_accounts = set() failed_accounts = set()
if options.account:
batch_generator = list_specific_accounts(bucket_client, options.account)
elif options.bucket:
batch_generator = list_specific_buckets(bucket_client, options.bucket)
else:
batch_generator = list_all_buckets(bucket_client)
with ThreadPoolExecutor(max_workers=options.worker) as executor: with ThreadPoolExecutor(max_workers=options.worker) as executor:
for batch in bucket_client.list_buckets(options.bucket): for batch in batch_generator:
bucket_reports = {} bucket_reports = {}
jobs = { executor.submit(index_bucket, bucket_client, b): b for b in batch } jobs = { executor.submit(index_bucket, bucket_client, b): b for b in batch }
for job in futures.as_completed(jobs.keys()): for job in futures.as_completed(jobs.keys()):
@ -386,22 +503,33 @@ if __name__ == '__main__':
update_report(account_reports, total.bucket.userid, total.obj_count, total.total_size) update_report(account_reports, total.bucket.userid, total.obj_count, total.total_size)
# Bucket reports can be updated as we get them # Bucket reports can be updated as we get them
if options.dry_run:
for bucket, report in bucket_reports.items():
_log.info(
"DryRun: resource buckets [%s] would be updated with obj_count %i and total_size %i" % (
bucket, report['obj_count'], report['total_size']
)
)
else:
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket, report in bucket_reports.items(): for bucket, report in bucket_reports.items():
update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size']) update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size'])
log_report('buckets', bucket, report['obj_count'], report['total_size']) log_report('buckets', bucket, report['obj_count'], report['total_size'])
pipeline.execute() pipeline.execute()
recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
if options.bucket is None:
stale_buckets = recorded_buckets.difference(observed_buckets)
elif observed_buckets and options.bucket not in recorded_buckets:
# The provided bucket does not exist, so clean up any metrics
stale_buckets = { options.bucket }
else:
stale_buckets = set() stale_buckets = set()
recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
if options.bucket:
stale_buckets = { b for b in options.bucket if b not in observed_buckets }
elif options.account:
_log.warning('Stale buckets will not be cleared when using the --account or --account-file flags')
else:
stale_buckets = recorded_buckets.difference(observed_buckets)
_log.info('Found %s stale buckets' % len(stale_buckets)) _log.info('Found %s stale buckets' % len(stale_buckets))
if options.dry_run:
_log.info("DryRun: not updating stale buckets")
else:
for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE): for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket in chunk: for bucket in chunk:
@ -410,9 +538,19 @@ if __name__ == '__main__':
pipeline.execute() pipeline.execute()
# Account metrics are not updated if a bucket is specified # Account metrics are not updated if a bucket is specified
if options.bucket is None: if options.bucket:
_log.warning('Account metrics will not be updated when using the --bucket or --bucket-file flags')
else:
# Don't update any accounts with failed listings # Don't update any accounts with failed listings
without_failed = filter(lambda x: x[0] not in failed_accounts, account_reports.items()) without_failed = filter(lambda x: x[0] not in failed_accounts, account_reports.items())
if options.dry_run:
for userid, report in account_reports.items():
_log.info(
"DryRun: resource account [%s] would be updated with obj_count %i and total_size %i" % (
userid, report['obj_count'], report['total_size']
)
)
else:
# Update total account reports in chunks # Update total account reports in chunks
for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE): for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
@ -421,13 +559,25 @@ if __name__ == '__main__':
log_report('accounts', userid, report['obj_count'], report['total_size']) log_report('accounts', userid, report['obj_count'], report['total_size'])
pipeline.execute() pipeline.execute()
if options.account:
for account in options.account:
if account in failed_accounts:
_log.error("No metrics updated for account %s, one or more buckets failed" % account)
# Include failed_accounts in observed_accounts to avoid clearing metrics # Include failed_accounts in observed_accounts to avoid clearing metrics
observed_accounts = failed_accounts.union(set(account_reports.keys())) observed_accounts = failed_accounts.union(set(account_reports.keys()))
recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts')) recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts'))
if options.account:
stale_accounts = { a for a in options.account if a not in observed_accounts }
else:
# Stale accounts and buckets are ones that do not appear in the listing, but have recorded values # Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
stale_accounts = recorded_accounts.difference(observed_accounts) stale_accounts = recorded_accounts.difference(observed_accounts)
_log.info('Found %s stale accounts' % len(stale_accounts)) _log.info('Found %s stale accounts' % len(stale_accounts))
if options.dry_run:
_log.info("DryRun: not updating stale accounts")
else:
for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE): for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for account in chunk: for account in chunk:
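
With the `DelimiterVersions` listing used above, all versions of a key arrive consecutively, so counting only the latest version in object-locked buckets reduces to comparing each entry's key with the previous one: the first entry for a key is its current version, and later entries for the same key can be skipped. A small self-contained sketch of that accumulation (the `sum_versions` name and the sample listing are hypothetical; the entry handling follows the bucketd v6 dict / v7 encoded-string cases in the diff):

```python
import json

def sum_versions(listing, only_latest=True):
    """Count objects and bytes from a versions listing, optionally keeping
    only the newest version of each key (the first entry seen for that key)."""
    count = 0
    total_size = 0
    last_key = None
    for obj in listing:
        # bucketd v6 returns a dict, v7 an encoded JSON string (as in the diff above)
        value = obj['value'] if isinstance(obj['value'], dict) else json.loads(obj['value'])
        size = value.get('content-length', 0)
        is_latest = obj['key'] != last_key
        last_key = obj['key']
        if only_latest and not is_latest:
            continue  # an older version of a key that was already counted
        count += 1
        total_size += size
    return count, total_size

# Hypothetical sample: two versions of "a.txt", one version of "b.txt"
listing = [
    {'key': 'a.txt', 'value': {'content-length': 10}},  # current version
    {'key': 'a.txt', 'value': {'content-length': 8}},   # older version of the same key
    {'key': 'b.txt', 'value': {'content-length': 5}},
]
print(sum_versions(listing))                     # (2, 15) -- only latest versions
print(sum_versions(listing, only_latest=False))  # (3, 23) -- every version counted
```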

View File

@ -7,7 +7,6 @@ const { Clustering, errors, ipCheck } = require('arsenal');
const arsenalHttps = require('arsenal').https; const arsenalHttps = require('arsenal').https;
const { Logger } = require('werelogs'); const { Logger } = require('werelogs');
const config = require('./Config');
const routes = require('../router/routes'); const routes = require('../router/routes');
const Route = require('../router/Route'); const Route = require('../router/Route');
const Router = require('../router/Router'); const Router = require('../router/Router');
@@ -28,7 +27,12 @@ class UtapiServer {
    constructor(worker, port, datastore, logger, config) {
        this.worker = worker;
        this.port = port;
-       this.router = new Router(config);
+       this.vault = config.vaultclient;
+       if (!this.vault) {
+           const Vault = require('./Vault');
+           this.vault = new Vault(config);
+       }
+       this.router = new Router(config, this.vault);
        this.logger = logger;
        this.datastore = datastore;
        this.server = null;
@@ -71,6 +75,7 @@ class UtapiServer {
        req.socket.setNoDelay();
        const { query, path, pathname } = url.parse(req.url, true);
        const utapiRequest = new UtapiRequest()
+           .setVault(this.vault)
            .setRequest(req)
            .setLog(this.logger.newRequestLogger())
            .setResponse(res)
@@ -214,8 +219,7 @@ class UtapiServer {
 * @property {object} params.log - logger configuration
 * @return {undefined}
 */
-function spawn(params) {
-   Object.assign(config, params);
+function spawn(config) {
    const {
        workers, redis, log, port,
    } = config;
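A minimal usage sketch of the injection path introduced above. Only the `config.vaultclient` check, the `spawn(config)` signature and the require paths come from the diffs in this comparison; the empty stand-in object and the assumption that a `vaultclient` property set on the Config instance reaches the UtapiServer constructor untouched are illustrative, not part of the commit.

    // Hedged sketch: supply a pre-built, Vault-compatible client so that
    // UtapiServer skips constructing its own lib/Vault instance.
    const Config = require('./lib/Config');   // same paths as in the root entry point diff further below
    const server = require('./lib/server');   // exports the spawn() shown above

    // Hypothetical stand-in for an externally constructed vault client; it must
    // implement whatever interface the router and auth handler call on it.
    const externalVault = {};

    const config = new Config({ component: 's3' });
    config.vaultclient = externalVault;       // picked up by `this.vault = config.vaultclient;`
    server(config);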

View File

@@ -23,10 +23,6 @@
    "healthChecks": {
        "allowFrom": ["127.0.0.1/8", "::1"]
    },
-   "vaultd": {
-       "host": "127.0.0.1",
-       "port": 8500
-   },
    "cacheBackend": "memory",
    "development": false,
    "nodeId": "single_node",

View File

@@ -2,6 +2,8 @@ const fs = require('fs');
const path = require('path');
const Joi = require('@hapi/joi');
const assert = require('assert');
+const defaults = require('./defaults.json');
+const werelogs = require('werelogs');
const {
    truthy, envNamespace, allowedFilterFields, allowedFilterStates,
@@ -71,7 +73,6 @@ class Config {
    constructor(overrides) {
        this._basePath = path.join(__dirname, '../../');
        this._configPath = _loadFromEnv('CONFIG_FILE', defaultConfigPath);
-       this._defaultsPath = path.join(__dirname, 'defaults.json');
        this.host = undefined;
        this.port = undefined;
@@ -89,6 +90,11 @@ class Config {
            parsedConfig = this._recursiveUpdate(parsedConfig, overrides);
        }
        Object.assign(this, parsedConfig);
+       werelogs.configure({
+           level: Config.logging.level,
+           dump: Config.logging.dumpLevel,
+       });
    }
    static _readFile(path, encoding = 'utf-8') {
@@ -113,7 +119,7 @@ class Config {
    }
    _loadDefaults() {
-       return Config._readJSON(this._defaultsPath);
+       return defaults;
    }
    _loadUserConfig() {

View File

@@ -6,7 +6,8 @@ const BackOff = require('backo');
const { whilst } = require('async');
const errors = require('./errors');
-const { LoggerContext, asyncOrCallback } = require('./utils');
+const { LoggerContext } = require('./utils/log');
+const { asyncOrCallback } = require('./utils/func');
const moduleLogger = new LoggerContext({
    module: 'redis',

View File

@@ -1,14 +1,6 @@
const werelogs = require('werelogs');
-const config = require('../config');
const { comprehend } = require('./func');
-const loggerConfig = {
-    level: config.logging.level,
-    dump: config.logging.dumpLevel,
-};
-werelogs.configure(loggerConfig);
const rootLogger = new werelogs.Logger('Utapi');
class LoggerContext {
@@ -78,8 +70,6 @@ class LoggerContext {
    }
}
-rootLogger.debug('logger initialized', { loggerConfig });
function buildRequestLogger(req) {
    let reqUids = [];
    if (req.headers['x-scal-request-uids'] !== undefined) {

View File

@@ -1,6 +1,5 @@
const assert = require('assert');
const { auth, policies } = require('arsenal');
-const vaultclient = require('vaultclient');
const config = require('../config');
const errors = require('../errors');
/**
@@ -9,9 +8,17 @@ const errors = require('../errors');
 */
class VaultWrapper extends auth.Vault {
+   create(config) {
+       if (config.vaultd.host) {
+           return new VaultWrapper(config);
+       }
+       return null;
+   }
    constructor(options) {
        let client;
        const { host, port } = options.vaultd;
+       const vaultclient = require('vaultclient');
        if (options.tls) {
            const { key, cert, ca } = options.tls;
            client = new vaultclient.Client(host, port, true, key, cert,
@@ -119,7 +126,7 @@ class VaultWrapper extends auth.Vault {
    }
}
-const vault = new VaultWrapper(config);
+const vault = VaultWrapper.create(config);
auth.setHandler(vault);
module.exports = {
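The new create() factory is what makes vaultclient optional: require('vaultclient') now runs only inside the constructor, which is reached only when a vaultd endpoint is configured. The call site `VaultWrapper.create(config)` implies the method is declared static in the full source. A hedged sketch of the resulting behaviour; the host/port values are simply the ones that used to live in the removed config.json defaults and are shown for illustration only.

    // Hedged sketch, assuming create() is reachable as a static method.
    const withVault = VaultWrapper.create({ vaultd: { host: '127.0.0.1', port: 8500 } });
    // -> a VaultWrapper; 'vaultclient' is require()d lazily inside its constructor.

    const withoutVault = VaultWrapper.create({ vaultd: {} });
    // -> null; 'vaultclient' is never loaded, so the package can be absent entirely.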

View File

@@ -3,7 +3,7 @@
    "engines": {
        "node": ">=16"
    },
-   "version": "8.1.12",
+   "version": "8.1.15",
    "description": "API for tracking resource utilization and reporting metrics",
    "main": "index.js",
    "repository": {
@@ -19,13 +19,12 @@
    "dependencies": {
        "@hapi/joi": "^17.1.1",
        "@senx/warp10": "^1.0.14",
-       "arsenal": "git+https://github.com/scality/Arsenal#8.1.87",
+       "arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
        "async": "^3.2.0",
        "aws-sdk": "^2.1005.0",
        "aws4": "^1.8.0",
        "backo": "^1.1.0",
        "body-parser": "^1.19.0",
-       "bucketclient": "scality/bucketclient#8.1.9",
        "byte-size": "^7.0.0",
        "commander": "^5.1.0",
        "cron-parser": "^2.15.0",
@@ -38,17 +37,16 @@
        "needle": "^2.5.0",
        "node-schedule": "^1.3.2",
        "oas-tools": "^2.2.2",
-       "prom-client": "^13.1.0",
+       "prom-client": "14.2.0",
        "uuid": "^3.3.2",
-       "vaultclient": "scality/vaultclient#8.2.8",
-       "werelogs": "scality/werelogs#8.1.0"
+       "werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1"
    },
    "devDependencies": {
-       "eslint": "6.0.1",
-       "eslint-config-airbnb": "17.1.0",
-       "eslint-config-scality": "scality/Guidelines#8.2.0",
+       "eslint": "^8.14.0",
+       "eslint-config-airbnb-base": "^15.0.0",
+       "eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
        "eslint-plugin-import": "^2.18.0",
-       "mocha": "^3.0.2",
+       "mocha": ">=3.1.2",
        "nodemon": "^2.0.4",
        "protobufjs": "^6.10.1",
        "sinon": "^9.0.2"

requirements.txt Normal file
View File

@@ -0,0 +1,2 @@
+redis==5.0.3
+requests==2.31.0

View File

@@ -3,17 +3,16 @@ const assert = require('assert');
const url = require('url');
const { auth, errors, policies } = require('arsenal');
const safeJsonParse = require('../utils/safeJsonParse');
-const Vault = require('../lib/Vault');
class Router {
    /**
     * @constructor
     * @param {Config} config - Config instance
     */
-   constructor(config) {
+   constructor(config, vault) {
        this._service = config.component;
        this._routes = {};
-       this._vault = new Vault(config);
+       this._vault = vault;
    }
/** /**

View File

@@ -1,4 +1,21 @@
-const config = require('./lib/Config');
+const fs = require('fs');
+const path = require('path');
+const Config = require('./lib/Config');
const server = require('./lib/server');
-server(Object.assign({}, config, { component: 's3' }));
+/*
+ * By default, the config file is "config.json" at the root.
+ * It can be overridden using the UTAPI_CONFIG_FILE environment var.
+ */
+const cfgpath = process.env.UTAPI_CONFIG_FILE || (__dirname+'/config.json');
+let cfg;
+try {
+    cfg = JSON.parse(fs.readFileSync(cfgpath, { encoding: 'utf-8' }));
+} catch (err) {
+    throw new Error(`could not parse config file: ${err.message}`);
+}
+cfg.component = 's3';
+server(new Config(cfg));

View File

@@ -112,6 +112,17 @@ class BucketD {
        return body;
    }
+   _getBucketVersionResponse(bucketName) {
+       const body = {
+           CommonPrefixes: [],
+           IsTruncated: false,
+           Versions: (this._bucketContent[bucketName] || [])
+               // patch in a versionId to more closely match the real response
+               .map(entry => ({ ...entry, versionId: 'null' })),
+       };
+       return body;
+   }
    _getShadowBucketOverviewResponse(bucketName) {
        const mpus = (this._bucketContent[bucketName] || []).map(o => ({
            key: o.key,
@@ -137,6 +148,8 @@ class BucketD {
            || req.query.listingType === 'Delimiter'
        ) {
            req.body = this._getBucketResponse(bucketName);
+       } else if (req.query.listingType === 'DelimiterVersions') {
+           req.body = this._getBucketVersionResponse(bucketName);
        }
        // v2 reindex uses `Basic` listing type for everything
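For reference, a hedged example of the body the mock produces for a DelimiterVersions listing with one entry. Only the CommonPrefixes/IsTruncated/versionId structure comes from _getBucketVersionResponse() above; the sample key/value fields are made up, since real entries carry whatever _bucketContent holds for the bucket.

    // Hedged example of a mocked DelimiterVersions response body (sample entry
    // fields other than versionId are illustrative, not from the repository).
    const exampleBody = {
        CommonPrefixes: [],
        IsTruncated: false,
        Versions: [
            { key: 'obj-1', value: '{"content-length":1024}', versionId: 'null' },
        ],
    };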

yarn.lock

File diff suppressed because it is too large