Compare commits


No commits in common. "development/8.1" and "improvement/UTAPI-99-migration" have entirely different histories.

229 changed files with 41 additions and 23832 deletions

View File

@@ -1,5 +0,0 @@
{
"plugins": [
"transform-es2015-modules-commonjs"
]
}

View File

@@ -1,3 +0,0 @@
node_modules/
**/node_modules/
.git

View File

@@ -1,25 +0,0 @@
{
"extends": "scality",
"env": {
"es6": true
},
"parserOptions": {
"ecmaVersion": 9
},
"rules": {
"no-underscore-dangle": "off",
"implicit-arrow-linebreak" : "off",
"import/extensions": 0,
"prefer-spread": 0,
"no-param-reassign": 0,
"array-callback-return": 0
},
"settings": {
"import/resolver": {
"node": {
"paths": ["/utapi/node_modules", "node_modules"]
}
}
}
}

View File

@@ -1,87 +0,0 @@
# General support information
GitHub Issues are **reserved** for actionable bug reports (including
documentation inaccuracies), and feature requests.
**All questions** (regarding configuration, use cases, performance, community,
events, setup and usage recommendations, among other things) should be asked on
the **[Zenko Forum](http://forum.zenko.io/)**.
> Questions opened as GitHub issues will systematically be closed, and moved to
> the [Zenko Forum](http://forum.zenko.io/).
--------------------------------------------------------------------------------
## Avoiding duplicates
When reporting a new issue/requesting a feature, make sure that we do not have
any duplicates already open:
- search the issue list for this repository (use the search bar, select
"Issues" on the left pane after searching);
- if there is a duplicate, please do not open your issue, and add a comment
to the existing issue instead.
--------------------------------------------------------------------------------
## Bug report information
(delete this section (everything between the lines) if you're not reporting a
bug but requesting a feature)
### Description
Briefly describe the problem you are having in a few paragraphs.
### Steps to reproduce the issue
Please provide steps to reproduce, including full log output
### Actual result
Describe the results you received
### Expected result
Describe the results you expected
### Additional information
- Node.js version,
- Docker version,
- npm version,
- distribution/OS,
- optional: anything else you deem helpful to us.
--------------------------------------------------------------------------------
## Feature Request
(delete this section (everything between the lines) if you're not requesting
a feature but reporting a bug)
### Proposal
Describe the feature
### Current behavior
What currently happens
### Desired behavior
What you would like to happen
### Use case
Please provide use cases for changing the current behavior.
### Additional information
- Is this request for your company? Y/N
- If Y: Company name:
- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
- Are you willing to contribute this feature yourself?
- Position/Title:
- How did you hear about us?
--------------------------------------------------------------------------------

View File

@@ -1,14 +0,0 @@
# Creating this image for the CI as GitHub Actions
# is unable to overwrite the entrypoint
ARG REDIS_IMAGE="redis:latest"
FROM ${REDIS_IMAGE}
ENV REDIS_LISTEN_PORT 6380
ENV REDIS_MASTER_HOST redis
ENV REDIS_MASTER_PORT_NUMBER 6379
ENTRYPOINT redis-server \
--port ${REDIS_LISTEN_PORT} \
--slaveof ${REDIS_MASTER_HOST} ${REDIS_MASTER_PORT_NUMBER}

View File

@@ -1,7 +0,0 @@
FROM ghcr.io/scality/vault:c2607856
ENV VAULT_DB_BACKEND LEVELDB
RUN chmod 400 tests/utils/keyfile
ENTRYPOINT yarn start

View File

@@ -1,22 +0,0 @@
#!/bin/bash
set -x
set -e -o pipefail
# port for utapi server
PORT=8100
trap killandsleep EXIT
killandsleep () {
kill -9 $(lsof -t -i:$PORT) || true
sleep 10
}
if [ -z "$SETUP_CMD" ]; then
SETUP_CMD="start"
fi
UTAPI_INTERVAL_TEST_MODE=$1 npm $SETUP_CMD 2>&1 | tee -a "setup_$2.log" &
bash tests/utils/wait_for_local_port.bash $PORT 40
UTAPI_INTERVAL_TEST_MODE=$1 npm run $2 | tee -a "test_$2.log"

View File

@@ -1,65 +0,0 @@
name: build-ci-images
on:
workflow_call:
jobs:
warp10-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
name: warp10-ci
context: .
file: images/warp10/Dockerfile
lfs: true
redis-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
name: redis-ci
context: .
file: images/redis/Dockerfile
redis-replica-ci:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
needs:
- redis-ci
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
name: redis-replica-ci
context: .github/docker/redis-replica
build-args: |
REDIS_IMAGE=ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
vault-ci:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push vault Image
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/vault
tags: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
cache-from: type=gha,scope=vault
cache-to: type=gha,mode=max,scope=vault

View File

@@ -1,16 +0,0 @@
name: build-dev-image
on:
push:
branches-ignore:
- 'development/**'
jobs:
build-dev:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets:
REGISTRY_LOGIN: ${{ github.repository_owner }}
REGISTRY_PASSWORD: ${{ github.token }}
with:
namespace: ${{ github.repository_owner }}
name: ${{ github.event.repository.name }}

41
.github/workflows/migrate.yaml vendored Normal file
View File

@@ -0,0 +1,41 @@
---
name: migrate
on:
push:
branches-ignore:
- development/**
- q/*/**
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Sync ${{ github.event.repository.name }}
run: |
docker run --rm quay.io/skopeo/stable:v1.15.0 sync \
--src docker --dest docker --all --preserve-digests --retry-times 3 \
--src-creds ${{ secrets.REGISTRY_LOGIN }}:${{ secrets.REGISTRY_PASSWORD }} \
--dest-creds ${{ github.repository_owner }}:${{ github.token }} \
registry.scality.com/${{ github.event.repository.name }}/${{ github.event.repository.name }} \
ghcr.io/scality/
- name: Sync warp10
run: |
docker run --rm quay.io/skopeo/stable:v1.15.0 sync \
--src docker --dest docker --all --preserve-digests --retry-times 3 \
--src-creds ${{ secrets.REGISTRY_LOGIN }}:${{ secrets.REGISTRY_PASSWORD }} \
--dest-creds ${{ github.repository_owner }}:${{ github.token }} \
registry.scality.com/${{ github.event.repository.name }}/warp10 \
ghcr.io/scality/${{ github.event.repository.name }}
- name: Sync sf-eng/${{ github.event.repository.name }}
run: |
docker run --rm quay.io/skopeo/stable:v1.15.0 sync \
--src docker --dest docker --all --preserve-digests --retry-times 3 \
--src-creds ${{ secrets.REGISTRY_LOGIN }}:${{ secrets.REGISTRY_PASSWORD }} \
--dest-creds ${{ github.repository_owner }}:${{ github.token }} \
registry.scality.com/sf-eng/${{ github.event.repository.name }} \
ghcr.io/scality/

View File

@@ -1,39 +0,0 @@
name: release-warp10
on:
workflow_dispatch:
inputs:
tag:
type: string
description: 'Tag to be released'
required: true
create-github-release:
type: boolean
description: Create a tag and matching Github release.
required: false
default: true
jobs:
build:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
secrets: inherit
with:
name: warp10
context: .
file: images/warp10/Dockerfile
tag: ${{ github.event.inputs.tag }}
lfs: true
release:
if: ${{ inputs.create-github-release }}
runs-on: ubuntu-latest
needs: build
steps:
- uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: Release utapi/warp10:${{ github.event.inputs.tag }}-warp10
tag_name: ${{ github.event.inputs.tag }}-warp10
generate_release_notes: false
target_commitish: ${{ github.sha }}

View File

@@ -1,45 +0,0 @@
name: release
on:
workflow_dispatch:
inputs:
dockerfile:
description: Dockerfile to build image from
type: choice
options:
- images/nodesvc-base/Dockerfile
- Dockerfile
required: true
tag:
type: string
description: 'Tag to be released'
required: true
create-github-release:
type: boolean
description: Create a tag and matching Github release.
required: false
default: false
jobs:
build:
uses: scality/workflows/.github/workflows/docker-build.yaml@v2
with:
namespace: ${{ github.repository_owner }}
name: ${{ github.event.repository.name }}
context: .
file: ${{ github.event.inputs.dockerfile}}
tag: ${{ github.event.inputs.tag }}
release:
if: ${{ inputs.create-github-release }}
runs-on: ubuntu-latest
needs: build
steps:
- uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ github.token }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}
generate_release_notes: true
target_commitish: ${{ github.sha }}

View File

@@ -1,361 +0,0 @@
---
name: tests
on:
push:
branches-ignore:
- 'development/**'
workflow_dispatch:
inputs:
debug:
description: Debug (enable the ability to SSH to runners)
type: boolean
required: false
default: 'false'
connection-timeout-m:
type: number
required: false
description: Timeout for ssh connection to worker (minutes)
default: 30
jobs:
build-ci:
uses: ./.github/workflows/build-ci.yaml
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: run static analysis tools on markdown
run: yarn run lint_md
- name: run static analysis tools on code
run: yarn run lint
tests-v1:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
name: ${{ matrix.test.name }}
strategy:
fail-fast: false
matrix:
test:
- name: run unit tests
command: yarn test
env:
UTAPI_METRICS_ENABLED: 'true'
- name: run v1 client tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:client
env: {}
- name: run v1 server tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:server
env: {}
- name: run v1 cron tests
command: bash ./.github/scripts/run_ft_tests.bash false ft_test:cron
env: {}
- name: run v1 interval tests
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:interval
env: {}
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
ports:
- 6379:6379
- 9121:9121
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-replica:
image: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
ports:
- 6380:6380
options: >-
--health-cmd "redis-cli -p 6380 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
REDIS_SENTINEL_QUORUM: '1'
ports:
- 16379:16379
options: >-
--health-cmd "redis-cli -p 16379 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
warp10:
image: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
env:
standalone.port: '4802'
warpscript.maxops: '10000000'
ENABLE_SENSISION: 't'
options: >-
--health-cmd "curl localhost:4802/api/v0/check"
--health-interval 10s
--health-timeout 5s
--health-retries 10
--health-start-period 60s
ports:
- 4802:4802
- 8082:8082
- 9718:9718
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v5
with:
python-version: '3.9'
cache: pip
- name: Install python deps
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: ${{ matrix.test.name }}
run: ${{ matrix.test.command }}
env: ${{ matrix.test.env }}
tests-v2-with-vault:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
ports:
- 6379:6379
- 9121:9121
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-replica:
image: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
ports:
- 6380:6380
options: >-
--health-cmd "redis-cli -p 6380 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
REDIS_SENTINEL_QUORUM: '1'
ports:
- 16379:16379
options: >-
--health-cmd "redis-cli -p 16379 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
warp10:
image: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
env:
standalone.port: '4802'
warpscript.maxops: '10000000'
ENABLE_SENSISION: 't'
ports:
- 4802:4802
- 8082:8082
- 9718:9718
options: >-
--health-cmd "curl localhost:4802/api/v0/check"
--health-interval 10s
--health-timeout 5s
--health-retries 10
--health-start-period 60s
vault:
image: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
ports:
- 8500:8500
- 8600:8600
- 8700:8700
- 8800:8800
options: >-
--health-cmd "curl http://localhost:8500/_/healthcheck"
--health-interval 10s
--health-timeout 5s
--health-retries 10
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v5
with:
python-version: '3.9'
cache: pip
- name: Install python deps
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 for 60 seconds
run: sleep 60
- name: run v2 functional tests
run: bash ./.github/scripts/run_ft_tests.bash true ft_test:v2
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_SERVICE_USER_ENABLED: 'true'
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: 'Debug: SSH to runner'
uses: scality/actions/action-ssh-to-runner@1.7.0
timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}
tests-v2-without-sensision:
needs:
- build-ci
runs-on: ubuntu-latest
env:
REINDEX_PYTHON_INTERPRETER: python3
name: ${{ matrix.test.name }}
strategy:
fail-fast: false
matrix:
test:
- name: run v2 soft limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:softLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
- name: run v2 hard limit test
command: bash ./.github/scripts/run_ft_tests.bash true ft_test:hardLimit
env:
UTAPI_CACHE_BACKEND: redis
UTAPI_LOG_LEVEL: trace
SETUP_CMD: "run start_v2:server"
services:
redis:
image: ghcr.io/${{ github.repository }}/redis-ci:${{ github.sha }}
ports:
- 6379:6379
- 9121:9121
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-replica:
image: ghcr.io/${{ github.repository }}/redis-replica-ci:${{ github.sha }}
ports:
- 6380:6380
options: >-
--health-cmd "redis-cli -p 6380 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
redis-sentinel:
image: bitnami/redis-sentinel:7.2.4
env:
REDIS_MASTER_SET: scality-s3
REDIS_SENTINEL_PORT_NUMBER: '16379'
REDIS_SENTINEL_QUORUM: '1'
ports:
- 16379:16379
options: >-
--health-cmd "redis-cli -p 16379 ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
warp10:
image: ghcr.io/${{ github.repository }}/warp10-ci:${{ github.sha }}
env:
standalone.port: '4802'
warpscript.maxops: '10000000'
ports:
- 4802:4802
- 8082:8082
- 9718:9718
options: >-
--health-cmd "curl localhost:4802/api/v0/check"
--health-interval 10s
--health-timeout 5s
--health-retries 10
--health-start-period 60s
vault:
image: ghcr.io/${{ github.repository }}/vault-ci:${{ github.sha }}
ports:
- 8500:8500
- 8600:8600
- 8700:8700
- 8800:8800
options: >-
--health-cmd "curl http://localhost:8500/_/healthcheck"
--health-interval 10s
--health-timeout 5s
--health-retries 10
steps:
- name: Checkout
uses: actions/checkout@v4
with:
lfs: true
- uses: actions/setup-node@v4
with:
node-version: '16.13.2'
cache: yarn
- uses: actions/setup-python@v5
with:
python-version: '3.9'
cache: pip
- name: Install python deps
run: pip install -r requirements.txt
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- name: Wait for warp10 a little bit
run: sleep 60
- name: ${{ matrix.test.name }}
run: ${{ matrix.test.command }}
env: ${{ matrix.test.env }}
- name: 'Debug: SSH to runner'
uses: scality/actions/action-ssh-to-runner@1.7.0
timeout-minutes: ${{ fromJSON(github.event.inputs.connection-timeout-m) }}
continue-on-error: true
with:
tmate-server-host: ${{ secrets.TMATE_SERVER_HOST }}
tmate-server-port: ${{ secrets.TMATE_SERVER_PORT }}
tmate-server-rsa-fingerprint: ${{ secrets.TMATE_SERVER_RSA_FINGERPRINT }}
tmate-server-ed25519-fingerprint: ${{ secrets.TMATE_SERVER_ED25519_FINGERPRINT }}
if: ${{ ( github.event.inputs.debug == true || github.event.inputs.debug == 'true' ) }}

7
.gitignore vendored
View File

@@ -1,7 +0,0 @@
dist
node_modules/
**/node_modules/
logs
*.log
dump.rdb
.vscode/

View File

@@ -1,5 +0,0 @@
# .prettierrc or .prettierrc.yaml
trailingComma: "all"
tabWidth: 4
arrowParens: avoid
singleQuote: true

181
DESIGN.md
View File

@@ -1,181 +0,0 @@
# Design
Utapi tracks metrics of a service's usage. Metrics provided by Utapi include the
number of incoming and outgoing bytes, the number of objects being stored, the
storage utilized in bytes, and a count of operations performed on a service's
resources. Operations supported by Utapi include APIs offered by Scality's [S3
Server](https://github.com/scality/S3). Metrics can be retrieved for a given
time range in a service's history.
## Time Range
Utapi offers metrics for a time range provided by the user. For example, Utapi
allows a user to view all actions that have occurred over the course of a
particular month, week, or day. Time ranges are customizable up to a precision
of fifteen minutes.
Note: A time range precision of less than fifteen minutes can be supported as a
feature request and could be set as a configurable option.
### Timestamps
Metrics provided by Utapi are set to the latest fifteen minute interval (i.e.,
00:00:00, 00:15:00, 00:30:00, or 00:45:00). For example, if a user creates a
bucket at 06:15:01, the operation will have a timestamp of 06:15:00. All
timestamps are then formatted as a UNIX epoch expressed in milliseconds. During
a listing of metrics, then, we can know that this operation occurred sometime
between 06:15:00 and 06:29:59.
#### Example
| current time | timestamp | UNIX epoch timestamp |
|--------------|-----------|----------------------|
| 06:15:01 | 06:15:00 | 1483280100000 |
| 06:29:59 | 06:15:00 | 1483280100000 |
| 06:31:00 | 06:30:00 | 1483281000000 |
| 07:01:00 | 07:00:00 | 1483282800000 |
![timestamp graphic](res/timestamp-graphic.png)
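As a hedged illustration (not code from this repository), normalizing an event
time to the start of its fifteen-minute interval in UTC milliseconds amounts to
flooring the timestamp:

```js
// Hypothetical helper: floor a millisecond UNIX timestamp to the start of its
// fifteen-minute interval, as described above.
const FIFTEEN_MINUTES_MS = 15 * 60 * 1000;

function normalizeTimestamp(timestampMs) {
    return timestampMs - (timestampMs % FIFTEEN_MINUTES_MS);
}

// 2017-01-01 06:15:01 UTC -> 2017-01-01 06:15:00 UTC
console.log(normalizeTimestamp(Date.UTC(2017, 0, 1, 6, 15, 1))); // 1483251300000
```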
### Data Storage
Utapi uses Redis as a database for storage of its metrics values. Accordingly,
it uses three different Redis data types: Sorted Sets, Strings, and Lists. This
document describes how these three data types are used by Utapi. For further
information on data types see the Redis
[documentation](https://redis.io/topics/data-types).
#### Sorted Sets
The Redis keys storing metrics for the number of objects and the storage
utilized are recorded with a Sorted Set data type. We use this data type to
associate the value of the key with its timestamp (in Redis terminology, a
score). In this way, these two metrics hold *stateful* data. That is, the key's
value represents the state of a metric at that point in history.
With a Sorted Set, then, we can create a list of values ordered by their
timestamp. This ordered nature is especially useful for Utapi during a listing
of metrics, as it allows for speedy retrieval of the nearest starting and ending
values.
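Purely as a sketch (assuming the `ioredis` client; the key name follows the
schema described later in this document), recording a stateful value with its
timestamp as the score could look like:

```js
const Redis = require('ioredis'); // assumed Redis client library

const redis = new Redis({ host: '127.0.0.1', port: 6379 });
const timestamp = 1451635200000; // a normalized fifteen-minute interval

// The score is the timestamp, so members stay ordered by time; the member is
// made unique per interval for this sketch.
redis.zadd('s3:buckets:foo-bucket:storageUtilized', timestamp, `${timestamp}:1024`)
    // The latest state is the member with the highest score.
    .then(() => redis.zrange('s3:buckets:foo-bucket:storageUtilized', -1, -1, 'WITHSCORES'))
    .then(res => console.log(res));
```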
#### Strings
The Redis keys storing metrics for incoming bytes, outgoing bytes, and all S3
operations (e.g., 'CreateBucket', 'PutObject', etc.) are recorded with a String
data type.
Moreover, there are also global counters associated with metrics for the number
of objects and the storage utilized. Such counters are updated during any
applicable operation. For example, when uploading or deleting an object, the
counter for the number of objects increments or decrements, respectively. These
counters are used internally by Sorted Sets to record the state (the storage
used and the number of objects) at a particular point in time.
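As another hedged sketch (only the timestamped 'PutObject' key below follows a
pattern shown in this document; the other metric key names are hypothetical):

```js
const Redis = require('ioredis'); // assumed Redis client library

const redis = new Redis({ host: '127.0.0.1', port: 6379 });

// Count one 'PutObject' operation for this fifteen-minute interval.
redis.incr('s3:buckets:1451635200000:foo-bucket:PutObject');

// Update the incoming-bytes metric and the global object counter (names are
// illustrative placeholders).
redis.incrby('s3:buckets:foo-bucket:incomingBytes', 1024);
redis.incr('s3:buckets:foo-bucket:numberOfObjects');
```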
#### Lists
Redis keys storing cached metrics use a List data type, where each List element
is a string containing information from the original request. This data type is
used by a component named UtapiReplay that pushes any metrics stored in the List
every five minutes, by default.
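For illustration only (the cache key `s3:utapireplay` and the entry format
appear later in this document; `ioredis` assumed), caching a metric for
UtapiReplay to retry could look like:

```js
const Redis = require('ioredis'); // assumed Redis client library

const redis = new Redis({ host: '127.0.0.1', port: 6379 });

// Serialize the original request so UtapiReplay can replay it later.
const entry = JSON.stringify({
    action: 'putObject',
    reqUid: '3d534b1511e5630e68f0',
    params: { bucket: 'foo-bucket', newByteLength: 1024, oldByteLength: null },
    timestamp: 1451635200000,
});

redis.rpush('s3:utapireplay', entry);
```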
#### Example
Steps occurring during a successful 'PutObject' request:
1. If the new object overwrites a pre-existing object, the counter for the
number of objects remains unchanged. Otherwise it increments by one.
2. If the new object overwrites a pre-existing object, the counter for the
storage utilized increments by the difference between the byte size of the
object and byte size of the object being overwritten. Otherwise, it
increments by the byte size of the new object.
3. The metric for the incoming bytes increments by the byte size of the new
object.
4. The metric for the 'PutObject' operation increments by one.
5. The Sorted Set keys (the storage used and the number of objects) are updated
to the value of their respective counter.
If a connection to the Redis datastore cannot be made, metrics from the original
request to Utapi are pushed to a local Redis cache to be retried at a later
time.
### Schema Keyspace
The key created for each metric expresses a hierarchy of the data stored by that
key: the service, the resource type, the resource, and the metric being stored.
These levels are separated by colons.
```
<service>:<resourcetype>:<resource>:<metric>
```
`<service>` The service that the metric belongs to (for example, 's3').
`<resourcetype>` The type of resource being accessed (for example, 'buckets'
or 'accounts').
`<resource>` The bucket name or account ID (for example, 'foo-bucket').
`<metric>` The metric to get values for (for example, 'storageUtilized').
Thus, a key storing the storage utilized by 'foo-bucket' in 's3' would be:
```
s3:buckets:foo-bucket:storageUtilized
```
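A minimal sketch of assembling such a key (hypothetical helper, not from this
repository):

```js
// Join the hierarchy levels with colons, as described above.
function buildMetricKey(service, resourceType, resource, metric) {
    return `${service}:${resourceType}:${resource}:${metric}`;
}

console.log(buildMetricKey('s3', 'buckets', 'foo-bucket', 'storageUtilized'));
// -> s3:buckets:foo-bucket:storageUtilized
```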
#### Timestamped Keys
Metrics for S3 operations create keys that generally follow the same pattern as
above. However, they also include the timestamp at which the operation occurred.
For example, the key storing the count of 'PutObject' operations for `foo-bucket`
on January 01 2016 00:01:00 (where `1451635200000` is the UNIX epoch timestamp of
the operation):
```
s3:buckets:1451635200000:foo-bucket:PutObject
```
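The same sketch extended with the normalized timestamp (again a hypothetical
helper):

```js
// Timestamped operation keys insert the interval timestamp after the
// resource type.
function buildOperationKey(service, resourceType, timestamp, resource, operation) {
    return `${service}:${resourceType}:${timestamp}:${resource}:${operation}`;
}

console.log(buildOperationKey('s3', 'buckets', 1451635200000, 'foo-bucket', 'PutObject'));
// -> s3:buckets:1451635200000:foo-bucket:PutObject
```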
#### Local Redis Cache Key
Metrics pushed by S3 that cannot be recorded as schema keys in the Redis
datastore (for example, when the connection fails) are stored in a local Redis
cache. For example, the key storing cached metrics of S3 operations:
```
s3:utapireplay
```
The value of the local Redis cache key is a list of JSON strings, where each
string contains the parameters and timestamp of an unsuccessful `pushMetric`
call. For example, a string storing metrics for a 'PutObject' operation:
```
"{\"action\":\"putObject\",\"reqUid\":\"3d534b1511e5630e68f0\",\"params\":{\"bucket\":\"foo-bucket\",\"newByteLength\":1024,\"oldByteLength\":null},\"timestamp\":1451635200000}"
```
### redis-cli
Note: Using blocking calls (for example, `KEYS *`) with a Redis client during
production will temporarily block other calls to the Redis Server by Utapi.
Access the storage utilized for the latest fifteen minute time interval using
the command line interface of Redis, `redis-cli` (see Redis
[documentation](https://redis.io/topics/rediscli)):
```
ZRANGE s3:buckets:foo-bucket:storageUtilized -1 -1 WITHSCORES
```
The `WITHSCORES` option in the above command will return the timestamp for each
value.
Access the value stored by a key that is a String data type:
```
GET s3:buckets:1451635200000:foo-bucket:PutObject
```

View File

@@ -1,31 +0,0 @@
FROM node:16.13.2-buster-slim
WORKDIR /usr/src/app
COPY package.json yarn.lock /usr/src/app/
RUN apt-get update \
&& apt-get install -y \
curl \
gnupg2
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
RUN apt-get update \
&& apt-get install -y jq git python3 build-essential yarn --no-install-recommends \
&& yarn cache clean \
&& yarn install --frozen-lockfile --production --ignore-optional --network-concurrency=1 \
&& apt-get autoremove --purge -y python3 git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-*
# Keep the .git directory in order to properly report version
COPY . /usr/src/app
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
EXPOSE 8100

191
LICENSE
View File

@@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2016 Scality
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

344
README.md
View File

@@ -1,344 +0,0 @@
# utapi
![Utapi logo](res/utapi-logo.png)
[![Circle CI][badgepub]](https://circleci.com/gh/scality/utapi)
Service Utilization API for tracking resource usage and metrics reporting.
## Design
Please refer to the [design](/DESIGN.md) for more information.
## Server
To run the server:
```
npm start
```
## Client
The module exposes a client, named UtapiClient. Projects can use this client to
push metrics directly to the underlying datastore (Redis) without the need for
an extra HTTP request to Utapi.
```js
const { UtapiClient } = require('utapi');
const config = {
redis: {
host: '127.0.0.1',
port: 6379
},
localCache: {
host: '127.0.0.1',
port: 6379
}
}
const c = new UtapiClient(config);
// The second argument to `pushMetric` is a hexadecimal string Request Unique
// Identifier used for logging.
c.pushMetric('createBucket', '3d534b1511e5630e68f0', { bucket: 'demo' });
c.pushMetric('putObject', '3d534b1511e5630e68f0', {
bucket: 'demo',
newByteLength: 1024,
oldByteLength: null,
});
c.pushMetric('putObject', '3d534b1511e5630e68f0', {
bucket: 'demo',
newByteLength: 1024,
oldByteLength: 256,
});
c.pushMetric('multiObjectDelete', '3d534b1511e5630e68f0', {
bucket: 'demo',
byteLength: 1024,
numberOfObjects: 999,
});
```
If an error occurs during a `pushMetric` call and the client is unable to record
metrics in the underlying datastore, metric data is instead stored in a local
Redis cache. Utapi attempts to push these cached metrics (every five minutes, by
default) using a component named UtapiReplay. If the `pushMetric` call initiated
by UtapiReplay fails, the metric is reinserted into the local Redis cache. The
particularities of this behavior are configurable. For further information, see
[design](/DESIGN.md).
## Listing Metrics with Utapi
To make a successful request to Utapi, you need:
1. [IAM user with a policy giving access to Utapi](#iam-user-with-a-policy-giving-access-to-utapi)
2. [Sign request with Auth V4](#signing-request-with-auth-v4)
### IAM user with a policy giving access to Utapi
Note: The examples here use AWS CLI but any AWS SDK is capable of these actions.
**endpoint-url:** This is `https://<host>:<port>`, where your Identity (IAM)
server is running.
1. Create an IAM user
```
aws iam --endpoint-url <endpoint> create-user --user-name <user-name>
```
2. Create access key for the user
```
aws iam --endpoint-url <endpoint> create-access-key --user-name <user-name>
```
3. Define a managed IAM policy
A sample Utapi policy:
```json
cat - > utapipolicy.json <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "utapiMetrics",
"Action": [ "utapi:ListMetrics" ],
"Effect": "Allow",
"Resource": [
"arn:scality:utapi::012345678901:buckets/*",
"arn:scality:utapi::012345678901:accounts/*",
"arn:scality:utapi::012345678901:users/*",
]
}
]
}
EOF
```
In the above sample, the `Resource` property includes a series of Amazon
Resource Names (ARNs) used to define which resources the policy applies to.
Thus the sample policy applies to a user with an account ID '012345678901',
and grants access to metrics at the levels 'buckets', 'accounts', and
'users'.
The account ID of the ARN can also be omitted, allowing any account to
access metrics for those resources. As an example, we can extend the above
sample policy to allow any account to access metrics at the level 'service':
```json
...
"Resource": [
"arn:scality:utapi::012345678901:buckets/*",
"arn:scality:utapi::012345678901:accounts/*",
"arn:scality:utapi::012345678901:users/*",
"arn:scality:utapi:::service/*",
]
...
```
The omission of a metric level denies a user access to all resources at that
level. For example, we can allow access to metrics only at the level
'buckets':
```json
...
"Resource": ["arn:scality:utapi::012345678901:buckets/*"]
...
```
Further, access may be limited to specific resources within a metric level.
For example, we can allow access to metrics only for a bucket 'foo':
```json
...
"Resource": ["arn:scality:utapi::012345678901:buckets/foo"]
...
```
Or allow access to metrics for the bucket 'foo' for any user:
```json
...
"Resource": ["arn:scality:utapi:::buckets/foo"]
...
```
4. Create a managed IAM policy
Once your IAM policy is defined, create the policy using the following
command.
```
aws iam --endpoint-url <endpoint> create-policy --policy-name utapipolicy \
--policy-document file://utapipolicy.json
```
A sample output of the above command would look like
```json
{
"Policy": {
"PolicyName": "utapipolicy",
"CreateDate": "2017-06-01T19:31:18.620Z",
"AttachmentCount": 0,
"IsAttachable": true,
"PolicyId": "ZXR6A36LTYANPAI7NJ5UV",
"DefaultVersionId": "v1",
"Path": "/",
"Arn": "arn:aws:iam::0123456789012:policy/utapipolicy",
"UpdateDate": "2017-06-01T19:31:18.620Z"
}
}
```
The arn property of the response, which we call `<policy arn>`, will be used
in the next step to attach the policy to the user.
5. Attach user to the managed policy
```
aws --endpoint-url <endpoint> iam attach-user-policy --user-name
<user-name> --policy-arn <policy arn>
```
Now the user has access to the ListMetrics request in Utapi on all buckets.
### Signing request with Auth V4
There are two options here.
You can generate a V4 signature using AWS SDKs or the node module aws4. See the
following URLs for reference.
* http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
* https://github.com/mhart/aws4
You may also view examples making a request with Auth V4 using various languages
and AWS SDKs [here](/examples).
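As a hedged sketch of the aws4 option (the endpoint, credentials, bucket name,
and time range below are placeholders; the request shape mirrors the
ListMetrics examples elsewhere in this document):

```js
const http = require('http');
const aws4 = require('aws4');

const body = JSON.stringify({
    buckets: ['demo'],
    timeRange: [1476231300000, 1476233099999],
});

// aws4.sign() adds the Authorization and X-Amz-Date headers in place.
const opts = aws4.sign({
    hostname: '127.0.0.1',
    port: 8100,
    method: 'POST',
    path: '/buckets?Action=ListMetrics',
    service: 's3',
    region: 'us-east-1',
    headers: { 'Content-Type': 'application/json' },
    body,
}, {
    accessKeyId: 'myAccessKey',
    secretAccessKey: 'mySecretKey',
});

const req = http.request(opts, res => {
    res.on('data', chunk => process.stdout.write(chunk));
});
req.end(body);
```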
Alternatively, you can use a nifty command line tool available in Scality's
CloudServer.
You can git clone the CloudServer repo from here
https://github.com/scality/cloudserver and follow the instructions in the README
to install the dependencies.
If you have CloudServer running inside a Docker container, you can `docker exec`
into the CloudServer container:
```
docker exec -it <container-id> bash
```
and then run the command
```
node bin/list_metrics
```
It will generate the following output listing available options.
```
Usage: list_metrics [options]
Options:
-h, --help output usage information
-V, --version output the version number
-a, --access-key <accessKey> Access key id
-k, --secret-key <secretKey> Secret access key
-m, --metric <metric> Metric type
--buckets <buckets> Name of bucket(s) with a comma separator if
more than one
--accounts <accounts> Account ID(s) with a comma separator if more
than one
--users <users> User ID(s) with a comma separator if more than
one
--service <service> Name of service
-s, --start <start> Start of time range
-r, --recent List metrics including the previous and
current 15 minute interval
-e --end <end> End of time range
-h, --host <host> Host of the server
-p, --port <port> Port of the server
--ssl Enable ssl
-v, --verbose
```
An example call to Utapi to list metrics for a bucket `demo` in an HTTPS-enabled
deployment would be:
```
node bin/list_metrics --metric buckets --buckets demo --start 1476231300000
--end 1476233099999 -a myAccessKey -k mySecretKey -h 127.0.0.1 -p 8100 --ssl
```
Both start and end times are UNIX epoch timestamps **expressed in milliseconds**.
Keep in mind that, since Utapi metrics are normalized to the nearest 15-minute
interval, the start and end times must follow the formats described below.
#### Start time
Start time needs to be normalized to the nearest 15 minute interval with seconds
and milliseconds set to 0. So valid start timestamps would look something like
`09:00:00:000`, `09:15:00:000`, `09:30:00:000` and `09:45:00:000`.
For example
Date: Tue Oct 11 2016 17:35:25 GMT-0700 (PDT)
Unix timestamp (milliseconds): 1476232525320
Here's an example JS method to get a start timestamp
```javascript
function getStartTimestamp(t) {
const time = new Date(t);
const minutes = time.getMinutes();
const timestamp = time.setMinutes((minutes - minutes % 15), 0, 0);
return timestamp;
}
```
This would format the start time timestamp to `1476231300000`
#### End time
End time needs to be normalized to the nearest 15 minute end interval with
seconds and milliseconds set to 59 and 999 respectively. So valid end timestamps
would look something like `09:14:59:999`, `09:29:59:999`, `09:44:59:999` and
`09:59:59:999`.
Here's an example JS method to get an end timestamp
```javascript
function getEndTimestamp(t) {
const time = new Date(t);
const minutes = time.getMinutes();
const timestamp = time.setMinutes((minutes - minutes % 15) + 15, 0, -1);
return timestamp;
}
```
This would format the end time timestamp to `1476233099999`
## Guidelines
Please read our coding and workflow guidelines at
[scality/Guidelines](https://github.com/scality/Guidelines).
### Contributing
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
[badgepub]: http://circleci.com/gh/scality/utapi.svg?style=svg

View File

@@ -1,15 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'CreateCheckpoint',
});
const task = new tasks.CreateCheckpoint({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting checkpoint creation'))
.then(() => task.start())
.then(() => logger.info('Checkpoint creation started'));

View File

@@ -1,14 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'CreateSnapshot',
});
const task = new tasks.CreateSnapshot({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting snapshot creation'))
.then(() => task.start())
.then(() => logger.info('Snapshot creation started'));

View File

@@ -1,15 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'MonitorDiskUsage',
});
const task = new tasks.MonitorDiskUsage({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting disk usage monitor'))
.then(() => task.start())
.then(() => logger.info('Disk usage monitor started'));

View File

@@ -1,276 +0,0 @@
#! /usr/bin/env node
// TODO
// - deduplicate with Vault's seed script at https://github.com/scality/Vault/pull/1627
// - add permission boundaries to user when https://scality.atlassian.net/browse/VAULT-4 is implemented
const { errors } = require('arsenal');
const program = require('commander');
const werelogs = require('werelogs');
const async = require('async');
const { IAM } = require('aws-sdk');
const { version } = require('../package.json');
const systemPrefix = '/scality-internal/';
function generateUserPolicyDocument() {
return {
Version: '2012-10-17',
Statement: {
Effect: 'Allow',
Action: 'utapi:ListMetrics',
Resource: 'arn:scality:utapi:::*/*',
},
};
}
function createIAMClient(opts) {
return new IAM({
endpoint: opts.iamEndpoint,
});
}
function needsCreation(v) {
if (Array.isArray(v)) {
return !v.length;
}
return !v;
}
class BaseHandler {
constructor(serviceName, iamClient, log) {
this.serviceName = serviceName;
this.iamClient = iamClient;
this.log = log;
}
applyWaterfall(values, done) {
this.log.debug('applyWaterfall', { values, type: this.resourceType });
const v = values[this.resourceType];
if (needsCreation(v)) {
this.log.debug('creating', { v, type: this.resourceType });
return this.create(values)
.then(res =>
done(null, Object.assign(values, {
[this.resourceType]: res,
})))
.catch(done);
}
this.log.debug('conflicts check', { v, type: this.resourceType });
if (this.conflicts(v)) {
return done(errors.EntityAlreadyExists.customizeDescription(
`${this.resourceType} ${this.serviceName} already exists and conflicts with the expected value.`));
}
this.log.debug('nothing to do', { v, type: this.resourceType });
return done(null, values);
}
}
class UserHandler extends BaseHandler {
get resourceType() {
return 'user';
}
collect() {
return this.iamClient.getUser({
UserName: this.serviceName,
})
.promise()
.then(res => res.User);
}
create(allResources) {
return this.iamClient.createUser({
UserName: this.serviceName,
Path: systemPrefix,
})
.promise()
.then(res => res.User);
}
conflicts(u) {
return u.Path !== systemPrefix;
}
}
class PolicyHandler extends BaseHandler {
get resourceType() {
return 'policy';
}
collect() {
return this.iamClient.listPolicies({
MaxItems: 100,
OnlyAttached: false,
Scope: 'All',
})
.promise()
.then(res => res.Policies.find(p => p.PolicyName === this.serviceName));
}
create(allResources) {
const doc = generateUserPolicyDocument();
return this.iamClient.createPolicy({
PolicyName: this.serviceName,
PolicyDocument: JSON.stringify(doc),
Path: systemPrefix,
})
.promise()
.then(res => res.Policy);
}
conflicts(p) {
return p.Path !== systemPrefix;
}
}
class PolicyAttachmentHandler extends BaseHandler {
get resourceType() {
return 'policyAttachment';
}
collect() {
return this.iamClient.listAttachedUserPolicies({
UserName: this.serviceName,
MaxItems: 100,
})
.promise()
.then(res => res.AttachedPolicies)
}
create(allResources) {
return this.iamClient.attachUserPolicy({
PolicyArn: allResources.policy.Arn,
UserName: this.serviceName,
})
.promise();
}
conflicts(p) {
return false;
}
}
class AccessKeyHandler extends BaseHandler {
get resourceType() {
return 'accessKey';
}
collect() {
return this.iamClient.listAccessKeys({
UserName: this.serviceName,
MaxItems: 100,
})
.promise()
.then(res => res.AccessKeyMetadata)
}
create(allResources) {
return this.iamClient.createAccessKey({
UserName: this.serviceName,
})
.promise()
.then(res => res.AccessKey);
}
conflicts(a) {
return false;
}
}
function collectResource(v, done) {
v.collect()
.then(res => done(null, res))
.catch(err => {
if (err.code === 'NoSuchEntity') {
return done(null, null);
}
done(err);
});
}
function collectResourcesFromHandlers(handlers, cb) {
const tasks = handlers.reduce((acc, v) => ({
[v.resourceType]: done => collectResource(v, done),
...acc,
}), {});
async.parallel(tasks, cb);
}
function buildServiceUserHandlers(serviceName, client, log) {
return [
UserHandler,
PolicyHandler,
PolicyAttachmentHandler,
AccessKeyHandler,
].map(h => new h(serviceName, client, log));
}
function apply(client, serviceName, log, cb) {
const handlers = buildServiceUserHandlers(serviceName, client, log);
async.waterfall([
done => collectResourcesFromHandlers(handlers, done),
...handlers.map(h => h.applyWaterfall.bind(h)),
(values, done) => done(null, values.accessKey),
], cb);
}
function wrapAction(actionFunc, serviceName, options) {
werelogs.configure({
level: options.logLevel,
dump: options.logDumpLevel,
});
const log = new werelogs.Logger(process.argv[1]).newRequestLogger();
const client = createIAMClient(options);
actionFunc(client, serviceName, log, (err, data) => {
if (err) {
log.error('failed', {
data,
error: err,
});
if (err.EntityAlreadyExists) {
log.error(`run "${process.argv[1]} purge ${serviceName}" to fix.`);
}
process.exit(1);
}
log.info('success', { data });
process.exit();
});
}
program.version(version);
[
{
name: 'apply <service-name>',
actionFunc: apply,
},
].forEach(cmd => {
program
.command(cmd.name)
.option('--iam-endpoint <url>', 'IAM endpoint', 'http://localhost:8600')
.option('--log-level <level>', 'log level', 'info')
.option('--log-dump-level <level>', 'log level that triggers a dump of the debug buffer', 'error')
.action(wrapAction.bind(null, cmd.actionFunc));
});
const validCommands = program.commands.map(n => n._name);
// Is the command given invalid or are there too few arguments passed
if (!validCommands.includes(process.argv[2])) {
program.outputHelp();
process.stdout.write('\n');
process.exit(1);
} else {
program.parse(process.argv);
}

View File

@@ -1,15 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'IngestShard',
});
const task = new tasks.IngestShard({ warp10: warp10Clients });
task.setup()
.then(() => logger.info('Starting shard ingestion'))
.then(() => task.start())
.then(() => logger.info('Ingestion started'));

View File

@@ -1,15 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'ManualAdjust',
});
const task = new tasks.ManualAdjust({ warp10: warp10Clients });
task.setup()
.then(() => logger.info('Starting manual adjustment'))
.then(() => task.start())
.then(() => logger.info('Manual adjustment started'));

View File

@@ -1,14 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'Migrate',
});
const task = new tasks.MigrateTask({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting utapi v1 => v2 migration'))
.then(() => task.start())
.then(() => logger.info('Migration started'));

View File

@@ -1,15 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'Reindex',
});
const task = new tasks.ReindexTask({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting Reindex daemon'))
.then(() => task.start())
.then(() => logger.info('Reindex started'));

View File

@@ -1,15 +0,0 @@
const { tasks } = require('..');
const { LoggerContext } = require('../libV2/utils');
const { clients: warp10Clients } = require('../libV2/warp10');
const logger = new LoggerContext({
task: 'Repair',
});
const task = new tasks.RepairTask({ warp10: [warp10Clients[0]] });
task.setup()
.then(() => logger.info('Starting Repair daemon'))
.then(() => task.start())
.then(() => logger.info('Repair started'));

View File

@@ -1,9 +0,0 @@
const { startUtapiServer } = require('..');
const { LoggerContext } = require('../libV2/utils');
const logger = new LoggerContext({ module: 'entrypoint' });
startUtapiServer().then(
() => logger.info('utapi started'),
error => logger.error('Unhandled Error', { error }),
);

View File

@@ -1,23 +0,0 @@
general:
branches:
ignore:
- /^ultron\/.*/ # Ignore ultron/* branches
machine:
node:
version: 6.13.1
services:
- redis
environment:
CXX: g++-4.9
dependencies:
override:
- rm -rf node_modules
- npm install
test:
override:
- npm run --silent lint_md
- npm run --silent lint
- npm test
- npm run ft_test

View File

@@ -1,21 +0,0 @@
{
"port": 8100,
"workers": 10,
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"redis": {
"host": "127.0.0.1",
"port": 6379
},
"vaultd": {
"host": "127.0.0.1",
"port": 8500
},
"expireMetrics": false,
"expireMetricsTTL": 0
}

View File

@@ -1,75 +0,0 @@
version: '3.8'
x-models:
warp10: &warp10
build:
context: .
dockerfile: ./images/warp10/Dockerfile
volumes: [ $PWD/warpscript:/usr/local/share/warpscript ]
warp10_env: &warp10_env
ENABLE_WARPSTUDIO: 'true'
ENABLE_SENSISION: 'true'
warpscript.repository.refresh: 1000
warpscript.maxops: 1000000000
warpscript.maxops.hard: 1000000000
warpscript.maxfetch: 1000000000
warpscript.maxfetch.hard: 1000000000
warpscript.extension.debug: io.warp10.script.ext.debug.DebugWarpScriptExtension
warpscript.maxrecursion: 1000
warpscript.repository.directory: /usr/local/share/warpscript
warpscript.extension.logEvent: io.warp10.script.ext.logging.LoggingWarpScriptExtension
redis: &redis
build:
context: .
dockerfile: ./images/redis/Dockerfile
services:
redis-0:
image: redis:7.2.4
command: redis-server --port 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports:
- 6379:6379
environment:
- HOST_IP="${EXTERNAL_HOST}"
redis-1:
image: redis:7.2.4
command: redis-server --port 6380 --slaveof "${EXTERNAL_HOST}" 6379 --slave-announce-ip "${EXTERNAL_HOST}"
ports:
- 6380:6380
environment:
- HOST_IP="${EXTERNAL_HOST}"
redis-sentinel-0:
image: redis:7.2.4
command: |-
bash -c 'cat > /tmp/sentinel.conf <<EOF
port 16379
logfile ""
dir /tmp
sentinel announce-ip ${EXTERNAL_HOST}
sentinel announce-port 16379
sentinel monitor scality-s3 "${EXTERNAL_HOST}" 6379 1
EOF
redis-sentinel /tmp/sentinel.conf'
environment:
- HOST_IP="${EXTERNAL_HOST}"
ports:
- 16379:16379
warp10:
<< : *warp10
environment:
<< : *warp10_env
ports:
- 4802:4802
- 8081:8081
- 9718:9718
volumes:
- /tmp/warp10:/data
- '${PWD}/warpscript:/usr/local/share/warpscript'

View File

@@ -1,47 +0,0 @@
#!/bin/bash
# set -e stops the execution of a script if a command or pipeline has an error
set -e
# modifying config.json
JQ_FILTERS_CONFIG="."
if [[ "$LOG_LEVEL" ]]; then
if [[ "$LOG_LEVEL" == "info" || "$LOG_LEVEL" == "debug" || "$LOG_LEVEL" == "trace" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .log.logLevel=\"$LOG_LEVEL\""
echo "Log level has been modified to $LOG_LEVEL"
else
echo "The log level you provided is incorrect (info/debug/trace)"
fi
fi
if [[ "$WORKERS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workers=\"$WORKERS\""
fi
if [[ "$REDIS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.host=\"$REDIS_HOST\""
fi
if [[ "$REDIS_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=\"$REDIS_PORT\""
fi
if [[ "$VAULTD_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.host=\"$VAULTD_HOST\""
fi
if [[ "$VAULTD_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.port=\"$VAULTD_PORT\""
fi
if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi
if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json
fi
exec "$@"

View File

@ -1,42 +0,0 @@
# Utapi Release Plan
## Docker Image Generation
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
Utapi has one namespace there:
* Namespace: ghcr.io/scality/utapi
With every CI build, the CI pushes images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
the build chain, and so on.
Tagged versions of utapi will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull ghcr.io/scality/utapi:<commit hash>
docker pull ghcr.io/scality/utapi:<tag>
```
## Release Process
To release a production image:
* Name the tag for the repository and Docker image.
* Use the `yarn version` command with the same tag to update `package.json`.
* Create a PR and merge the `package.json` change.
* Tag the repository using the same tag.
* [Force a build] using:
* A given branch that ideally matches the tag.
* The `release` stage.
* An extra property with the name `tag` and its value being the actual tag.
[Force a build]:
https://eve.devsca.com/github/scality/utapi/#/builders/bootstrap/force/force

View File

@ -1,60 +0,0 @@
package main
import (
"fmt"
"time"
"bytes"
"encoding/json"
"net/http"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/signer/v4"
)
func main() {
// Input AWS access key, secret key, and session token.
aws_access_key_id := "EO4FRH6BA2L7FCK0EKVT"
aws_secret_access_key := "B5QSoChhLVwKzZG1w2fXO0FE2tMg4imAIiV47YWX"
token := ""
bucket_name := "test-bucket"
// Get the start and end times for a range of one month.
start_time := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
end_time := time.Date(2016, 2, 1, 0, 0, 0, 0, time.UTC)
// Get the UNIX epoch timestamps expressed in milliseconds.
start_timestamp := start_time.UnixNano() / int64(time.Millisecond)
end_timestamp := ((end_time.UnixNano() / int64(time.Millisecond)) -
((end_time.UnixNano() / int64(time.Millisecond)) % 900000)) - 1
type BucketMetricRequest struct {
Buckets []string `json:"buckets"`
TimeRange [2]int64 `json:"timeRange"`
}
bucketMetricRequest := BucketMetricRequest{
Buckets: []string{bucket_name},
TimeRange: [2]int64{start_timestamp, end_timestamp},
}
buf := bytes.NewBuffer([]byte{})
enc := json.NewEncoder(buf)
enc.Encode(&bucketMetricRequest)
request, err := http.NewRequest("POST",
fmt.Sprintf("%s/buckets?Action=ListMetrics", "http://localhost:8100"),
buf)
if err != nil {
panic(err)
}
reader := bytes.NewReader(buf.Bytes())
credentials := credentials.NewStaticCredentials(aws_access_key_id,
aws_secret_access_key, token)
signer := v4.NewSigner(credentials)
signer.Sign(request, reader, "s3", "us-east-1", time.Now())
client := &http.Client{}
resp, err := client.Do(request)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Println(string(body))
}

View File

@ -1,35 +0,0 @@
const http = require('http');
// eslint-disable-next-line import/no-extraneous-dependencies
const aws4 = require('aws4');
// Input AWS access key, secret key, and session token.
const accessKeyId = 'EO4FRH6BA2L7FCK0EKVT';
const secretAccessKey = 'B5QSoChhLVwKzZG1w2fXO0FE2tMg4imAIiV47YWX';
const token = '';
const bucketName = 'test-bucket';
// Get the start and end times for a range of one month.
const startTime = new Date(2016, 1, 1, 0, 0, 0, 0).getTime();
const endTime = new Date(2016, 2, 1, 0, 0, 0, 0).getTime() - 1;
const requestBody = JSON.stringify({
buckets: [bucketName],
timeRange: [startTime, endTime],
});
const header = {
host: 'localhost',
port: 8100,
method: 'POST',
service: 's3',
path: '/buckets?Action=ListMetrics',
signQuery: false,
body: requestBody,
};
const credentials = { accessKeyId, secretAccessKey, token };
const options = aws4.sign(header, credentials);
const request = http.request(options, response => {
const body = [];
response.on('data', chunk => body.push(chunk));
response.on('end', () => process.stdout.write(`${body.join('')}\n`));
});
request.on('error', e => process.stdout.write(`error: ${e.message}\n`));
request.write(requestBody);
request.end();

View File

@ -1,90 +0,0 @@
import sys, os, base64, datetime, hashlib, hmac, calendar, json
import requests # pip install requests
access_key = '9EQTVVVCLSSG6QBMNKO5'
secret_key = 'T5mK/skkkwJ/mTjXZnHyZ5UzgGIN=k9nl4dyTmDH'
method = 'POST'
service = 's3'
host = 'localhost:8100'
region = 'us-east-1'
canonical_uri = '/buckets'
canonical_querystring = 'Action=ListMetrics&Version=20160815'
content_type = 'application/x-amz-json-1.0'
algorithm = 'AWS4-HMAC-SHA256'
t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def getSignatureKey(key, date_stamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
def get_start_time(t):
start = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
return calendar.timegm(start.utctimetuple()) * 1000;
def get_end_time(t):
end = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
return calendar.timegm(end.utctimetuple()) * 1000 - 1;
start_time = get_start_time(datetime.datetime(2016, 1, 1, 0, 0, 0, 0))
end_time = get_end_time(datetime.datetime(2016, 2, 1, 0, 0, 0, 0))
# Request parameters for listing Utapi bucket metrics--passed in a JSON block.
bucketListing = {
'buckets': [ 'utapi-test' ],
'timeRange': [ start_time, end_time ],
}
request_parameters = json.dumps(bucketListing)
payload_hash = hashlib.sha256(request_parameters.encode('utf-8')).hexdigest()
canonical_headers = \
'content-type:{0}\nhost:{1}\nx-amz-content-sha256:{2}\nx-amz-date:{3}\n' \
.format(content_type, host, payload_hash, amz_date)
signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
.format(method, canonical_uri, canonical_querystring, canonical_headers,
signed_headers, payload_hash)
credential_scope = '{0}/{1}/{2}/aws4_request' \
.format(date_stamp, region, service)
string_to_sign = '{0}\n{1}\n{2}\n{3}' \
.format(algorithm, amz_date, credential_scope,
hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())
signing_key = getSignatureKey(secret_key, date_stamp, region, service)
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
hashlib.sha256).hexdigest()
authorization_header = \
'{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
.format(algorithm, access_key, credential_scope, signed_headers, signature)
# The 'host' header is added automatically by the Python 'requests' library.
headers = {
'Content-Type': content_type,
'X-Amz-Content-Sha256': payload_hash,
'X-Amz-Date': amz_date,
'Authorization': authorization_header
}
endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring;
r = requests.post(endpoint, data=request_parameters, headers=headers)
print (r.text)

View File

@ -1,157 +0,0 @@
const ListMetrics = require('../lib/ListMetrics');
function listRecentMetrics(utapiRequest, metric, service, cb) {
const log = utapiRequest.getLog();
log.debug('handling list recent metrics request', {
method: 'listRecentMetrics',
});
const Metric = new ListMetrics(metric, service);
return Metric.getRecentTypesMetrics(utapiRequest, cb);
}
/**
* @class BucketsHandler
* Handles Buckets resource actions
*/
class BucketsHandler {
/**
* List metrics for the given list of buckets
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listMetrics(utapiRequest, service, cb) {
const log = utapiRequest.getLog();
log.debug('handling list metrics request', {
method: 'BucketsHandler.listMetrics',
});
const Buckets = new ListMetrics('buckets', service);
return Buckets.getTypesMetrics(utapiRequest, cb);
}
/**
* List metrics starting from the second most recent fifteen minute timestamp
* for the given list of buckets
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listRecentMetrics(utapiRequest, service, cb) {
return listRecentMetrics(utapiRequest, 'buckets', service, cb);
}
}
/**
* @class AccountsHandler
* Handles Accounts resource actions
*/
class AccountsHandler {
/**
* List metrics for the given list of accounts
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listMetrics(utapiRequest, service, cb) {
const log = utapiRequest.getLog();
log.debug('handling list metrics request', {
method: 'AccountsHandler.listMetrics',
});
const Accounts = new ListMetrics('accounts', service);
return Accounts.getTypesMetrics(utapiRequest, cb);
}
/**
* List metrics starting from the second most recent fifteen minute timestamp
* for the given list of accounts
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listRecentMetrics(utapiRequest, service, cb) {
return listRecentMetrics(utapiRequest, 'accounts', service, cb);
}
}
/**
* @class ServiceHandler
* Handles Services resource actions
*/
class ServiceHandler {
/**
* List metrics for the given list of services
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listMetrics(utapiRequest, service, cb) {
const log = utapiRequest.getLog();
log.debug('handling list metrics request', {
method: 'ServiceHandler.listMetrics',
});
const Service = new ListMetrics('service', service);
return Service.getTypesMetrics(utapiRequest, cb);
}
/**
* List metrics starting from the second most recent fifteen minute timestamp
* for the given list of services
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listRecentMetrics(utapiRequest, service, cb) {
return listRecentMetrics(utapiRequest, 'service', service, cb);
}
}
/**
* @class UsersHandler
* Handles Users resource actions
*/
class UsersHandler {
/**
* List metrics for the given list of users
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listMetrics(utapiRequest, service, cb) {
const log = utapiRequest.getLog();
log.debug('handling list metrics request', {
method: 'UsersHandler.listMetrics',
});
const Users = new ListMetrics('users', service);
return Users.getTypesMetrics(utapiRequest, cb);
}
/**
* List metrics starting from the second most recent fifteen minute timestamp
* for the given list of users
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {string} service - the service component (e.g., 's3')
* @param {callback} cb - callback
* @return {undefined}
*/
static listRecentMetrics(utapiRequest, service, cb) {
return listRecentMetrics(utapiRequest, 'users', service, cb);
}
}
module.exports = {
BucketsHandler,
AccountsHandler,
ServiceHandler,
UsersHandler,
};
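
These handlers are thin wrappers around `ListMetrics`; below is a minimal sketch of calling one from a route. The require path and the fully populated `utapiRequest` are assumptions for illustration, not part of this file.

```js
// Hypothetical route-level usage. `utapiRequest` is assumed to already carry
// a validator, datastore, vault client, and logger (see the UtapiRequest class later in this diff).
const { BucketsHandler } = require('./router/handlers'); // assumed relative path

function handleListBucketMetrics(utapiRequest, cb) {
    return BucketsHandler.listMetrics(utapiRequest, 's3', (err, metrics) => {
        if (err) {
            return cb(err);
        }
        // `metrics` is one response object per requested bucket.
        return cb(null, metrics);
    });
}
```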

View File

@ -1,20 +0,0 @@
FROM ghcr.io/scality/federation/nodesvc-base:7.10.5.0
ENV UTAPI_CONFIG_FILE=${CONF_DIR}/config.json
WORKDIR ${HOME_DIR}/utapi
COPY ./package.json ./yarn.lock ${HOME_DIR}/utapi
# Remove when gitcache is sorted out
RUN rm /root/.gitconfig
RUN yarn install --production --frozen-lockfile --network-concurrency 1
COPY . ${HOME_DIR}/utapi
RUN chown -R ${USER} ${HOME_DIR}/utapi
USER ${USER}
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/${SUPERVISORD_CONF}"

View File

@ -1,17 +0,0 @@
FROM redis:alpine
ENV S6_VERSION 2.0.0.1
ENV EXPORTER_VERSION 1.24.0
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_VERSION}/s6-overlay-amd64.tar.gz -O /tmp/s6-overlay-amd64.tar.gz \
&& tar xzf /tmp/s6-overlay-amd64.tar.gz -C / \
&& rm -rf /tmp/s6-overlay-amd64.tar.gz
RUN wget https://github.com/oliver006/redis_exporter/releases/download/v${EXPORTER_VERSION}/redis_exporter-v${EXPORTER_VERSION}.linux-amd64.tar.gz -O redis_exporter.tar.gz \
&& tar xzf redis_exporter.tar.gz -C / \
&& cd .. \
&& mv /redis_exporter-v${EXPORTER_VERSION}.linux-amd64/redis_exporter /usr/local/bin/redis_exporter
ADD ./images/redis/s6 /etc
CMD /init

View File

@ -1,4 +0,0 @@
#!/usr/bin/with-contenv sh
echo "starting redis exporter"
exec redis_exporter

View File

@ -1,4 +0,0 @@
#!/usr/bin/with-contenv sh
echo "starting redis"
exec redis-server

View File

@ -1,2 +0,0 @@
standalone.host = 0.0.0.0
standalone.port = 4802

View File

@ -1,56 +0,0 @@
FROM golang:1.14-alpine as builder
ENV WARP10_EXPORTER_VERSION 2.7.5
RUN apk add zip unzip build-base \
&& wget -q -O exporter.zip https://github.com/centreon/warp10-sensision-exporter/archive/refs/heads/master.zip \
&& unzip exporter.zip \
&& cd warp10-sensision-exporter-master \
&& go mod download \
&& cd tools \
&& go run generate_sensision_metrics.go ${WARP10_EXPORTER_VERSION} \
&& cp sensision.go ../collector/ \
&& cd .. \
&& go build -a -o /usr/local/go/warp10_sensision_exporter
FROM ghcr.io/scality/utapi/warp10:2.8.1-95-g73e7de80
# Override baked in version
# Remove when updating to a numbered release
ENV WARP10_VERSION 2.8.1-95-g73e7de80
ENV S6_VERSION 2.0.0.1
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
ENV WARP10_CONF_TEMPLATES ${WARP10_HOME}/conf.templates/standalone
ENV SENSISION_DATA_DIR /data/sensision
ENV SENSISION_PORT 8082
# Modify Warp 10 default config
ENV standalone.home /opt/warp10
ENV warpscript.repository.directory /usr/local/share/warpscript
ENV warp.token.file /static.tokens
ENV warpscript.extension.protobuf io.warp10.ext.protobuf.ProtobufWarpScriptExtension
ENV warpscript.extension.macrovalueencoder 'io.warp10.continuum.ingress.MacroValueEncoder$Extension'
# ENV warpscript.extension.debug io.warp10.script.ext.debug.DebugWarpScriptExtension
RUN wget https://github.com/just-containers/s6-overlay/releases/download/v${S6_VERSION}/s6-overlay-amd64.tar.gz -O /tmp/s6-overlay-amd64.tar.gz \
&& tar xzf /tmp/s6-overlay-amd64.tar.gz -C / \
&& rm -rf /tmp/s6-overlay-amd64.tar.gz
# Install jmx exporter
ADD https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar /opt/jmx_prom_agent.jar
ADD ./images/warp10/jmx_prom.yaml /opt/jmx_prom.yaml
# Install protobuf extension
ADD ./images/warp10/warp10-ext-protobuf-1.2.2-uberjar.jar /opt/warp10/lib/
# Install Sensision exporter
COPY --from=builder /usr/local/go/warp10_sensision_exporter /usr/local/bin/warp10_sensision_exporter
ADD ./images/warp10/s6 /etc
ADD ./warpscript /usr/local/share/warpscript
ADD ./images/warp10/static.tokens /
ADD ./images/warp10/90-default-host-port.conf $WARP10_CONF_TEMPLATES/90-default-host-port.conf
CMD /init

View File

@ -1,2 +0,0 @@
---
startDelaySeconds: 0

View File

@ -1,22 +0,0 @@
#!/usr/bin/with-contenv bash
set -eu
ensureDir() {
if [ ! -d "$1" ]; then
mkdir -p "$1"
echo "Created directory $1"
fi
}
ensureDir "$WARP10_DATA_DIR"
ensureDir "$WARP10_DATA_DIR/logs"
ensureDir "$WARP10_DATA_DIR/conf"
ensureDir "$WARP10_DATA_DIR/data/leveldb"
ensureDir "$WARP10_DATA_DIR/data/datalog"
ensureDir "$WARP10_DATA_DIR/data/datalog_done"
ensureDir "$SENSISION_DATA_DIR"
ensureDir "$SENSISION_DATA_DIR/logs"
ensureDir "$SENSISION_DATA_DIR/conf"
ensureDir "/var/run/sensision"

View File

@ -1,14 +0,0 @@
#!/usr/bin/with-contenv bash
set -eu
WARP10_JAR=${WARP10_HOME}/bin/warp10-${WARP10_VERSION}.jar
WARP10_CONFIG_DIR="$WARP10_DATA_DIR/conf"
WARP10_SECRETS="$WARP10_CONFIG_DIR/00-secrets.conf"
if [ ! -f "$WARP10_SECRETS" ]; then
cp "$WARP10_CONF_TEMPLATES/00-secrets.conf.template" "$WARP10_SECRETS"
/usr/bin/java -cp ${WARP10_JAR} -Dfile.encoding=UTF-8 io.warp10.GenerateCryptoKey ${WARP10_SECRETS}
echo "warp10.manager.secret = scality" >> $WARP10_SECRETS
fi

View File

@ -1,14 +0,0 @@
#!/usr/bin/with-contenv sh
echo "Installing warp 10 config"
for path in $WARP10_CONF_TEMPLATES/*; do
name="$(basename $path .template)"
if [ ! -f "$WARP10_DATA_DIR/conf/$name" ]; then
cp "$path" "$WARP10_DATA_DIR/conf/$name"
echo "Copied $name to $WARP10_DATA_DIR/conf/$name"
fi
done
echo "Installing sensision config"
cp ${SENSISION_HOME}/templates/sensision.template ${SENSISION_DATA_DIR}/conf/sensision.conf
cp ${SENSISION_HOME}/templates/log4j.properties.template ${SENSISION_DATA_DIR}/conf/log4j.properties

View File

@ -1,23 +0,0 @@
#!/usr/bin/with-contenv sh
WARP10_CONFIG_DIR="$WARP10_DATA_DIR/conf"
ensure_link() {
if [ ! -L "$1" ]; then
rm -rf "$1"
ln -s "$2" "$1"
echo "Created symlink $1->$2"
fi
}
ensure_link "$WARP10_HOME/logs" "$WARP10_DATA_DIR/logs"
ensure_link "$WARP10_HOME/etc/conf.d" "$WARP10_DATA_DIR/conf"
ensure_link "$WARP10_HOME/leveldb" "$WARP10_DATA_DIR/data/leveldb"
ensure_link "$WARP10_HOME/datalog" "$WARP10_DATA_DIR/data/datalog"
ensure_link "$WARP10_HOME/datalog_done" "$WARP10_DATA_DIR/data/datalog_done"
ensure_link "$SENSISION_HOME/etc" "${SENSISION_DATA_DIR}/conf"
ensure_link "$SENSISION_HOME/logs" "${SENSISION_DATA_DIR}/logs"
ensure_link /var/run/sensision/metrics ${SENSISION_HOME}/metrics
ensure_link /var/run/sensision/targets ${SENSISION_HOME}/targets
ensure_link /var/run/sensision/queued ${SENSISION_HOME}/queued

View File

@ -1,14 +0,0 @@
#!/usr/bin/with-contenv sh
JAVA="/usr/bin/java"
WARP10_JAR=${WARP10_HOME}/bin/warp10-${WARP10_VERSION}.jar
WARP10_CP="${WARP10_HOME}/etc:${WARP10_JAR}:${WARP10_HOME}/lib/*"
WARP10_INIT="io.warp10.standalone.WarpInit"
LEVELDB_HOME="/opt/warp10/leveldb"
# Create leveldb database
if [ "$(find -L ${LEVELDB_HOME} -maxdepth 1 -type f | wc -l)" -eq 0 ]; then
echo "Init leveldb database..." | tee -a "$WARP10_HOME/logs/warp10.log"
$JAVA -cp "$WARP10_CP" "$WARP10_INIT" "$LEVELDB_HOME" | tee -a "$WARP10_HOME/logs/warp10.log" 2>&1
fi

View File

@ -1,11 +0,0 @@
#!/usr/bin/with-contenv sh
WARPSTUDIO_CONFIG=${WARP10_CONFIG_DIR}/80-warpstudio-plugin.conf
if [ -n "$ENABLE_WARPSTUDIO" ]; then
cat > $WARPSTUDIO_CONFIG << EOF
warp10.plugin.warpstudio = io.warp10.plugins.warpstudio.WarpStudioPlugin
warpstudio.port = 8081
warpstudio.host = \${standalone.host}
EOF
fi

View File

@ -1,9 +0,0 @@
#!/usr/bin/with-contenv sh
chmod 1733 "$SENSISION_HOME/metrics"
chmod 1733 "$SENSISION_HOME/targets"
chmod 700 "$SENSISION_HOME/queued"
sed -i 's/@warp:WriteToken@/'"writeTokenStatic"'/' $SENSISION_DATA_DIR/conf/sensision.conf
sed -i -e "s_^sensision\.home.*_sensision\.home = ${SENSISION_HOME}_" $SENSISION_DATA_DIR/conf/sensision.conf
sed -i -e 's_^sensision\.qf\.url\.default.*_sensision\.qf\.url\.default=http://127.0.0.1:4802/api/v0/update_' $SENSISION_DATA_DIR/conf/sensision.conf

View File

@ -1,12 +0,0 @@
#!/usr/bin/with-contenv sh
EXPORTER_CMD="warp10_sensision_exporter --warp10.url=http://localhost:${SENSISION_PORT}/metrics"
if [ -f "/usr/local/bin/warp10_sensision_exporter" -a -n "$ENABLE_SENSISION" ]; then
echo "Starting Sensision exporter with $EXPORTER_CMD ..."
exec $EXPORTER_CMD
else
echo "Sensision is disabled. Not starting exporter."
# wait indefinitely
exec tail -f /dev/null
fi

View File

@ -1,25 +0,0 @@
#!/usr/bin/with-contenv sh
JAVA="/usr/bin/java"
JAVA_OPTS=""
SENSISION_CONFIG=${SENSISION_DATA_DIR}/conf/sensision.conf
SENSISION_JAR=${SENSISION_HOME}/bin/sensision-${SENSISION_VERSION}.jar
SENSISION_CP=${SENSISION_HOME}/etc:${SENSISION_JAR}
SENSISION_CLASS=io.warp10.sensision.Main
export MALLOC_ARENA_MAX=1
if [ -z "$SENSISION_HEAP" ]; then
SENSISION_HEAP=64m
fi
SENSISION_CMD="${JAVA} ${JAVA_OPTS} -Xmx${SENSISION_HEAP} -Dsensision.server.port=${SENSISION_PORT} ${SENSISION_OPTS} -Dsensision.config=${SENSISION_CONFIG} -cp ${SENSISION_CP} ${SENSISION_CLASS}"
if [ -n "$ENABLE_SENSISION" ]; then
echo "Starting Sensision with $SENSISION_CMD ..."
exec $SENSISION_CMD | tee -a ${SENSISION_HOME}/logs/sensision.log
else
echo "Sensision is disabled. Not starting."
# wait indefinitely
exec tail -f /dev/null
fi

View File

@ -1,43 +0,0 @@
#!/usr/bin/with-contenv sh
export SENSISIONID=warp10
export MALLOC_ARENA_MAX=1
JAVA="/usr/bin/java"
WARP10_JAR=${WARP10_HOME}/bin/warp10-${WARP10_VERSION}.jar
WARP10_CLASS=io.warp10.standalone.Warp
WARP10_CP="${WARP10_HOME}/etc:${WARP10_JAR}:${WARP10_HOME}/lib/*"
WARP10_CONFIG_DIR="$WARP10_DATA_DIR/conf"
CONFIG_FILES="$(find ${WARP10_CONFIG_DIR} -not -path '*/.*' -name '*.conf' | sort | tr '\n' ' ' 2> /dev/null)"
LOG4J_CONF=${WARP10_HOME}/etc/log4j.properties
if [ -z "$WARP10_HEAP" ]; then
WARP10_HEAP=1g
fi
if [ -z "$WARP10_HEAP_MAX" ]; then
WARP10_HEAP_MAX=4g
fi
JAVA_OPTS="-Dlog4j.configuration=file:${LOG4J_CONF} ${JAVA__EXTRA_OPTS} -Djava.awt.headless=true -Xms${WARP10_HEAP} -Xmx${WARP10_HEAP_MAX} -XX:+UseG1GC"
SENSISION_OPTS=
if [ -n "$ENABLE_SENSISION" ]; then
_SENSISION_LABELS=
# Expects a comma-separated list of key=value pairs, e.g. key=value,foo=bar
if [ -n "$SENSISION_LABELS" ]; then
_SENSISION_LABELS="-Dsensision.default.labels=$SENSISION_LABELS"
fi
SENSISION_OPTS="${_SENSISION_LABELS} -Dsensision.events.dir=/var/run/sensision/metrics -Dfile.encoding=UTF-8 ${SENSISION_EXTRA_OPTS}"
fi
JMX_EXPORTER_OPTS=
if [ -n "$ENABLE_JMX_EXPORTER" ]; then
JMX_EXPORTER_OPTS="-javaagent:/opt/jmx_prom_agent.jar=4803:/opt/jmx_prom.yaml ${JMX_EXPORTER_EXTRA_OPTS}"
echo "Starting jmx exporter with Warp 10."
fi
WARP10_CMD="${JAVA} ${JMX_EXPORTER_OPTS} ${JAVA_OPTS} ${SENSISION_OPTS} -cp ${WARP10_CP} ${WARP10_CLASS} ${CONFIG_FILES}"
echo "Starting Warp 10 with $WARP10_CMD ..."
exec $WARP10_CMD | tee -a ${WARP10_HOME}/logs/warp10.log

View File

@ -1,9 +0,0 @@
token.write.0.name=writeTokenStatic
token.write.0.producer=42424242-4242-4242-4242-424242424242
token.write.0.owner=42424242-4242-4242-4242-424242424242
token.write.0.app=utapi
token.read.0.name=readTokenStatic
token.read.0.owner=42424242-4242-4242-4242-424242424242
token.read.0.app=utapi

View File

@ -1,22 +0,0 @@
/* eslint-disable global-require */
// eslint-disable-line strict
let toExport;
if (process.env.ENABLE_UTAPI_V2) {
toExport = {
utapiVersion: 2,
startUtapiServer: require('./libV2/server').startServer,
UtapiClient: require('./libV2/client'),
tasks: require('./libV2/tasks'),
};
} else {
toExport = {
utapiVersion: 1,
UtapiServer: require('./lib/server'),
UtapiClient: require('./lib/UtapiClient'),
UtapiReplay: require('./lib/UtapiReplay'),
UtapiReindex: require('./lib/UtapiReindex'),
};
}
module.exports = toExport;
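
A minimal consumer-side sketch of the version switch above; the package name `utapi` and the absence of constructor arguments are assumptions for illustration, not taken from this file.

```js
// Hypothetical consumer that works against either export shape.
const utapi = require('utapi'); // assumed package name

if (utapi.utapiVersion === 2) {
    // v2 exposes an async server entrypoint and task classes.
    utapi.startUtapiServer()
        .then(() => console.log('utapi v2 server started'))
        .catch(err => console.error('failed to start utapi v2', err));
} else {
    // v1 exposes the server/replay/reindex classes directly.
    const replay = new utapi.UtapiReplay(); // no config => replay stays disabled
    replay.start();
}
```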

View File

@ -1,169 +0,0 @@
/* eslint-disable no-bitwise */
const assert = require('assert');
const fs = require('fs');
/**
* Reads from a config file and returns the content as a config object
*/
class Config {
constructor(config) {
this.component = config.component;
this.port = 9500;
if (config.port !== undefined) {
assert(Number.isInteger(config.port) && config.port > 0,
'bad config: port must be a positive integer');
this.port = config.port;
}
this.workers = 10;
if (config.workers !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0,
'bad config: workers must be a positive integer');
this.workers = config.workers;
}
this.log = { logLevel: 'debug', dumpLevel: 'error' };
if (config.log !== undefined) {
if (config.log.logLevel !== undefined) {
assert(typeof config.log.logLevel === 'string',
'bad config: log.logLevel must be a string');
this.log.logLevel = config.log.logLevel;
}
if (config.log.dumpLevel !== undefined) {
assert(typeof config.log.dumpLevel === 'string',
'bad config: log.dumpLevel must be a string');
this.log.dumpLevel = config.log.dumpLevel;
}
}
this.healthChecks = { allowFrom: ['127.0.0.1/8', '::1'] };
if (config.healthChecks && config.healthChecks.allowFrom) {
assert(Array.isArray(config.healthChecks.allowFrom),
'config: invalid healthcheck configuration. allowFrom must '
+ 'be an array');
config.healthChecks.allowFrom.forEach(item => {
assert(typeof item === 'string',
'config: invalid healthcheck configuration. allowFrom IP '
+ 'address must be a string');
});
// append the configured entries to the defaults
this.healthChecks.allowFrom = this.healthChecks.allowFrom.concat(
config.healthChecks.allowFrom,
);
}
// default to standalone configuration
this.redis = { host: '127.0.0.1', port: 6379 };
if (config.redis) {
if (config.redis.sentinels) {
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels),
'bad config: sentinels must be an array');
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: sentinel host must be a string');
assert(typeof port === 'number',
'bad config: sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
if (config.redis.sentinelPassword !== undefined) {
assert(typeof config.redis.sentinelPassword === 'string',
'bad config: redis.sentinelPassword must be a string');
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {
// check for standalone configuration
assert(typeof config.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
}
if (config.redis.password !== undefined) {
assert(typeof config.redis.password === 'string',
'bad config: redis.password must be a string');
this.redis.password = config.redis.password;
}
}
if (config.vaultclient) {
// Instance passed from outside
this.vaultclient = config.vaultclient;
this.vaultd = null;
} else {
// Connection data
this.vaultclient = null;
this.vaultd = {};
if (config.vaultd) {
if (config.vaultd.port !== undefined) {
assert(Number.isInteger(config.vaultd.port)
&& config.vaultd.port > 0,
'bad config: vaultd port must be a positive integer');
this.vaultd.port = config.vaultd.port;
}
if (config.vaultd.host !== undefined) {
assert.strictEqual(typeof config.vaultd.host, 'string',
'bad config: vaultd host must be a string');
this.vaultd.host = config.vaultd.host;
}
}
}
if (config.certFilePaths) {
assert(typeof config.certFilePaths === 'object'
&& typeof config.certFilePaths.key === 'string'
&& typeof config.certFilePaths.cert === 'string' && ((
config.certFilePaths.ca
&& typeof config.certFilePaths.ca === 'string')
|| !config.certFilePaths.ca));
}
const { key, cert, ca } = config.certFilePaths
? config.certFilePaths : {};
if (key && cert) {
const keypath = key;
const certpath = cert;
let capath;
if (ca) {
capath = ca;
assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`);
}
assert.doesNotThrow(() => fs.accessSync(keypath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${keypath}`);
assert.doesNotThrow(() => fs.accessSync(certpath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${certpath}`);
this.https = {
cert: fs.readFileSync(certpath, 'ascii'),
key: fs.readFileSync(keypath, 'ascii'),
ca: ca ? fs.readFileSync(capath, 'ascii') : undefined,
};
this.httpsPath = {
ca: capath,
cert: certpath,
};
} else if (key || cert) {
throw new Error('bad config: both certFilePaths.key and '
+ 'certFilePaths.cert must be defined');
}
if (config.expireMetrics !== undefined) {
assert(typeof config.expireMetrics === 'boolean', 'bad config: '
+ 'expireMetrics must be a boolean');
this.expireMetrics = config.expireMetrics;
}
if (config.onlyCountLatestWhenObjectLocked !== undefined) {
assert(typeof config.onlyCountLatestWhenObjectLocked === 'boolean',
'bad config: onlyCountLatestWhenObjectLocked must be a boolean');
this.onlyCountLatestWhenObjectLocked = config.onlyCountLatestWhenObjectLocked;
}
}
}
module.exports = Config;
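
A minimal sketch of feeding the `config.json` shown earlier through this class; the file path, the `component` value, and the logging call are illustrative assumptions.

```js
// Hypothetical bootstrap: read config.json and let Config validate it.
const fs = require('fs');
const Config = require('./lib/Config'); // assumed relative path

const raw = JSON.parse(fs.readFileSync('./config.json', 'utf8'));
const config = new Config(Object.assign({ component: 's3' }, raw));

// Fields that are absent fall back to the defaults set above,
// e.g. port 9500 and a standalone Redis at 127.0.0.1:6379.
console.log(config.port, config.redis, config.healthChecks.allowFrom);
```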

View File

@ -1,297 +0,0 @@
/* Provides methods for operations on a datastore */
class Datastore {
/**
* @constructor
*/
constructor() {
this._client = null;
}
/**
* set client, enables switching between different backends
* @param {object} client - client providing interface to the datastore
* @return {undefined}
*/
setClient(client) {
this._client = client;
return this;
}
/**
* retrieve client object containing backend interfaces
* @return {object} client - client providing interface to the datastore
*/
getClient() {
return this._client;
}
/**
* set key to hold the string value
* @param {string} key - key holding the value
* @param {string} value - value containing the data
* @param {callback} cb - callback
* @return {undefined}
*/
set(key, value, cb) {
return this._client.call(
(backend, done) => backend.set(key, value, done),
cb,
);
}
/**
* Set a lock key, if it does not already exist, with an expiration
* @param {string} key - key to set with an expiration
* @param {string} value - value containing the data
* @param {number} ttl - expiration time in seconds
* @return {undefined}
*/
setExpire(key, value, ttl) {
// This method returns a Promise because no callback is given.
return this._client.call(backend => backend.set(key, value, 'EX', ttl, 'NX'));
}
/**
* delete a key
* @param {string} key - key to delete
* @return {undefined}
*/
del(key) {
// This method returns a Promise because no callback is given.
return this._client.call(backend => backend.del(key));
}
/**
* get value from a key
* @param {string} key - key holding the value
* @param {callback} cb - callback
* @return {undefined}
*/
get(key, cb) {
return this._client.call((backend, done) => backend.get(key, done), cb);
}
/**
* increment value of a key by 1
* @param {string} key - key holding the value
* @param {callback} cb - callback
* @return {undefined}
*/
incr(key, cb) {
return this._client.call((backend, done) => backend.incr(key, done), cb);
}
/**
* increment value of a key by the provided value
* @param {string} key - key holding the value
* @param {string} value - value containing the data
* @param {callback} cb - callback
* @return {undefined}
*/
incrby(key, value, cb) {
return this._client.call((backend, done) => backend.incrby(key, value, done), cb);
}
/**
* decrement value of a key by 1
* @param {string} key - key holding the value
* @param {callback} cb - callback
* @return {undefined}
*/
decr(key, cb) {
return this._client.call((backend, done) => backend.decr(key, done), cb);
}
/**
* decrement value of a key by the provided value
* @param {string} key - key holding the value
* @param {string} value - value containing the data
* @param {callback} cb - callback
* @return {undefined}
*/
decrby(key, value, cb) {
return this._client.call((backend, done) => backend.decrby(key, value, done), cb);
}
/**
* set value of a key in a sorted set with a score
* @param {string} key - key holding the value
* @param {number} score - integer score for the key in the sorted set
* @param {string} value - value containing the data
* @param {callback} cb - callback
* @return {undefined}
*/
zadd(key, score, value, cb) {
return this._client.call((backend, done) => backend.zadd(key, score, value, done), cb);
}
/**
* get a list of results containing values whose keys fall within the
* min and max range
* @param {string} key - key holding the value
* @param {number} min - integer for start range (inclusive)
* @param {number} max - integer for end range (inclusive)
* @param {callback} cb - callback
* @return {undefined}
*/
zrange(key, min, max, cb) {
return this._client.call((backend, done) => backend.zrange(key, min, max, done), cb);
}
/**
* get a list of results containing values whose keys fall within the
* min and max scores
* @param {string} key - key holding the value
* @param {number} min - integer score for start range (inclusive)
* @param {number} max - integer score for end range (inclusive)
* @param {callback} cb - callback
* @return {undefined}
*/
zrangebyscore(key, min, max, cb) {
return this._client.call((backend, done) => backend.zrangebyscore(key, min, max, done), cb);
}
/**
* batch get a list of results containing values whose keys fall within the
* min and max scores
* @param {string[]} keys - list of keys
* @param {number} min - integer score for start range (inclusive)
* @param {number} max - integer score for end range (inclusive)
* @param {callback} cb - callback
* @return {undefined}
*/
bZrangebyscore(keys, min, max, cb) {
return this._client.call(
(backend, done) => backend
.pipeline(keys.map(item => ['zrangebyscore', item, min, max]))
.exec(done),
cb,
);
}
/**
* execute a batch of commands as a MULTI/EXEC transaction
* @param {string[]} cmds - list of commands
* @param {callback} cb - callback
* @return {undefined}
*/
batch(cmds, cb) {
return this._client.call((backend, done) => {
backend.multi(cmds).exec(done);
}, cb);
}
/**
* execute a batch of commands in a pipeline (no transaction)
* @param {string[]} cmds - list of commands
* @param {callback} cb - callback
* @return {undefined}
*/
pipeline(cmds, cb) {
return this._client.call((backend, done) => backend.pipeline(cmds).exec(done), cb);
}
/**
* execute a list of commands as transaction
* @param {string[]} cmds - list of commands
* @param {callback} cb - callback
* @return {undefined}
*/
multi(cmds, cb) {
return this._client.call((backend, done) =>
backend.multi(cmds).exec((err, res) => {
if (err) {
return done(err);
}
const flattenRes = [];
const resErr = res.filter(item => {
flattenRes.push(item[1]);
return item[0] !== null;
});
if (resErr && resErr.length > 0) {
return done(resErr);
}
return done(null, flattenRes);
}), cb);
}
/**
* remove elements from the sorted set that fall between the min and max
* scores
* @param {string} key - key holding the value
* @param {number} min - integer score for start range (inclusive)
* @param {number} max - integer score for end range (inclusive)
* @param {callback} cb - callback
* @return {undefined}
*/
zremrangebyscore(key, min, max, cb) {
return this._client.call((backend, done) => backend.zremrangebyscore(key, min, max, done), cb);
}
/**
* push a value to the head of the list
* @param {string} key - key for the list
* @param {string} val - value to be pushed onto the list
* @param {callback} cb - callback
* @return {undefined}
*/
lpush(key, val, cb) {
return this._client.call((backend, done) => backend.lpush(key, val, done), cb);
}
/**
* remove and return the last element of the list
* @param {string} key - key for the list
* @param {callback} cb - callback
* @return {undefined}
*/
rpop(key, cb) {
return this._client.call((backend, done) => backend.rpop(key, done), cb);
}
/**
* get a range of elements from the list
* @param {string} key - key for the list
* @param {number} start - start offset in a zero-based index
* @param {number} stop - stop offset in a zero-based index
* @param {callback} cb - callback
* @return {undefined}
*/
lrange(key, start, stop, cb) {
return this._client.call((backend, done) => backend.lrange(key, start, stop, done), cb);
}
/**
* get the length of the list
* @param {string} key - key for the list
* @param {callback} cb - callback
* @return {undefined}
*/
llen(key, cb) {
return this._client.call((backend, done) => backend.llen(key, done), cb);
}
/**
* publish a message on the specified channel
* @param {string} channel - the channel name where the message is published
* @param {string} message - the message to send
* @param {callback} cb - callback
* @return {undefined}
*/
publish(channel, message, cb) {
return this._client.call((backend, done) => backend.publish(channel, message, done), cb);
}
/**
* scan for keys matching the pattern
* @param {string} cursor - cursor for pagination
* @param {string} pattern - pattern to search for
* @param {callback} cb - callback
* @return {undefined}
*/
scan(cursor, pattern, cb) {
return this._client.call((backend, done) => backend.scan(cursor, 'match', pattern, done), cb);
}
}
module.exports = Datastore;
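
A short usage sketch; it assumes the `redisClientv2` helper referenced elsewhere in this repository returns a client exposing the `call(fn, cb)` interface that every method above delegates to.

```js
// Hypothetical wiring of Datastore on top of a Redis client wrapper.
const werelogs = require('werelogs');
const Datastore = require('./lib/Datastore');            // assumed relative path
const redisClientv2 = require('./utils/redisClientv2');  // same helper UtapiReplay uses

const log = new werelogs.Logger('DatastoreExample');
const ds = new Datastore()
    .setClient(redisClientv2({ host: '127.0.0.1', port: 6379 }, log));

// Counter- and sorted-set-style calls mirror the underlying Redis commands.
ds.incrby('s3:example:counter', 5, (err, value) => {
    if (err) {
        return log.error('incrby failed', { error: err });
    }
    return ds.zadd('s3:example:states', Date.now(), String(value), () => {});
});
```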

View File

@ -1,353 +0,0 @@
/* eslint-disable prefer-spread */
/* eslint-disable prefer-destructuring */
/* eslint-disable class-methods-use-this */
/* eslint-disable no-mixed-operators */
const async = require('async');
const { errors } = require('arsenal');
const { getMetricFromKey, getKeys, generateStateKey } = require('./schema');
const s3metricResponseJSON = require('../models/s3metricResponse');
const MAX_RANGE_MS = (((1000 * 60) * 60) * 24) * 30; // One month.
/**
* Provides methods to get metrics of different levels
*/
class ListMetrics {
/**
* Assign the metric property to an instance of this class
* @param {string} metric - The metric type (e.g., 'buckets', 'accounts')
* @param {string} component - The service component (e.g., 's3')
*/
constructor(metric, component) {
this.metric = metric;
this.service = component;
}
/**
* Create the metric object to retrieve data from schema methods
* @param {string} resource - The resource to get metrics for
* @return {object} obj - Object with a key-value pair for a schema method
*/
_getSchemaObject(resource) {
// Include service to generate key for any non-service level metric
const obj = {
level: this.metric,
service: this.service,
};
const schemaKeys = {
buckets: 'bucket',
accounts: 'accountId',
users: 'userId',
service: 'service',
};
obj[schemaKeys[this.metric]] = resource;
return obj;
}
// Create the metric response object for a given metric.
_getMetricResponse(resource, start, end) {
// Use `JSON.parse` to make a deep clone because `Object.assign` would
// only make a shallow copy of property values.
const metricResponse = JSON.parse(JSON.stringify(s3metricResponseJSON));
metricResponse.timeRange = [start, end];
const metricResponseKeys = {
buckets: 'bucketName',
accounts: 'accountId',
users: 'userId',
service: 'serviceName',
};
metricResponse[metricResponseKeys[this.metric]] = resource;
return metricResponse;
}
/**
* Callback for getting metrics for a list of resources
* @callback ListMetrics~ListMetricsCb
* @param {object} err - ArsenalError instance
* @param {object[]} metric - list of objects containing metrics for each
* resource provided in the request
*/
/**
* Get metrics for a list of metric resources
* @param {utapiRequest} utapiRequest - utapiRequest instance
* @param {ListMetrics~ListMetricsCb} cb - callback
* @return {undefined}
*/
getTypesMetrics(utapiRequest, cb) {
const log = utapiRequest.getLog();
const validator = utapiRequest.getValidator();
const resources = validator.get(this.metric);
const timeRange = validator.get('timeRange');
const datastore = utapiRequest.getDatastore();
const vault = utapiRequest.getVault();
// map account ids to canonical ids
if (this.metric === 'accounts') {
return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) {
return cb(err);
}
return async.mapLimit(list.message.body, 5,
(item, next) => this.getMetrics(item.canonicalId, timeRange,
datastore, log, (err, res) => {
if (err) {
return next(err);
}
return next(null, Object.assign({}, res,
{ accountId: item.accountId }));
}),
cb);
});
}
return async.mapLimit(resources, 5, (resource, next) => this.getMetrics(resource, timeRange, datastore, log,
next), cb);
}
/**
* Get metrics starting from the second most recent normalized timestamp
* range (e.g., if it is 6:31 when the request is made, then list metrics
* starting from 6:15).
* @param {utapiRequest} utapiRequest - utapiRequest instance
* @param {ListMetrics~ListMetricsCb} cb - callback
* @return {undefined}
*/
getRecentTypesMetrics(utapiRequest, cb) {
const log = utapiRequest.getLog();
const validator = utapiRequest.getValidator();
const resources = validator.get(this.metric);
const end = Date.now();
const d = new Date(end);
const minutes = d.getMinutes();
const start = d.setMinutes((minutes - minutes % 15), 0, 0);
const fifteenMinutes = 15 * 60 * 1000; // In milliseconds
const timeRange = [start - fifteenMinutes, end];
const datastore = utapiRequest.getDatastore();
const vault = utapiRequest.getVault();
// map account ids to canonical ids
if (this.metric === 'accounts') {
return vault.getCanonicalIds(resources, log, (err, list) => {
if (err) {
return cb(err);
}
return async.mapLimit(list.message.body, 5,
(item, next) => this.getMetrics(item.canonicalId, timeRange,
datastore, log, (err, res) => {
if (err) {
return next(err);
}
return next(null, Object.assign({}, res,
{ accountId: item.accountId }));
}),
cb);
});
}
return async.mapLimit(resources, 5, (resource, next) => this.getMetrics(resource, timeRange, datastore, log,
next), cb);
}
/**
* Returns a list of timestamps incremented by 15 min. from start timestamp
* to end timestamp
* @param {number} start - start timestamp
* @param {number} end - end timestamp
* @return {number[]} range - array of timestamps
*/
_getTimestampRange(start, end) {
const res = [];
let last = start;
while (last < end) {
res.push(last);
const d = new Date(last);
last = d.setMinutes(d.getMinutes() + 15);
if (process.env.UTAPI_INTERVAL_TEST_MODE === 'true') {
last = d.setSeconds(d.getSeconds() + 15);
}
}
res.push(end);
return res;
}
_buildSubRanges(range) {
let start = range[0];
const end = range[1] || Date.now();
const subRangesCount = Math.floor((end - start) / MAX_RANGE_MS) + 1;
const subRanges = [];
// eslint-disable-next-line no-plusplus
for (let i = 0; i < subRangesCount; i++) {
if (i + 1 === subRangesCount) {
subRanges.push([start, end]);
} else {
subRanges.push([start, (start + (MAX_RANGE_MS - 1))]);
start += MAX_RANGE_MS;
}
}
return subRanges;
}
_reduceResults(results) {
const reducer = (accumulator, current) => {
const result = Object.assign({}, accumulator);
result.timeRange[1] = current.timeRange[1];
result.storageUtilized[1] = current.storageUtilized[1];
result.numberOfObjects[1] = current.numberOfObjects[1];
result.incomingBytes += current.incomingBytes;
result.outgoingBytes += current.outgoingBytes;
const operations = Object.keys(result.operations);
operations.forEach(operation => {
result.operations[operation] += current.operations[operation];
});
return result;
};
return results.reduce(reducer);
}
getMetrics(resource, range, datastore, log, cb) {
const ranges = this._buildSubRanges(range);
async.mapLimit(ranges, 5, (subRange, next) => this._getMetricsRange(resource, subRange, datastore, log, next),
(err, results) => {
if (err) {
return cb(err);
}
const response = this._reduceResults(results);
return cb(null, response);
});
}
/**
* Callback for getting metrics for a single resource
* @callback ListMetrics~getMetricsCb
* @param {object} err - ArsenalError instance
* @param {object} metricRes - metrics for a single resource
* @param {string} [metricRes.bucketName] - (optional) name of the bucket
* @param {string} [metricRes.accountId] - (optional) ID of the account
* @param {number[]} metricRes.timeRange - start and end times as unix epoch
* @param {number[]} metricRes.storageUtilized - storage utilized by the
* bucket at start and end time. These are absolute values
* @param {number} metricRes.incomingBytes - number of bytes received by the
* bucket as object puts or multipart uploads
* @param {number} metricRes.outgoingBytes - number of bytes transferred to
* the clients from the objects belonging to the bucket
* @param {number[]} metricRes.numberOfObjects - number of objects held by
* the bucket at start and end times. These are absolute values.
* @param {object} metricRes.operations - object containing S3 operations
* and their counters, with the specific S3 operation as key and total count
* of operations that happened between start time and end time as value
*/
/**
* Get metrics for a single resource
* @param {string} resource - the metric resource
* @param {number[]} range - time range with start time and end time as
* its members in unix epoch timestamp format
* @param {object} datastore - Datastore instance
* @param {object} log - Werelogs logger instance
* @param {ListMetrics~getMetricsCb} cb - callback
* @return {undefined}
*/
_getMetricsRange(resource, range, datastore, log, cb) {
const start = range[0];
const end = range[1];
const obj = this._getSchemaObject(resource);
// find nearest neighbors for absolutes
const storageUtilizedKey = generateStateKey(obj, 'storageUtilized');
const numberOfObjectsKey = generateStateKey(obj, 'numberOfObjects');
const storageUtilizedStart = ['zrevrangebyscore', storageUtilizedKey,
start, '-inf', 'LIMIT', '0', '1'];
const storageUtilizedEnd = ['zrevrangebyscore', storageUtilizedKey, end,
'-inf', 'LIMIT', '0', '1'];
const numberOfObjectsStart = ['zrevrangebyscore', numberOfObjectsKey,
start, '-inf', 'LIMIT', '0', '1'];
const numberOfObjectsEnd = ['zrevrangebyscore', numberOfObjectsKey, end,
'-inf', 'LIMIT', '0', '1'];
const timestampRange = this._getTimestampRange(start, end);
const metricKeys = [].concat.apply([], timestampRange.map(
i => getKeys(obj, i),
));
const cmds = metricKeys.map(item => ['get', item]);
cmds.push(storageUtilizedStart, storageUtilizedEnd,
numberOfObjectsStart, numberOfObjectsEnd);
datastore.batch(cmds, (err, res) => {
if (err) {
log.trace('error occurred while getting metrics', {
error: err,
method: 'ListMetrics.getMetrics',
resource,
});
return cb(errors.InternalError);
}
const metricResponse = this._getMetricResponse(resource, start,
end);
// The last 4 results are storageUtilized (start, end) and numberOfObjects (start, end)
const absolutes = res.slice(-4);
const deltas = res.slice(0, res.length - 4);
const areMetricsPositive = absolutes.every((item, index) => {
if (item[0]) {
// log error and continue
log.trace('command in a batch failed to execute', {
error: item[0],
method: 'ListMetrics.getMetrics',
});
} else {
let val = parseInt(item[1], 10);
val = Number.isNaN(val) ? 0 : val;
if (val < 0) {
return false;
}
if (index === 0) {
metricResponse.storageUtilized[0] = val;
} else if (index === 1) {
metricResponse.storageUtilized[1] = val;
} else if (index === 2) {
metricResponse.numberOfObjects[0] = val;
} else if (index === 3) {
metricResponse.numberOfObjects[1] = val;
}
}
return true;
});
if (!areMetricsPositive) {
log.info('negative metric value found', {
error: resource,
method: 'ListMetrics.getMetrics',
});
}
/**
* Batch result is of the format
* [ [null, '1'], [null, '2'], [null, '3'] ] where each
* item is the result of each batch command.
* For each item in the result, index 0 holds the error and
* index 1 contains the value
*/
deltas.forEach((item, index) => {
const key = metricKeys[index];
if (item[0]) {
// log error and continue
log.trace('command in a batch failed to execute', {
error: item[0],
method: 'ListMetrics.getMetrics',
cmd: key,
});
} else {
const m = getMetricFromKey(key);
let count = parseInt(item[1], 10);
count = Number.isNaN(count) ? 0 : count;
if (m === 'incomingBytes' || m === 'outgoingBytes') {
metricResponse[m] += count;
} else {
metricResponse.operations[`${this.service}:${m}`]
+= count;
}
}
});
return cb(null, metricResponse);
});
}
}
module.exports = ListMetrics;
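
To make the range splitting above concrete, here is a small illustration (the 90-day window is made up) of how `_buildSubRanges` caps every query at `MAX_RANGE_MS` before `_reduceResults` merges the partial responses back together.

```js
// Hypothetical illustration of the 30-day splitting performed by _buildSubRanges.
const MAX_RANGE_MS = 1000 * 60 * 60 * 24 * 30; // same one-month cap as above

const start = Date.UTC(2016, 0, 1);     // 2016-01-01T00:00:00Z
const end = Date.UTC(2016, 3, 1) - 1;   // just before 2016-04-01, about 91 days later
const subRangesCount = Math.floor((end - start) / MAX_RANGE_MS) + 1;

// => 4: three full 30-day windows plus a final partial one ending at `end`.
console.log(subRangesCount);
```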

File diff suppressed because it is too large

View File

@ -1,265 +0,0 @@
const childProcess = require('child_process');
const async = require('async');
const nodeSchedule = require('node-schedule');
const { jsutil } = require('arsenal');
const werelogs = require('werelogs');
const Datastore = require('./Datastore');
const RedisClient = require('../libV2/redis');
const REINDEX_SCHEDULE = '0 0 * * Sun';
const REINDEX_LOCK_KEY = 's3:utapireindex:lock';
const REINDEX_LOCK_TTL = (60 * 60) * 24;
const REINDEX_PYTHON_INTERPRETER = process.env.REINDEX_PYTHON_INTERPRETER !== undefined
? process.env.REINDEX_PYTHON_INTERPRETER
: 'python3.7';
const EXIT_CODE_SENTINEL_CONNECTION = 100;
class UtapiReindex {
constructor(config) {
this._enabled = false;
this._schedule = REINDEX_SCHEDULE;
this._redis = {
name: 'scality-s3',
sentinelPassword: '',
sentinels: [{
host: '127.0.0.1',
port: 16379,
}],
};
this._bucketd = {
host: '127.0.0.1',
port: 9000,
};
this._password = '';
this._log = new werelogs.Logger('UtapiReindex');
if (config && config.enabled) {
this._enabled = config.enabled;
}
if (config && config.schedule) {
this._schedule = config.schedule;
}
if (config && config.password) {
this._password = config.password;
}
if (config && config.redis) {
const {
name, sentinelPassword, sentinels,
} = config.redis;
this._redis.name = name || this._redis.name;
this._redis.sentinelPassword = sentinelPassword || this._redis.sentinelPassword;
this._redis.sentinels = sentinels || this._redis.sentinels;
}
if (config && config.bucketd) {
const { host, port } = config.bucketd;
this._bucketd.host = host || this._bucketd.host;
this._bucketd.port = port || this._bucketd.port;
}
if (config && config.log) {
const { level, dump } = config.log;
this._log = new werelogs.Logger('UtapiReindex', { level, dump });
}
this._onlyCountLatestWhenObjectLocked = (config && config.onlyCountLatestWhenObjectLocked === true);
this._requestLogger = this._log.newRequestLogger();
}
_getRedisClient() {
const client = new RedisClient({
sentinels: this._redis.sentinels,
name: this._redis.name,
sentinelPassword: this._redis.sentinelPassword,
password: this._password,
});
client.connect();
return client;
}
_lock() {
return this.ds.setExpire(REINDEX_LOCK_KEY, 'true', REINDEX_LOCK_TTL);
}
_unLock() {
return this.ds.del(REINDEX_LOCK_KEY);
}
_buildFlags(sentinel) {
const flags = {
/* eslint-disable camelcase */
sentinel_ip: sentinel.host,
sentinel_port: sentinel.port,
sentinel_cluster_name: this._redis.name,
bucketd_addr: `http://${this._bucketd.host}:${this._bucketd.port}`,
};
if (this._redis.sentinelPassword) {
flags.redis_password = this._redis.sentinelPassword;
}
/* eslint-enable camelcase */
const opts = [];
Object.keys(flags)
.forEach(flag => {
const name = `--${flag.replace(/_/g, '-')}`;
opts.push(name);
opts.push(flags[flag]);
});
if (this._onlyCountLatestWhenObjectLocked) {
opts.push('--only-latest-when-locked');
}
return opts;
}
_runScriptWithSentinels(path, remainingSentinels, done) {
const flags = this._buildFlags(remainingSentinels.shift());
this._requestLogger.debug(`launching subprocess ${path} with flags: ${flags}`);
const process = childProcess.spawn(REINDEX_PYTHON_INTERPRETER, [path, ...flags]);
process.stdout.on('data', data => {
this._requestLogger.info('received output from script', {
output: Buffer.from(data).toString(),
script: path,
});
});
process.stderr.on('data', data => {
this._requestLogger.debug('received error from script', {
output: Buffer.from(data).toString(),
script: path,
});
});
process.on('error', err => {
this._requestLogger.debug('failed to start process', {
error: err,
script: path,
});
});
process.on('close', code => {
if (code) {
this._requestLogger.error('script exited with error', {
statusCode: code,
script: path,
});
if (code === EXIT_CODE_SENTINEL_CONNECTION) {
if (remainingSentinels.length > 0) {
this._requestLogger.info('retrying with next sentinel host', {
script: path,
});
return this._runScriptWithSentinels(path, remainingSentinels, done);
}
this._requestLogger.error('no more sentinel host to try', {
script: path,
});
}
} else {
this._requestLogger.info('script exited successfully', {
statusCode: code,
script: path,
});
}
return done();
});
}
_runScript(path, done) {
const remainingSentinels = [...this._redis.sentinels];
this._runScriptWithSentinels(path, remainingSentinels, done);
}
_attemptLock(job) {
this._requestLogger.info('attempting to acquire the lock to begin job');
this._lock()
.then(res => {
if (res) {
this._requestLogger
.info('acquired the lock, proceeding with job');
job();
} else {
this._requestLogger
.info('the lock is already acquired, skipping job');
}
})
.catch(err => {
this._requestLogger.error(
'an error occurred when acquiring the lock, skipping job', {
stack: err && err.stack,
},
);
});
}
_attemptUnlock() {
this._unLock()
.catch(err => {
this._requestLogger
.error('an error occurred when removing the lock', {
stack: err && err.stack,
});
});
}
_connect(done) {
const doneOnce = jsutil.once(done);
const client = this._getRedisClient();
this.ds = new Datastore().setClient(client);
client
.on('ready', doneOnce)
.on('error', doneOnce);
}
_scheduleJob() {
this._connect(err => {
if (err) {
this._requestLogger.error(
'could not connect to datastore, skipping', {
error: err && err.stack,
},
);
return undefined;
}
return this._attemptLock(() => {
const scripts = [
`${__dirname}/reindex/s3_bucketd.py`,
`${__dirname}/reindex/reporting.py`,
];
return async.eachSeries(scripts, (script, next) => {
this._runScript(script, next);
}, () => {
this._attemptUnlock();
});
});
});
}
_job() {
const job = nodeSchedule.scheduleJob(this._schedule, () => this._scheduleJob());
if (!job) {
this._log.error('could not initiate job schedule');
return undefined;
}
job.on('scheduled', () => {
this._requestLogger = this._log.newRequestLogger();
this._requestLogger.info('utapi reindex job scheduled', {
schedule: this._schedule,
});
});
return undefined;
}
start() {
if (this._enabled) {
this._log.info('initiating job schedule', {
schedule: this._schedule,
});
this._job();
} else {
this._log.info('utapi reindex is disabled');
}
return this;
}
}
module.exports = UtapiReindex;
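
A minimal sketch of wiring up the reindexer; the sentinel address, bucketd endpoint, and schedule are illustrative values rather than recommended settings.

```js
// Hypothetical bootstrap for the weekly reindex job.
const UtapiReindex = require('./lib/UtapiReindex'); // assumed relative path

const reindex = new UtapiReindex({
    enabled: true,
    schedule: '0 0 * * Sun', // same cron shape as the REINDEX_SCHEDULE default
    redis: {
        name: 'scality-s3',
        sentinelPassword: '',
        sentinels: [{ host: '127.0.0.1', port: 16379 }],
    },
    bucketd: { host: '127.0.0.1', port: 9000 },
    log: { level: 'info', dump: 'error' },
});

// Schedules the job; the Redis lock above keeps concurrent runs from overlapping.
reindex.start();
```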

View File

@ -1,206 +0,0 @@
/* eslint-disable no-plusplus */
const assert = require('assert');
const async = require('async');
const { scheduleJob } = require('node-schedule');
const werelogs = require('werelogs');
const UtapiClient = require('./UtapiClient');
const Datastore = require('./Datastore');
const safeJsonParse = require('../utils/safeJsonParse');
const redisClientv2 = require('../utils/redisClientv2');
// Every five minutes. Cron-style scheduling used by node-schedule.
const REPLAY_SCHEDULE = '*/5 * * * *';
// Fifteen minutes. The time to live for a replay lock.
const TTL = 60 * 15;
const BATCH_SIZE = 10;
class UtapiReplay {
/**
* Create a UtapiReplay
* @param {object} [config] - The configuration of UtapiReplay
* @param {werelogs.API} [config.logApi] - object providing a constructor
* function for the Logger object
* @param {object} [config.redis] - Object defining the host, port and
* password(optional) of the Redis datastore
* @param {object} [config.localCache] - Object defining the host, port and
* password(optional) of the local cache datastore
* @param {number} [config.batchSize] - The batch size to get metrics from
* Redis datastore
* @param {string} [config.replaySchedule] - The Cron-style schedule at
* which the replay job should run
*/
constructor(config) {
this.log = new (config.logApi || werelogs).Logger('UtapiReplay');
this.replaySchedule = REPLAY_SCHEDULE;
this.batchSize = BATCH_SIZE;
this.disableReplay = true;
if (config) {
const message = 'missing required property in UtapiReplay '
+ 'configuration';
assert(config.redis, `${message}: redis`);
assert(config.localCache, `${message}: localCache`);
this.utapiClient = new UtapiClient(config);
this.localCache = new Datastore()
.setClient(redisClientv2(config.localCache, this.log));
if (config.replaySchedule) {
this.replaySchedule = config.replaySchedule;
}
if (config.batchSize) {
this.batchSize = config.batchSize;
}
this.disableReplay = false;
}
}
/**
* Set the replay lock key.
* @return {undefined}
*/
_setLock() {
return this.localCache.setExpire('s3:utapireplay:lock', 'true', TTL);
}
/**
* Delete the replay lock key. If there is an error during this command, do
* not handle it as the lock will expire after the value of `TTL`.
* @return {undefined}
*/
_removeLock() {
return this.localCache.del('s3:utapireplay:lock');
}
/**
* Validates that all required items can be retrieved from the JSON object.
* @param {string} data - JSON of list element from local cache.
* @return {boolean} Returns `true` if object is valid, `false` otherwise.
*/
_validateElement(data) {
const {
action, reqUid, params, timestamp,
} = data;
if (!action || !reqUid || !params || !timestamp) {
this.log.fatal('missing required parameter in element',
{ method: 'UtapiReplay._validateElement' });
return false;
}
return true;
}
/**
* Pushes list elements using the array returned by the Redis pipeline.
* @param {array[]} metricsArr - The array returned by the pipeline, containing
* an array of an error code and value for each element in the cache list.
* @param {callback} cb - Callback to call.
* @return {function} async.each - Iterates through all array elements.
*/
_pushCachedMetrics(metricsArr, cb) {
return async.each(metricsArr, (arr, next) => {
const actionErr = arr[0];
const element = arr[1];
// If there is an error in one of the RPOP commands, it remains in
// the local cache list, so do not handle that element.
if (!actionErr && element) {
const { error, result } = safeJsonParse(element);
if (error) {
this.log.error('cannot parse element into JSON',
{ method: 'UtapiReplay._pushCachedMetrics' });
return next(error);
}
if (!this._validateElement(result)) {
return next();
}
this.log.trace('pushing metric with utapiClient::pushMetric',
{ method: 'UtapiReplay._pushCachedMetrics' });
const { action, reqUid, params } = result;
const firstReqUid = reqUid.split(':')[0];
// We do not pass the callback to pushMetric since UtapiClient
// will handle pushing it to local cache if internal error.
this.utapiClient.pushMetric(action, firstReqUid, params);
}
return next();
}, err => cb(err));
}
/**
* Gets and removes all elements from the local cache list.
* @param {number} listLen - The length of the local cache list.
* @return {function} async.timesSeries - Iterates through all list
* elements.
*/
_getCachedMetrics(listLen) {
const count = Math.ceil(listLen / this.batchSize);
return async.timesSeries(count, (n, next) => {
// We create the array each time because ioredis modifies it.
const cmds = [];
for (let i = 0; i < this.batchSize; i++) {
cmds.push(['rpop', 's3:utapireplay']);
}
return this.localCache.pipeline(cmds, (err, res) => {
if (err) {
return next(err);
}
return this._pushCachedMetrics(res, next);
});
}, err => {
if (err) {
this.log.error('cannot push element from cache list', {
method: 'UtapiReplay._getCachedMetrics',
error: err,
});
this.log.info(`replay job completed: ${err}`);
return this._removeLock();
}
this.log.info(`replay job completed: pushed ${listLen} metrics`);
return this._removeLock();
});
}
/**
* Checks local cache to determine if any action has been logged.
* @return {function} this.localCache.llen - Gets the length of the list
* in local cache.
*/
_checkLocalCache() {
return this.localCache.llen('s3:utapireplay', (err, res) => {
if (err) {
this.log.error('cannot get length of localCache list', {
method: 'UtapiReplay._checkLocalCache',
error: err.stack || err,
});
return this._removeLock();
}
if (res > 0) {
return this._getCachedMetrics(res);
}
this.log.info('replay job completed: no cached metrics found');
return this._removeLock();
});
}
/**
* Starts the replay job at the given job schedule.
* @return {UtapiReplay} this - UtapiReplay instance.
*/
start() {
if (this.disableReplay) {
this.log.info('disabled utapi replay scheduler');
return this;
}
const replay = scheduleJob(this.replaySchedule, () => this._setLock()
.then(res => {
// If `res` is not `null`, there is no pre-existing lock.
if (res) {
return this._checkLocalCache();
}
return undefined;
}));
replay.on('scheduled', date => this.log.info(`replay job started: ${date}`));
this.log.info('enabled utapi replay scheduler', {
schedule: this.replaySchedule,
});
return this;
}
}
module.exports = UtapiReplay;
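A minimal usage sketch (not part of the deleted file); the require path, hosts and ports are placeholders, while `replaySchedule` and `batchSize` mirror the documented defaults:

const UtapiReplay = require('./lib/UtapiReplay'); // hypothetical path

const replay = new UtapiReplay({
    redis: { host: '127.0.0.1', port: 6379 },      // required
    localCache: { host: '127.0.0.1', port: 6379 }, // required
    replaySchedule: '*/5 * * * *',                 // optional, defaults to every five minutes
    batchSize: 10,                                 // optional, defaults to 10
});

// Without a config object the scheduler stays disabled; with one, start()
// schedules the replay job and logs the schedule.
replay.start();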

View File

@ -1,280 +0,0 @@
/**
* Utapi request class
*
* @class
*/
class UtapiRequest {
constructor() {
this._log = null;
this._validator = null;
this._request = null;
this._response = null;
this._route = null;
this._statusCode = 0;
this._datastore = null;
this._requestQuery = null;
this._requestPath = null;
this._vault = null;
}
getVault() {
return this._vault;
}
setVault(vault) {
this._vault = vault;
return this;
}
/**
* Function to get the logger
*
* @return {object} Logger object
*/
getLog() {
return this._log;
}
/**
* Function to set the logger
*
* @param {object} log - Logger
* @return {UtapiRequest} itself
*/
setLog(log) {
this._log = log;
return this;
}
/**
* Function to get the validator
*
* @return {Validator} Validator object
*/
getValidator() {
return this._validator;
}
/**
* Function to set the validator
*
* @param {Validator} validator - Validator
* @return {UtapiRequest} itself
*/
setValidator(validator) {
this._validator = validator;
return this;
}
/**
* Set http request object
*
* @param {object} req - Http request object
* @return {UtapiRequest} itself
*/
setRequest(req) {
this._request = req;
return this;
}
/**
* Set request query
*
* @param {object} query - query from request
* @return {UtapiRequest} itself
*/
setRequestQuery(query) {
const decodedQuery = {};
Object.keys(query).forEach(x => {
const key = decodeURIComponent(x);
const value = decodeURIComponent(query[x]);
decodedQuery[key] = value;
});
this._requestQuery = decodedQuery;
return this;
}
/**
* Set request path
*
* @param {string} path - path from url.parse
* of request.url (pathname plus query)
* @return {UtapiRequest} itself
*/
setRequestPath(path) {
this._requestPath = decodeURIComponent(path);
return this;
}
/**
* Set request pathname
*
* @param {string} pathname - pathname from url.parse
* of request.url (pathname minus query)
* @return {UtapiRequest} itself
*/
setRequestPathname(pathname) {
this._requestPathname = pathname;
return this;
}
/**
* Get http request object
*
* @return {object} Http request object
*/
getRequest() {
return this._request;
}
/**
* Get http headers object
*
* @return {object} headers request headers
*/
getRequestHeaders() {
return this._request.headers;
}
/**
* Get http query object
*
* @return {object} request query
*/
getRequestQuery() {
return this._requestQuery;
}
/**
* Get request path
*
* @return {string} request path
*/
getRequestPath() {
return this._requestPath;
}
/**
* Get request pathname
*
* @return {string} request pathname
*/
getRequestPathname() {
return this._requestPathname;
}
/**
* Get requester ip address
*
* @return {string} requesterIp requester Ip address
*/
getRequesterIp() {
return this._request.socket.remoteAddress;
}
/**
* Get ssl enabled
*
* @return {boolean} sslEnabled whether sslEnabled request
*/
getSslEnabled() {
return this._request.connection.encrypted;
}
/**
* Get action
*
* @return {string} action
*/
getAction() {
return this._route.getAction();
}
/**
* Get resource
*
* @return {string} resource
*/
getResource() {
return this._route.getResource();
}
/**
* Set http response object
*
* @param {object} res - Http response object
* @return {UtapiRequest} itself
*/
setResponse(res) {
this._response = res;
return this;
}
/**
* Get http response object
*
* @return {object} Http response object
*/
getResponse() {
return this._response;
}
/**
* Get the current route
*
* @return {Route} current route
*/
getRoute() {
return this._route;
}
/**
* Set the current route
*
* @param {Route} route - Current route
* @return {UtapiRequest} itself
*/
setRoute(route) {
this._route = route;
return this;
}
/**
* Get the status code of the request
*
* @return {number} Http status code of the request
*/
getStatusCode() {
return this._statusCode;
}
/**
* Set the status code of the request
*
* @param {number} code - Http status code of the request
* @return {UtapiRequest} itself
*/
setStatusCode(code) {
this._statusCode = code;
return this;
}
/**
* Set the datastore to be used as backend
* @param {Datastore} ds - Datastore instance
* @return {UtapiRequest} itself
*/
setDatastore(ds) {
this._datastore = ds;
return this;
}
/**
* Get the datastore to be used as backend
* @return {Datastore} Datastore instance
*/
getDatastore() {
return this._datastore;
}
}
module.exports = UtapiRequest;
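Because every setter returns `this`, the class is intended to be used as a fluent builder, typically from inside an HTTP request listener. A sketch under that assumption (the `req`, `res`, `log` and `datastore` arguments are supplied by the caller):

const UtapiRequest = require('./lib/UtapiRequest'); // hypothetical path

function buildUtapiRequest(req, res, log, datastore) {
    return new UtapiRequest()
        .setRequest(req)
        .setResponse(res)
        .setLog(log)
        .setDatastore(datastore)
        .setRequestQuery({ Action: 'ListMetrics' })
        .setStatusCode(200);
}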

View File

@ -1,82 +0,0 @@
const vaultclient = require('vaultclient');
/**
* @class Vault
* Creates a vault instance for authentication and authorization
*/
class Vault {
constructor(config) {
const { host, port } = config.vaultd;
if (config.https) {
const { key, cert, ca } = config.https;
this._client = new vaultclient.Client(host, port, true, key, cert,
ca);
} else {
this._client = new vaultclient.Client(host, port);
}
}
/** authenticateV4Request
*
* @param {object} params - the authentication parameters as returned by
* auth.extractParams
* @param {number} params.version - shall equal 4
* @param {string} params.data.accessKey - the user's accessKey
* @param {string} params.data.signatureFromRequest - the signature read from
* the request
* @param {string} params.data.region - the AWS region
* @param {string} params.data.stringToSign - the stringToSign
* @param {string} params.data.scopeDate - the timespan to allow the request
* @param {string} params.data.authType - the type of authentication
* (query or header)
* @param {string} params.data.signatureVersion - the version of the
* signature (AWS or AWS4)
* @param {number} params.data.signatureAge - the age of the signature in ms
* @param {object} params.log - the logger object
* @param {RequestContext []} requestContexts - an array of
* RequestContext instances which contain information
* for policy authorization check
* @param {function} callback - cb(err)
* @return {undefined}
*/
authenticateV4Request(params, requestContexts, callback) {
const {
accessKey, signatureFromRequest, region, scopeDate,
stringToSign,
} = params.data;
const { log } = params;
log.debug('authenticating V4 request');
const serializedRCs = requestContexts.map(rc => rc.serialize());
this._client.verifySignatureV4(
stringToSign, signatureFromRequest,
accessKey, region, scopeDate,
{ reqUid: log.getSerializedUids(), requestContext: serializedRCs },
(err, authInfo) => {
if (err) {
log.trace('error from vault', { error: err });
return callback(err);
}
return callback(null,
authInfo.message.body.authorizationResults);
},
);
}
/**
* Returns canonical Ids for a given list of account Ids
* @param {string[]} accountIds - list of account ids
* @param {object} log - Werelogs request logger
* @param {callback} callback - callback with error and result as params
* @return {undefined}
*/
getCanonicalIds(accountIds, log, callback) {
log.debug('retrieving canonical ids for account ids', {
method: 'Vault.getCanonicalIds',
});
return this._client.getCanonicalIdsByAccountIds(accountIds,
{ reqUid: log.getSerializedUids(), logger: log }, callback);
}
}
module.exports = Vault;
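A sketch of how this wrapper can be called (not part of the deleted file); the vaultd address and account id are placeholders:

const werelogs = require('werelogs');
const Vault = require('./lib/Vault'); // hypothetical path

const vault = new Vault({ vaultd: { host: '127.0.0.1', port: 8500 } });
const log = new werelogs.Logger('VaultExample').newRequestLogger();

vault.getCanonicalIds(['123456789012'], log, (err, res) => {
    if (err) {
        return log.error('could not resolve canonical ids', { error: err });
    }
    return log.info('resolved canonical ids', { res });
});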

View File

@ -1,347 +0,0 @@
/* eslint-disable class-methods-use-this */
const assert = require('assert');
const map = require('async/map');
/**
* Pipeline - executes multiple commands sent as a batch
*/
class Pipeline {
/**
* @constructor
* @param {array[]} cmds - array of commands
* typical usage looks like [['set', 'foo', 'bar'], ['get', 'foo']]
* @param {Memory} db - Memory instance
*/
constructor(cmds, db) {
this.cmds = JSON.parse(JSON.stringify(cmds));
this.db = db;
}
/**
* @param {callback} cb - callback
* @return {undefined}
*/
exec(cb) {
process.nextTick(() => {
// e.g. [['set', 'foo', 'bar'], ['get', 'foo']]
map(this.cmds, (item, next) => {
// ['set', 'foo', 'bar']
const fnName = item.shift();
// arg1 = 'foo', arg2 = 'bar', arg3 = undefined
const [arg1, arg2, arg3] = item;
if (arg1 !== undefined && arg2 !== undefined
&& arg3 !== undefined) {
return this.db[fnName](arg1, arg2, arg3,
(err, res) => next(null, [err, res]));
}
if (arg1 !== undefined && arg2 !== undefined) {
return this.db[fnName](arg1, arg2,
(err, res) => next(null, [err, res]));
}
return this.db[fnName](arg1,
(err, res) => next(null, [err, res]));
}, cb);
});
}
}
/**
* Memory backend which emulates IoRedis client methods
*/
class Memory {
constructor() {
this.data = {};
}
/**
* A simple wrapper provided for API compatibility with redis
* @param {Function} func - Function to call
* @param {callback} cb - callback
* @returns {undefined}
*/
call(func, cb) {
return func(this, cb);
}
/**
* Set key to hold a value
* @param {string} key - data key
* @param {number|string} value - data value
* @param {callback} cb - callback
* @return {undefined}
*/
set(key, value, cb) {
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof value === 'string'
|| typeof value === 'number', true);
process.nextTick(() => {
this.data[key] = value.toString();
return cb(null, this.data[key]);
});
}
/**
* Get value from a key
* @param {string} key - data key
* @param {callback} cb - callback
* @return {undefined}
*/
get(key, cb) {
assert.strictEqual(typeof key, 'string');
process.nextTick(() => cb(null, this.data[key] === undefined
? null : this.data[key]));
}
/**
* Increment value held by the key by 1
* @param {string} key - data key
* @param {callback} cb - callback
* @return {undefined}
*/
incr(key, cb) {
assert.strictEqual(typeof key, 'string');
process.nextTick(() => {
if (this.data[key] === undefined) {
this.data[key] = 0;
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val + 1).toString();
return cb(null, this.data[key]);
});
}
/**
* Increment value held by the key by the given number
* @param {string} key - data key
* @param {number} num - number to increment by
* @param {callback} cb - callback
* @return {undefined}
*/
incrby(key, num, cb) {
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof num, 'number');
process.nextTick(() => {
if (this.data[key] === undefined) {
this.data[key] = 0;
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val + num).toString();
return cb(null, this.data[key]);
});
}
/**
* Decrement value held by the key by 1
* @param {string} key - data key
* @param {callback} cb - callback
* @return {undefined}
*/
decr(key, cb) {
assert.strictEqual(typeof key, 'string');
process.nextTick(() => {
if (this.data[key] === undefined) {
this.data[key] = 0;
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val - 1).toString();
return cb(null, this.data[key]);
});
}
/**
* Decrement value held by the key by the given number
* @param {string} key - data key
* @param {number} num - number to increment by
* @param {callback} cb - callback
* @return {undefined}
*/
decrby(key, num, cb) {
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof num, 'number');
process.nextTick(() => {
if (this.data[key] === undefined) {
this.data[key] = 0;
}
const val = parseInt(this.data[key], 10);
if (Number.isNaN(val)) {
throw new Error('Value at key cannot be represented as a '
+ 'number');
}
this.data[key] = (val - num).toString();
return cb(null, this.data[key]);
});
}
/**
* Store value by score in a sorted set
* @param {string} key - data key
* @param {number} score - data score
* @param {string|number} value - data value
* @param {callback} cb - callback
* @return {undefined}
*/
zadd(key, score, value, cb) {
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof score, 'number');
assert.strictEqual(typeof value === 'string'
|| typeof value === 'number', true);
process.nextTick(() => {
if (this.data[key] === undefined) {
this.data[key] = [];
}
const valStr = value.toString();
// compares both arrays of data
const found = this.data[key].some(item => JSON.stringify(item) === JSON.stringify([score, valStr]));
if (!found) {
// as this is a sorted set emulation, it sorts the data by score
// after each insertion
this.data[key].push([score, valStr]);
this.data[key].sort((a, b) => a[0] - b[0]);
}
return cb(null, valStr);
});
}
/**
* Returns range result from sorted set at key with scores between min and
* max (all inclusive). Ordering is from low to high scores
* @param {string} key - data key
* @param {string|number} min - min score (number or -inf)
* @param {string|number} max - max score (number or +inf)
* @param {callback} cb - callback
* @return {undefined}
*/
zrangebyscore(key, min, max, cb) {
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof min === 'number'
|| typeof min === 'string', true);
assert.strictEqual(typeof max === 'number'
|| typeof max === 'string', true);
process.nextTick(() => {
if (!this.data[key]) {
// emulating redis-client which returns nulls
return cb(null, null);
}
const minScore = (min === '-inf') ? this.data[key][0][0] : min;
const maxScore = (max === '+inf')
? this.data[key][this.data[key].length - 1][0] : max;
return cb(null, this.data[key].filter(item => item[0] >= minScore
&& item[0] <= maxScore).map(item => item[1]));
});
}
/**
* Returns range result from sorted set at key with scores between min and
* max (all inclusive). Ordering is from high to low scores
* @param {string} key - data key
* @param {string|number} max - max score (number or +inf)
* @param {string|number} min - min score (number or -inf)
* @param {callback} cb - callback
* @return {undefined}
*/
zrevrangebyscore(key, max, min, cb) {
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof min === 'number'
|| typeof min === 'string', true);
assert.strictEqual(typeof max === 'number'
|| typeof max === 'string', true);
process.nextTick(() => {
if (!this.data[key]) {
// emulating redis-client which returns nulls
return cb(null, null);
}
const minScore = (min === '-inf') ? this.data[key][0][0] : min;
const maxScore = (max === '+inf')
? this.data[key][this.data[key].length - 1][0] : max;
const cloneKeyData = Object.assign([], this.data[key]);
// Sort keys by scores in the decreasing order, if scores are equal
// sort by their value in the decreasing order
cloneKeyData.sort((a, b) => {
if (a[0] === b[0]) {
return b[1] - a[1];
}
return b[0] - a[0];
});
return cb(null, cloneKeyData.filter(item => item[0] >= minScore
&& item[0] <= maxScore).map(item => item[1]));
});
}
/**
* Remove elements from sorted set at key with scores between min and
* max (all inclusive)
* @param {string} key - data key
* @param {string|number} min - min score (number or -inf)
* @param {string|number} max - max score (number or +inf)
* @param {callback} cb - callback
* @return {undefined}
*/
zremrangebyscore(key, min, max, cb) {
assert.strictEqual(typeof key, 'string');
assert.strictEqual(typeof min === 'number'
|| typeof min === 'string', true);
assert.strictEqual(typeof max === 'number'
|| typeof max === 'string', true);
process.nextTick(() => {
if (!this.data[key]) {
// emulating redis-client which returns nulls
return cb(null, null);
}
const minScore = (min === '-inf') ? this.data[key][0][0] : min;
const maxScore = (max === '+inf')
? this.data[key][this.data[key].length - 1][0] : max;
const oldLen = this.data[key].length;
this.data[key] = this.data[key].filter(item => (item[0] < minScore || item[0] > maxScore));
return cb(null, (oldLen - this.data[key].length));
});
}
/**
* Returns a pipeline instance that can execute commmands as a batch
* @param {array} cmds - list of commands
* @return {Pipeline} - Pipeline instance
*/
multi(cmds) {
return new Pipeline(cmds, this);
}
/**
* Flushes(clears) the data out from db
* @return {object} - current instance
*/
flushDb() {
this.data = {};
return this;
}
getDb() {
return this.data;
}
/**
* Does nothing, as the memory backend does not support pub/sub
* @param {string} channel - the channel name where the message is published
* @param {string} message - the message to send
* @param {callback} cb - callback
* @return {undefined}
*/
publish(channel, message, cb) {
return cb();
}
}
module.exports = Memory;
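A sketch of the subset of the ioredis-style API emulated above (not part of the deleted file); the require path, keys and values are arbitrary placeholders:

const Memory = require('./lib/backend'); // hypothetical path
const db = new Memory();

db.set('s3:buckets:demo:numberOfObjects:counter', 1, () => {
    db.incrby('s3:buckets:demo:numberOfObjects:counter', 4, (err, val) => {
        // val === '5' (values are stored as strings, as in redis)
    });
});

// Sorted-set emulation: zadd stores [score, value] pairs ordered by score.
db.zadd('s3:buckets:demo:storageUtilized', 1000, '1024', () => {
    db.zrangebyscore('s3:buckets:demo:storageUtilized', 0, 2000, (err, res) => {
        // res === ['1024']
    });
});

// multi() batches commands like an ioredis pipeline.
db.multi([['set', 'foo', 'bar'], ['get', 'foo']]).exec((err, results) => {
    // results === [[null, 'bar'], [null, 'bar']]
});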

View File

@ -1,117 +0,0 @@
import argparse
import ast
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import re
import redis
import requests
import sys
from threading import Thread
import time
import urllib
logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('utapi-reindex:reporting')
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
parser.add_argument("-p", "--sentinel-port", default="16379", help="Sentinel Port")
parser.add_argument("-v", "--redis-password", default=None, help="Redis AUTH Password")
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-b", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
return parser.parse_args()
def safe_print(content):
print("{0}".format(content))
class askRedis():
def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
self._password = password
r = redis.Redis(
host=ip,
port=port,
db=0,
password=password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
)
try:
self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {ip}:{port}: {e}')
# use a specific error code to hint on retrying with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
def read(self, resource, name):
r = redis.Redis(host=self._ip, port=self._port, db=0, password=self._password)
res = 's3:%s:%s:storageUtilized:counter' % (resource, name)
total_size = r.get(res)
res = 's3:%s:%s:numberOfObjects:counter' % (resource, name)
files = r.get(res)
try:
return {'files': int(files), "total_size": int(total_size)}
except Exception as e:
return {'files': 0, "total_size": 0}
class S3ListBuckets():
def __init__(self, host='127.0.0.1:9000'):
self.bucketd_host = host
def run(self):
docs = []
url = "%s/default/bucket/users..bucket" % self.bucketd_host
session = requests.Session()
r = session.get(url, timeout=30)
if r.status_code == 200:
payload = json.loads(r.text)
for keys in payload['Contents']:
key = keys["key"]
r1 = re.match(r"(\w+)..\|..(\w+.*)", key)
docs.append(r1.groups())
return docs
if __name__ == '__main__':
options = get_options()
redis_conf = dict(
ip=options.sentinel_ip,
port=options.sentinel_port,
sentinel_cluster_name=options.sentinel_cluster_name,
password=options.redis_password
)
P = S3ListBuckets(options.bucketd_addr)
listbuckets = P.run()
userids = set([x for x, y in listbuckets])
executor = ThreadPoolExecutor(max_workers=1)
for userid, bucket in listbuckets:
U = askRedis(**redis_conf)
data = U.read('buckets', bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
data = U.read('buckets', 'mpuShadowBucket'+bucket)
content = "Account:%s|Bucket:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, 'mpuShadowBucket'+bucket, data["files"], data["total_size"])
executor.submit(safe_print, content)
executor.submit(safe_print, "")
for userid in sorted(userids):
U = askRedis(**redis_conf)
data = U.read('accounts', userid)
content = "Account:%s|NumberOFfiles:%s|StorageCapacity:%s " % (
userid, data["files"], data["total_size"])
executor.submit(safe_print, content)

View File

@ -1,586 +0,0 @@
import argparse
import concurrent.futures as futures
import functools
import itertools
import json
import logging
import os
import re
import sys
import time
import urllib
from pathlib import Path
from collections import defaultdict, namedtuple
from concurrent.futures import ThreadPoolExecutor
import redis
import requests
from requests import ConnectionError, HTTPError, Timeout
logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('utapi-reindex')
USERS_BUCKET = 'users..bucket'
MPU_SHADOW_BUCKET_PREFIX = 'mpuShadowBucket'
ACCOUNT_UPDATE_CHUNKSIZE = 100
SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--sentinel-ip", default='127.0.0.1', help="Sentinel IP")
parser.add_argument("-p", "--sentinel-port", default="16379", help="Sentinel Port")
parser.add_argument("-v", "--redis-password", default=None, help="Redis AUTH Password")
parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
parser.add_argument("-s", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
parser.add_argument("-w", "--worker", default=10, type=int, help="Number of workers")
parser.add_argument("-r", "--max-retries", default=2, type=int, help="Max retries before failing a bucketd request")
parser.add_argument("--only-latest-when-locked", action='store_true', help="Only index the latest version of a key when the bucket has a default object lock policy")
parser.add_argument("--debug", action='store_true', help="Enable debug logging")
parser.add_argument("--dry-run", action="store_true", help="Do not update redis")
group = parser.add_mutually_exclusive_group()
group.add_argument("-a", "--account", default=[], help="account canonical ID (all account buckets will be processed)", action="append", type=nonempty_string('account'))
group.add_argument("--account-file", default=None, help="file containing account canonical IDs, one ID per line", type=existing_file)
group.add_argument("-b", "--bucket", default=[], help="bucket name", action="append", type=nonempty_string('bucket'))
group.add_argument("--bucket-file", default=None, help="file containing bucket names, one bucket name per line", type=existing_file)
options = parser.parse_args()
if options.bucket_file:
with open(options.bucket_file) as f:
options.bucket = [line.strip() for line in f if line.strip()]
elif options.account_file:
with open(options.account_file) as f:
options.account = [line.strip() for line in f if line.strip()]
return options
def nonempty_string(flag):
def inner(value):
if not value.strip():
raise argparse.ArgumentTypeError("%s: value must not be empty"%flag)
return value
return inner
def existing_file(path):
path = Path(path).resolve()
if not path.exists():
raise argparse.ArgumentTypeError("File does not exist: %s"%path)
return path
def chunks(iterable, size):
it = iter(iterable)
chunk = tuple(itertools.islice(it,size))
while chunk:
yield chunk
chunk = tuple(itertools.islice(it,size))
def _encoded(func):
def inner(*args, **kwargs):
val = func(*args, **kwargs)
return urllib.parse.quote(val.encode('utf-8'))
return inner
Bucket = namedtuple('Bucket', ['userid', 'name', 'object_lock_enabled'])
MPU = namedtuple('MPU', ['bucket', 'key', 'upload_id'])
BucketContents = namedtuple('BucketContents', ['bucket', 'obj_count', 'total_size'])
class MaxRetriesReached(Exception):
def __init__(self, url):
super().__init__('Max retries reached for request to %s'%url)
class InvalidListing(Exception):
def __init__(self, bucket):
super().__init__('Invalid contents found while listing bucket %s'%bucket)
class BucketNotFound(Exception):
def __init__(self, bucket):
super().__init__('Bucket %s not found'%bucket)
class BucketDClient:
'''Performs Listing calls against bucketd'''
__url_attribute_format = '{addr}/default/attributes/{bucket}'
__url_bucket_format = '{addr}/default/bucket/{bucket}'
__headers = {"x-scal-request-uids": "utapi-reindex-list-buckets"}
def __init__(self, bucketd_addr=None, max_retries=2, only_latest_when_locked=False):
self._bucketd_addr = bucketd_addr
self._max_retries = max_retries
self._only_latest_when_locked = only_latest_when_locked
self._session = requests.Session()
def _do_req(self, url, check_500=True, **kwargs):
# Add 1 for the initial request
for x in range(self._max_retries + 1):
try:
resp = self._session.get(url, timeout=30, verify=False, headers=self.__headers, **kwargs)
if check_500 and resp.status_code == 500:
_log.warning('500 from bucketd, sleeping 15 secs')
time.sleep(15)
continue
return resp
except (Timeout, ConnectionError) as e:
_log.exception(e)
_log.error('Error during listing, sleeping 5 secs %s'%url)
time.sleep(5)
raise MaxRetriesReached(url)
def _list_bucket(self, bucket, **kwargs):
'''
Lists a bucket lazily until "empty"
bucket: name of the bucket
kwargs: url parameters key=value
To support multiple next marker keys and param encoding, a function can
be passed as a parameter's value. It will be called with the JSON-decoded
response body as its only argument and is expected to return the
parameter's value. On the first request the function will be called with
`None` and should return its initial value. Return `None` for the param to be excluded.
'''
url = self.__url_bucket_format.format(addr=self._bucketd_addr, bucket=bucket)
static_params = {k: v for k, v in kwargs.items() if not callable(v)}
dynamic_params = {k: v for k, v in kwargs.items() if callable(v)}
is_truncated = True # Set to True for first loop
payload = None
while is_truncated:
params = static_params.copy() # Use a copy of the static params for a base
for key, func in dynamic_params.items():
params[key] = func(payload) # Call each of our dynamic params with the previous payload
try:
_log.debug('listing bucket bucket: %s params: %s'%(
bucket, ', '.join('%s=%s'%p for p in params.items())))
resp = self._do_req(url, params=params)
if resp.status_code == 404:
_log.debug('Bucket not found bucket: %s'%bucket)
return
if resp.status_code == 200:
payload = resp.json()
except ValueError as e:
_log.exception(e)
_log.error('Invalid listing response body! bucket:%s params:%s'%(
bucket, ', '.join('%s=%s'%p for p in params.items())))
continue
except MaxRetriesReached:
_log.error('Max retries reached listing bucket:%s'%bucket)
raise
except Exception as e:
_log.exception(e)
_log.error('Unhandled exception during listing! bucket:%s params:%s'%(
bucket, ', '.join('%s=%s'%p for p in params.items())))
raise
yield resp.status_code, payload
if isinstance(payload, dict):
is_truncated = payload.get('IsTruncated', False)
else:
is_truncated = len(payload) > 0
@functools.lru_cache(maxsize=16)
def _get_bucket_attributes(self, name):
url = self.__url_attribute_format.format(addr=self._bucketd_addr, bucket=name)
try:
resp = self._do_req(url)
if resp.status_code == 200:
return resp.json()
else:
_log.error('Error getting bucket attributes bucket:%s status_code:%s'%(name, resp.status_code))
raise BucketNotFound(name)
except ValueError as e:
_log.exception(e)
_log.error('Invalid attributes response body! bucket:%s'%name)
raise
except MaxRetriesReached:
_log.error('Max retries reached getting bucket attributes bucket:%s'%name)
raise
except Exception as e:
_log.exception(e)
_log.error('Unhandled exception getting bucket attributes bucket:%s'%name)
raise
def get_bucket_md(self, name):
md = self._get_bucket_attributes(name)
canonId = md.get('owner')
if canonId is None:
_log.error('No owner found for bucket %s'%name)
raise InvalidListing(name)
return Bucket(canonId, name, md.get('objectLockEnabled', False))
def list_buckets(self, account=None):
def get_next_marker(p):
if p is None:
return ''
return p.get('Contents', [{}])[-1].get('key', '')
params = {
'delimiter': '',
'maxKeys': 1000,
'marker': get_next_marker
}
if account is not None:
params['prefix'] = '%s..|..' % account
for _, payload in self._list_bucket(USERS_BUCKET, **params):
buckets = []
for result in payload.get('Contents', []):
match = re.match(r"(\w+)..\|..(\w+.*)", result['key'])
bucket = Bucket(*match.groups(), False)
# We need to get the attributes for each bucket to determine if it is locked
if self._only_latest_when_locked:
bucket_attrs = self._get_bucket_attributes(bucket.name)
object_lock_enabled = bucket_attrs.get('objectLockEnabled', False)
bucket = bucket._replace(object_lock_enabled=object_lock_enabled)
buckets.append(bucket)
if buckets:
yield buckets
def list_mpus(self, bucket):
_bucket = MPU_SHADOW_BUCKET_PREFIX + bucket.name
def get_next_marker(p):
if p is None:
return 'overview..|..'
return p.get('NextKeyMarker', '')
def get_next_upload_id(p):
if p is None:
return 'None'
return p.get('NextUploadIdMarker', '')
params = {
'delimiter': '',
'keyMarker': '',
'maxKeys': 1000,
'queryPrefixLength': 0,
'listingType': 'MPU',
'splitter': '..|..',
'prefix': get_next_marker,
'uploadIdMarker': get_next_upload_id,
}
keys = []
for status_code, payload in self._list_bucket(_bucket, **params):
if status_code == 404:
break
for key in payload['Uploads']:
keys.append(MPU(
bucket=bucket,
key=key['key'],
upload_id=key['value']['UploadId']))
return keys
def _sum_objects(self, bucket, listing, only_latest_when_locked = False):
count = 0
total_size = 0
last_key = None
try:
for obj in listing:
if isinstance(obj['value'], dict):
# bucketd v6 returns a dict:
data = obj.get('value', {})
size = data["Size"]
else:
# bucketd v7 returns an encoded string
data = json.loads(obj['value'])
size = data.get('content-length', 0)
is_latest = obj['key'] != last_key
last_key = obj['key']
if only_latest_when_locked and bucket.object_lock_enabled and not is_latest:
_log.debug('Skipping versioned key: %s'%obj['key'])
continue
count += 1
total_size += size
except InvalidListing:
_log.error('Invalid contents in listing. bucket:%s'%bucket.name)
raise InvalidListing(bucket.name)
return count, total_size
def _extract_listing(self, key, listing):
for status_code, payload in listing:
contents = payload[key] if isinstance(payload, dict) else payload
if contents is None:
raise InvalidListing('')
for obj in contents:
yield obj
def count_bucket_contents(self, bucket):
def get_key_marker(p):
if p is None:
return ''
return p.get('NextKeyMarker', '')
def get_vid_marker(p):
if p is None:
return ''
return p.get('NextVersionIdMarker', '')
params = {
'listingType': 'DelimiterVersions',
'maxKeys': 1000,
'keyMarker': get_key_marker,
'versionIdMarker': get_vid_marker,
}
listing = self._list_bucket(bucket.name, **params)
count, total_size = self._sum_objects(bucket, self._extract_listing('Versions', listing), self._only_latest_when_locked)
return BucketContents(
bucket=bucket,
obj_count=count,
total_size=total_size
)
def count_mpu_parts(self, mpu):
shadow_bucket_name = MPU_SHADOW_BUCKET_PREFIX + mpu.bucket.name
shadow_bucket = mpu.bucket._replace(name=shadow_bucket_name)
def get_prefix(p):
if p is None:
return mpu.upload_id
return p.get('Contents', [{}])[-1].get('key', '')
@_encoded
def get_next_marker(p):
prefix = get_prefix(p)
return prefix + '..|..00000'
params = {
'prefix': get_prefix,
'marker': get_next_marker,
'delimiter': '',
'maxKeys': 1000,
'listingType': 'Delimiter',
}
listing = self._list_bucket(shadow_bucket_name, **params)
count, total_size = self._sum_objects(shadow_bucket, self._extract_listing('Contents', listing))
return BucketContents(
bucket=shadow_bucket,
obj_count=0, # MPU parts are not counted towards numberOfObjects
total_size=total_size
)
def list_all_buckets(bucket_client):
return bucket_client.list_buckets()
def list_specific_accounts(bucket_client, accounts):
for account in accounts:
yield from bucket_client.list_buckets(account=account)
def list_specific_buckets(bucket_client, buckets):
batch = []
for bucket in buckets:
try:
batch.append(bucket_client.get_bucket_md(bucket))
except BucketNotFound:
_log.error('Failed to list bucket %s. Removing from results.'%bucket)
continue
yield batch
def index_bucket(client, bucket):
'''
Takes an instance of BucketDClient and a Bucket, and returns a
BucketContents for the passed bucket, with the size of its mpu shadow bucket included.
'''
try:
bucket_total = client.count_bucket_contents(bucket)
mpus = client.list_mpus(bucket)
if not mpus:
return bucket_total
total_size = bucket_total.total_size
mpu_totals = [client.count_mpu_parts(m) for m in mpus]
for mpu in mpu_totals:
total_size += mpu.total_size
return bucket_total._replace(total_size=total_size)
except Exception as e:
_log.exception(e)
_log.error('Error during listing. Removing from results bucket:%s'%bucket.name)
raise InvalidListing(bucket.name)
def update_report(report, key, obj_count, total_size):
'''Convenience function to update the report dicts'''
if key in report:
report[key]['obj_count'] += obj_count
report[key]['total_size'] += total_size
else:
report[key] = {
'obj_count': obj_count,
'total_size': total_size,
}
def get_redis_client(options):
sentinel = redis.Redis(
host=options.sentinel_ip,
port=options.sentinel_port,
db=0,
password=options.redis_password,
socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
)
try:
ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
_log.error(f'Failed to connect to redis sentinel at {options.sentinel_ip}:{options.sentinel_port}: {e}')
# use a specific error code to hint on retrying with another sentinel node
sys.exit(EXIT_CODE_SENTINEL_CONNECTION_ERROR)
return redis.Redis(
host=ip,
port=port,
db=0,
password=options.redis_password
)
def update_redis(client, resource, name, obj_count, total_size):
timestamp = int(time.time() - 15 * 60) * 1000
obj_count_key = 's3:%s:%s:numberOfObjects' % (resource, name)
total_size_key = 's3:%s:%s:storageUtilized' % (resource, name)
client.zremrangebyscore(obj_count_key, timestamp, timestamp)
client.zremrangebyscore(total_size_key, timestamp, timestamp)
client.zadd(obj_count_key, {obj_count: timestamp})
client.zadd(total_size_key, {total_size: timestamp})
client.set(obj_count_key + ':counter', obj_count)
client.set(total_size_key + ':counter', total_size)
def get_resources_from_redis(client, resource):
for key in client.scan_iter('s3:%s:*:storageUtilized' % resource):
yield key.decode('utf-8').split(':')[2]
def log_report(resource, name, obj_count, total_size):
print('%s:%s:%s:%s'%(
resource,
name,
obj_count,
total_size
))
if __name__ == '__main__':
options = get_options()
if options.debug:
_log.setLevel(logging.DEBUG)
bucket_client = BucketDClient(options.bucketd_addr, options.max_retries, options.only_latest_when_locked)
redis_client = get_redis_client(options)
account_reports = {}
observed_buckets = set()
failed_accounts = set()
if options.account:
batch_generator = list_specific_accounts(bucket_client, options.account)
elif options.bucket:
batch_generator = list_specific_buckets(bucket_client, options.bucket)
else:
batch_generator = list_all_buckets(bucket_client)
with ThreadPoolExecutor(max_workers=options.worker) as executor:
for batch in batch_generator:
bucket_reports = {}
jobs = { executor.submit(index_bucket, bucket_client, b): b for b in batch }
for job in futures.as_completed(jobs.keys()):
try:
total = job.result() # Summed bucket and shadowbucket totals
except InvalidListing:
_bucket = jobs[job]
_log.error('Failed to list bucket %s. Removing from results.'%_bucket.name)
# Add the bucket to observed_buckets anyway to avoid clearing existing metrics
observed_buckets.add(_bucket.name)
# If we can not list one of an account's buckets we can not update its total
failed_accounts.add(_bucket.userid)
continue
observed_buckets.add(total.bucket.name)
update_report(bucket_reports, total.bucket.name, total.obj_count, total.total_size)
update_report(account_reports, total.bucket.userid, total.obj_count, total.total_size)
# Bucket reports can be updated as we get them
if options.dry_run:
for bucket, report in bucket_reports.items():
_log.info(
"DryRun: resource buckets [%s] would be updated with obj_count %i and total_size %i" % (
bucket, report['obj_count'], report['total_size']
)
)
else:
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket, report in bucket_reports.items():
update_redis(pipeline, 'buckets', bucket, report['obj_count'], report['total_size'])
log_report('buckets', bucket, report['obj_count'], report['total_size'])
pipeline.execute()
stale_buckets = set()
recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
if options.bucket:
stale_buckets = { b for b in options.bucket if b not in observed_buckets }
elif options.account:
_log.warning('Stale buckets will not be cleared when using the --account or --account-file flags')
else:
stale_buckets = recorded_buckets.difference(observed_buckets)
_log.info('Found %s stale buckets' % len(stale_buckets))
if options.dry_run:
_log.info("DryRun: not updating stale buckets")
else:
for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for bucket in chunk:
update_redis(pipeline, 'buckets', bucket, 0, 0)
log_report('buckets', bucket, 0, 0)
pipeline.execute()
# Account metrics are not updated if a bucket is specified
if options.bucket:
_log.warning('Account metrics will not be updated when using the --bucket or --bucket-file flags')
else:
# Don't update any accounts with failed listings
without_failed = filter(lambda x: x[0] not in failed_accounts, account_reports.items())
if options.dry_run:
for userid, report in account_reports.items():
_log.info(
"DryRun: resource account [%s] would be updated with obj_count %i and total_size %i" % (
userid, report['obj_count'], report['total_size']
)
)
else:
# Update total account reports in chunks
for chunk in chunks(without_failed, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for userid, report in chunk:
update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
log_report('accounts', userid, report['obj_count'], report['total_size'])
pipeline.execute()
if options.account:
for account in options.account:
if account in failed_accounts:
_log.error("No metrics updated for account %s, one or more buckets failed" % account)
# Include failed_accounts in observed_accounts to avoid clearing metrics
observed_accounts = failed_accounts.union(set(account_reports.keys()))
recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts'))
if options.account:
stale_accounts = { a for a in options.account if a not in observed_accounts }
else:
# Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
stale_accounts = recorded_accounts.difference(observed_accounts)
_log.info('Found %s stale accounts' % len(stale_accounts))
if options.dry_run:
_log.info("DryRun: not updating stale accounts")
else:
for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
pipeline = redis_client.pipeline(transaction=False) # No transaction to reduce redis load
for account in chunk:
update_redis(pipeline, 'accounts', account, 0, 0)
log_report('accounts', account, 0, 0)
pipeline.execute()

View File

@ -1,169 +0,0 @@
// metric type schema
const stateKeys = {
storageUtilized: prefix => `${prefix}storageUtilized`,
numberOfObjects: prefix => `${prefix}numberOfObjects`,
};
const counters = {
storageUtilizedCounter: prefix => `${prefix}storageUtilized:counter`,
numberOfObjectsCounter: prefix => `${prefix}numberOfObjects:counter`,
};
const keys = {
createBucket: prefix => `${prefix}CreateBucket`,
deleteBucket: prefix => `${prefix}DeleteBucket`,
listBucket: prefix => `${prefix}ListBucket`,
getBucketAcl: prefix => `${prefix}GetBucketAcl`,
putBucketAcl: prefix => `${prefix}PutBucketAcl`,
putBucketCors: prefix => `${prefix}PutBucketCors`,
deleteBucketCors: prefix => `${prefix}DeleteBucketCors`,
getBucketCors: prefix => `${prefix}GetBucketCors`,
putBucketWebsite: prefix => `${prefix}PutBucketWebsite`,
putBucketVersioning: prefix => `${prefix}PutBucketVersioning`,
getBucketVersioning: prefix => `${prefix}GetBucketVersioning`,
deleteBucketWebsite: prefix => `${prefix}DeleteBucketWebsite`,
getBucketWebsite: prefix => `${prefix}GetBucketWebsite`,
getBucketLocation: prefix => `${prefix}GetBucketLocation`,
listBucketMultipartUploads: prefix => `${prefix}ListBucketMultipartUploads`,
listMultipartUploadParts: prefix => `${prefix}ListMultipartUploadParts`,
initiateMultipartUpload: prefix => `${prefix}InitiateMultipartUpload`,
completeMultipartUpload: prefix => `${prefix}CompleteMultipartUpload`,
abortMultipartUpload: prefix => `${prefix}AbortMultipartUpload`,
deleteObject: prefix => `${prefix}DeleteObject`,
multiObjectDelete: prefix => `${prefix}MultiObjectDelete`,
uploadPart: prefix => `${prefix}UploadPart`,
uploadPartCopy: prefix => `${prefix}UploadPartCopy`,
getObject: prefix => `${prefix}GetObject`,
getObjectAcl: prefix => `${prefix}GetObjectAcl`,
getObjectTagging: prefix => `${prefix}GetObjectTagging`,
putObject: prefix => `${prefix}PutObject`,
copyObject: prefix => `${prefix}CopyObject`,
putObjectAcl: prefix => `${prefix}PutObjectAcl`,
putObjectTagging: prefix => `${prefix}PutObjectTagging`,
deleteObjectTagging: prefix => `${prefix}DeleteObjectTagging`,
headBucket: prefix => `${prefix}HeadBucket`,
headObject: prefix => `${prefix}HeadObject`,
putBucketReplication: prefix => `${prefix}PutBucketReplication`,
getBucketReplication: prefix => `${prefix}GetBucketReplication`,
deleteBucketReplication: prefix => `${prefix}DeleteBucketReplication`,
putBucketObjectLock: prefix => `${prefix}PutBucketObjectLock`,
getBucketObjectLock: prefix => `${prefix}GetBucketObjectLock`,
putObjectRetention: prefix => `${prefix}PutObjectRetention`,
getObjectRetention: prefix => `${prefix}GetObjectRetention`,
putObjectLegalHold: prefix => `${prefix}PutObjectLegalHold`,
getObjectLegalHold: prefix => `${prefix}GetObjectLegalHold`,
replicateObject: prefix => `${prefix}ReplicateObject`,
replicateTags: prefix => `${prefix}ReplicateTags`,
replicateDelete: prefix => `${prefix}ReplicateDelete`,
incomingBytes: prefix => `${prefix}incomingBytes`,
outgoingBytes: prefix => `${prefix}outgoingBytes`,
};
/**
* Creates the appropriate prefix for schema keys
* @param {object} params - object with metric type and id as a property
* @param {number} [timestamp] - (optional) unix timestamp normalized to the
* nearest 15 min.
* @return {string} - prefix for the schema key
*/
function getSchemaPrefix(params, timestamp) {
const {
bucket, accountId, userId, level, service, location,
} = params;
// `service` property must remain last because other objects also include it
const id = bucket || accountId || userId || location || service;
const prefix = timestamp ? `${service}:${level}:${timestamp}:${id}:`
: `${service}:${level}:${id}:`;
return prefix;
}
/**
* Returns the metric key for the metric type
* @param {object} params - object with metric type and id as a property
* @param {string} metric - metric to generate a key for
* @param {number} timestamp - unix timestamp normalized to the nearest 15 min.
* @return {string} - schema key
*/
function generateKey(params, metric, timestamp) {
const prefix = getSchemaPrefix(params, timestamp);
if (params.location) {
return `${prefix}locationStorage`;
}
return keys[metric](prefix);
}
/**
* Returns a list of the counters for a metric type
* @param {object} params - object with metric type and id as a property
* @return {string[]} - array of keys for counters
*/
function getCounters(params) {
const prefix = getSchemaPrefix(params);
return Object.keys(counters).map(item => counters[item](prefix));
}
/**
* Returns a list of all keys for a metric type
* @param {object} params - object with metric type and id as a property
* @param {number} timestamp - unix timestamp normalized to the nearest 15 min.
* @return {string[]} - list of keys
*/
function getKeys(params, timestamp) {
const prefix = getSchemaPrefix(params, timestamp);
return Object.keys(keys).map(item => keys[item](prefix));
}
/**
* Returns metric from key
* @param {string} key - schema key
* @return {string} metric - Utapi metric
*/
function getMetricFromKey(key) {
const fields = key.split(':');
// Identify the location of the metric in the array.
const metricLocation = key.includes('counter') ? -2 : -1;
return fields[fields.length + metricLocation];
}
/**
* Returns the keys representing state of the metric type
* @param {object} params - object with metric type and id as a property
* @return {string[]} - list of keys
*/
function getStateKeys(params) {
const prefix = getSchemaPrefix(params);
return Object.keys(stateKeys).map(item => stateKeys[item](prefix));
}
/**
* Returns the state metric key for the metric type
* @param {object} params - object with metric type and id as a property
* @param {string} metric - metric to generate a key for
* @return {string} - schema key
*/
function generateStateKey(params, metric) {
const prefix = getSchemaPrefix(params);
return stateKeys[metric](prefix);
}
/**
* Returns the counter metric key for the metric type
* @param {object} params - object with metric type and id as a property
* @param {string} metric - metric to generate a key for
* @return {string} - schema key
*/
function generateCounter(params, metric) {
const prefix = getSchemaPrefix(params);
return counters[metric](prefix);
}
module.exports = {
getCounters,
getKeys,
getMetricFromKey,
getStateKeys,
generateCounter,
generateKey,
generateStateKey,
};
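A sketch of the key shapes this module produces (not part of the deleted file); the require path, bucket name and 15-minute-normalized timestamp are arbitrary example values:

const { generateKey, generateStateKey, generateCounter } = require('./lib/schema'); // hypothetical path

const params = { bucket: 'example-bucket', level: 'buckets', service: 's3' };

generateKey(params, 'putObject', 1672531200000);
// -> 's3:buckets:1672531200000:example-bucket:PutObject'

generateStateKey(params, 'storageUtilized');
// -> 's3:buckets:example-bucket:storageUtilized'

generateCounter(params, 'numberOfObjectsCounter');
// -> 's3:buckets:example-bucket:numberOfObjects:counter'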

View File

@ -1,239 +0,0 @@
/* eslint-disable class-methods-use-this */
const http = require('http');
const https = require('https');
const url = require('url');
const { Clustering, errors, ipCheck } = require('arsenal');
const arsenalHttps = require('arsenal').https;
const { Logger } = require('werelogs');
const routes = require('../router/routes');
const Route = require('../router/Route');
const Router = require('../router/Router');
const UtapiRequest = require('../lib/UtapiRequest');
const Datastore = require('./Datastore');
const redisClientv2 = require('../utils/redisClientv2');
class UtapiServer {
/**
* This represents UtapiServer
* @constructor
* @param {Worker} [worker=null] - Track the worker when using cluster
* @param {number} port - server port
* @param {Datastore} datastore - DataStore instance
* @param {Werelogs} logger - Werelogs logger instance
* @param {Config} config - Config instance
*/
constructor(worker, port, datastore, logger, config) {
this.worker = worker;
this.port = port;
this._config = config;
this.vault = config.vaultclient;
if (!this.vault) {
const Vault = require('./Vault');
this.vault = new Vault(config);
}
this.router = new Router(config, this.vault);
this.logger = logger;
this.datastore = datastore;
this.server = null;
// setup routes
routes.forEach(item => this.router.addRoute(new Route(item)));
}
/**
* Function to validate a URI component
*
* @param {string|object} component - path from url.parse of request.url
* (pathname plus query) or query from request
* @return {boolean|string|undefined} Returns a truthy value (`true` or the
* offending query key) if any component fails to decode, otherwise returns
* `undefined`
*/
_checkURIComponent(component) {
if (typeof component === 'string') {
try {
decodeURIComponent(component);
} catch (err) {
return true;
}
} else {
return Object.keys(component).find(x => {
try {
decodeURIComponent(x);
decodeURIComponent(component[x]);
} catch (err) {
return true;
}
return false;
});
}
return undefined;
}
requestListener(req, res, router) {
// disable nagle algorithm
req.socket.setNoDelay();
const { query, path, pathname } = url.parse(req.url, true);
const utapiRequest = new UtapiRequest()
.setVault(this.vault)
.setRequest(req)
.setLog(this.logger.newRequestLogger())
.setResponse(res)
.setDatastore(this.datastore);
// Sanity check for valid URI component
if (this._checkURIComponent(query) || this._checkURIComponent(path)) {
return this.errorResponse(utapiRequest, errors.InvalidURI);
}
utapiRequest.setRequestQuery(query);
utapiRequest.setRequestPath(path);
utapiRequest.setRequestPathname(pathname);
// temp hack: healthcheck route
if (path === '/_/healthcheck' && (req.method === 'GET'
|| req.method === 'POST')) {
utapiRequest.setStatusCode(200);
const allowIp = ipCheck.ipMatchCidrList(
this._config.healthChecks.allowFrom, req.socket.remoteAddress,
);
if (!allowIp) {
return this.errorResponse(utapiRequest, errors.AccessDenied);
}
const redisClient = this.datastore.getClient();
if (!redisClient.isReady) {
return this.errorResponse(utapiRequest,
errors.InternalError.customizeDescription(
'Redis server is not ready',
));
}
return this.response(utapiRequest, {});
}
return router.doRoute(utapiRequest, (err, data) => {
if (err) {
return this.errorResponse(utapiRequest, err);
}
return this.response(utapiRequest, data);
});
}
/*
* This starts the http server.
*/
startup() {
if (this._config.https) {
const { cert, key, ca } = this._config.https;
this.server = https.createServer({
cert,
key,
ca,
ciphers: arsenalHttps.ciphers.ciphers,
dhparam: arsenalHttps.dhparam.dhparam,
rejectUnauthorized: true,
}, (req, res) => this.requestListener(req, res, this.router));
} else {
this.server = http.createServer((req, res) => this.requestListener(req, res, this.router));
}
this.server.on('listening', () => {
const addr = this.server.address() || {
address: '0.0.0.0',
port: this.port,
};
this.logger.trace('server started', {
address: addr.address,
port: addr.port,
pid: process.pid,
https: Boolean(this._config.https),
});
});
this.server.listen(this.port);
}
/*
* This exits the running process properly.
*/
cleanUp() {
this.logger.info('server shutting down');
this.server.close();
process.exit(0);
}
static logRequestEnd(logger, req, res) {
const info = {
clientIp: req.socket.remoteAddress,
clientPort: req.socket.remotePort,
httpMethod: req.method,
httpURL: req.url,
httpCode: res.statusCode,
httpMessage: res.statusMessage,
};
logger.end('finished handling request', info);
}
/**
* Server's response to the client
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {Object} data - JSON response to the client
* @return {Object} res - response object
*/
response(utapiRequest, data) {
const log = utapiRequest.getLog();
const req = utapiRequest.getRequest();
const res = utapiRequest.getResponse();
log.trace('writing HTTP response', {
method: 'UtapiServer.response',
});
const code = utapiRequest.getStatusCode();
/*
* Encoding data to binary provides a hot path to write data
* directly to the socket, without node.js trying to encode the data
* over and over again.
*/
const payload = Buffer.from(JSON.stringify(data), 'utf8');
res.writeHead(code, {
'server': 'ScalityS3',
'x-scal-request-id': log.getSerializedUids(),
'content-type': 'application/json',
'content-length': payload.length,
});
res.write(payload);
UtapiServer.logRequestEnd(log, req, res);
return res.end();
}
/**
* Respond to the request with the error details
* @param {UtapiRequest} utapiRequest - UtapiRequest instance
* @param {ArsenalError} err - Arsenal error instance
* @return {Object} res - response object
*/
errorResponse(utapiRequest, err) {
utapiRequest.setStatusCode(err.code);
return this.response(utapiRequest,
{ code: err.message, message: err.description });
}
}
/**
* Spawns a new server
* @param {object} config - configuration params
* @property {number} config.port - server port
* @property {object} config.redis - redis configuration
* @property {number} config.workers - number of workers for Cluster
* @property {object} config.log - logger configuration
* @return {undefined}
*/
function spawn(config) {
const {
workers, redis, log, port,
} = config;
const logger = new Logger('Utapi', {
level: log.logLevel,
dump: log.dumpLevel,
});
const cluster = new Clustering(workers, logger);
cluster.start(worker => {
const datastore = new Datastore().setClient(redisClientv2(redis, logger));
const server = new UtapiServer(worker, port, datastore, logger, config);
server.startup();
});
}
module.exports = spawn;
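A sketch of how `spawn` would be called (not part of the deleted file); every value below is an assumption and would normally come from the service configuration:

const spawn = require('./lib/server'); // hypothetical path

spawn({
    port: 8100,
    workers: 1,
    log: { logLevel: 'info', dumpLevel: 'error' },
    redis: { host: '127.0.0.1', port: 6379 },
    vaultd: { host: '127.0.0.1', port: 8500 },
    healthChecks: { allowFrom: ['127.0.0.1/8', '::1'] },
});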

View File

@ -1,7 +0,0 @@
const MemoryCache = require('./memory');
const RedisCache = require('./redis');
module.exports = {
MemoryCache,
RedisCache,
};

View File

@ -1,110 +0,0 @@
const schema = require('../schema');
const constants = require('../../constants');
/**
* Returns null iff the value is undefined.
* Returns the passed value otherwise.
*
* @param {*} value - Any value
* @returns {*} - Passed value or null
*/
function orNull(value) {
return value === undefined ? null : value;
}
class MemoryCache {
constructor() {
this._data = {};
this._shards = {};
this._prefix = 'utapi';
this._expirations = {};
}
// eslint-disable-next-line class-methods-use-this
async connect() {
return true;
}
// eslint-disable-next-line class-methods-use-this
async disconnect() {
Object.values(this._expirations).forEach(clearTimeout);
return true;
}
_expireKey(key, delay) {
if (this._expirations[key]) {
clearTimeout(this._expirations[key]);
}
this._expirations[key] = setTimeout(() => delete this._data[key], delay * 1000);
}
async getKey(key) {
return this._data[key];
}
async setKey(key, data) {
this._data[key] = data;
return true;
}
async addToShard(shard, event) {
const metricKey = schema.getUtapiMetricKey(this._prefix, event);
this._data[metricKey] = event;
if (this._shards[shard]) {
this._shards[shard].push(metricKey);
} else {
this._shards[shard] = [metricKey];
}
return true;
}
async getKeysInShard(shard) {
return this._shards[shard] || [];
}
async fetchShard(shard) {
if (this._shards[shard]) {
return this._shards[shard].map(key => this._data[key]);
}
return [];
}
async deleteShardAndKeys(shard) {
(this._shards[shard] || []).forEach(key => {
delete this._data[key];
});
delete this._shards[shard];
return true;
}
async getShards() {
return Object.keys(this._shards);
}
async shardExists(shard) {
return this._shards[shard.toString()] !== undefined;
}
async updateCounters(metric) {
if (metric.sizeDelta) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, metric.account);
this._data[accountSizeKey] = (this._data[accountSizeKey] || 0) + metric.sizeDelta;
}
}
async updateAccountCounterBase(account, size) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
this._data[accountSizeKey] = 0;
this._data[accountSizeBaseKey] = size;
this._expireKey(accountSizeBaseKey, constants.counterBaseValueExpiration);
}
async fetchAccountSizeCounter(account) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
return [orNull(this._data[accountSizeKey]), orNull(this._data[accountSizeBaseKey])];
}
}
module.exports = MemoryCache;
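A short sketch of the shard workflow this backend exposes; the shard id and event fields are illustrative, assuming only that the event carries the uuid used by schema.getUtapiMetricKey.
const MemoryCache = require('./memory'); // path assumed

async function demo() {
    const cache = new MemoryCache();
    await cache.connect();
    await cache.addToShard('1640995200', { uuid: 'abc-123', account: 'acct1', sizeDelta: 1024 });
    console.log(await cache.getShards());              // ['1640995200']
    console.log(await cache.fetchShard('1640995200')); // [{ uuid: 'abc-123', ... }]
    await cache.deleteShardAndKeys('1640995200');
    await cache.disconnect();
}

demo().catch(console.error);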

View File

@ -1,194 +0,0 @@
const RedisClient = require('../../redis');
const schema = require('../schema');
const { LoggerContext } = require('../../utils');
const constants = require('../../constants');
const moduleLogger = new LoggerContext({
module: 'cache.backend.redis.RedisCache',
});
class RedisCache {
constructor(options, prefix) {
this._redis = null;
this._options = options;
this._prefix = prefix || 'utapi';
}
async connect() {
moduleLogger.debug('Connecting to redis...');
this._redis = new RedisClient(this._options);
this._redis.connect();
return true;
}
async disconnect() {
const logger = moduleLogger.with({ method: 'disconnect' });
if (this._redis) {
try {
logger.debug('closing connection to redis');
await this._redis.disconnect();
} catch (error) {
logger.error('error while closing connection to redis', {
error,
});
throw error;
}
this._redis = null;
} else {
logger.debug('disconnect called but no connection to redis found');
}
}
async getKey(key) {
return moduleLogger
.with({ method: 'getKey' })
.logAsyncError(() => this._redis.call(redis => redis.get(key)),
'error fetching key from redis', { key });
}
async setKey(key, value) {
return moduleLogger
.with({ method: 'setKey' })
.logAsyncError(async () => {
const res = await this._redis.call(redis => redis.set(key, value));
return res === 'OK';
}, 'error setting key in redis', { key });
}
async addToShard(shard, metric) {
const logger = moduleLogger.with({ method: 'addToShard' });
return logger
.logAsyncError(async () => {
const metricKey = schema.getUtapiMetricKey(this._prefix, metric);
const shardKey = schema.getShardKey(this._prefix, shard);
const shardMasterKey = schema.getShardMasterKey(this._prefix);
logger.debug('adding metric to shard', { metricKey, shardKey });
const [setResults, saddResults] = await this._redis
.call(redis => redis
.multi([
['set', metricKey, JSON.stringify(metric.getValue())],
['sadd', shardKey, metricKey],
['sadd', shardMasterKey, shardKey],
])
.exec());
let success = true;
if (setResults[1] !== 'OK') {
moduleLogger.error('failed to set metric key', {
metricKey,
shardKey,
res: setResults[1],
});
success = false;
}
if (saddResults[1] !== 1) {
moduleLogger.error('metric key already present in shard', {
metricKey,
shardKey,
res: saddResults[1],
});
success = false;
}
return success;
}, 'error during redis command');
}
async getKeysInShard(shard) {
return moduleLogger
.with({ method: 'getKeysInShard' })
.logAsyncError(async () => {
const shardKey = schema.getShardKey(this._prefix, shard);
return this._redis.call(redis => redis.smembers(shardKey));
}, 'error while fetching shard keys', { shard });
}
async fetchShard(shard) {
return moduleLogger
.with({ method: 'fetchShard' })
.logAsyncError(async () => {
const keys = await this.getKeysInShard(shard);
if (!keys.length) {
return [];
}
return this._redis.call(redis => redis.mget(...keys));
}, 'error while fetching shard data', { shard });
}
async deleteShardAndKeys(shard) {
return moduleLogger
.with({ method: 'deleteShardAndKeys' })
.logAsyncError(async () => {
const shardKey = schema.getShardKey(this._prefix, shard);
const shardMasterKey = schema.getShardMasterKey(this._prefix);
const keys = await this.getKeysInShard(shard);
return this._redis.call(
redis => redis.multi([
['del', shardKey, ...keys],
['srem', shardMasterKey, shardKey],
]).exec(),
);
}, 'error while deleting shard', { shard });
}
async shardExists(shard) {
return moduleLogger
.with({ method: 'shardExists' })
.logAsyncError(async () => {
const shardKey = schema.getShardKey(this._prefix, shard);
const res = await this._redis.call(redis => redis.exists(shardKey));
return res === 1;
}, 'error while checking shard', { shard });
}
async getShards() {
return moduleLogger
.with({ method: 'getShards' })
.logAsyncError(async () => {
const shardMasterKey = schema.getShardMasterKey(this._prefix);
return this._redis.call(redis => redis.smembers(shardMasterKey));
}, 'error while fetching shards');
}
async updateCounters(metric) {
return moduleLogger
.with({ method: 'updateCounters' })
.logAsyncError(async () => {
if (metric.sizeDelta) {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, metric.account);
await this._redis.call(redis => redis.incrby(accountSizeKey, metric.sizeDelta));
}
}, 'error while updating metric counters');
}
async updateAccountCounterBase(account, size) {
return moduleLogger
.with({ method: 'updateAccountCounterBase' })
.logAsyncError(async () => {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
await this._redis.call(async redis => {
await redis.mset(accountSizeKey, 0, accountSizeBaseKey, size);
await redis.expire(accountSizeBaseKey, constants.counterBaseValueExpiration);
});
}, 'error while updating metric counter base');
}
async fetchAccountSizeCounter(account) {
return moduleLogger
.with({ method: 'fetchAccountSizeCounter' })
.logAsyncError(async () => {
const accountSizeKey = schema.getAccountSizeCounterKey(this._prefix, account);
const accountSizeBaseKey = schema.getAccountSizeCounterBaseKey(this._prefix, account);
const [counter, base] = await this._redis.call(redis => redis.mget(accountSizeKey, accountSizeBaseKey));
return [
counter !== null ? parseInt(counter, 10) : null,
base !== null ? parseInt(base, 10) : null,
];
}, 'error fetching account size counters', { account });
}
}
module.exports = RedisCache;

libV2/cache/client.js
View File

@ -1,58 +0,0 @@
const { shardFromTimestamp } = require('../utils');
class CacheClient {
constructor(config) {
this._prefix = config.prefix || 'utapi';
this._cacheBackend = config.cacheBackend;
this._counterBackend = config.counterBackend;
}
async connect() {
return Promise.all([
this._cacheBackend.connect(),
this._counterBackend.connect(),
]);
}
async disconnect() {
return Promise.all([
this._cacheBackend.disconnect(),
this._counterBackend.disconnect(),
]);
}
async pushMetric(metric) {
const shard = shardFromTimestamp(metric.timestamp);
if (!(await this._cacheBackend.addToShard(shard, metric))) {
return false;
}
await this._counterBackend.updateCounters(metric);
return true;
}
async getMetricsForShard(shard) {
return this._cacheBackend.fetchShard(shard);
}
async deleteShard(shard) {
return this._cacheBackend.deleteShardAndKeys(shard);
}
async shardExists(shard) {
return this._cacheBackend.shardExists(shard);
}
async getShards() {
return this._cacheBackend.getShards();
}
async updateAccountCounterBase(account, size) {
return this._counterBackend.updateAccountCounterBase(account, size);
}
async fetchAccountSizeCounter(account) {
return this._counterBackend.fetchAccountSizeCounter(account);
}
}
module.exports = CacheClient;
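To illustrate how the two backends compose, a sketch using MemoryCache for both the cache and counter roles (require paths and metric fields are assumptions):
const CacheClient = require('./client');
const { MemoryCache } = require('./backend');

async function demo() {
    const client = new CacheClient({
        cacheBackend: new MemoryCache(),
        counterBackend: new MemoryCache(),
    });
    await client.connect();
    // the timestamp selects the shard via shardFromTimestamp
    await client.pushMetric({ uuid: 'abc-123', timestamp: Date.now(), account: 'acct1', sizeDelta: 2048 });
    console.log(await client.getShards());
    await client.disconnect();
}

demo().catch(console.error);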

libV2/cache/index.js
View File

@ -1,21 +0,0 @@
const config = require('../config');
const CacheClient = require('./client');
const { MemoryCache, RedisCache } = require('./backend');
const cacheTypes = {
redis: conf => new RedisCache(conf),
memory: () => new MemoryCache(),
};
const cacheBackend = cacheTypes[config.cache.backend](config.cache);
const counterBackend = cacheTypes[config.cache.backend](config.redis);
module.exports = {
CacheClient,
backends: {
MemoryCache,
RedisCache,
},
client: new CacheClient({ cacheBackend, counterBackend }),
};

libV2/cache/schema.js
View File

@ -1,27 +0,0 @@
function getShardKey(prefix, shard) {
return `${prefix}:shard:${shard}`;
}
function getUtapiMetricKey(prefix, metric) {
return `${prefix}:events:${metric.uuid}`;
}
function getShardMasterKey(prefix) {
return `${prefix}:shard:master`;
}
function getAccountSizeCounterKey(prefix, account) {
return `${prefix}:counters:account:${account}:size`;
}
function getAccountSizeCounterBaseKey(prefix, account) {
return `${prefix}:counters:account:${account}:size:base`;
}
module.exports = {
getShardKey,
getUtapiMetricKey,
getShardMasterKey,
getAccountSizeCounterKey,
getAccountSizeCounterBaseKey,
};
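For reference, the key layout these helpers produce, shown with an illustrative prefix, shard and account:
// getShardKey('utapi', 1640995200)                -> 'utapi:shard:1640995200'
// getUtapiMetricKey('utapi', { uuid: 'abc-123' }) -> 'utapi:events:abc-123'
// getShardMasterKey('utapi')                      -> 'utapi:shard:master'
// getAccountSizeCounterKey('utapi', 'acct1')      -> 'utapi:counters:account:acct1:size'
// getAccountSizeCounterBaseKey('utapi', 'acct1')  -> 'utapi:counters:account:acct1:size:base'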

View File

@ -1,329 +0,0 @@
const { callbackify } = require('util');
const { Transform } = require('stream');
const uuid = require('uuid');
const needle = require('needle');
// These modules are added via the `level-mem` package rather than individually
/* eslint-disable import/no-extraneous-dependencies */
const levelup = require('levelup');
const memdown = require('memdown');
const encode = require('encoding-down');
/* eslint-enable import/no-extraneous-dependencies */
const { UtapiMetric } = require('../models');
const {
LoggerContext,
logEventFilter,
asyncOrCallback,
buildFilterChain,
} = require('../utils');
const moduleLogger = new LoggerContext({
module: 'client',
});
class Chunker extends Transform {
constructor(options) {
super({ objectMode: true, ...options });
this._chunkSize = (options && options.chunkSize) || 100;
this._currentChunk = [];
}
_transform(chunk, encoding, callback) {
this._currentChunk.push(chunk);
if (this._currentChunk.length >= this._chunkSize) {
this.push(this._currentChunk);
this._currentChunk = [];
}
callback();
}
_flush(callback) {
if (this._currentChunk.length) {
this.push(this._currentChunk);
}
callback();
}
}
class Uploader extends Transform {
constructor(options) {
super({ objectMode: true, ...options });
this._ingest = options.ingest;
}
_transform(chunk, encoding, callback) {
this._ingest(chunk.map(i => new UtapiMetric(i.value)))
.then(() => {
this.push({
success: true,
keys: chunk.map(i => i.key),
});
callback();
},
error => {
this.push({
success: false,
keys: [],
});
moduleLogger.error('error uploading metrics from retry cache', { error });
callback();
});
}
}
class UtapiClient {
constructor(config) {
this._host = (config && config.host) || 'localhost';
this._port = (config && config.port) || '8100';
this._tls = (config && config.tls) || {};
this._transport = (config && config.tls) ? 'https' : 'http';
this._logger = (config && config.logger) || moduleLogger;
this._maxCachedMetrics = (config && config.maxCachedMetrics) || 200000; // roughly 100MB
this._numCachedMetrics = 0;
this._disableRetryCache = config && config.disableRetryCache;
this._retryCache = this._disableRetryCache
? null
: levelup(encode(memdown(), { valueEncoding: 'json' }));
this._drainTimer = null;
this._drainCanSchedule = true;
this._drainDelay = (config && config.drainDelay) || 30000;
this._suppressedEventFields = (config && config.suppressedEventFields) || null;
const eventFilters = (config && config.filter) || {};
this._shouldPushMetric = buildFilterChain(eventFilters);
if (Object.keys(eventFilters).length !== 0) {
logEventFilter((...args) => moduleLogger.info(...args), 'utapi event filter enabled', eventFilters);
}
}
async join() {
await this._flushRetryCacheToLogs();
this._retryCache.close();
}
async _pushToUtapi(metrics) {
const resp = await needle(
'post',
`${this._transport}://${this._host}:${this._port}/v2/ingest`,
metrics.map(metric => metric.getValue()),
{ json: true, ...this._tls },
);
if (resp.statusCode !== 200) {
throw new Error('failed to push metric, server returned non 200 status code '
+ `${resp.statusCode} ${resp.statusMessage}`);
}
}
async _addToRetryCache(metric) {
if (this._numCachedMetrics < this._maxCachedMetrics) {
try {
await this._retryCache.put(metric.uuid, metric.getValue());
this._numCachedMetrics += 1;
await this._scheduleDrain();
return true;
} catch (error) {
this._logger
.error('error adding metric to retry cache', { error });
this._emitMetricLogLine(metric, { reason: 'error' });
}
} else {
this._emitMetricLogLine(metric, { reason: 'overflow' });
}
return false;
}
async _drainRetryCache() {
return new Promise((resolve, reject) => {
let empty = true;
const toRemove = [];
this._retryCache.createReadStream()
.pipe(new Chunker())
.pipe(new Uploader({ ingest: this._pushToUtapi.bind(this) }))
.on('data', res => {
if (res.success) {
toRemove.push(...res.keys);
} else {
empty = false;
}
})
.on('end', () => {
this._retryCache.batch(
toRemove.map(key => ({ type: 'del', key })),
error => {
if (error) {
this._logger.error('error removing events from retry cache', { error });
reject(error);
return;
}
resolve(empty);
},
);
})
.on('error', reject);
});
}
async _drainRetryCachePreflight() {
try {
const resp = await needle(
'get',
`${this._transport}://${this._host}:${this._port}/_/healthcheck`,
this._tls,
);
return resp.statusCode === 200;
} catch (error) {
this._logger
.debug('drain preflight request failed', { error });
return false;
}
}
async _attemptDrain() {
if (await this._drainRetryCachePreflight()) {
let empty = false;
try {
empty = await this._drainRetryCache();
} catch (error) {
this._logger
.error('Error while draining cache', { error });
}
if (!empty) {
await this._scheduleDrain();
}
}
this._drainTimer = null;
}
async _scheduleDrain() {
if (this._drainCanSchedule && !this._drainTimer) {
this._drainTimer = setTimeout(this._attemptDrain.bind(this), this._drainDelay);
}
}
async _disableDrain() {
this._drainCanSchedule = false;
if (this._drainTimer) {
clearTimeout(this._drainTimer);
this._drainTimer = null;
}
}
_emitMetricLogLine(metric, extra) {
this._logger.info('utapi metric recovery log', {
event: metric.getValue(),
utapiRecovery: true,
...(extra || {}),
});
}
async _flushRetryCacheToLogs() {
const toRemove = [];
return new Promise((resolve, reject) => {
this._retryCache.createReadStream()
.on('data', entry => {
this._emitMetricLogLine(entry.value);
toRemove.push(entry.key);
})
.on('end', () => {
this._retryCache.batch(
toRemove.map(key => ({ type: 'del', key })),
error => {
if (error) {
this._logger.error('error removing events from retry cache', { error });
reject(error);
return;
}
resolve();
},
);
})
.on('error', err => reject(err));
});
}
async _pushMetric(data) {
let metric = data instanceof UtapiMetric
? data
: new UtapiMetric(data);
// If this event has been filtered then exit early
if (!this._shouldPushMetric(metric)) {
return;
}
// Assign a uuid if one isn't passed
if (!metric.uuid) {
metric.uuid = uuid.v4();
}
// Assign a timestamp if one isn't passed
if (!metric.timestamp) {
metric.timestamp = new Date().getTime();
}
if (this._suppressedEventFields !== null) {
const filteredData = Object.entries(metric.getValue())
.filter(([key]) => !this._suppressedEventFields.includes(key))
.reduce((obj, [key, value]) => {
obj[key] = value;
return obj;
}, {});
metric = new UtapiMetric(filteredData);
}
try {
await this._pushToUtapi([metric]);
} catch (error) {
if (!this._disableRetryCache) {
this._logger.error('unable to push metric, adding to retry cache', { error });
if (!await this._addToRetryCache(metric)) {
throw new Error('unable to store metric');
}
} else {
this._logger.debug('unable to push metric. retry cache disabled, not retrying ingestion.', { error });
}
}
}
pushMetric(data, cb) {
if (typeof cb === 'function') {
callbackify(this._pushMetric.bind(this))(data, cb);
return undefined;
}
return this._pushMetric(data);
}
/**
* Get the storageUtilized of a resource
*
* @param {string} level - level of metrics, currently only 'accounts' is supported
* @param {string} resource - id of the resource
* @param {Function|undefined} callback - optional callback
* @returns {Promise|undefined} - return a Promise if no callback is provided, undefined otherwise
*/
getStorage(level, resource, callback) {
if (level !== 'accounts') {
throw new Error('invalid level, only "accounts" is supported');
}
return asyncOrCallback(async () => {
const resp = await needle(
'get',
`${this._transport}://${this._host}:${this._port}/v2/storage/${level}/${resource}`,
this._tls,
);
if (resp.statusCode !== 200) {
throw new Error(`unable to retrieve metrics: ${resp.statusMessage}`);
}
return resp.body;
}, callback);
}
}
module.exports = UtapiClient;
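A usage sketch covering both calling conventions supported by pushMetric plus the storage lookup; host, port and field values are assumptions.
const UtapiClient = require('./client'); // path assumed

const utapi = new UtapiClient({ host: '127.0.0.1', port: '8100' });

// Promise form
utapi.pushMetric({
    operationId: 'putObject',
    account: 'acct1',
    bucket: 'my-bucket',
    sizeDelta: 1024,
    incomingBytes: 1024,
}).catch(error => console.error('push failed', error));

// Callback form
utapi.pushMetric({ operationId: 'getObject', account: 'acct1', outgoingBytes: 512 }, error => {
    if (error) {
        console.error('push failed', error);
    }
});

// Storage lookup; only the 'accounts' level is accepted
utapi.getStorage('accounts', 'acct1', (error, res) => {
    if (!error) {
        console.log(res.storageUtilized);
    }
});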

View File

@ -1,64 +0,0 @@
{
"host": "127.0.0.1",
"port": 8100,
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"redis": {
"host": "127.0.0.1",
"port": 6379
},
"localCache": {
"host": "127.0.0.1",
"port": 6379
},
"warp10": {
"host": "127.0.0.1",
"port": 4802,
"nodeId": "single_node",
"requestTimeout": 60000,
"connectTimeout": 60000
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"cacheBackend": "memory",
"development": false,
"nodeId": "single_node",
"ingestionSchedule": "*/5 * * * * *",
"ingestionShardSize": 10,
"ingestionLagSeconds": 30,
"checkpointSchedule": "*/30 * * * * *",
"snapshotSchedule": "5 0 * * * *",
"repairSchedule": "0 */5 * * * *",
"reindexSchedule": "0 0 0 * * Sun",
"diskUsageSchedule": "0 */15 * * * *",
"bucketd": [ "localhost:9000" ],
"reindex": {
"enabled": true,
"schedule": "0 0 0 * * 6"
},
"diskUsage": {
"retentionDays": 45,
"expirationEnabled": false
},
"serviceUser": {
"arn": "arn:aws:iam::000000000000:user/scality-internal/service-utapi-user",
"enabled": false
},
"filter": {
"allow": {},
"deny": {}
},
"metrics" : {
"enabled": false,
"host": "localhost",
"ingestPort": 10902,
"checkpointPort": 10903,
"snapshotPort": 10904,
"diskUsagePort": 10905,
"reindexPort": 10906,
"repairPort": 10907
}
}

View File

@ -1,414 +0,0 @@
const fs = require('fs');
const path = require('path');
const Joi = require('@hapi/joi');
const assert = require('assert');
const defaults = require('./defaults.json');
const werelogs = require('werelogs');
const {
truthy, envNamespace, allowedFilterFields, allowedFilterStates,
} = require('../constants');
const configSchema = require('./schema');
// We need to require the specific file rather than the parent module to avoid a circular require
const { parseDiskSizeSpec } = require('../utils/disk');
function _splitTrim(char, text) {
return text.split(char).map(v => v.trim());
}
function _splitServer(text) {
assert.notStrictEqual(text.indexOf(':'), -1);
const [host, port] = _splitTrim(':', text);
return {
host,
port: Number.parseInt(port, 10),
};
}
function _splitNode(text) {
assert.notStrictEqual(text.indexOf('='), -1);
const [nodeId, hostname] = _splitTrim('=', text);
return {
nodeId,
..._splitServer(hostname),
};
}
const _typeCasts = {
bool: val => truthy.has(val.toLowerCase()),
int: val => parseInt(val, 10),
list: val => _splitTrim(',', val),
serverList: val => _splitTrim(',', val).map(_splitServer),
nodeList: val => _splitTrim(',', val).map(_splitNode),
diskSize: parseDiskSizeSpec,
};
function _definedInEnv(key) {
return process.env[`${envNamespace}_${key}`] !== undefined;
}
function _loadFromEnv(key, defaultValue, type) {
const envKey = `${envNamespace}_${key}`;
const value = process.env[envKey];
if (value !== undefined) {
if (type !== undefined) {
return type(value);
}
return value;
}
return defaultValue;
}
const defaultConfigPath = path.join(__dirname, '../../config.json');
class Config {
/**
* Returns a new Config instance merging the loaded config with the provided values.
* Passed values override loaded ones recursively.
*
* @param {object} overrides - an object using the same structure as the config file
* @returns {Config} - New Config instance
*/
constructor(overrides) {
this._basePath = path.join(__dirname, '../../');
this._configPath = _loadFromEnv('CONFIG_FILE', defaultConfigPath);
this.host = undefined;
this.port = undefined;
this.healthChecks = undefined;
this.logging = { level: 'debug', dumpLevel: 'error' };
this.redis = undefined;
this.warp10 = undefined;
// read config automatically
const loadedConfig = this._loadConfig();
let parsedConfig = this._parseConfig(loadedConfig);
if (typeof overrides === 'object') {
parsedConfig = this._recursiveUpdate(parsedConfig, overrides);
}
Object.assign(this, parsedConfig);
werelogs.configure({
level: this.logging.level,
dump: this.logging.dumpLevel,
});
}
static _readFile(path, encoding = 'utf-8') {
try {
return fs.readFileSync(path, { encoding });
} catch (error) {
// eslint-disable-next-line no-console
console.error({ message: `error reading file at ${path}`, error });
throw error;
}
}
static _readJSON(path) {
const data = Config._readFile(path);
try {
return JSON.parse(data);
} catch (error) {
// eslint-disable-next-line no-console
console.error({ message: `error parsing JSON from file at ${path}`, error });
throw error;
}
}
_loadDefaults() {
return defaults;
}
_loadUserConfig() {
return Joi.attempt(
Config._readJSON(this._configPath),
configSchema,
'invalid Utapi config',
);
}
_recursiveUpdateArray(parent, child) {
const ret = [];
for (let i = 0; i < Math.max(parent.length, child.length); i += 1) {
ret[i] = this._recursiveUpdate(parent[i], child[i]);
}
return ret;
}
_recursiveUpdateObject(parent, child) {
return Array.from(
new Set([
...Object.keys(parent),
...Object.keys(child)],
// eslint-disable-next-line function-paren-newline
))
.reduce((ret, key) => {
// eslint-disable-next-line no-param-reassign
ret[key] = this._recursiveUpdate(parent[key], child[key]);
return ret;
}, {});
}
/**
* Given two nested Object/Array combos, walk each and return a new object
* with values from child overwriting parent.
* @param {*} parent - Initial value
* @param {*} child - New value
* @returns {*} - Merged value
*/
_recursiveUpdate(parent, child) {
// If no parent value use the child
if (parent === undefined) {
return child;
}
// If no child value use the parent
if (child === undefined) {
return parent;
}
if (Array.isArray(parent) && Array.isArray(child)) {
return this._recursiveUpdateArray(parent, child);
}
if (typeof parent === 'object' && typeof child === 'object') {
return this._recursiveUpdateObject(parent, child);
}
return child;
}
_loadConfig() {
const defaultConf = this._loadDefaults();
const userConf = this._loadUserConfig();
return this._recursiveUpdateObject(defaultConf, userConf);
}
static _parseRedisConfig(prefix, config) {
const redisConf = {
retry: config.retry,
};
if (config.sentinels || _definedInEnv(`${prefix}_SENTINELS`)) {
redisConf.name = _loadFromEnv(`${prefix}_NAME`, config.name);
redisConf.sentinels = _loadFromEnv(
`${prefix}_SENTINELS`,
config.sentinels,
_typeCasts.serverList,
);
redisConf.sentinelPassword = _loadFromEnv(
`${prefix}_SENTINEL_PASSWORD`,
config.sentinelPassword,
);
redisConf.password = _loadFromEnv(
`${prefix}_PASSWORD`,
config.password,
);
} else {
redisConf.host = _loadFromEnv(
`${prefix}_HOST`,
config.host,
);
redisConf.port = _loadFromEnv(
`${prefix}_PORT`,
config.port,
_typeCasts.int,
);
redisConf.password = _loadFromEnv(
`${prefix}_PASSWORD`,
config.password,
);
}
return redisConf;
}
_loadCertificates(config) {
const { key, cert, ca } = config;
const keyPath = path.isAbsolute(key) ? key : path.join(this._basePath, key);
const certPath = path.isAbsolute(cert) ? cert : path.join(this._basePath, cert);
const certs = {
cert: Config._readFile(certPath, 'ascii'),
key: Config._readFile(keyPath, 'ascii'),
};
if (ca) {
const caPath = path.isAbsolute(ca) ? ca : path.join(this._basePath, ca);
certs.ca = Config._readFile(caPath, 'ascii');
}
return certs;
}
static _parseResourceFilters(config) {
const resourceFilters = {};
allowedFilterFields.forEach(
field => allowedFilterStates.forEach(
state => {
const configResources = (config[state] && config[state][field]) || null;
const envVar = `FILTER_${field.toUpperCase()}_${state.toUpperCase()}`;
const resources = _loadFromEnv(envVar, configResources, _typeCasts.list);
if (resources) {
if (resourceFilters[field]) {
throw new Error('You can not define both an allow and a deny list for an event field.');
}
resourceFilters[field] = { [state]: new Set(resources) };
}
},
),
);
return resourceFilters;
}
_parseConfig(config) {
const parsedConfig = {};
parsedConfig.development = _loadFromEnv('DEV_MODE', config.development, _typeCasts.bool);
parsedConfig.nodeId = _loadFromEnv('NODE_ID', config.nodeId);
parsedConfig.host = _loadFromEnv('HOST', config.host);
parsedConfig.port = _loadFromEnv('PORT', config.port, _typeCasts.int);
const healthCheckFromEnv = _loadFromEnv(
'ALLOW_HEALTHCHECK',
[],
_typeCasts.list,
);
parsedConfig.healthChecks = {
allowFrom: healthCheckFromEnv.concat(config.healthChecks.allowFrom),
};
const certPaths = {
cert: _loadFromEnv('TLS_CERT', config.certFilePaths.cert),
key: _loadFromEnv('TLS_KEY', config.certFilePaths.key),
ca: _loadFromEnv('TLS_CA', config.certFilePaths.ca),
};
if (certPaths.key && certPaths.cert) {
parsedConfig.tls = this._loadCertificates(certPaths);
} else if (certPaths.key || certPaths.cert) {
throw new Error('bad config: both certFilePaths.key and certFilePaths.cert must be defined');
}
parsedConfig.redis = Config._parseRedisConfig('REDIS', config.redis);
parsedConfig.cache = Config._parseRedisConfig('REDIS_CACHE', config.localCache);
parsedConfig.cache.backend = _loadFromEnv('CACHE_BACKEND', config.cacheBackend);
const warp10Conf = {
readToken: _loadFromEnv('WARP10_READ_TOKEN', config.warp10.readToken),
writeToken: _loadFromEnv('WARP10_WRITE_TOKEN', config.warp10.writeToken),
requestTimeout: _loadFromEnv('WARP10_REQUEST_TIMEOUT', config.warp10.requestTimeout, _typeCasts.int),
connectTimeout: _loadFromEnv('WARP10_CONNECT_TIMEOUT', config.warp10.connectTimeout, _typeCasts.int),
};
if (Array.isArray(config.warp10.hosts) || _definedInEnv('WARP10_HOSTS')) {
warp10Conf.hosts = _loadFromEnv('WARP10_HOSTS', config.warp10.hosts, _typeCasts.nodeList);
} else {
warp10Conf.hosts = [{
host: _loadFromEnv('WARP10_HOST', config.warp10.host),
port: _loadFromEnv('WARP10_PORT', config.warp10.port, _typeCasts.int),
nodeId: _loadFromEnv('WARP10_NODE_ID', config.warp10.nodeId),
}];
}
parsedConfig.warp10 = warp10Conf;
parsedConfig.logging = {
level: parsedConfig.development
? 'debug'
: _loadFromEnv('LOG_LEVEL', config.log.logLevel),
dumpLevel: _loadFromEnv(
'LOG_DUMP_LEVEL',
config.log.dumpLevel,
),
};
parsedConfig.ingestionSchedule = _loadFromEnv('INGESTION_SCHEDULE', config.ingestionSchedule);
parsedConfig.checkpointSchedule = _loadFromEnv('CHECKPOINT_SCHEDULE', config.checkpointSchedule);
parsedConfig.snapshotSchedule = _loadFromEnv('SNAPSHOT_SCHEDULE', config.snapshotSchedule);
parsedConfig.repairSchedule = _loadFromEnv('REPAIR_SCHEDULE', config.repairSchedule);
parsedConfig.reindexSchedule = _loadFromEnv('REINDEX_SCHEDULE', config.reindexSchedule);
parsedConfig.diskUsageSchedule = _loadFromEnv('DISK_USAGE_SCHEDULE', config.diskUsageSchedule);
parsedConfig.ingestionLagSeconds = _loadFromEnv(
'INGESTION_LAG_SECONDS',
config.ingestionLagSeconds,
_typeCasts.int,
);
parsedConfig.ingestionShardSize = _loadFromEnv(
'INGESTION_SHARD_SIZE',
config.ingestionShardSize,
_typeCasts.int,
);
const diskUsage = {
path: _loadFromEnv('DISK_USAGE_PATH', (config.diskUsage || {}).path),
hardLimit: _loadFromEnv('DISK_USAGE_HARD_LIMIT', (config.diskUsage || {}).hardLimit),
retentionDays: _loadFromEnv(
'METRIC_RETENTION_PERIOD',
(config.diskUsage || {}).retentionDays, _typeCasts.int,
),
expirationEnabled: _loadFromEnv(
'METRIC_EXPIRATION_ENABLED',
(config.diskUsage || {}).expirationEnabled, _typeCasts.bool,
),
};
if (diskUsage.hardLimit !== undefined) {
diskUsage.hardLimit = parseDiskSizeSpec(diskUsage.hardLimit);
}
if (!diskUsage.path && diskUsage.hardLimit !== undefined) {
throw Error('You must specify diskUsage.path to monitor for disk usage');
} else if (diskUsage.path && diskUsage.hardLimit === undefined) {
throw Error('diskUsage.hardLimit must be specified');
} else if (diskUsage.expirationEnabled && diskUsage.retentionDays === undefined) {
throw Error('diskUsage.retentionDays must be specified');
}
diskUsage.enabled = diskUsage.path !== undefined;
parsedConfig.diskUsage = diskUsage;
parsedConfig.vaultd = {
host: _loadFromEnv('VAULT_HOST', config.vaultd.host),
port: _loadFromEnv('VAULT_PORT', config.vaultd.port),
};
parsedConfig.bucketd = _loadFromEnv('BUCKETD_BOOTSTRAP', config.bucketd, _typeCasts.serverList);
parsedConfig.serviceUser = {
arn: _loadFromEnv('SERVICE_USER_ARN', config.serviceUser.arn),
enabled: _loadFromEnv('SERVICE_USER_ENABLED', config.serviceUser.enabled, _typeCasts.bool),
};
parsedConfig.filter = Config._parseResourceFilters(config.filter);
parsedConfig.metrics = {
enabled: _loadFromEnv('METRICS_ENABLED', config.metrics.enabled, _typeCasts.bool),
host: _loadFromEnv('METRICS_HOST', config.metrics.host),
ingestPort: _loadFromEnv('METRICS_PORT_INGEST', config.metrics.ingestPort, _typeCasts.int),
checkpointPort: _loadFromEnv('METRICS_PORT_CHECKPOINT', config.metrics.checkpointPort, _typeCasts.int),
snapshotPort: _loadFromEnv('METRICS_PORT_SNAPSHOT', config.metrics.snapshotPort, _typeCasts.int),
diskUsagePort: _loadFromEnv('METRICS_PORT_DISK_USAGE', config.metrics.diskUsagePort, _typeCasts.int),
reindexPort: _loadFromEnv('METRICS_PORT_REINDEX', config.metrics.reindexPort, _typeCasts.int),
repairPort: _loadFromEnv('METRICS_PORT_REPAIR', config.metrics.repairPort, _typeCasts.int),
};
return parsedConfig;
}
/**
* Returns a new Config instance merging the loaded config with the provided values.
* Passed values override loaded ones recursively.
*
* @param {object} newConfig - an object using the same structure as the config file
* @returns {Config} - New Config instance
*/
static merge(newConfig) {
return new Config(newConfig);
}
}
module.exports = new Config();
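Two override paths follow from the parsing code above; a sketch with illustrative values (the variable names come from the _loadFromEnv calls, prefixed with the UTAPI_ namespace):
// 1. Environment variables read by _loadFromEnv:
//      UTAPI_PORT=8200 UTAPI_LOG_LEVEL=debug UTAPI_CACHE_BACKEND=redis node index.js
//
// 2. Programmatic overrides layered over the loaded config. merge() is a static
//    on the class; from the exported instance it is reachable via the constructor.
const config = require('./config'); // path assumed
const testConfig = config.constructor.merge({ port: 8200, development: true });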

View File

@ -1,130 +0,0 @@
const Joi = require('@hapi/joi');
const { allowedFilterFields, allowedFilterStates } = require('../constants');
const backoffSchema = Joi.object({
min: Joi.number(),
max: Joi.number(),
deadline: Joi.number(),
jitter: Joi.number(),
factor: Joi.number(),
});
const redisRetrySchema = Joi.object({
connectBackoff: backoffSchema,
});
const redisServerSchema = Joi.object({
host: Joi.string(),
port: Joi.number(),
password: Joi.string().allow(''),
retry: redisRetrySchema,
});
const redisSentinelSchema = Joi.object({
name: Joi.string().default('utapi'),
sentinels: Joi.array().items(Joi.object({
host: Joi.alternatives(Joi.string().hostname(), Joi.string().ip()),
port: Joi.number().port(),
})),
password: Joi.string().default('').allow(''),
sentinelPassword: Joi.string().default('').allow(''),
retry: redisRetrySchema,
});
const warp10SingleHost = Joi.object({
host: Joi.alternatives(Joi.string().hostname(), Joi.string().ip()),
port: Joi.number().port(),
readToken: Joi.string(),
writeToken: Joi.string(),
});
const warp10MultiHost = Joi.object({
hosts: Joi.array().items(Joi.object({
host: Joi.alternatives(Joi.string().hostname(), Joi.string().ip()),
port: Joi.number().port(),
nodeId: Joi.string(),
})),
readToken: Joi.string(),
writeToken: Joi.string(),
});
const tlsSchema = Joi.object({
key: Joi.string(),
cert: Joi.string(),
ca: Joi.string(),
});
const schema = Joi.object({
host: Joi.string(),
port: Joi.number().port(),
certFilePaths: tlsSchema.default({}),
workers: Joi.number(),
development: Joi.boolean(),
log: Joi.object({
logLevel: Joi.alternatives()
.try('error', 'warn', 'info', 'debug', 'trace'),
dumpLevel: Joi.alternatives()
.try('error', 'warn', 'info', 'debug', 'trace'),
}),
redis: Joi.alternatives().try(redisServerSchema, redisSentinelSchema),
localCache: Joi.alternatives().try(redisServerSchema, redisSentinelSchema),
warp10: Joi.alternatives().try(warp10SingleHost, warp10MultiHost),
healthChecks: Joi.object({
allowFrom: Joi.array().items(Joi.string()),
}),
vaultd: Joi.object({
host: Joi.string().hostname(),
port: Joi.number().port(),
}),
reindex: Joi.object({
enabled: Joi.boolean(),
schedule: Joi.string(),
}),
bucketd: Joi.array().items(Joi.string()),
expireMetrics: Joi.boolean(),
expireMetricsTTL: Joi.number(),
cacheBackend: Joi.string().valid('memory', 'redis'),
nodeId: Joi.string(),
ingestionSchedule: Joi.string(),
ingestionShardSize: Joi.number().greater(0),
ingestionLagSeconds: Joi.number().greater(0),
checkpointSchedule: Joi.string(),
snapshotSchedule: Joi.string(),
repairSchedule: Joi.string(),
reindexSchedule: Joi.string(),
diskUsageSchedule: Joi.string(),
diskUsage: Joi.object({
path: Joi.string(),
retentionDays: Joi.number().greater(0),
expirationEnabled: Joi.boolean(),
hardLimit: Joi.string(),
}),
serviceUser: Joi.object({
arn: Joi.string(),
enabled: Joi.boolean(),
}),
filter: Joi.object(allowedFilterStates.reduce(
(filterObj, state) => {
filterObj[state] = allowedFilterFields.reduce(
(stateObj, field) => {
stateObj[field] = Joi.array().items(Joi.string());
return stateObj;
}, {},
);
return filterObj;
}, {},
)),
metrics: {
enabled: Joi.boolean(),
host: Joi.string(),
ingestPort: Joi.number().port(),
checkpointPort: Joi.number().port(),
snapshotPort: Joi.number().port(),
diskUsagePort: Joi.number().port(),
reindexPort: Joi.number().port(),
repairPort: Joi.number().port(),
},
});
module.exports = schema;
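A sketch of the validation path Config._loadUserConfig applies, run here against an inline object with illustrative values:
const Joi = require('@hapi/joi');
const schema = require('./schema'); // path assumed

const candidate = {
    host: '127.0.0.1',
    port: 8100,
    redis: { host: '127.0.0.1', port: 6379 },
    cacheBackend: 'memory',
};

// Throws a ValidationError if, for example, cacheBackend is not 'memory' or 'redis'.
const validated = Joi.attempt(candidate, schema, 'invalid Utapi config');
console.log(validated.cacheBackend);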

View File

@ -1,133 +0,0 @@
const truthy = new Set([
'true',
'on',
'yes',
'y',
't',
'enabled',
'enable',
'1',
]);
const constants = {
envNamespace: 'UTAPI',
operations: [
'abortMultipartUpload',
'completeMultipartUpload',
'copyObject',
'createBucket',
'deleteBucket',
'deleteBucketCors',
'deleteBucketEncryption',
'deleteBucketLifecycle',
'deleteBucketReplication',
'deleteBucketTagging',
'deleteBucketWebsite',
'deleteObject',
'deleteObjectTagging',
'getBucketAcl',
'getBucketCors',
'getBucketEncryption',
'getBucketLifecycle',
'getBucketLocation',
'getBucketNotification',
'getBucketObjectLock',
'getBucketReplication',
'getBucketVersioning',
'getBucketTagging',
'getBucketWebsite',
'getObject',
'getObjectAcl',
'getObjectLegalHold',
'getObjectRetention',
'getObjectTagging',
'headBucket',
'headObject',
'initiateMultipartUpload',
'listBucket',
'listMultipartUploadParts',
'listMultipartUploads',
'multiObjectDelete',
'putBucketAcl',
'putBucketCors',
'putBucketEncryption',
'putBucketLifecycle',
'putBucketNotification',
'putBucketObjectLock',
'putBucketReplication',
'putBucketVersioning',
'putBucketTagging',
'putBucketWebsite',
'putDeleteMarkerObject',
'putObject',
'putObjectAcl',
'putObjectLegalHold',
'putObjectRetention',
'putObjectTagging',
'replicateDelete',
'replicateObject',
'replicateTags',
'uploadPart',
'uploadPartCopy',
],
eventFieldsToWarp10: {
operationId: 'op',
uuid: 'id',
bucket: 'bck',
object: 'obj',
versionId: 'vid',
account: 'acc',
user: 'usr',
location: 'loc',
objectDelta: 'objD',
sizeDelta: 'sizeD',
incomingBytes: 'inB',
outgoingBytes: 'outB',
operations: 'ops',
},
indexedEventFields: [
'acc',
'usr',
'bck',
],
serviceToWarp10Label: {
locations: 'loc',
accounts: 'acc',
users: 'usr',
buckets: 'bck',
},
warp10EventType: ':m:utapi/event:',
warp10RecordType: ':m:utapi/record:',
truthy,
checkpointLagSecs: 300,
snapshotLagSecs: 900,
repairLagSecs: 5,
counterBaseValueExpiration: 86400, // 24hrs
keyVersionSplitter: String.fromCharCode(0),
migrationChunksize: 500,
migrationOpTranslationMap: {
listBucketMultipartUploads: 'listMultipartUploads',
},
ingestionOpTranslationMap: {
putDeleteMarkerObject: 'deleteObject',
},
expirationChunkDuration: 900000000, // 15 minutes in microseconds
allowedFilterFields: [
'operationId',
'location',
'account',
'user',
'bucket',
],
allowedFilterStates: ['allow', 'deny'],
};
constants.operationToResponse = constants.operations
.reduce((prev, opId) => {
prev[opId] = `s3:${opId.charAt(0).toUpperCase() + opId.slice(1)}`;
return prev;
}, {});
module.exports = constants;
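For reference, a few entries of the derived operationToResponse map and the warp10 field abbreviations defined above:
// operationToResponse:
//   getObject         -> 's3:GetObject'
//   putObject         -> 's3:PutObject'
//   multiObjectDelete -> 's3:MultiObjectDelete'
// eventFieldsToWarp10 (event field -> stored label):
//   bucket -> 'bck', account -> 'acc', user -> 'usr',
//   sizeDelta -> 'sizeD', incomingBytes -> 'inB', outgoingBytes -> 'outB'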

View File

@ -1,42 +0,0 @@
{
"AccessDenied": {
"code": 403,
"description": "Access Denied"
},
"InternalError": {
"code": 500,
"description": "The server encountered an internal error. Please retry the request."
},
"InvalidUri": {
"code": 400,
"description": "The requested URI does not represent any resource on the server."
},
"NotImplemented": {
"code": 501,
"description": "This operation has yet to be implemented."
},
"OperationIdNotFound": {
"code": 404,
"description": "The specified operation Id is not found."
},
"OperationTimedOut": {
"code": 500,
"description": "The operation could not be completed within the permitted time."
},
"ServiceUnavailable": {
"code": 503,
"description": "The server (or an internal component) is currently unavailable to receive requests. Please retry your request."
},
"InvalidRequest": {
"code": 400,
"description": "Request validation error"
},
"FailedMigration": {
"code": 1000,
"description": "failed to migrate metrics"
},
"FailedCorrection": {
"code": 1001,
"description": "failed to correct migrated metric"
}
}

View File

@ -1,27 +0,0 @@
/* eslint-disable no-param-reassign */
const utapiErrors = require('./errors.json');
class UtapiError extends Error {
constructor(type, code, desc) {
super(type);
this.code = code;
this.description = desc;
this[type] = true;
this.utapiError = true;
}
customizeDescription(description) {
return new UtapiError(this.message, this.code, description);
}
}
function errorsGen() {
return Object.keys(utapiErrors)
.reduce((errors, name) => {
errors[name] = new UtapiError(name, utapiErrors[name].code,
utapiErrors[name].description);
return errors;
}, {});
}
module.exports = errorsGen();
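A short sketch of how the generated error instances are consumed (the description string is illustrative):
const errors = require('./errors'); // path assumed

try {
    throw errors.InvalidUri.customizeDescription('no resource at /v2/unknown');
} catch (err) {
    // err.code === 400, err.utapiError === true, err.InvalidUri === true
    console.log(err.code, err.message, err.description);
}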

View File

@ -1,16 +0,0 @@
const bucketclient = require('bucketclient');
const { BucketClientInterface } = require('arsenal').storage.metadata.bucketclient;
const config = require('../config');
const { LoggerContext } = require('../utils');
const moduleLogger = new LoggerContext({
module: 'metadata.client',
});
const params = {
bucketdBootstrap: config.bucketd,
https: config.tls,
};
module.exports = new BucketClientInterface(params, bucketclient, moduleLogger);

View File

@ -1,141 +0,0 @@
/* eslint-disable no-restricted-syntax */
const arsenal = require('arsenal');
const async = require('async');
const metadata = require('./client');
const { LoggerContext, logger } = require('../utils');
const { keyVersionSplitter } = require('../constants');
const { usersBucket, splitter: mdKeySplitter, mpuBucketPrefix } = arsenal.constants;
const { BucketInfo } = arsenal.models;
const moduleLogger = new LoggerContext({
module: 'metadata.client',
});
const ebConfig = {
times: 10,
interval: retryCount => 50 * (2 ** retryCount),
};
const PAGE_SIZE = 1000;
async function _listingWrapper(bucket, params) {
return new Promise(
(resolve, reject) => metadata.listObject(
bucket,
params,
logger.newRequestLogger(),
(err, res) => {
if (err) {
reject(err);
return;
}
resolve(res);
},
),
);
}
function _listObject(bucket, prefix, hydrateFunc) {
const listingParams = { prefix, maxKeys: PAGE_SIZE, listingType: 'Basic' };
let gt;
return {
async* [Symbol.asyncIterator]() {
while (true) {
let res;
try {
// eslint-disable-next-line no-await-in-loop
res = await async.retryable(ebConfig, _listingWrapper)(bucket, { ...listingParams, gt });
} catch (error) {
moduleLogger.error('Error during listing', { error });
throw error;
}
for (const item of res) {
yield hydrateFunc ? hydrateFunc(item) : item;
}
if (res.length !== PAGE_SIZE) {
break;
}
gt = res[res.length - 1].key;
}
},
};
}
function listObjects(bucket) {
return _listObject(bucket, '', data => {
const { key, value } = data;
const [name, version] = key.split(keyVersionSplitter);
return {
name,
version,
value: JSON.parse(value),
};
});
}
function listBuckets() {
return _listObject(usersBucket, '', data => {
const { key, value } = data;
const [account, name] = key.split(mdKeySplitter);
return {
account,
name,
value: JSON.parse(value),
};
});
}
async function listMPUs(bucket) {
const mpuBucket = `${mpuBucketPrefix}${bucket}`;
return _listObject(mpuBucket, '', data => {
const { key, value } = data;
const [account, name] = key.split(mdKeySplitter);
return {
account,
name,
value: JSON.parse(value),
};
});
}
function bucketExists(bucket) {
return new Promise((resolve, reject) => metadata.getBucketAttributes(
bucket,
logger.newRequestLogger(),
err => {
if (err && (!err.is || !err.is.NoSuchBucket)) {
reject(err);
return;
}
resolve(err === null);
},
));
}
function getBucket(bucket) {
return new Promise((resolve, reject) => {
metadata.getBucketAttributes(
bucket,
logger.newRequestLogger(), (err, data) => {
if (err) {
reject(err);
return;
}
resolve(BucketInfo.fromObj(data));
},
);
});
}
module.exports = {
listBuckets,
listObjects,
listMPUs,
bucketExists,
getBucket,
};
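A sketch of consuming the paginated listings; the require path and bucket name are assumptions.
const metadata = require('./index');

async function countObjects(bucket) {
    let total = 0;
    // listObjects returns an async-iterable that pages through PAGE_SIZE keys at a time
    for await (const obj of metadata.listObjects(bucket)) {
        total += 1;
        console.log(obj.name, obj.version);
    }
    return total;
}

countObjects('my-bucket').then(n => console.log(`${n} objects`)).catch(console.error);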

View File

@ -1,71 +0,0 @@
const Joi = require('@hapi/joi');
class BaseModel {
constructor(data) {
this._data = data || {};
}
_get(key, defaultValue) {
const val = this._data[key];
return val === undefined ? defaultValue : val;
}
_set(key, value) {
this._data[key] = value;
return this;
}
getValue() {
return this._data;
}
}
/*
Builds a flexible data container with automatic field checking using `hapi/joi`.
Getters and Setters are automatically created for fields so they can be accessed using `.` notation.
@param name - Name for the model. Used as the internal js class name.
@param schema - An object of joi schemas. Keys are used as field names and
the schemas are used to typecheck values.
@returns - A subclass of BaseModel
*/
function buildModel(name, schema) {
class Model extends BaseModel {
constructor(data) {
if (data !== undefined) {
Object.entries(data).forEach(([key, value]) => {
if (schema[key]) {
Joi.attempt(value, schema[key]);
}
});
}
super(data);
}
_set(key, value) {
if (schema[key]) {
Joi.attempt(value, schema[key]);
}
return super._set(key, value);
}
}
Object.defineProperty(Model, 'name', { value: name });
Object.keys(schema).forEach(key =>
Object.defineProperty(Model.prototype, key, {
// `function` is used rather than `=>` to work around context problem with `this`
/* eslint-disable func-names, object-shorthand */
get: function () {
return this._get(key);
},
set: function (value) {
this._set(key, value);
},
/* eslint-enable func-names, object-shorthand */
}));
return Model;
}
module.exports = {
BaseModel,
buildModel,
};
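To make the generated getters and setters concrete, a sketch defining a throwaway model (field names are illustrative):
const Joi = require('@hapi/joi');
const { buildModel } = require('./Base'); // path assumed

const ExampleEvent = buildModel('ExampleEvent', {
    bucket: Joi.string(),
    sizeDelta: Joi.number(),
});

const ev = new ExampleEvent({ bucket: 'my-bucket' });
ev.sizeDelta = 1024;        // generated setter, validated against Joi.number()
console.log(ev.bucket);     // generated getter -> 'my-bucket'
console.log(ev.getValue()); // { bucket: 'my-bucket', sizeDelta: 1024 }
// ev.sizeDelta = 'oops';   // would throw a Joi ValidationError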

View File

@ -1,75 +0,0 @@
const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');
const { apiOperations } = require('../server/spec');
const ResponseContainer = require('./ResponseContainer');
const { httpRequestDurationSeconds } = require('../server/metrics');
const apiTags = Object.keys(apiOperations);
const apiOperationIds = Object.values(apiOperations)
.reduce((ids, ops) => {
ops.forEach(id => ids.add(id));
return ids;
}, new Set());
const contextSchema = {
host: Joi.string(),
protocol: Joi.string().valid('http', 'https'),
url: Joi.string().uri({ scheme: ['http', 'https'] }),
operationId: Joi.string().valid(...apiOperationIds),
tag: Joi.string().valid(...apiTags),
encrypted: Joi.boolean(),
logger: Joi.any(),
request: Joi.any(),
results: Joi.any(),
requestTimer: Joi.any(),
};
const RequestContextModel = buildModel('RequestContext', contextSchema);
class RequestContext extends RequestContextModel {
constructor(request) {
const host = request.headers.host || 'localhost';
const protocol = RequestContext._determineProtocol(request);
const encrypted = protocol === 'https';
const url = `${protocol}://${host}${request.url}`;
const tag = request.swagger.operation['x-router-controller'];
const { operationId } = request.swagger.operation;
const requestTimer = tag !== 'internal'
? httpRequestDurationSeconds.startTimer({ action: operationId })
: null;
request.logger.logger.addDefaultFields({
tag,
operationId,
service: 'utapi',
});
super({
request,
host,
url,
protocol,
operationId,
tag,
encrypted,
results: new ResponseContainer(),
logger: request.logger,
requestTimer,
});
}
static _determineProtocol(request) {
// Respect the X-Forwarded-Proto header if set
if (request.headers['x-forwarded-proto']) {
return request.headers['x-forwarded-proto'] === 'https' ? 'https' : 'http';
}
// Use req.connection.encrypted for fallback
return request.connection.encrypted !== undefined
&& request.connection.encrypted ? 'https' : 'http';
}
}
module.exports = RequestContext;

View File

@ -1,31 +0,0 @@
const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');
const orNull = schema => Joi.alternatives(schema, Joi.any().valid(null));
const responseSchema = {
body: Joi.any(),
statusCode: orNull(Joi.number().min(100).max(599)),
redirect: orNull(Joi.string().uri({ scheme: ['http', 'https'], allowRelative: true })),
};
const ResponseContainerModel = buildModel('ResponseContainer', responseSchema);
class ResponseContainer extends ResponseContainerModel {
constructor() {
super({ body: null, statusCode: null, redirect: null });
}
hasBody() {
return this.body !== null;
}
hasStatusCode() {
return this.statusCode !== null;
}
hasRedirect() {
return this.redirect !== null;
}
}
module.exports = ResponseContainer;

View File

@ -1,22 +0,0 @@
const Joi = require('@hapi/joi');
const { operations } = require('../constants');
const { buildModel } = require('./Base');
const metricSchema = {
operationId: Joi.string().valid(...operations),
uuid: Joi.string(),
timestamp: Joi.number(),
bucket: Joi.string(),
object: Joi.string(),
versionId: Joi.string(),
account: Joi.string(),
user: Joi.string(),
location: Joi.string(),
objectDelta: Joi.number(),
sizeDelta: Joi.number(),
incomingBytes: Joi.number(),
outgoingBytes: Joi.number(),
};
module.exports = buildModel('UtapiMetric', metricSchema);

View File

@ -1,13 +0,0 @@
const Joi = require('@hapi/joi');
const { buildModel } = require('./Base');
const recordSchema = {
timestamp: Joi.number(),
objectDelta: Joi.number(),
sizeDelta: Joi.number(),
incomingBytes: Joi.number(),
outgoingBytes: Joi.number(),
operations: Joi.object(),
};
module.exports = buildModel('UtapiRecord', recordSchema);

View File

@ -1,13 +0,0 @@
const BaseModel = require('./Base');
const UtapiMetric = require('./UtapiMetric');
const UtapiRecord = require('./UtapiRecord');
const RequestContext = require('./RequestContext');
const ResponseContainer = require('./ResponseContainer');
module.exports = {
BaseModel,
UtapiMetric,
RequestContext,
ResponseContainer,
UtapiRecord,
};

View File

@ -1,45 +0,0 @@
const { EventEmitter } = require('events');
const os = require('os');
const { Command } = require('commander');
const { logger } = require('./utils');
class Process extends EventEmitter {
constructor(...options) {
super(...options);
this._program = null;
}
async setup() {
const cleanUpFunc = this.join.bind(this);
['SIGINT', 'SIGQUIT', 'SIGTERM'].forEach(eventName => {
process.on(eventName, cleanUpFunc);
});
process.on('uncaughtException', error => {
logger.error('uncaught exception',
{ error, stack: error.stack.split(os.EOL) });
cleanUpFunc();
});
this._program = new Command();
await this._setup();
}
async start() {
this._program.parse(process.argv);
await this._start();
}
async join() {
this.emit('exit');
await this._join();
}
/* eslint-disable class-methods-use-this,no-empty-function */
async _setup() {}
async _start() {}
async _join() {}
/* eslint-enable class-methods-use-this,no-empty-function */
}
module.exports = Process;
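A sketch of a task built on this base class; the class name and log messages are illustrative.
const Process = require('./process'); // path assumed

class ExampleTask extends Process {
    async _setup() {
        // this._program is the commander instance created in setup()
        this._program.option('-n, --dry-run', 'log instead of writing');
    }

    async _start() {
        console.log('task started');
    }

    async _join() {
        console.log('shutting down cleanly');
    }
}

(async () => {
    const task = new ExampleTask();
    await task.setup();
    await task.start();
})().catch(console.error);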

View File

@ -1,211 +0,0 @@
const EventEmitter = require('events');
const { callbackify, promisify } = require('util');
const IORedis = require('ioredis');
const { jsutil } = require('arsenal');
const BackOff = require('backo');
const { whilst } = require('async');
const errors = require('./errors');
const { LoggerContext } = require('./utils/log');
const { asyncOrCallback } = require('./utils/func');
const moduleLogger = new LoggerContext({
module: 'redis',
});
const COMMAND_TIMEOUT = 10000;
const CONNECTION_TIMEOUT = 30000;
/**
 * Redis client wrapper providing reconnection, backoff and optional command timeouts
 * @param {object} options - redis configuration
 * @param {string} options.host - redis host
 * @param {number} options.port - redis port
 * @param {string} [options.password] - redis password (optional)
 * @param {string} [options.sentinelPassword] - sentinel password (optional)
 * @param {Array<Object>} [options.sentinels] - sentinels (optional, enables command timeouts)
 * @param {object} [options.retry] - retry configuration with a connectBackoff object (optional)
 */
class RedisClient extends EventEmitter {
constructor(options) {
super();
this._redisOptions = options;
this._redis = null;
// Controls the use of additional command timeouts
// Only use if connecting to a sentinel cluster
this._useTimeouts = options.sentinels !== undefined;
this._inFlightTimeouts = this._useTimeouts ? new Set() : null;
this._runningRedisProbe = null;
this._isConnected = false;
this._isReady = false;
}
connect(callback) {
this._initClient(false);
if (callback) {
process.nextTick(callback);
}
}
disconnect(callback) {
return asyncOrCallback(async () => {
if (this._useTimeouts) {
Object.values(this._inFlightTimeouts)
.forEach(clearTimeout);
}
if (this._redis !== null) {
await this._redis.quit();
this._redis = null;
}
}, callback);
}
get isReady() {
return this._isConnected && this._isReady;
}
_initClient(startProbe = true) {
moduleLogger.debug('initializing redis client');
if (this._redis !== null) {
this._redis.off('connect', this._onConnect);
this._redis.off('ready', this._onReady);
this._redis.off('error', this._onError);
this._redis.disconnect();
}
this._isConnected = false;
this._isReady = false;
this._redis = new IORedis(this._redisOptions);
this._redis.on('connect', this._onConnect.bind(this));
this._redis.on('ready', this._onReady.bind(this));
this._redis.on('error', this._onError.bind(this));
if (startProbe && this._runningRedisProbe === null) {
this._runningRedisProbe = setInterval(this._probeRedis.bind(this), CONNECTION_TIMEOUT);
}
}
_probeRedis() {
if (this.isReady) {
moduleLogger.debug('redis client is ready, clearing reinitialize interval');
clearInterval(this._runningRedisProbe);
this._runningRedisProbe = null;
} else {
moduleLogger.warn('redis client has failed to become ready, reinitializing');
this._initClient();
}
}
_onConnect() {
this._isConnected = true;
this.emit('connect');
}
_onReady() {
this._isReady = true;
this.emit('ready');
}
_onError(error) {
this._isReady = false;
moduleLogger.error('error connecting to redis', { error });
if (this.listenerCount('error') > 0) {
this.emit('error', error);
}
}
_createCommandTimeout() {
let timer;
let onTimeout;
const cancelTimeout = jsutil.once(() => {
clearTimeout(timer);
this.off('timeout', onTimeout);
this._inFlightTimeouts.delete(timer);
});
const timeout = new Promise((_, reject) => {
timer = setTimeout(this.emit.bind(this, 'timeout'), COMMAND_TIMEOUT);
this._inFlightTimeouts.add(timer);
onTimeout = () => {
moduleLogger.warn('redis command timed out');
cancelTimeout();
this._initClient();
reject(errors.OperationTimedOut);
};
this.once('timeout', onTimeout);
});
return { timeout, cancelTimeout };
}
async _call(asyncFunc) {
const start = Date.now();
const { connectBackoff } = this._redisOptions.retry || {};
const backoff = new BackOff(connectBackoff);
const timeoutMs = (connectBackoff || {}).deadline || 2000;
let retried = false;
return new Promise((resolve, reject) => {
whilst(
next => { // WARNING: test is asynchronous in `async` v3
if (!connectBackoff && !this.isReady) {
moduleLogger.warn('redis not ready and backoff is not configured');
}
process.nextTick(next, null, !!connectBackoff && !this.isReady);
},
next => {
retried = true;
if ((Date.now() - start) > timeoutMs) {
moduleLogger.error('redis still not ready after max wait, giving up', { timeoutMs });
return next(errors.InternalError.customizeDescription(
'redis client is not ready',
));
}
const backoffDurationMs = backoff.duration();
moduleLogger.error('redis not ready, retrying', { backoffDurationMs });
return setTimeout(next, backoffDurationMs);
},
err => {
if (err) {
return reject(err);
}
if (retried) {
moduleLogger.info('redis connection recovered', {
recoveryOverheadMs: Date.now() - start,
});
}
const funcPromise = asyncFunc(this._redis);
if (!this._useTimeouts) {
// If timeouts are disabled simply return the Promise
return resolve(funcPromise);
}
const { timeout, cancelTimeout } = this._createCommandTimeout();
try {
// timeout always rejects so we can just return
return resolve(Promise.race([funcPromise, timeout]));
} finally {
cancelTimeout();
}
},
);
});
}
call(func, callback) {
if (callback !== undefined) {
// If a callback is provided `func` is assumed to also take a callback
// and is converted to a promise using promisify
return callbackify(this._call.bind(this))(promisify(func), callback);
}
return this._call(func);
}
}
module.exports = RedisClient;
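A sketch of both calling conventions of call(); connection values are assumptions.
const RedisClient = require('./redis'); // path assumed

const client = new RedisClient({ host: '127.0.0.1', port: 6379 });
client.connect();

// Promise form: the function receives the underlying ioredis instance.
client.call(redis => redis.set('utapi:example', 'value'))
    .then(() => client.call(redis => redis.get('utapi:example')))
    .then(value => console.log(value))
    .catch(error => console.error(error));

// Callback form: the wrapped function takes a node-style callback as its last argument.
client.call((redis, cb) => redis.get('utapi:example', cb), (error, value) => {
    if (!error) {
        console.log(value);
    }
});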

View File

@ -1,5 +0,0 @@
const ApiController = require('../controller');
const controller = new ApiController('internal');
module.exports = controller.buildMap();

View File

@ -1,6 +0,0 @@
async function healthcheck(ctx) {
// eslint-disable-next-line no-param-reassign
ctx.results.statusCode = 200;
}
module.exports = healthcheck;

View File

@ -1,14 +0,0 @@
const { collectDefaultMetrics, register } = require('prom-client');
collectDefaultMetrics({
timeout: 10000,
gcDurationBuckets: [0.001, 0.01, 0.1, 1, 2, 5],
});
async function prometheusMetrics(ctx) {
// eslint-disable-next-line no-param-reassign
ctx.results.statusCode = 200;
ctx.results.body = await register.metrics();
}
module.exports = prometheusMetrics;

View File

@ -1,5 +0,0 @@
const ApiController = require('../controller');
const controller = new ApiController('metrics');
module.exports = controller.buildMap();

View File

@ -1,63 +0,0 @@
const errors = require('../../../errors');
const { serviceToWarp10Label } = require('../../../constants');
const { clients: warp10Clients } = require('../../../warp10');
const { client: cache } = require('../../../cache');
const { now, iterIfError } = require('../../../utils');
/**
*
* @param {RequestContext} ctx - request context
* @param {object} params - request parameters
* @param {string} params.level - metric level
* @param {string} params.resource - Id of the requested resource
* @returns {Promise<undefined>} -
*/
async function getStorage(ctx, params) {
const { level, resource } = params;
if (level !== 'accounts') {
throw errors.BadRequest
.customizeDescription(`Unsupported level "${level}". Only "accounts" is currently supported`);
}
const [counter, base] = await cache.fetchAccountSizeCounter(resource);
let storageUtilized;
if (base !== null) {
storageUtilized = counter + base;
} else {
const labelName = serviceToWarp10Label[params.level];
const labels = { [labelName]: resource };
const res = await iterIfError(warp10Clients, warp10 => {
const options = {
params: {
end: now(),
labels,
node: warp10.nodeId,
},
macro: 'utapi/getMetricsAt',
};
return warp10.exec(options);
}, error => ctx.logger.error('error while fetching metrics', { error }));
if (res.result.length === 0) {
ctx.logger.error('unable to retrieve metrics', { level, resource });
throw errors.InternalError;
}
const { sizeD: currentSize } = res.result[0];
await cache.updateAccountCounterBase(resource, currentSize);
storageUtilized = currentSize;
}
ctx.results.statusCode = 200;
ctx.results.body = {
storageUtilized: Math.max(storageUtilized, 0),
resource,
level,
};
}
module.exports = getStorage;
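A worked example of the counter/base arithmetic above (numbers illustrative):
// warm cache: fetchAccountSizeCounter -> [counter: 2048, base: 1000000]
//             storageUtilized = 2048 + 1000000 = 1002048
// cold cache: base is null, so warp10 is queried; if it reports sizeD = 1000000,
//             updateAccountCounterBase stores base = 1000000, resets the counter
//             to 0, and 1000000 is returned.
// The base expires after counterBaseValueExpiration (24h), forcing a periodic
// re-sync against warp10.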

View File

@ -1,27 +0,0 @@
const errors = require('../../../errors');
const { UtapiMetric } = require('../../../models');
const { client: cacheClient } = require('../../../cache');
const { convertTimestamp } = require('../../../utils');
const { ingestionOpTranslationMap } = require('../../../constants');
async function ingestMetric(ctx, params) {
let metrics;
try {
metrics = params.body.map(m => new UtapiMetric({
...m,
timestamp: convertTimestamp(m.timestamp),
operationId: ingestionOpTranslationMap[m.operationId] || m.operationId,
}));
} catch (error) {
throw errors.InvalidRequest;
}
try {
await Promise.all(metrics.map(m => cacheClient.pushMetric(m)));
} catch (error) {
throw errors.ServiceUnavailable;
}
// eslint-disable-next-line no-param-reassign
ctx.results.statusCode = 200;
}
module.exports = ingestMetric;

View File

@ -1,113 +0,0 @@
const errors = require('../../../errors');
const { serviceToWarp10Label, operationToResponse } = require('../../../constants');
const { convertTimestamp, iterIfError } = require('../../../utils');
const { clients: warp10Clients } = require('../../../warp10');
const emptyOperationsResponse = Object.values(operationToResponse)
.reduce((prev, key) => {
prev[key] = 0;
return prev;
}, {});
const metricResponseKeys = {
buckets: 'bucketName',
accounts: 'accountId',
users: 'userId',
service: 'serviceName',
};
function positiveOrZero(value) {
return Math.max(value, 0);
}
async function listMetric(ctx, params) {
const labelName = serviceToWarp10Label[params.level];
const resources = params.body[params.level];
let [start, end] = params.body.timeRange;
if (end === undefined) {
end = Date.now();
}
let results;
try {
// A separate request will be made to warp 10 per requested resource
results = await Promise.all(
resources.map(async ({ resource, id }) => {
const labels = { [labelName]: id };
const res = await iterIfError(warp10Clients, warp10 => {
const options = {
params: {
start: convertTimestamp(start).toString(),
end: convertTimestamp(end).toString(),
labels,
node: warp10.nodeId,
},
macro: 'utapi/getMetrics',
};
return warp10.exec(options);
}, error => ctx.logger.error('error during warp 10 request', {
error,
requestParams: {
start,
end,
labels,
},
}));
if (res.result.length === 0) {
ctx.logger.error('unable to retrieve metrics', { resource, type: params.level });
throw errors.InternalError;
}
const rawMetrics = JSON.parse(res.result[0]);
// Due to various error cases it is possible for metrics in utapi to go negative.
// As this is nonsensical to the user we replace any negative values with zero.
const metrics = {
storageUtilized: rawMetrics.storageUtilized.map(positiveOrZero),
numberOfObjects: rawMetrics.numberOfObjects.map(positiveOrZero),
incomingBytes: positiveOrZero(rawMetrics.incomingBytes),
outgoingBytes: positiveOrZero(rawMetrics.outgoingBytes),
operations: rawMetrics.operations,
};
return {
resource,
metrics,
};
}),
);
} catch (error) {
ctx.logger.error('error fetching metrics from warp10', { error });
throw errors.InternalError;
}
// Convert the results from warp10 into the expected response format
const resp = results
.map(result => {
const operations = Object.entries(result.metrics.operations)
.reduce((prev, [key, value]) => {
prev[operationToResponse[key]] = value;
return prev;
}, {});
const metric = {
...result.metrics,
timeRange: [start, end],
operations: {
...emptyOperationsResponse,
...operations,
},
};
metric[metricResponseKeys[params.level]] = result.resource;
return metric;
});
ctx.results.body = resp;
ctx.results.statusCode = 200;
}
module.exports = listMetric;
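A sketch of the reshaping this handler performs; the raw warp10 record and resource names are illustrative.
// warp10 result for a "buckets" request on resource 'my-bucket':
//   { storageUtilized: [0, 2048], numberOfObjects: [0, 2],
//     incomingBytes: 2048, outgoingBytes: 0, operations: { putObject: 2 } }
// becomes, after mapping through operationToResponse and metricResponseKeys:
//   { bucketName: 'my-bucket',
//     timeRange: [start, end],
//     storageUtilized: [0, 2048],
//     numberOfObjects: [0, 2],
//     incomingBytes: 2048,
//     outgoingBytes: 0,
//     operations: { 's3:PutObject': 2, 's3:GetObject': 0 /* every other op zeroed */ } }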

Some files were not shown because too many files have changed in this diff.