Compare commits

1 Commits

developmen ... user/jonat

| Author | SHA1 | Date |
|---|---|---|
| Jonathan Gramain | cbc28e8bca | |
@ -1,87 +0,0 @@

# General support information

GitHub Issues are **reserved** for actionable bug reports (including
documentation inaccuracies), and feature requests.
**All questions** (regarding configuration, usecases, performance, community,
events, setup and usage recommendations, among other things) should be asked on
the **[Zenko Forum](http://forum.zenko.io/)**.

> Questions opened as GitHub issues will systematically be closed, and moved to
> the [Zenko Forum](http://forum.zenko.io/).

--------------------------------------------------------------------------------

## Avoiding duplicates

When reporting a new issue/requesting a feature, make sure that we do not have
any duplicates already open:

- search the issue list for this repository (use the search bar, select
  "Issues" on the left pane after searching);
- if there is a duplicate, please do not open your issue, and add a comment
  to the existing issue instead.

--------------------------------------------------------------------------------

## Bug report information

(delete this section (everything between the lines) if you're not reporting a
bug but requesting a feature)

### Description

Briefly describe the problem you are having in a few paragraphs.

### Steps to reproduce the issue

Please provide steps to reproduce, including full log output

### Actual result

Describe the results you received

### Expected result

Describe the results you expected

### Additional information

- Node.js version,
- Docker version,
- npm version,
- distribution/OS,
- optional: anything else you deem helpful to us.

--------------------------------------------------------------------------------

## Feature Request

(delete this section (everything between the lines) if you're not requesting
a feature but reporting a bug)

### Proposal

Describe the feature

### Current behavior

What currently happens

### Desired behavior

What you would like to happen

### Usecase

Please provide usecases for changing the current behavior

### Additional information

- Is this request for your company? Y/N
  - If Y: Company name:
  - Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
- Are you willing to contribute this feature yourself?
- Position/Title:
- How did you hear about us?

--------------------------------------------------------------------------------
37 README.md

@ -3,8 +3,9 @@

![Utapi logo](res/utapi-logo.png)

[![Circle CI][badgepub]](https://circleci.com/gh/scality/utapi)
[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/utapi)

Service Utilization API for tracking resource usage and metrics reporting.
Service Utilization API for tracking resource usage and metrics reporting

## Design

@ -87,13 +88,13 @@ Server is running.

1. Create an IAM user

   ```
   aws iam --endpoint-url <endpoint> create-user --user-name <user-name>
   aws iam --endpoint-url <endpoint> create-user --user-name utapiuser
   ```

2. Create access key for the user

   ```
   aws iam --endpoint-url <endpoint> create-access-key --user-name <user-name>
   aws iam --endpoint-url <endpoint> create-access-key --user-name utapiuser
   ```

3. Define a managed IAM policy

@ -202,11 +203,12 @@ Server is running.

5. Attach user to the managed policy

   ```
   aws --endpoint-url <endpoint> iam attach-user-policy --user-name
   <user-name> --policy-arn <policy arn>
   aws --endpoint-url <endpoint> iam attach-user-policy --user-name utapiuser
   --policy-arn <policy arn>
   ```

Now the user has access to ListMetrics request in Utapi on all buckets.
Now the user `utapiuser` has access to ListMetrics request in Utapi on all
buckets.

### Signing request with Auth V4

@ -222,18 +224,16 @@ following urls for reference.

You may also view examples making a request with Auth V4 using various languages
and AWS SDKs [here](/examples).

Alternatively, you can use a nifty command line tool available in Scality's
CloudServer.
Alternatively, you can use a nifty command line tool available in Scality's S3.

You can git clone the CloudServer repo from here
https://github.com/scality/cloudserver and follow the instructions in the README
to install the dependencies.
You can git clone S3 repo from here https://github.com/scality/S3.git and follow
the instructions in README to install the dependencies.

If you have CloudServer running inside a docker container you can docker exec
into the CloudServer container as
If you have S3 running inside a docker container you can docker exec into the S3
container as

```
docker exec -it <container-id> bash
docker exec -it <container id> bash
```

and then run the command

@ -271,7 +271,7 @@ Usage: list_metrics [options]

    -v, --verbose

An example call to list metrics for a bucket `demo` to Utapi in a https enabled
A typical call to list metrics for a bucket `demo` to Utapi in a https enabled
deployment would be

@ -283,7 +283,7 @@ Both start and end times are time expressed as UNIX epoch timestamps **expressed

in milliseconds**.

Keep in mind, since Utapi metrics are normalized to the nearest 15 min.
interval, start time and end time need to be in the specific format as follows.
interval, so start time and end time need to be in specific format as follows.

#### Start time

@ -297,7 +297,7 @@ Date: Tue Oct 11 2016 17:35:25 GMT-0700 (PDT)

Unix timestamp (milliseconds): 1476232525320

Here's an example JS method to get a start timestamp
Here's a typical JS method to get start timestamp

```javascript
function getStartTimestamp(t) {
```

@ -317,7 +317,7 @@ seconds and milliseconds set to 59 and 999 respectively. So valid end timestamps

would look something like `09:14:59:999`, `09:29:59:999`, `09:44:59:999` and
`09:59:59:999`.

Here's an example JS method to get an end timestamp
Here's a typical JS method to get end timestamp

```javascript
function getEndTimestamp(t) {
```

@ -342,3 +342,4 @@ In order to contribute, please follow the

https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).

[badgepub]: http://circleci.com/gh/scality/utapi.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/utapi.svg?style=svg
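The bodies of `getStartTimestamp` and `getEndTimestamp` are cut off by the diff viewer above. As a minimal sketch only, assuming nothing beyond the 15-minute normalization rules the README describes (start times snap to 0 seconds/0 milliseconds, end times to 59 seconds/999 milliseconds), such helpers could look like:

```javascript
// Sketch: floor t to the start of its 15-minute interval.
// Date.prototype.setMinutes returns the updated UNIX epoch in milliseconds.
function getStartTimestamp(t) {
    const time = new Date(t);
    const minutes = time.getMinutes();
    return time.setMinutes(minutes - (minutes % 15), 0, 0);
}

// Sketch: last millisecond of the interval, i.e. ...:59.999.
// Jump to the next interval boundary, then step back one millisecond.
function getEndTimestamp(t) {
    const time = new Date(t);
    const minutes = time.getMinutes();
    return time.setMinutes(minutes - (minutes % 15) + 15, 0, -1);
}
```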
@ -1,47 +0,0 @@

#!/bin/bash

# set -e stops the execution of a script if a command or pipeline has an error
set -e

# modifying config.json
JQ_FILTERS_CONFIG="."

if [[ "$LOG_LEVEL" ]]; then
    if [[ "$LOG_LEVEL" == "info" || "$LOG_LEVEL" == "debug" || "$LOG_LEVEL" == "trace" ]]; then
        JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .log.logLevel=\"$LOG_LEVEL\""
        echo "Log level has been modified to $LOG_LEVEL"
    else
        echo "The log level you provided is incorrect (info/debug/trace)"
    fi
fi

if [[ "$WORKERS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workers=\"$WORKERS\""
fi

if [[ "$REDIS_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.host=\"$REDIS_HOST\""
fi

if [[ "$REDIS_PORT" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=\"$REDIS_PORT\""
fi

if [[ "$VAULTD_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.host=\"$VAULTD_HOST\""
fi

if [[ "$VAULTD_PORT" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.port=\"$VAULTD_PORT\""
fi

if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi

if [[ $JQ_FILTERS_CONFIG != "." ]]; then
    jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
    mv config.json.tmp config.json
fi

exec "$@"
@ -1,42 +0,0 @@

# Utapi Release Plan

## Docker Image Generation

Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
Utapi has one namespace there:

* Namespace: ghcr.io/scality/utapi

With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
build chain and so on.

Tagged versions of utapi will be stored in the production namespace.

## How to Pull Docker Images

```sh
docker pull ghcr.io/scality/utapi:<commit hash>
docker pull ghcr.io/scality/utapi:<tag>
```

## Release Process

To release a production image:

* Name the tag for the repository and Docker image.

* Use the `yarn version` command with the same tag to update `package.json`.

* Create a PR and merge the `package.json` change.

* Tag the repository using the same tag.

* [Force a build] using:
  * A given branch that ideally matches the tag.
  * The `release` stage.
  * An extra property with the name `tag` and its value being the actual tag.

[Force a build]:
https://eve.devsca.com/github/scality/utapi/#/builders/bootstrap/force/force
@ -1,90 +0,0 @@

import sys, os, base64, datetime, hashlib, hmac, datetime, calendar, json
import requests # pip install requests

access_key = '9EQTVVVCLSSG6QBMNKO5'
secret_key = 'T5mK/skkkwJ/mTjXZnHyZ5UzgGIN=k9nl4dyTmDH'

method = 'POST'
service = 's3'
host = 'localhost:8100'
region = 'us-east-1'
canonical_uri = '/buckets'
canonical_querystring = 'Action=ListMetrics&Version=20160815'
content_type = 'application/x-amz-json-1.0'
algorithm = 'AWS4-HMAC-SHA256'

t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')

# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

def getSignatureKey(key, date_stamp, regionName, serviceName):
    kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
    kRegion = sign(kDate, regionName)
    kService = sign(kRegion, serviceName)
    kSigning = sign(kService, 'aws4_request')
    return kSigning

def get_start_time(t):
    start = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
    return calendar.timegm(start.utctimetuple()) * 1000;

def get_end_time(t):
    end = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
    return calendar.timegm(end.utctimetuple()) * 1000 - 1;

start_time = get_start_time(datetime.datetime(2016, 1, 1, 0, 0, 0, 0))
end_time = get_end_time(datetime.datetime(2016, 2, 1, 0, 0, 0, 0))

# Request parameters for listing Utapi bucket metrics--passed in a JSON block.
bucketListing = {
    'buckets': [ 'utapi-test' ],
    'timeRange': [ start_time, end_time ],
}

request_parameters = json.dumps(bucketListing)

payload_hash = hashlib.sha256(request_parameters).hexdigest()

canonical_headers = \
    'content-type:{0}\nhost:{1}\nx-amz-content-sha256:{2}\nx-amz-date:{3}\n' \
    .format(content_type, host, payload_hash, amz_date)

signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'

canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
    .format(method, canonical_uri, canonical_querystring, canonical_headers,
            signed_headers, payload_hash)

credential_scope = '{0}/{1}/{2}/aws4_request' \
    .format(date_stamp, region, service)

string_to_sign = '{0}\n{1}\n{2}\n{3}' \
    .format(algorithm, amz_date, credential_scope,
            hashlib.sha256(canonical_request).hexdigest())

signing_key = getSignatureKey(secret_key, date_stamp, region, service)

signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
                     hashlib.sha256).hexdigest()

authorization_header = \
    '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
    .format(algorithm, access_key, credential_scope, signed_headers, signature)

# The 'host' header is added automatically by the Python 'requests' library.
headers = {
    'Content-Type': content_type,
    'X-Amz-Content-Sha256': payload_hash,
    'X-Amz-Date': amz_date,
    'Authorization': authorization_header
}

endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring;

r = requests.post(endpoint, data=request_parameters, headers=headers)
print (r.text)
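For comparison with the Python script above, the same ListMetrics request can be signed from Node.js with the `aws4` package that already appears in this repository's `package.json`. This is an illustrative sketch, not part of the change: host, path, query string, and body mirror the Python values, and the credentials are placeholders.

```javascript
const http = require('http');
const aws4 = require('aws4');

// Same time range as the Python script: Jan 1 2016 to Feb 1 2016 (exclusive),
// expressed as UNIX epoch milliseconds.
const body = JSON.stringify({
    buckets: ['utapi-test'],
    timeRange: [1451606400000, 1454284799999],
});

// aws4.sign() computes the X-Amz-Date, x-amz-content-sha256 and Authorization
// headers in place, equivalent to the manual canonical-request code above.
const opts = aws4.sign({
    host: 'localhost:8100',
    path: '/buckets?Action=ListMetrics&Version=20160815',
    service: 's3',
    region: 'us-east-1',
    method: 'POST',
    headers: { 'Content-Type': 'application/x-amz-json-1.0' },
    body,
}, { accessKeyId: '<access-key>', secretAccessKey: '<secret-key>' });

http.request(opts, res => res.pipe(process.stdout)).end(body);
```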
@ -0,0 +1 @@

*.jar filter=lfs diff=lfs merge=lfs -text

@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:389d2135867c399a389901460c5f2cc09f4857d0c6d08632c2638c25fb150c46
size 15468553
1 index.js

@ -1,4 +1,5 @@

/* eslint-disable global-require */

// eslint-disable-line strict
let toExport;
@ -1,13 +1,35 @@

/* eslint-disable no-bitwise */
const assert = require('assert');
const fs = require('fs');
const path = require('path');

/**
 * Reads from a config file and returns the content as a config object
 */
class Config {
    constructor(config) {
        this.component = config.component;
    constructor() {
        /*
         * By default, the config file is "config.json" at the root.
         * It can be overridden using the UTAPI_CONFIG_FILE environment var.
         */
        this._basePath = path.resolve(__dirname, '..');
        this.path = `${this._basePath}/config.json`;
        if (process.env.UTAPI_CONFIG_FILE !== undefined) {
            this.path = process.env.UTAPI_CONFIG_FILE;
        }

        // Read config automatically
        this._getConfig();
    }

    _getConfig() {
        let config;
        try {
            const data = fs.readFileSync(this.path, { encoding: 'utf-8' });
            config = JSON.parse(data);
        } catch (err) {
            throw new Error(`could not parse config file: ${err.message}`);
        }

        this.port = 9500;
        if (config.port !== undefined) {

@ -93,26 +115,18 @@ class Config {

            }
        }

        if (config.vaultclient) {
            // Instance passed from outside
            this.vaultclient = config.vaultclient;
            this.vaultd = null;
        } else {
            // Connection data
            this.vaultclient = null;
            this.vaultd = {};
            if (config.vaultd) {
                if (config.vaultd.port !== undefined) {
                    assert(Number.isInteger(config.vaultd.port)
                        && config.vaultd.port > 0,
                        'bad config: vaultd port must be a positive integer');
                    this.vaultd.port = config.vaultd.port;
                }
                if (config.vaultd.host !== undefined) {
                    assert.strictEqual(typeof config.vaultd.host, 'string',
                        'bad config: vaultd host must be a string');
                    this.vaultd.host = config.vaultd.host;
                }
        this.vaultd = {};
        if (config.vaultd) {
            if (config.vaultd.port !== undefined) {
                assert(Number.isInteger(config.vaultd.port)
                    && config.vaultd.port > 0,
                    'bad config: vaultd port must be a positive integer');
                this.vaultd.port = config.vaultd.port;
            }
            if (config.vaultd.host !== undefined) {
                assert.strictEqual(typeof config.vaultd.host, 'string',
                    'bad config: vaultd host must be a string');
                this.vaultd.host = config.vaultd.host;
            }
        }

@ -127,11 +141,12 @@ class Config {

        const { key, cert, ca } = config.certFilePaths
            ? config.certFilePaths : {};
        if (key && cert) {
            const keypath = key;
            const certpath = cert;
            const keypath = (key[0] === '/') ? key : `${this._basePath}/${key}`;
            const certpath = (cert[0] === '/')
                ? cert : `${this._basePath}/${cert}`;
            let capath;
            if (ca) {
                capath = ca;
                capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`;
                assert.doesNotThrow(() => fs.accessSync(capath, fs.F_OK | fs.R_OK),
                    `File not found or unreachable: ${capath}`);
            }

@ -163,7 +178,9 @@ class Config {

            'bad config: onlyCountLatestWhenObjectLocked must be a boolean');
        this.onlyCountLatestWhenObjectLocked = config.onlyCountLatestWhenObjectLocked;
    }

        return config;
    }
}

module.exports = Config;
module.exports = new Config();
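With `module.exports = new Config();`, `lib/Config` now exports a shared, already-parsed instance instead of the class. An illustrative sketch of what that means for callers (the 9500 default comes from `_getConfig` above):

```javascript
// Before this change, callers constructed their own instance:
//   const Config = require('./lib/Config');
//   const config = new Config({ component: 's3' });

// After it, config.json (or $UTAPI_CONFIG_FILE) is read once, on first
// require, and every consumer shares the same object:
const config = require('./lib/Config');
console.log(config.port); // 9500 unless config.json overrides it
```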
@ -81,17 +81,6 @@ class Datastore {

        return this._client.call((backend, done) => backend.incr(key, done), cb);
    }

    /**
     * increment value of a key by the provided value
     * @param {string} key - key holding the value
     * @param {string} value - value containing the data
     * @param {callback} cb - callback
     * @return {undefined}
     */
    incrby(key, value, cb) {
        return this._client.call((backend, done) => backend.incrby(key, value, done), cb);
    }

    /**
     * decrement value of a key by 1
     * @param {string} key - key holding the value
@ -6,6 +6,8 @@ const async = require('async');

const { errors } = require('arsenal');
const { getMetricFromKey, getKeys, generateStateKey } = require('./schema');
const s3metricResponseJSON = require('../models/s3metricResponse');
const config = require('./Config');
const Vault = require('./Vault');

const MAX_RANGE_MS = (((1000 * 60) * 60) * 24) * 30; // One month.

@ -21,6 +23,7 @@ class ListMetrics {

    constructor(metric, component) {
        this.metric = metric;
        this.service = component;
        this.vault = new Vault(config);
    }

    /**

@ -80,10 +83,9 @@ class ListMetrics {

        const resources = validator.get(this.metric);
        const timeRange = validator.get('timeRange');
        const datastore = utapiRequest.getDatastore();
        const vault = utapiRequest.getVault();
        // map account ids to canonical ids
        if (this.metric === 'accounts') {
            return vault.getCanonicalIds(resources, log, (err, list) => {
            return this.vault.getCanonicalIds(resources, log, (err, list) => {
                if (err) {
                    return cb(err);
                }

@ -122,11 +124,10 @@ class ListMetrics {

        const fifteenMinutes = 15 * 60 * 1000; // In milliseconds
        const timeRange = [start - fifteenMinutes, end];
        const datastore = utapiRequest.getDatastore();
        const vault = utapiRequest.getVault();

        // map account ids to canonical ids
        if (this.metric === 'accounts') {
            return vault.getCanonicalIds(resources, log, (err, list) => {
            return this.vault.getCanonicalIds(resources, log, (err, list) => {
                if (err) {
                    return cb(err);
                }
@ -99,7 +99,6 @@ const metricObj = {

    buckets: 'bucket',
    accounts: 'accountId',
    users: 'userId',
    location: 'location',
};

class UtapiClient {

@ -123,17 +122,13 @@ class UtapiClient {

        const api = (config || {}).logApi || werelogs;
        this.log = new api.Logger('UtapiClient');
        // By default, we push all resource types
        this.metrics = ['buckets', 'accounts', 'users', 'service', 'location'];
        this.metrics = ['buckets', 'accounts', 'users', 'service'];
        this.service = 's3';
        this.disableOperationCounters = false;
        this.enabledOperationCounters = [];
        this.disableClient = true;

        if (config && !config.disableClient) {
            this.disableClient = false;
            this.expireMetrics = config.expireMetrics;
            this.expireMetricsTTL = config.expireMetricsTTL || 0;

            if (config.metrics) {
                const message = 'invalid property in UtapiClient configuration';
                assert(Array.isArray(config.metrics), `${message}: metrics `

@ -161,6 +156,9 @@ class UtapiClient {

            if (config.enabledOperationCounters) {
                this.enabledOperationCounters = config.enabledOperationCounters;
            }
            this.disableClient = false;
            this.expireMetrics = config.expireMetrics;
            this.expireMetricsTTL = config.expireMetricsTTL || 0;
        }
    }

@ -1156,69 +1154,6 @@ class UtapiClient {

        });
    }

    /**
     *
     * @param {string} location - name of data location
     * @param {number} updateSize - size in bytes to update location metric by,
     * could be negative, indicating deleted object
     * @param {string} reqUid - Request Unique Identifier
     * @param {function} callback - callback to call
     * @return {undefined}
     */
    pushLocationMetric(location, updateSize, reqUid, callback) {
        const log = this.log.newRequestLoggerFromSerializedUids(reqUid);
        const params = {
            level: 'location',
            service: 's3',
            location,
        };
        this._checkMetricTypes(params);
        const action = (updateSize < 0) ? 'decrby' : 'incrby';
        const size = (updateSize < 0) ? -updateSize : updateSize;
        return this.ds[action](generateKey(params, 'locationStorage'), size,
            err => {
                if (err) {
                    log.error('error pushing metric', {
                        method: 'UtapiClient.pushLocationMetric',
                        error: err,
                    });
                    return callback(errors.InternalError);
                }
                return callback();
            });
    }

    /**
     *
     * @param {string} location - name of data backend to get metric for
     * @param {string} reqUid - Request Unique Identifier
     * @param {function} callback - callback to call
     * @return {undefined}
     */
    getLocationMetric(location, reqUid, callback) {
        const log = this.log.newRequestLoggerFromSerializedUids(reqUid);
        const params = {
            level: 'location',
            service: 's3',
            location,
        };
        const redisKey = generateKey(params, 'locationStorage');
        return this.ds.get(redisKey, (err, bytesStored) => {
            if (err) {
                log.error('error getting metric', {
                    method: 'UtapiClient: getLocationMetric',
                    error: err,
                });
                return callback(errors.InternalError);
            }
            // if err and bytesStored are null, key does not exist yet
            if (bytesStored === null) {
                return callback(null, 0);
            }
            return callback(null, bytesStored);
        });
    }

    /**
     * Get storage used by bucket/account/user/service
     * @param {object} params - params for the metrics
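For context on these constructor changes: a client only becomes active when a configuration object is passed in (otherwise `disableClient` stays `true`). A hypothetical construction sketch, with field names taken from the checks above and from the v1 tests later in this diff, and placeholder connection values:

```javascript
// Illustrative only; the exact require path depends on how utapi is consumed.
const { UtapiClient } = require('utapi');

const client = new UtapiClient({
    redis: { host: '127.0.0.1', port: 6379 },      // metrics datastore
    localCache: { host: '127.0.0.1', port: 6379 }, // local cache redis
    component: 's3',
    metrics: ['buckets', 'accounts'], // optional subset of the defaults above
});
```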
@ -14,15 +14,6 @@ class UtapiRequest {

        this._datastore = null;
        this._requestQuery = null;
        this._requestPath = null;
        this._vault = null;
    }

    getVault() {
        return this._vault;
    }

    setVault() {
        return this._vault;
    }

    /**
@ -14,7 +14,6 @@ import urllib

logging.basicConfig(level=logging.INFO)
_log = logging.getLogger('utapi-reindex:reporting')

SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100

def get_options():

@ -34,13 +33,7 @@ class askRedis():

    def __init__(self, ip="127.0.0.1", port="16379", sentinel_cluster_name="scality-s3", password=None):
        self._password = password
        r = redis.Redis(
            host=ip,
            port=port,
            db=0,
            password=password,
            socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
        )
        r = redis.Redis(host=ip, port=port, db=0, password=password, socket_connect_timeout=10)
        try:
            self._ip, self._port = r.sentinel_get_master_addr_by_name(sentinel_cluster_name)
        except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError) as e:
@ -25,7 +25,6 @@ MPU_SHADOW_BUCKET_PREFIX = 'mpuShadowBucket'

ACCOUNT_UPDATE_CHUNKSIZE = 100

SENTINEL_CONNECT_TIMEOUT_SECONDS = 10
EXIT_CODE_SENTINEL_CONNECTION_ERROR = 100

def get_options():

@ -425,7 +424,7 @@ def get_redis_client(options):

        port=options.sentinel_port,
        db=0,
        password=options.redis_password,
        socket_connect_timeout=SENTINEL_CONNECT_TIMEOUT_SECONDS
        socket_connect_timeout=10
    )
    try:
        ip, port = sentinel.sentinel_get_master_addr_by_name(options.sentinel_cluster_name)
@ -68,10 +68,10 @@ const keys = {

 */
function getSchemaPrefix(params, timestamp) {
    const {
        bucket, accountId, userId, level, service, location,
        bucket, accountId, userId, level, service,
    } = params;
    // `service` property must remain last because other objects also include it
    const id = bucket || accountId || userId || location || service;
    const id = bucket || accountId || userId || service;
    const prefix = timestamp ? `${service}:${level}:${timestamp}:${id}:`
        : `${service}:${level}:${id}:`;
    return prefix;

@ -86,13 +86,9 @@ function getSchemaPrefix(params, timestamp) {

 */
function generateKey(params, metric, timestamp) {
    const prefix = getSchemaPrefix(params, timestamp);
    if (params.location) {
        return `${prefix}locationStorage`;
    }
    return keys[metric](prefix);
}

/**
 * Returns a list of the counters for a metric type
 * @param {object} params - object with metric type and id as a property
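To make the key layout concrete, here is an illustrative trace of `getSchemaPrefix` for a bucket-level metric, following the template literal above:

```javascript
// With these inputs...
const params = { bucket: 'mybucket', level: 'buckets', service: 's3' };
const timestamp = 1451606400000;

// ...id resolves to the bucket name (bucket || accountId || userId || service)
const id = params.bucket;
const prefix = `${params.service}:${params.level}:${timestamp}:${id}:`;
// => 's3:buckets:1451606400000:mybucket:'
```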
@ -7,6 +7,7 @@ const { Clustering, errors, ipCheck } = require('arsenal');

const arsenalHttps = require('arsenal').https;
const { Logger } = require('werelogs');

const config = require('./Config');
const routes = require('../router/routes');
const Route = require('../router/Route');
const Router = require('../router/Router');

@ -27,12 +28,7 @@ class UtapiServer {

    constructor(worker, port, datastore, logger, config) {
        this.worker = worker;
        this.port = port;
        this.vault = config.vaultclient;
        if (!this.vault) {
            const Vault = require('./Vault');
            this.vault = new Vault(config);
        }
        this.router = new Router(config, this.vault);
        this.router = new Router(config);
        this.logger = logger;
        this.datastore = datastore;
        this.server = null;

@ -75,7 +71,6 @@ class UtapiServer {

        req.socket.setNoDelay();
        const { query, path, pathname } = url.parse(req.url, true);
        const utapiRequest = new UtapiRequest()
            .setVault(this.vault)
            .setRequest(req)
            .setLog(this.logger.newRequestLogger())
            .setResponse(res)

@ -219,7 +214,8 @@ class UtapiServer {

 * @property {object} params.log - logger configuration
 * @return {undefined}
 */
function spawn(config) {
function spawn(params) {
    Object.assign(config, params);
    const {
        workers, redis, log, port,
    } = config;
@ -23,6 +23,10 @@

    "healthChecks": {
        "allowFrom": ["127.0.0.1/8", "::1"]
    },
    "vaultd": {
        "host": "127.0.0.1",
        "port": 8500
    },
    "cacheBackend": "memory",
    "development": false,
    "nodeId": "single_node",
@ -2,8 +2,6 @@ const fs = require('fs');

const path = require('path');
const Joi = require('@hapi/joi');
const assert = require('assert');
const defaults = require('./defaults.json');
const werelogs = require('werelogs');

const {
    truthy, envNamespace, allowedFilterFields, allowedFilterStates,

@ -73,6 +71,7 @@ class Config {

    constructor(overrides) {
        this._basePath = path.join(__dirname, '../../');
        this._configPath = _loadFromEnv('CONFIG_FILE', defaultConfigPath);
        this._defaultsPath = path.join(__dirname, 'defaults.json');

        this.host = undefined;
        this.port = undefined;

@ -90,11 +89,6 @@ class Config {

            parsedConfig = this._recursiveUpdate(parsedConfig, overrides);
        }
        Object.assign(this, parsedConfig);

        werelogs.configure({
            level: Config.logging.level,
            dump: Config.logging.dumpLevel,
        });
    }

    static _readFile(path, encoding = 'utf-8') {

@ -119,7 +113,7 @@ class Config {

    }

    _loadDefaults() {
        return defaults;
        return Config._readJSON(this._defaultsPath);
    }

    _loadUserConfig() {
@ -1,6 +1,6 @@

/* eslint-disable no-restricted-syntax */
const arsenal = require('arsenal');
const async = require('async');

const metadata = require('./client');
const { LoggerContext, logger } = require('../utils');
const { keyVersionSplitter } = require('../constants');

@ -12,14 +12,9 @@ const moduleLogger = new LoggerContext({

    module: 'metadata.client',
});

const ebConfig = {
    times: 10,
    interval: retryCount => 50 * (2 ** retryCount),
};

const PAGE_SIZE = 1000;

async function _listingWrapper(bucket, params) {
function _listingWrapper(bucket, params) {
    return new Promise(
        (resolve, reject) => metadata.listObject(
            bucket,

@ -46,7 +41,7 @@ function _listObject(bucket, prefix, hydrateFunc) {

    try {
        // eslint-disable-next-line no-await-in-loop
        res = await async.retryable(ebConfig, _listingWrapper)(bucket, { ...listingParams, gt });
        res = await _listingWrapper(bucket, { ...listingParams, gt });
    } catch (error) {
        moduleLogger.error('Error during listing', { error });
        throw error;
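The `ebConfig`/`async.retryable` pair removed above wraps a task so every call retries with exponential backoff; `async.retryable` takes the same `{ times, interval }` options as `async.retry`, and `interval` may be a function of the retry count. A standalone sketch of the pattern:

```javascript
const async = require('async');

// exponential backoff: max wait = 50 * 2 ^ 10 milliseconds ~= 51 seconds
const ebConfig = {
    times: 10,
    interval: retryCount => 50 * (2 ** retryCount),
};

// Placeholder for a call that can fail transiently (e.g. bucketd is down).
async function flakyListing(bucket, params) {
    return { bucket, params };
}

// async.retryable returns a wrapped function; awaiting it retries the
// underlying task up to `times` attempts before rejecting.
const listWithRetries = async.retryable(ebConfig, flakyListing);

listWithRetries('example-bucket', { gt: '' }).then(res => console.log(res));
```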
@ -6,8 +6,7 @@ const BackOff = require('backo');

const { whilst } = require('async');

const errors = require('./errors');
const { LoggerContext } = require('./utils/log');
const { asyncOrCallback } = require('./utils/func');
const { LoggerContext, asyncOrCallback } = require('./utils');

const moduleLogger = new LoggerContext({
    module: 'redis',
@ -31,12 +31,6 @@ class ReindexTask extends BaseTask {

        this._defaultLag = 0;
        const eventFilters = (config && config.filter) || {};
        this._shouldReindex = buildFilterChain((config && config.filter) || {});
        // exponential backoff: max wait = 50 * 2 ^ 10 milliseconds ~= 51 seconds
        this.ebConfig = {
            times: 10,
            interval: retryCount => 50 * (2 ** retryCount),
        };

        if (Object.keys(eventFilters).length !== 0) {
            logEventFilter((...args) => logger.info(...args), 'reindex resource filtering enabled', eventFilters);
        }

@ -164,6 +158,7 @@ class ReindexTask extends BaseTask {

        if (this._program.bucket.length) {
            return this._program.bucket.map(name => ({ name }));
        }

        return metadata.listBuckets();
    }

@ -185,8 +180,8 @@ class ReindexTask extends BaseTask {

        let mpuTotal;

        try {
            bktTotal = await async.retryable(this.ebConfig, ReindexTask._indexBucket)(bucket.name);
            mpuTotal = await async.retryable(this.ebConfig, ReindexTask._indexMpuBucket)(mpuBucket);
            bktTotal = await async.retryable(ReindexTask._indexBucket)(bucket.name);
            mpuTotal = await async.retryable(ReindexTask._indexMpuBucket)(mpuBucket);
        } catch (error) {
            logger.error(
                'failed bucket reindex. any associated account will be skipped',
@ -1,6 +1,14 @@

const werelogs = require('werelogs');
const config = require('../config');
const { comprehend } = require('./func');

const loggerConfig = {
    level: config.logging.level,
    dump: config.logging.dumpLevel,
};

werelogs.configure(loggerConfig);

const rootLogger = new werelogs.Logger('Utapi');

class LoggerContext {

@ -70,6 +78,8 @@ class LoggerContext {

    }
}

rootLogger.debug('logger initialized', { loggerConfig });

function buildRequestLogger(req) {
    let reqUids = [];
    if (req.headers['x-scal-request-uids'] !== undefined) {
@ -1,5 +1,6 @@

const assert = require('assert');
const { auth, policies } = require('arsenal');
const vaultclient = require('vaultclient');
const config = require('../config');
const errors = require('../errors');
/**

@ -8,17 +9,9 @@ const errors = require('../errors');

 */

class VaultWrapper extends auth.Vault {
    create(config) {
        if (config.vaultd.host) {
            return new VaultWrapper(config);
        }
        return null;
    }

    constructor(options) {
        let client;
        const { host, port } = options.vaultd;
        const vaultclient = require('vaultclient');
        if (options.tls) {
            const { key, cert, ca } = options.tls;
            client = new vaultclient.Client(host, port, true, key, cert,

@ -126,7 +119,7 @@ class VaultWrapper extends auth.Vault {

    }
}

const vault = VaultWrapper.create(config);
const vault = new VaultWrapper(config);
auth.setHandler(vault);

module.exports = {
16 package.json

@ -3,7 +3,7 @@

    "engines": {
        "node": ">=16"
    },
    "version": "8.1.15",
    "version": "7.70.4",
    "description": "API for tracking resource utilization and reporting metrics",
    "main": "index.js",
    "repository": {

@ -19,12 +19,13 @@

    "dependencies": {
        "@hapi/joi": "^17.1.1",
        "@senx/warp10": "^1.0.14",
        "arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
        "arsenal": "git+https://github.com/scality/Arsenal#7.10.46",
        "async": "^3.2.0",
        "aws-sdk": "^2.1005.0",
        "aws4": "^1.8.0",
        "backo": "^1.1.0",
        "body-parser": "^1.19.0",
        "bucketclient": "scality/bucketclient#7.10.6",
        "byte-size": "^7.0.0",
        "commander": "^5.1.0",
        "cron-parser": "^2.15.0",

@ -39,14 +40,15 @@

        "oas-tools": "^2.2.2",
        "prom-client": "14.2.0",
        "uuid": "^3.3.2",
        "werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1"
        "vaultclient": "scality/vaultclient#7.10.13",
        "werelogs": "scality/werelogs#8.1.0"
    },
    "devDependencies": {
        "eslint": "^8.14.0",
        "eslint-config-airbnb-base": "^15.0.0",
        "eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
        "eslint": "6.0.1",
        "eslint-config-airbnb": "17.1.0",
        "eslint-config-scality": "scality/Guidelines#7.10.2",
        "eslint-plugin-import": "^2.18.0",
        "mocha": ">=3.1.2",
        "mocha": "^3.0.2",
        "nodemon": "^2.0.4",
        "protobufjs": "^6.10.1",
        "sinon": "^9.0.2"
@ -3,16 +3,17 @@ const assert = require('assert');

const url = require('url');
const { auth, errors, policies } = require('arsenal');
const safeJsonParse = require('../utils/safeJsonParse');
const Vault = require('../lib/Vault');

class Router {
    /**
     * @constructor
     * @param {Config} config - Config instance
     */
    constructor(config, vault) {
    constructor(config) {
        this._service = config.component;
        this._routes = {};
        this._vault = vault;
        this._vault = new Vault(config);
    }

    /**

@ -265,10 +266,6 @@ class Router {

     */
    _processSecurityChecks(utapiRequest, route, cb) {
        const log = utapiRequest.getLog();
        if (process.env.UTAPI_AUTH === 'false') {
            // Zenko route request does not need to go through Vault
            return this._startRequest(utapiRequest, route, cb);
        }
        return this._authSquared(utapiRequest, err => {
            if (err) {
                log.trace('error from vault', { errors: err });
21 server.js

@ -1,21 +1,4 @@

const fs = require('fs');
const path = require('path');

const Config = require('./lib/Config');
const config = require('./lib/Config');
const server = require('./lib/server');

/*
 * By default, the config file is "config.json" at the root.
 * It can be overridden using the UTAPI_CONFIG_FILE environment var.
 */
const cfgpath = process.env.UTAPI_CONFIG_FILE || (__dirname+'/config.json');

let cfg;
try {
    cfg = JSON.parse(fs.readFileSync(cfgpath, { encoding: 'utf-8' }));
} catch (err) {
    throw new Error(`could not parse config file: ${err.message}`);
}

cfg.component = 's3';
server(new Config(cfg));
server(Object.assign({}, config, { component: 's3' }));
@ -170,6 +170,7 @@ describe('Test IngestShards', function () {

        const results = await warp10.fetch({
            className: 'utapi.event', labels: { node: prefix }, start: start + 1, stop: -2,
        });

        const series = JSON.parse(results.result[0])[0];
        const timestamps = series.v.map(ev => ev[0]);
        assert.deepStrictEqual([

@ -178,8 +179,7 @@ describe('Test IngestShards', function () {

        ], timestamps);
    });

    // please unskip this in https://scality.atlassian.net/browse/UTAPI-65
    it.skip('should increment microseconds for several duplicate timestamps', async () => {
    it('should increment microseconds for several duplicate timestamps', async () => {
        const start = shardFromTimestamp(getTs(-120));
        const events = generateFakeEvents(start, start + 5, 5)
            .map(ev => { ev.timestamp = start; return ev; });

@ -190,6 +190,7 @@ describe('Test IngestShards', function () {

        const results = await warp10.fetch({
            className: 'utapi.event', labels: { node: prefix }, start: start + 5, stop: -5,
        });

        const series = JSON.parse(results.result[0])[0];
        const timestamps = series.v.map(ev => ev[0]);
        assert.deepStrictEqual([
@ -177,49 +177,4 @@ describe('Test ReindexTask', function () {

        assert.strictEqual(series[0].values.length, 2);
        series[0].values.map(value => assert.deepStrictEqual(value, bucketRecord));
    });

    describe('exponential backoff', () => {
        it('should retry when bucketd is unreachable', done => {
            // disable bucketd to simulate downtime
            bucketd.end();

            const bucketDStub = sinon.stub(bucketd, '_getBucketResponse');
            bucketDStub.onFirstCall().callsFake(
                // Once the timeout promise resolves, bucketd is able to be called.
                // If we make a call after 10 seconds, this shows that retries
                // have been occuring during bucketd downtime.
                () => {
                    return {
                        key: 'foo',
                        value: 'bar',
                    };
                },
            );

            const reindexPromise = new Promise((resolve, reject) => {
                reindexTask._execute()
                    .then(() => {
                        resolve('reindexed');
                    })
                    .catch(err => {
                        reject(err);
                    });
            });

            const timeoutPromise = new Promise(resolve => {
                const f = () => {
                    bucketd.start();
                    resolve();
                };
                setTimeout(f, 10000);
            });

            Promise.all([reindexPromise, timeoutPromise])
                .then(values => {
                    assert.strictEqual(values[0], 'reindexed');
                    sinon.restore();
                    done();
                });
        });
    });
});
@ -233,8 +233,7 @@ describe('Test UtapiClient', function () {

        });
    });

    // please unskip this in https://scality.atlassian.net/browse/UTAPI-65
    it.skip('should get the current storage for an account using the cache', async () => {
    it('should get the current storage for an account using the cache', async () => {
        await async.eachOf(totals.accounts, async (total, acc) => {
            cacheClient.updateAccountCounterBase(acc, total.bytes);
        });
@ -21,9 +21,6 @@ const config = {

    localCache: redisLocal,
    component: 's3',
};
const location = 'foo-backend';
const incrby = 100;
const decrby = -30;

function isSortedSetKey(key) {
    return key.endsWith('storageUtilized') || key.endsWith('numberOfObjects');

@ -79,29 +76,6 @@ function setMockData(data, timestamp, cb) {

    return cb();
}

function getLocationObject(bytesValue) {
    const obj = {};
    obj[`s3:location:${location}:locationStorage`] = `${bytesValue}`;
    return obj;
}

function testLocationMetric(c, params, expected, cb) {
    const { location, updateSize } = params;
    if (updateSize) {
        c.pushLocationMetric(location, updateSize, REQUID, err => {
            assert.equal(err, null);
            assert.deepStrictEqual(memoryBackend.data, expected);
            return cb();
        });
    } else {
        c.getLocationMetric(location, REQUID, (err, bytesStored) => {
            assert.equal(err, null);
            assert.strictEqual(bytesStored, expected);
            return cb();
        });
    }
}

describe('UtapiClient:: enable/disable client', () => {
    it('should disable client when no redis config is provided', () => {
        const c = new UtapiClient();

@ -832,26 +806,3 @@ tests.forEach(test => {

    });
});

describe('UtapiClient:: location quota metrics', () => {
    beforeEach(function beFn() {
        this.currentTest.c = new UtapiClient(config);
        this.currentTest.c.setDataStore(ds);
    });

    afterEach(() => memoryBackend.flushDb());

    it('should increment location metric', function itFn(done) {
        const expected = getLocationObject(incrby);
        testLocationMetric(this.test.c, { location, updateSize: incrby },
            expected, done);
    });
    it('should decrement location metric', function itFn(done) {
        const expected = getLocationObject(decrby);
        testLocationMetric(this.test.c, { location, updateSize: decrby },
            expected, done);
    });
    it('should list location metric', function itFn(done) {
        const expected = 0;
        testLocationMetric(this.test.c, { location }, expected, done);
    });
});