Compare commits
No commits in common. "7439805219683466b66ec3557f528d476356f9c8" and "afea2043fcfd84341cc871aebb2ca60114efcb31" have entirely different histories.
7439805219 ... afea2043fc
@@ -1,87 +0,0 @@
-# General support information
-
-GitHub Issues are **reserved** for actionable bug reports (including
-documentation inaccuracies) and feature requests.
-**All questions** (regarding configuration, use cases, performance, community,
-events, setup and usage recommendations, among other things) should be asked on
-the **[Zenko Forum](http://forum.zenko.io/)**.
-
-> Questions opened as GitHub issues will systematically be closed, and moved to
-> the [Zenko Forum](http://forum.zenko.io/).
-
---------------------------------------------------------------------------------
-
-## Avoiding duplicates
-
-When reporting a new issue/requesting a feature, make sure that we do not have
-any duplicates already open:
-
-- search the issue list for this repository (use the search bar, select
-  "Issues" on the left pane after searching);
-- if there is a duplicate, please do not open your issue, and add a comment
-  to the existing issue instead.
-
---------------------------------------------------------------------------------
-
-## Bug report information
-
-(delete this section (everything between the lines) if you're not reporting a
-bug but requesting a feature)
-
-### Description
-
-Briefly describe the problem you are having in a few paragraphs.
-
-### Steps to reproduce the issue
-
-Please provide steps to reproduce, including full log output
-
-### Actual result
-
-Describe the results you received
-
-### Expected result
-
-Describe the results you expected
-
-### Additional information
-
-- Node.js version,
-- Docker version,
-- npm version,
-- distribution/OS,
-- optional: anything else you deem helpful to us.
-
---------------------------------------------------------------------------------
-
-## Feature Request
-
-(delete this section (everything between the lines) if you're not requesting
-a feature but reporting a bug)
-
-### Proposal
-
-Describe the feature
-
-### Current behavior
-
-What currently happens
-
-### Desired behavior
-
-What you would like to happen
-
-### Usecase
-
-Please provide use cases for changing the current behavior
-
-### Additional information
-
-- Is this request for your company? Y/N
-- If Y: Company name:
-- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
-- Are you willing to contribute this feature yourself?
-- Position/Title:
-- How did you hear about us?
-
---------------------------------------------------------------------------------
@@ -5,7 +5,7 @@ general:
 machine:
   node:
-    version: 6.13.1
+    version: 6.9.5
   services:
     - redis
   environment:
eve/main.yml
@@ -1,39 +0,0 @@
----
-version: 0.2
-
-branches:
-  default:
-    stage: pre-merge
-
-stages:
-  pre-merge:
-    worker:
-      type: docker
-      path: eve/workers/unit_and_feature_tests
-      volumes:
-        - '/home/eve/workspace'
-    steps:
-      - Git:
-          name: fetch source
-          repourl: '%(prop:git_reference)s'
-          shallow: True
-          retryFetch: True
-          haltOnFailure: True
-      - ShellCommand:
-          name: npm install
-          command: npm install
-      # - ShellCommand:
-      #     name: get api node modules from cache
-      #     command: mv /home/eve/node_reqs/node_modules .
-      - ShellCommand:
-          name: run static analysis tools on markdown
-          command: npm run lint_md
-      - ShellCommand:
-          name: run static analysis tools on code
-          command: npm run lint
-      - ShellCommand:
-          name: run unit tests
-          command: npm test
-      - ShellCommand:
-          name: run feature tests
-          command: npm run ft_test
@@ -1,26 +0,0 @@
-FROM buildpack-deps:jessie-curl
-
-#
-# Install apt packages needed by backbeat and buildbot_worker
-#
-ENV LANG C.UTF-8
-COPY utapi_packages.list buildbot_worker_packages.list /tmp/
-
-RUN curl -sL https://deb.nodesource.com/setup_6.x | bash - \
-    && apt-get update -qq \
-    && cat /tmp/*packages.list | xargs apt-get install -y \
-    && pip install pip==9.0.1 \
-    && rm -rf /var/lib/apt/lists/* \
-    && rm -f /tmp/*packages.list \
-    && rm -f /etc/supervisor/conf.d/*.conf
-
-
-#
-# Run buildbot-worker on startup through supervisor
-#
-ARG BUILDBOT_VERSION
-
-RUN pip install buildbot-worker==$BUILDBOT_VERSION
-ADD supervisor/buildbot_worker.conf /etc/supervisor/conf.d/
-
-CMD ["supervisord", "-n"]
@@ -1,9 +0,0 @@
-ca-certificates
-git
-libffi-dev
-libssl-dev
-python2.7
-python2.7-dev
-python-pip
-sudo
-supervisor
@@ -1,9 +0,0 @@
-[program:buildbot_worker]
-command=/bin/sh -c 'buildbot-worker create-worker . "%(ENV_BUILDMASTER)s:%(ENV_BUILDMASTER_PORT)s" "%(ENV_WORKERNAME)s" "%(ENV_WORKERPASS)s" && buildbot-worker start --nodaemon'
-autostart=true
-autorestart=false
-
-[program:redis]
-command=/usr/bin/redis-server
-autostart=true
-autorestart=false
@@ -1,3 +0,0 @@
-build-essential
-redis-server
-nodejs
@@ -1,5 +1,5 @@
 const http = require('http');
-const aws4 = require('aws4'); // eslint-disable-line import/no-unresolved
+const aws4 = require('aws4');
 
 // Input AWS access key, secret key, and session token.
 const accessKeyId = 'EO4FRH6BA2L7FCK0EKVT';
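For context, this hunk only shows the first lines of the aws4-based sample. A minimal sketch of how such a request is typically completed with the `aws4` package and Node's `http` module follows; the endpoint, query string, and placeholder credentials are taken from the Python sample later in this compare, and the option names are assumptions based on aws4's documented API, not code from either commit.

```js
const http = require('http');
const aws4 = require('aws4');

// Body format mirrors the Python sample below; the time range is hypothetical.
const body = JSON.stringify({
    buckets: ['utapi-test'],
    timeRange: [1451606400000, 1454284799999],
});

// aws4.sign() fills in X-Amz-Date, X-Amz-Content-Sha256, and Authorization.
const signed = aws4.sign({
    host: 'localhost:8100',
    method: 'POST',
    path: '/buckets?Action=ListMetrics&Version=20160815',
    service: 's3',
    region: 'us-east-1',
    headers: { 'Content-Type': 'application/x-amz-json-1.0' },
    body,
}, {
    accessKeyId: 'EO4FRH6BA2L7FCK0EKVT', // placeholder key from the diff
    secretAccessKey: '<secret-key>',
});

const req = http.request({
    hostname: 'localhost',
    port: 8100,
    method: signed.method,
    path: signed.path,
    headers: signed.headers,
}, res => {
    res.setEncoding('utf8');
    res.on('data', chunk => process.stdout.write(chunk));
});
req.end(body);
```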
@@ -1,90 +0,0 @@
-import sys, os, base64, datetime, hashlib, hmac, datetime, calendar, json
-import requests # pip install requests
-
-access_key = '9EQTVVVCLSSG6QBMNKO5'
-secret_key = 'T5mK/skkkwJ/mTjXZnHyZ5UzgGIN=k9nl4dyTmDH'
-
-method = 'POST'
-service = 's3'
-host = 'localhost:8100'
-region = 'us-east-1'
-canonical_uri = '/buckets'
-canonical_querystring = 'Action=ListMetrics&Version=20160815'
-content_type = 'application/x-amz-json-1.0'
-algorithm = 'AWS4-HMAC-SHA256'
-
-t = datetime.datetime.utcnow()
-amz_date = t.strftime('%Y%m%dT%H%M%SZ')
-date_stamp = t.strftime('%Y%m%d')
-
-# Key derivation functions. See:
-# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
-def sign(key, msg):
-    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
-
-def getSignatureKey(key, date_stamp, regionName, serviceName):
-    kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
-    kRegion = sign(kDate, regionName)
-    kService = sign(kRegion, serviceName)
-    kSigning = sign(kService, 'aws4_request')
-    return kSigning
-
-def get_start_time(t):
-    start = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
-    return calendar.timegm(start.utctimetuple()) * 1000;
-
-def get_end_time(t):
-    end = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
-    return calendar.timegm(end.utctimetuple()) * 1000 - 1;
-
-start_time = get_start_time(datetime.datetime(2016, 1, 1, 0, 0, 0, 0))
-end_time = get_end_time(datetime.datetime(2016, 2, 1, 0, 0, 0, 0))
-
-# Request parameters for listing Utapi bucket metrics--passed in a JSON block.
-bucketListing = {
-    'buckets': [ 'utapi-test' ],
-    'timeRange': [ start_time, end_time ],
-}
-
-request_parameters = json.dumps(bucketListing)
-
-payload_hash = hashlib.sha256(request_parameters).hexdigest()
-
-canonical_headers = \
-    'content-type:{0}\nhost:{1}\nx-amz-content-sha256:{2}\nx-amz-date:{3}\n' \
-    .format(content_type, host, payload_hash, amz_date)
-
-signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
-
-canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
-    .format(method, canonical_uri, canonical_querystring, canonical_headers,
-            signed_headers, payload_hash)
-
-credential_scope = '{0}/{1}/{2}/aws4_request' \
-    .format(date_stamp, region, service)
-
-string_to_sign = '{0}\n{1}\n{2}\n{3}' \
-    .format(algorithm, amz_date, credential_scope,
-            hashlib.sha256(canonical_request).hexdigest())
-
-signing_key = getSignatureKey(secret_key, date_stamp, region, service)
-
-signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
-                     hashlib.sha256).hexdigest()
-
-authorization_header = \
-    '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
-    .format(algorithm, access_key, credential_scope, signed_headers, signature)
-
-# The 'host' header is added automatically by the Python 'requests' library.
-headers = {
-    'Content-Type': content_type,
-    'X-Amz-Content-Sha256': payload_hash,
-    'X-Amz-Date': amz_date,
-    'Authorization': authorization_header
-}
-
-endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring;
-
-r = requests.post(endpoint, data=request_parameters, headers=headers)
-print (r.text)
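The key-derivation chain in `getSignatureKey()` above is the standard SigV4 cascade (date, then region, then service, then `aws4_request`). For readers following along in Node rather than Python, a sketch of the same chain with the built-in `crypto` module; this helper is not part of either commit.

```js
const crypto = require('crypto');

// HMAC-SHA256 helper: key may be a string or a Buffer from a previous round.
const hmac = (key, msg) =>
    crypto.createHmac('sha256', key).update(msg, 'utf8').digest();

// Mirrors the Python getSignatureKey(): each round's output keys the next.
function getSignatureKey(secretKey, dateStamp, region, service) {
    const kDate = hmac(`AWS4${secretKey}`, dateStamp);
    const kRegion = hmac(kDate, region);
    const kService = hmac(kRegion, service);
    return hmac(kService, 'aws4_request'); // the final signing key
}
```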
@@ -1,130 +0,0 @@
-const async = require('async');
-const Redis = require('ioredis');
-
-/*
-
-    This script updates the state of Utapi `numberOfObjects` and
-    `storageUtilized` starting from the current timestamp (i.e., the latest 15
-    minute interval).
-
-    To use:
-    - Set your redis endpoint (`REDIS_ENDPOINT`) to the host and port where the
-    Redis server is running. Two examples are provided: one for local use, and a
-    second example (which is commented out) for a deployment scenario in which Redis
-    Sentinels are used.
-
-    - Set the various metric resource states (`STATES`) to your desired values.
-    This can be an array of n length of objects. Each value `storageUtilized` and
-    `numberOfObjects` of the state objects is optional, and you should only
-    include those values that you want to change.
-
-    - To find the current state of a bucket, perform a recursive listing of
-    objects in a bucket to calculate the storage utilized and number of objects.
-    For this purpose, we suggest using `s3cmd` or `aws-cli`.
-
-*/
-
-const REDIS_ENDPOINT = {
-    host: '127.0.0.1',
-    port: 6379,
-};
-
-/* Example endpoint for a Utapi deployment:
-
-const REDIS_ENDPOINT = {
-    name: 'scality-s3',
-    sentinels: [
-        {
-            host: 'endpoint0',
-            port: 6379
-        },
-        {
-            host: 'endpoint1',
-            port: 6379
-        },
-        {
-            host: 'endpoint2',
-            port: 6379
-        },
-        {
-            host: 'endpoint3',
-            port: 6379
-        },
-        {
-            host: 'endpoint4',
-            port: 6379
-        },
-    ],
-};
-
-*/
-
-const STATES = [
-    {
-        resource: 'buckets', // required
-        bucket: '<bucket-name>', // required
-        storageUtilized: '0',
-        numberOfObjects: '0',
-    },
-    {
-        resource: 'accounts', // required
-        accountId: '<account-canonical-id>', // required
-        storageUtilized: '0',
-        numberOfObjects: '0',
-    },
-    {
-        resource: 'users', // required
-        userId: '<user-id>', // required
-        storageUtilized: '0',
-        numberOfObjects: '0',
-    },
-    {
-        resource: 'service', // required
-        service: '<service-name>', // required
-        storageUtilized: '0',
-        numberOfObjects: '0',
-    },
-];
-
-function generateStateKey(params, metric) {
-    const { bucket, accountId, userId, service, resource } = params;
-    const id = bucket || accountId || userId || service;
-    return `s3:${resource}:${id}:${metric}`;
-}
-
-function getCurrentTimestamp() {
-    const time = new Date();
-    const minutes = time.getMinutes();
-    return time.setMinutes((minutes - minutes % 15), 0, 0);
-}
-
-const redis = new Redis(Object.assign({
-    enableOfflineQueue: true,
-    keepAlive: 3000,
-}, REDIS_ENDPOINT));
-
-async.each(STATES, (params, cb) => {
-    const { storageUtilized, numberOfObjects } = params;
-    const timestamp = getCurrentTimestamp();
-    const cmds = [];
-    if (storageUtilized !== undefined) {
-        const storageUtilizedKey = generateStateKey(params, 'storageUtilized');
-        cmds.push(
-            ['zremrangebyscore', storageUtilizedKey, timestamp, timestamp],
-            ['zadd', storageUtilizedKey, timestamp, storageUtilized]);
-    }
-    if (numberOfObjects !== undefined) {
-        const numberOfObjectsKey = generateStateKey(params, 'numberOfObjects');
-        cmds.push(
-            ['zremrangebyscore', numberOfObjectsKey, timestamp, timestamp],
-            ['zadd', numberOfObjectsKey, timestamp, numberOfObjects]);
-    }
-    return redis.multi(cmds).exec(cb);
-}, err => {
-    if (err) {
-        process.stdout.write(`An error occurred: ${err}\n`);
-        process.exit(1);
-    }
-    process.stdout.write('Successfully updated all states.\n');
-    process.exit();
-});
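As the script's own comments describe, each state lives in a Redis sorted set keyed by resource type, resource id, and metric, with the member's score set to the current 15-minute-aligned timestamp. A hypothetical trace of what one `buckets` entry expands to (bucket name and timestamp invented for illustration):

```js
// For { resource: 'buckets', bucket: 'my-bucket', storageUtilized: '0',
// numberOfObjects: '0' } and a normalized timestamp of 1454284800000,
// generateStateKey() yields 's3:buckets:my-bucket:<metric>', so the MULTI
// batch the script issues is equivalent to:
const cmds = [
    ['zremrangebyscore', 's3:buckets:my-bucket:storageUtilized', 1454284800000, 1454284800000],
    ['zadd', 's3:buckets:my-bucket:storageUtilized', 1454284800000, '0'],
    ['zremrangebyscore', 's3:buckets:my-bucket:numberOfObjects', 1454284800000, 1454284800000],
    ['zadd', 's3:buckets:my-bucket:numberOfObjects', 1454284800000, '0'],
];
```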
@@ -215,7 +215,7 @@ class ListMetrics {
         // last 4 are results of storageUtilized, numberOfObjects,
         const absolutes = res.slice(-4);
         const deltas = res.slice(0, res.length - 4);
-        const areMetricsPositive = absolutes.every((item, index) => {
+        absolutes.forEach((item, index) => {
             if (item[0]) {
                 // log error and continue
                 log.trace('command in a batch failed to execute', {
@@ -224,11 +224,7 @@ class ListMetrics {
                 });
             } else {
                 let val = parseInt(item[1], 10);
-                val = Number.isNaN(val) ? 0 : val;
-                if (val < 0) {
-                    return false;
-                }
+                val = isNaN(val) ? 0 : val;
                 if (index === 0) {
                     metricResponse.storageUtilized[0] = val;
                 } else if (index === 1) {
@@ -239,15 +235,8 @@ class ListMetrics {
                     metricResponse.numberOfObjects[1] = val;
                 }
             }
-            return true;
         });
 
-        if (!areMetricsPositive) {
-            return cb(errors.InternalError.customizeDescription(
-                'Utapi is in a transient state for this time period as ' +
-                'metrics are being collected. Please try again in a few ' +
-                'minutes.'));
-        }
         /**
         * Batch result is of the format
         * [ [null, '1'], [null, '2'], [null, '3'] ] where each
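The substance of these hunks is the switch from `forEach` to `every` on the 7439805 side: `every` short-circuits on the first negative absolute metric, so the listing can fail fast instead of returning a negative `storageUtilized` or `numberOfObjects`. A stripped-down sketch of that guard (sample data invented):

```js
// Each batch entry is [err, value]; a negative parsed value aborts every().
const absolutes = [[null, '42'], [null, '-3']];
const areMetricsPositive = absolutes.every(([err, raw]) => {
    if (err) {
        return true; // the real code logs and continues on command errors
    }
    let val = parseInt(raw, 10);
    val = Number.isNaN(val) ? 0 : val;
    return val >= 0; // false stops the iteration immediately
});
// areMetricsPositive === false here, so the caller responds with the
// transient-state InternalError instead of reporting a negative metric.
```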
@@ -110,10 +110,9 @@ class UtapiReplay {
                 this.log.trace('pushing metric with utapiClient::pushMetric',
                     { method: 'UtapiReplay._pushCachedMetrics' });
                 const { action, reqUid, params } = result;
-                const firstReqUid = reqUid.split(':')[0];
                 // We do not pass the callback to pushMetric since UtapiClient
                 // will handle pushing it to local cache if internal error.
-                this.utapiClient.pushMetric(action, firstReqUid, params);
+                this.utapiClient.pushMetric(action, reqUid, params);
             }
             return next();
         }, err => cb(err));
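The removed line trims the serialized request-UID chain down to its first component before replaying, so the replayed metric is attributed to the originating request rather than the whole chain. In isolation (UID values hypothetical):

```js
// A serialized reqUid is a ':'-joined chain of UIDs; the replay keeps
// only the first one.
const reqUid = '0123456789abcdef:fedcba9876543210';
const firstReqUid = reqUid.split(':')[0]; // '0123456789abcdef'
```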
@@ -48,11 +48,9 @@ class Vault {
         const log = params.log;
         log.debug('authenticating V4 request');
         const serializedRCs = requestContexts.map(rc => rc.serialize());
-        this._client.verifySignatureV4(
-            stringToSign, signatureFromRequest,
-            accessKey, region, scopeDate,
-            { reqUid: log.getSerializedUids(), requestContext: serializedRCs },
-            (err, authInfo) => {
+        this._client.verifySignatureV4(stringToSign, signatureFromRequest,
+            accessKey, region, scopeDate, { reqUid: log.getSerializedUids(),
+            requestContext: serializedRCs }, (err, authInfo) => {
                 if (err) {
                     log.trace('error from vault', { error: err });
                     return callback(err);
@@ -1,7 +1,7 @@
 {
     "name": "utapi",
     "engines": {
-        "node": ">=6.9.5"
+        "node": "6.9.5"
     },
     "version": "7.0.0",
     "description": "API for tracking resource utilization and reporting metrics",
@@ -116,10 +116,14 @@ function checkListElement(action, params, res) {
         'incorrect timestamp value');
     assert(reqUid !== undefined,
         `reqUid property not in cached element: ${action}`);
+    const reqLog = log.newRequestLoggerFromSerializedUids(reqUid);
+    const reqUids = reqLog.getSerializedUids();
+    // The first two reqUids should be those in the response.
+    const expectedReqUid = reqUids.substring(0, reqUids.lastIndexOf(':'));
     // We want the action and original params for use during the replay.
     assert.deepStrictEqual(result, {
         action,
-        reqUid,
+        reqUid: expectedReqUid,
         params,
         timestamp,
     }, `incorrect value for action: ${action}`);
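The added assertion accounts for logger wrapping in the code under test: creating a request logger from a serialized UID chain appends a fresh UID, so stripping everything after the last `:` recovers the UIDs that were actually cached. In isolation (values hypothetical):

```js
// newRequestLoggerFromSerializedUids() appends its own UID to the chain,
// so the cached value is the serialized chain minus that last component.
const reqUids = 'aaaa:bbbb:cccc'; // chain after the new logger's UID is added
const expectedReqUid = reqUids.substring(0, reqUids.lastIndexOf(':')); // 'aaaa:bbbb'
```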
@@ -1,5 +1,4 @@
 const assert = require('assert');
-const { errors } = require('arsenal');
 const MemoryBackend = require('../../lib/backend/Memory');
 const Datastore = require('../../lib/Datastore');
 const ListMetrics = require('../../lib/ListMetrics');
@@ -38,7 +37,7 @@ function getMetricResponse(schemaKey) {
     return response;
 }
 
-function assertMetrics(schemaKey, metricName, props, isNegativeValue, done) {
+function assertMetrics(schemaKey, metricName, props, done) {
     const timestamp = new Date().setMinutes(0, 0, 0);
     const timeRange = [timestamp, timestamp];
     const expectedRes = getMetricResponse(schemaKey);
@@ -46,14 +45,6 @@ function assertMetrics(schemaKey, metricName, props, isNegativeValue, done) {
     const metricType = new ListMetrics(metricLevels[schemaKey], 's3');
     metricType.getMetrics(metricName, timeRange, datastore, logger,
         (err, res) => {
-            if (isNegativeValue) {
-                assert.deepStrictEqual(err,
-                    errors.InternalError.customizeDescription(
-                        'Utapi is in a transient state for this time period as ' +
-                        'metrics are being collected. Please try again in a few ' +
-                        'minutes.'));
-                return done();
-            }
             assert.strictEqual(err, null);
             // overwrite operations metrics
             if (expectedResProps.operations) {
@@ -63,7 +54,7 @@ function assertMetrics(schemaKey, metricName, props, isNegativeValue, done) {
             }
             assert.deepStrictEqual(res, Object.assign(expectedRes,
                 { timeRange }, expectedResProps));
-            return done();
+            done();
         });
 }
 
@@ -77,7 +68,7 @@ function getSchemaObject(schemaKey) {
     return schemaObject;
 }
 
-function testOps(schemaKey, keyIndex, metricindex, isNegativeValue, done) {
+function testOps(schemaKey, keyIndex, metricindex, done) {
     const schemaObject = getSchemaObject(schemaKey);
     const timestamp = new Date().setMinutes(0, 0, 0);
     let key;
@@ -85,26 +76,23 @@ function testOps(schemaKey, keyIndex, metricindex, isNegativeValue, done) {
     let val;
     if (keyIndex === 'storageUtilized' || keyIndex === 'numberOfObjects') {
         key = generateStateKey(schemaObject, keyIndex);
-        val = isNegativeValue ? -1024 : 1024;
+        val = 1024;
         props[metricindex] = [val, val];
         memBackend.zadd(key, timestamp, val, () =>
-            assertMetrics(schemaKey, schemaObject[schemaKey], props,
-                isNegativeValue, done));
+            assertMetrics(schemaKey, schemaObject[schemaKey], props, done));
     } else if (keyIndex === 'incomingBytes' || keyIndex === 'outgoingBytes') {
         key = generateKey(schemaObject, keyIndex, timestamp);
         val = 1024;
         props[metricindex] = val;
         memBackend.incrby(key, val, () =>
-            assertMetrics(schemaKey, schemaObject[schemaKey], props,
-                isNegativeValue, done));
+            assertMetrics(schemaKey, schemaObject[schemaKey], props, done));
     } else {
         key = generateKey(schemaObject, keyIndex, timestamp);
         val = 1;
         props = { operations: {} };
         props.operations[metricindex] = val;
         memBackend.incr(key, () =>
-            assertMetrics(schemaKey, schemaObject[schemaKey], props,
-                isNegativeValue, done));
+            assertMetrics(schemaKey, schemaObject[schemaKey], props, done));
     }
 }
 
@@ -115,159 +103,141 @@ Object.keys(metricLevels).forEach(schemaKey => {
 
         it(`should list default (0s) ${metric} level metrics of a bucket`,
             done => assertMetrics(schemaKey, resourceNames[schemaKey], null,
-                false, done));
+                done));
 
-        it(`should return ${metric} level metrics for storage utilized`,
-            done => testOps(schemaKey, 'storageUtilized', 'storageUtilized',
-                false, done));
+        it(`should return ${metric} level metrics for storage utilized`, done =>
+            testOps(schemaKey, 'storageUtilized', 'storageUtilized', done));
 
         it(`should return ${metric} level metrics for number of objects`,
             done => testOps(schemaKey, 'numberOfObjects', 'numberOfObjects',
-                false, done));
+                done));
 
         it(`should return ${metric} level metrics for incoming bytes`, done =>
-            testOps(schemaKey, 'incomingBytes', 'incomingBytes', false, done));
+            testOps(schemaKey, 'incomingBytes', 'incomingBytes', done));
 
         it(`should return ${metric} level metrics for outgoing bytes`, done =>
-            testOps(schemaKey, 'outgoingBytes', 'outgoingBytes', false, done));
+            testOps(schemaKey, 'outgoingBytes', 'outgoingBytes', done));
 
         it(`should return ${metric} level metrics for delete bucket`, done =>
-            testOps(schemaKey, 'deleteBucket', 's3:DeleteBucket', false, done));
+            testOps(schemaKey, 'deleteBucket', 's3:DeleteBucket', done));
 
         it(`should return ${metric} level metrics for list bucket`, done =>
-            testOps(schemaKey, 'listBucket', 's3:ListBucket', false, done));
+            testOps(schemaKey, 'listBucket', 's3:ListBucket', done));
 
         it(`should return ${metric} level metrics for get bucket acl`, done =>
-            testOps(schemaKey, 'getBucketAcl', 's3:GetBucketAcl', false, done));
+            testOps(schemaKey, 'getBucketAcl', 's3:GetBucketAcl', done));
 
         it(`should return ${metric} level metrics for get bucket location`,
             done =>
             testOps(schemaKey, 'getBucketLocation', 's3:GetBucketLocation',
-                false, done));
+                done));
 
         it(`should return ${metric} level metrics for put bucket acl`, done =>
-            testOps(schemaKey, 'putBucketAcl', 's3:PutBucketAcl', false, done));
+            testOps(schemaKey, 'putBucketAcl', 's3:PutBucketAcl', done));
 
         it(`should return ${metric} level metrics for get bucket cors`, done =>
-            testOps(schemaKey, 'getBucketCors', 's3:GetBucketCors', false,
-                done));
+            testOps(schemaKey, 'getBucketCors', 's3:GetBucketCors', done));
 
         it(`should return ${metric} level metrics for put bucket cors`, done =>
-            testOps(schemaKey, 'putBucketCors', 's3:PutBucketCors', false,
-                done));
+            testOps(schemaKey, 'putBucketCors', 's3:PutBucketCors', done));
 
         it(`should return ${metric} level metrics for delete bucket cors`,
             done => testOps(schemaKey, 'deleteBucketCors',
-                's3:DeleteBucketCors', false, done));
+                's3:DeleteBucketCors', done));
 
         it(`should return ${metric} level metrics for get bucket website`,
            done => testOps(schemaKey, 'getBucketWebsite',
-                's3:GetBucketWebsite', false, done));
+                's3:GetBucketWebsite', done));
 
         it(`should return ${metric} level metrics for put bucket website`,
            done => testOps(schemaKey, 'putBucketWebsite',
-                's3:PutBucketWebsite', false, done));
+                's3:PutBucketWebsite', done));
 
         it(`should return ${metric} level metrics for delete bucket website`,
            done => testOps(schemaKey, 'deleteBucketWebsite',
-                's3:DeleteBucketWebsite', false, done));
+                's3:DeleteBucketWebsite', done));
 
         it(`should return ${metric} level metrics for put object`, done =>
-            testOps(schemaKey, 'putObject', 's3:PutObject', false, done));
+            testOps(schemaKey, 'putObject', 's3:PutObject', done));
 
         it(`should return ${metric} level metrics for copy object`, done =>
-            testOps(schemaKey, 'copyObject', 's3:CopyObject', false, done));
 
+            testOps(schemaKey, 'copyObject', 's3:CopyObject', done));
 
         it(`should return ${metric} level metrics for upload part`, done =>
-            testOps(schemaKey, 'uploadPart', 's3:UploadPart', false, done));
+            testOps(schemaKey, 'uploadPart', 's3:UploadPart', done));
 
         it(`should return ${metric} level metrics for list bucket multipart ` +
         'uploads', done => testOps(schemaKey, 'listBucketMultipartUploads',
-            's3:ListBucketMultipartUploads', false, done));
+            's3:ListBucketMultipartUploads', done));
 
         it(`should return ${metric} level metrics for list multipart upload ` +
         'parts', done => testOps(schemaKey, 'listMultipartUploadParts',
-            's3:ListMultipartUploadParts', false, done));
+            's3:ListMultipartUploadParts', done));
 
         it(`should return ${metric} level metrics for initiate multipart ` +
        'upload', done => testOps(schemaKey, 'initiateMultipartUpload',
-            's3:InitiateMultipartUpload', false, done));
+            's3:InitiateMultipartUpload', done));
 
         it(`should return ${metric} level metrics for complete multipart ` +
        'upload', done => testOps(schemaKey, 'completeMultipartUpload',
-            's3:CompleteMultipartUpload', false, done));
+            's3:CompleteMultipartUpload', done));
 
         it(`should return ${metric} level metrics for abort multipart ` +
        'upload', done => testOps(schemaKey, 'abortMultipartUpload',
-            's3:AbortMultipartUpload', false, done));
+            's3:AbortMultipartUpload', done));
 
         it(`should return ${metric} level metrics for delete object`, done =>
-            testOps(schemaKey, 'deleteObject', 's3:DeleteObject', false, done));
+            testOps(schemaKey, 'deleteObject', 's3:DeleteObject', done));
 
         it(`should return ${metric} level metrics for multiObjectDelete`,
            done => testOps(schemaKey, 'multiObjectDelete',
-                's3:MultiObjectDelete', false, done));
+                's3:MultiObjectDelete', done));
 
         it(`should return ${metric} level metrics for get object`, done =>
-            testOps(schemaKey, 'getObject', 's3:GetObject', false, done));
+            testOps(schemaKey, 'getObject', 's3:GetObject', done));
 
         it(`should return ${metric} level metrics for get object acl`, done =>
-            testOps(schemaKey, 'getObjectAcl', 's3:GetObjectAcl', false, done));
+            testOps(schemaKey, 'getObjectAcl', 's3:GetObjectAcl', done));
 
         it(`should return ${metric} level metrics for get object tagging`,
            done => testOps(schemaKey, 'getObjectTagging',
-                's3:GetObjectTagging', false, done));
+                's3:GetObjectTagging', done));
 
         it(`should return ${metric} level metrics for put object acl`, done =>
-            testOps(schemaKey, 'putObjectAcl', 's3:PutObjectAcl', false, done));
+            testOps(schemaKey, 'putObjectAcl', 's3:PutObjectAcl', done));
 
         it(`should return ${metric} level metrics for put object tagging`,
             done => testOps(schemaKey, 'putObjectTagging', 's3:PutObjectTagging',
-                false, done));
+                done));
 
         it(`should return ${metric} level metrics for delete object tagging`,
             done => testOps(schemaKey, 'deleteObjectTagging',
-                's3:DeleteObjectTagging', false, done));
+                's3:DeleteObjectTagging', done));
 
         it(`should return ${metric} level metrics for head bucket`, done =>
-            testOps(schemaKey, 'headBucket', 's3:HeadBucket', false, done));
+            testOps(schemaKey, 'headBucket', 's3:HeadBucket', done));
 
         it(`should return ${metric} level metrics for head object`, done =>
-            testOps(schemaKey, 'headObject', 's3:HeadObject', false, done));
+            testOps(schemaKey, 'headObject', 's3:HeadObject', done));
 
         it(`should return ${metric} level metrics for put bucket versioning`,
             done => testOps(schemaKey, 'putBucketVersioning',
-                's3:PutBucketVersioning', false, done));
+                's3:PutBucketVersioning', done));
 
         it(`should return ${metric} level metrics for get bucket versioning`,
             done => testOps(schemaKey, 'getBucketVersioning',
-                's3:GetBucketVersioning', false, done));
+                's3:GetBucketVersioning', done));
 
         it(`should return ${metric} level metrics for put bucket replication`,
             done => testOps(schemaKey, 'putBucketReplication',
-                's3:PutBucketReplication', false, done));
+                's3:PutBucketReplication', done));
 
         it(`should return ${metric} level metrics for get bucket replication`,
             done => testOps(schemaKey, 'getBucketReplication',
-                's3:GetBucketReplication', false, done));
+                's3:GetBucketReplication', done));
 
         it(`should return ${metric} level metrics for delete bucket ` +
         'replication', done => testOps(schemaKey, 'deleteBucketReplication',
-            's3:DeleteBucketReplication', false, done));
+            's3:DeleteBucketReplication', done));
-    });
-});
-
-Object.keys(metricLevels).forEach(schemaKey => {
-    const metric = metricLevels[schemaKey];
-    describe(`Get ${metric} level metrics with negative values`, () => {
-        afterEach(() => memBackend.flushDb());
-
-        it(`should return ${metric} level metrics for storage utilized as 0`,
-            done => testOps(schemaKey, 'storageUtilized', 'storageUtilized',
-                true, done));
-
-        it(`should return ${metric} level metrics for number of objects as 0`,
-            done => testOps(schemaKey, 'numberOfObjects', 'numberOfObjects',
-                true, done));
     });
 });