Compare commits
239 Commits
c5b209904e...5da4b90c21
Author | SHA1 | Date |
---|---|---|
bert-e | 5da4b90c21 | |
Taylor McKinnon | 08ab0b56f6 | |
bert-e | 97784eaa70 | |
Taylor McKinnon | 0fb3b79e71 | |
bert-e | cd88e9f4bf | |
bert-e | 558840c55c | |
bert-e | c784f09bc0 | |
Taylor McKinnon | 1e4b7bd9f2 | |
bert-e | d61f9997c8 | |
Taylor McKinnon | e61655baea | |
bert-e | d911aadd15 | |
bert-e | d9555e0038 | |
bert-e | e5a814aa13 | |
Taylor McKinnon | adf9ee325f | |
bert-e | acf5bc273c | |
bert-e | 1ed1b901c2 | |
bert-e | 8ebb10c051 | |
bert-e | 04a8021fe5 | |
bert-e | d80bd66387 | |
Taylor McKinnon | 1caa33cf9e | |
bert-e | 4678fdae05 | |
Taylor McKinnon | 304bca04c7 | |
Taylor McKinnon | c05d2dbaf6 | |
bert-e | 0814b59fda | |
bert-e | 3f7ea3e121 | |
Taylor McKinnon | d17e5545b3 | |
bert-e | 3d9d949b05 | |
bert-e | f136d7d994 | |
bert-e | d36af35db5 | |
bert-e | a1e6c4d11a | |
bert-e | 97014cf67b | |
Taylor McKinnon | 61cc5de8b5 | |
bert-e | c8ac4cf688 | |
Taylor McKinnon | eb785bf3b3 | |
bert-e | 4554828a52 | |
Taylor McKinnon | 64c2f7307a | |
bert-e | 4caa7f5641 | |
bert-e | 9b800a062f | |
Taylor McKinnon | 99d3f7e2b8 | |
bert-e | 92ffbcc3d7 | |
bert-e | 77d311b01d | |
Taylor McKinnon | 90ac30d288 | |
bert-e | 1197733b17 | |
bert-e | 72383df256 | |
bert-e | 78bc6290b2 | |
bert-e | 2073211a60 | |
bert-e | 124744b562 | |
bert-e | 478d5d69db | |
bert-e | 4a806da678 | |
bert-e | b198553bf1 | |
bert-e | 3f4f34976c | |
bert-e | 8a934e1b51 | |
bert-e | 74b8c91244 | |
bert-e | 97b65aaa5a | |
bert-e | df4e96132c | |
bert-e | a76cc118e9 | |
bert-e | ffe3ece284 | |
bert-e | 979a8cc804 | |
bert-e | d6d53eed8a | |
bert-e | bb3017e3b5 | |
bert-e | e3a6844fc5 | |
bert-e | 1f010bebde | |
bert-e | 58c73db7c3 | |
Anurag Mittal | d98bac3a75 | |
bert-e | 9a7ea1e564 | |
bert-e | 117f2a37cd | |
bert-e | 3a6e0a4c40 | |
bert-e | 641dbdc9f8 | |
bert-e | 62498fa330 | |
bert-e | ad31123c0f | |
bert-e | e9a252f3c4 | |
bert-e | c382a6647c | |
bert-e | 1b1d1ce35c | |
bert-e | dfbe4d61ab | |
bert-e | 0b308adf07 | |
bert-e | c718de352a | |
bert-e | 73735f0f27 | |
bert-e | af4937019c | |
bert-e | 7b1ed984e8 | |
bert-e | 700df82aa4 | |
bert-e | 0adb775a1c | |
bert-e | a64b25c26e | |
bert-e | 8b827052dd | |
bert-e | 392636df76 | |
bert-e | 32920a5a97 | |
bert-e | b875c72451 | |
bert-e | abbbad9f5f | |
bert-e | 4028b265f3 | |
Taylor McKinnon | c2254443dc | |
Taylor McKinnon | dd48282e9c | |
bert-e | 6051cada33 | |
bert-e | dd1ef6860e | |
bert-e | 33c1af303d | |
bert-e | 3d79444672 | |
bert-e | 1bd45ffd68 | |
bert-e | f19018f9e7 | |
bert-e | 87658b4351 | |
bert-e | 91eed09651 | |
bert-e | 728d54501b | |
bert-e | 911cfb3c36 | |
bert-e | 5d7ed8520f | |
Taylor McKinnon | dd118e4a44 | |
bert-e | 3076eaf115 | |
bert-e | fcd74b5707 | |
bert-e | ad52015f73 | |
bert-e | 60d0dc794d | |
bert-e | 14db2c93ce | |
bert-e | 4d4906dc94 | |
bert-e | 7a9ce9bf2c | |
bert-e | d203a94367 | |
Taylor McKinnon | bea0a4607a | |
bert-e | 047aa4f8eb | |
bert-e | 4aae68208c | |
bert-e | 0b7781d3b8 | |
bert-e | 2a2f818745 | |
bert-e | 8064180984 | |
bert-e | 6e096c9d39 | |
bert-e | 86285d1e45 | |
bert-e | 8ea9a5dc0a | |
bert-e | 390f1bb3c1 | |
bert-e | 3b2fc1b045 | |
bert-e | 58f20049f3 | |
bert-e | db05aaf2a3 | |
bert-e | 16c43c202b | |
bert-e | 90beac2fa7 | |
bert-e | 14446a10c2 | |
bert-e | 8e1417ad6b | |
bert-e | ae29a7d346 | |
bert-e | 646f921ded | |
bert-e | 31ff2aa63b | |
bert-e | 37f6b4ddc5 | |
bert-e | d043d8dcae | |
bert-e | 6dbc500fa9 | |
bert-e | 8e1550d61a | |
bert-e | 4a811d7e86 | |
bert-e | 1225f45805 | |
bert-e | 81e5c9e98c | |
bert-e | cdb9ef06d1 | |
bert-e | 186807e798 | |
bert-e | 25ffbe3bbc | |
bert-e | 77f8fc4b11 | |
bert-e | a02fd4830c | |
bert-e | aa314c5ed9 | |
bert-e | 51858fe41a | |
Taylor McKinnon | 70a79537fe | |
bert-e | b335675a36 | |
bert-e | 707620acf7 | |
bert-e | 30306f3dce | |
bert-e | d82623014d | |
bert-e | 1c5c011699 | |
bert-e | 282a55c724 | |
Taylor McKinnon | 90c8f49222 | |
Taylor McKinnon | 2d0c104cc2 | |
bert-e | 72bda734bf | |
bert-e | a2e8fe51b4 | |
bert-e | b494f1e85c | |
bert-e | af1a01b692 | |
Rahul Padigela | 1f0f7d91ff | |
Taylor McKinnon | 5c6386e33d | |
Taylor McKinnon | 0bf2a533f5 | |
bert-e | c59323952d | |
bert-e | b05d8c5528 | |
bert-e | a5430ba8a8 | |
bert-e | cc0087c3ba | |
bert-e | 6a45a13ab4 | |
bert-e | 3dd760835f | |
Flavien Lebarbé | 2720bdb096 | |
Flavien Lebarbé | 54390f82ba | |
Katherine Laue | 5ccb8d03be | |
Katherine Laue | 9fdad30ca0 | |
Katherine Laue | 3180aa2d02 | |
Katherine Laue | 60e4ed7880 | |
Katherine Laue | d77d15c7dd | |
Katherine Laue | dc34912298 | |
Katherine Laue | 4a845e80cd | |
bbuchanan9 | b56405f031 | |
bbuchanan9 | 4d6fd39693 | |
bbuchanan9 | b3a3383289 | |
bbuchanan9 | 196acf9fc8 | |
bbuchanan9 | 347ac8faf1 | |
bbuchanan9 | a62c22f06d | |
bbuchanan9 | d65b9a65ee | |
bert-e | 9533009100 | |
bert-e | d336997813 | |
Katherine Laue | 166d2c06cf | |
bbuchanan9 | 9042956610 | |
bert-e | 4f754e26f9 | |
bbuchanan9 | dfb7a83b2a | |
Katherine Laue | e8ac66ff09 | |
bert-e | 1919808c09 | |
bert-e | 46f62388cd | |
bert-e | 894f37750f | |
bert-e | a990c743af | |
bert-e | 3a3083c379 | |
bert-e | 39b4b8b623 | |
bert-e | c5165a0338 | |
bert-e | ef56d39193 | |
bert-e | da7144389d | |
bert-e | d2020f8190 | |
bert-e | 27ef9dfa33 | |
bert-e | fae26f0933 | |
bert-e | 270591bf23 | |
bert-e | 12fa8b567c | |
bbuchanan9 | fac88a209f | |
bert-e | ef2c350724 | |
bert-e | 46bb81e9f8 | |
bert-e | 829369d37b | |
bert-e | b5def9cb54 | |
bert-e | 2b514a618e | |
bbuchanan9 | 4f119ea917 | |
anurag4dsb | 608fddb4bd | |
Rahul Padigela | f2f1d0c742 | |
Dora Korpar | 6d0c8dd1c0 | |
bert-e | cd3324df87 | |
David Pineau | 4664ee3cca | |
David Pineau | a00aa6f05f | |
bert-e | 4b646285d2 | |
bert-e | e77bcc8e72 | |
Rahul Padigela | e3511ee7ef | |
Dora Korpar | fc634ee028 | |
Rahul Padigela | 4c776b3eb5 | |
Dora Korpar | 33024215e3 | |
Dora Korpar | 4965d96f5c | |
Dora Korpar | 0bfd8a66fb | |
Rahul Padigela | a8a8ad42ff | |
Rahul Padigela | 8e11d15893 | |
Rahul Padigela | bf1cbe4bf4 | |
Rahul Padigela | a4ab00ad92 | |
Rahul Padigela | 6c4e7aedce | |
Stefano Maffulli | b27c57bcfc | |
LaureVergeron | 1fda068967 | |
Rahul Padigela | 18bf5bb00e | |
Alexander Chan | 6de529b8b4 | |
Alexander Chan | ec3efcb9af | |
Rahul Padigela | d77f8cc46c | |
Rahul Padigela | 7487555957 | |
Bennett Buchanan | 7fbddc071b | |
ironman-machine | 6d708d54d0 | |
Rayene Ben Rayana | 6ab610b27f |
@@ -0,0 +1,87 @@
# General support information

GitHub Issues are **reserved** for actionable bug reports (including
documentation inaccuracies), and feature requests.
**All questions** (regarding configuration, usecases, performance, community,
events, setup and usage recommendations, among other things) should be asked on
the **[Zenko Forum](http://forum.zenko.io/)**.

> Questions opened as GitHub issues will systematically be closed, and moved to
> the [Zenko Forum](http://forum.zenko.io/).

--------------------------------------------------------------------------------

## Avoiding duplicates

When reporting a new issue/requesting a feature, make sure that we do not have
any duplicates already open:

- search the issue list for this repository (use the search bar, select
  "Issues" on the left pane after searching);
- if there is a duplicate, please do not open your issue, and add a comment
  to the existing issue instead.

--------------------------------------------------------------------------------

## Bug report information

(delete this section (everything between the lines) if you're not reporting a
bug but requesting a feature)

### Description

Briefly describe the problem you are having in a few paragraphs.

### Steps to reproduce the issue

Please provide steps to reproduce, including full log output

### Actual result

Describe the results you received

### Expected result

Describe the results you expected

### Additional information

- Node.js version,
- Docker version,
- npm version,
- distribution/OS,
- optional: anything else you deem helpful to us.

--------------------------------------------------------------------------------

## Feature Request

(delete this section (everything between the lines) if you're not requesting
a feature but reporting a bug)

### Proposal

Describe the feature

### Current behavior

What currently happens

### Desired behavior

What you would like to happen

### Usecase

Please provide usecases for changing the current behavior

### Additional information

- Is this request for your company? Y/N
- If Y: Company name:
- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
- Are you willing to contribute this feature yourself?
- Position/Title:
- How did you hear about us?

--------------------------------------------------------------------------------
@@ -0,0 +1,21 @@
FROM node:6-slim

WORKDIR /usr/src/app

COPY package.json /usr/src/app

RUN apt-get update \
    && apt-get install -y jq --no-install-recommends \
    && npm install --production \
    && rm -rf /var/lib/apt/lists/* \
    && npm cache clear --force \
    && rm -rf ~/.node-gyp \
    && rm -rf /tmp/npm-*

# Keep the .git directory in order to properly report version
COPY . /usr/src/app

ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "npm", "start" ]

EXPOSE 8100
README.md
@@ -3,9 +3,8 @@
 ![Utapi logo](res/utapi-logo.png)
 
 [![Circle CI][badgepub]](https://circleci.com/gh/scality/utapi)
-[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/utapi)
 
-Service Utilization API for tracking resource usage and metrics reporting
+Service Utilization API for tracking resource usage and metrics reporting.
 
 ## Design
 
@@ -88,13 +87,13 @@ Server is running.
 1. Create an IAM user
 
 ```
-aws iam --endpoint-url <endpoint> create-user --user-name utapiuser
+aws iam --endpoint-url <endpoint> create-user --user-name <user-name>
 ```
 
 2. Create access key for the user
 
 ```
-aws iam --endpoint-url <endpoint> create-access-key --user-name utapiuser
+aws iam --endpoint-url <endpoint> create-access-key --user-name <user-name>
 ```
 
 3. Define a managed IAM policy
@@ -203,12 +202,11 @@ Server is running.
 5. Attach user to the managed policy
 
 ```
-aws --endpoint-url <endpoint> iam attach-user-policy --user-name utapiuser
---policy-arn <policy arn>
+aws --endpoint-url <endpoint> iam attach-user-policy --user-name
+<user-name> --policy-arn <policy arn>
 ```
 
-Now the user `utapiuser` has access to ListMetrics request in Utapi on all
-buckets.
+Now the user has access to ListMetrics request in Utapi on all buckets.
 
 ### Signing request with Auth V4
 
@@ -224,16 +222,18 @@ following urls for reference.
 You may also view examples making a request with Auth V4 using various languages
 and AWS SDKs [here](/examples).
 
-Alternatively, you can use a nifty command line tool available in Scality's S3.
+Alternatively, you can use a nifty command line tool available in Scality's
+CloudServer.
 
-You can git clone S3 repo from here https://github.com/scality/S3.git and follow
-the instructions in README to install the dependencies.
+You can git clone the CloudServer repo from here
+https://github.com/scality/cloudserver and follow the instructions in the README
+to install the dependencies.
 
-If you have S3 running inside a docker container you can docker exec into the S3
-container as
+If you have CloudServer running inside a docker container you can docker exec
+into the CloudServer container as
 
 ```
-docker exec -it <container id> bash
+docker exec -it <container-id> bash
 ```
 
 and then run the command
@@ -271,7 +271,7 @@ Usage: list_metrics [options]
 -v, --verbose
 ```
 
-A typical call to list metrics for a bucket `demo` to Utapi in a https enabled
+An example call to list metrics for a bucket `demo` to Utapi in a https enabled
 deployment would be
 
 ```
@@ -283,7 +283,7 @@ Both start and end times are time expressed as UNIX epoch timestamps **expressed
 in milliseconds**.
 
 Keep in mind, since Utapi metrics are normalized to the nearest 15 min.
-interval, so start time and end time need to be in specific format as follows.
+interval, start time and end time need to be in the specific format as follows.
 
 #### Start time
 
@@ -297,7 +297,7 @@ Date: Tue Oct 11 2016 17:35:25 GMT-0700 (PDT)
 
 Unix timestamp (milliseconds): 1476232525320
 
-Here's a typical JS method to get start timestamp
+Here's an example JS method to get a start timestamp
 
 ```javascript
 function getStartTimestamp(t) {
@@ -317,7 +317,7 @@ seconds and milliseconds set to 59 and 999 respectively. So valid end timestamps
 would look something like `09:14:59:999`, `09:29:59:999`, `09:44:59:999` and
 `09:59:59:999`.
 
-Here's a typical JS method to get end timestamp
+Here's an example JS method to get an end timestamp
 
 ```javascript
 function getEndTimestamp(t) {
@@ -342,4 +342,3 @@ In order to contribute, please follow the
 https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
 
 [badgepub]: http://circleci.com/gh/scality/utapi.svg?style=svg
-[badgepriv]: http://ci.ironmann.io/gh/scality/utapi.svg?style=svg
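The hunks above show only the signatures of `getStartTimestamp` and `getEndTimestamp`; their bodies are outside the diff context. A minimal sketch of helpers matching the 15-minute normalization rules the README describes (the bodies here are illustrative, not the repository's exact code):

```javascript
// Floor a date to the start of its 15-minute interval, as a UNIX epoch in ms.
function getStartTimestamp(t) {
    const time = new Date(t);
    const minutes = time.getMinutes();
    time.setMinutes(minutes - (minutes % 15), 0, 0); // snap to :00/:15/:30/:45
    return time.getTime();
}

// End timestamps fall on the last millisecond before an interval boundary,
// e.g. 09:14:59.999, mirroring the Python example later in this changeset.
function getEndTimestamp(t) {
    const time = new Date(t);
    const minutes = time.getMinutes();
    time.setMinutes(minutes - (minutes % 15), 0, 0);
    return time.getTime() - 1;
}
```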
@@ -0,0 +1,47 @@
#!/bin/bash

# set -e stops the execution of a script if a command or pipeline has an error
set -e

# modifying config.json
JQ_FILTERS_CONFIG="."

if [[ "$LOG_LEVEL" ]]; then
    if [[ "$LOG_LEVEL" == "info" || "$LOG_LEVEL" == "debug" || "$LOG_LEVEL" == "trace" ]]; then
        JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .log.logLevel=\"$LOG_LEVEL\""
        echo "Log level has been modified to $LOG_LEVEL"
    else
        echo "The log level you provided is incorrect (info/debug/trace)"
    fi
fi

if [[ "$WORKERS" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workers=\"$WORKERS\""
fi

if [[ "$REDIS_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.host=\"$REDIS_HOST\""
fi

if [[ "$REDIS_PORT" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .redis.port=\"$REDIS_PORT\""
fi

if [[ "$VAULTD_HOST" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.host=\"$VAULTD_HOST\""
fi

if [[ "$VAULTD_PORT" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .vaultd.port=\"$VAULTD_PORT\""
fi

if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi

if [[ $JQ_FILTERS_CONFIG != "." ]]; then
    jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
    mv config.json.tmp config.json
fi

exec "$@"
@@ -0,0 +1,90 @@
import sys, os, base64, datetime, hashlib, hmac, calendar, json
import requests # pip install requests

access_key = '9EQTVVVCLSSG6QBMNKO5'
secret_key = 'T5mK/skkkwJ/mTjXZnHyZ5UzgGIN=k9nl4dyTmDH'

method = 'POST'
service = 's3'
host = 'localhost:8100'
region = 'us-east-1'
canonical_uri = '/buckets'
canonical_querystring = 'Action=ListMetrics&Version=20160815'
content_type = 'application/x-amz-json-1.0'
algorithm = 'AWS4-HMAC-SHA256'

t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')

# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

def getSignatureKey(key, date_stamp, regionName, serviceName):
    kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
    kRegion = sign(kDate, regionName)
    kService = sign(kRegion, serviceName)
    kSigning = sign(kService, 'aws4_request')
    return kSigning

def get_start_time(t):
    start = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
    return calendar.timegm(start.utctimetuple()) * 1000

def get_end_time(t):
    end = t.replace(minute=t.minute - t.minute % 15, second=0, microsecond=0)
    return calendar.timegm(end.utctimetuple()) * 1000 - 1

start_time = get_start_time(datetime.datetime(2016, 1, 1, 0, 0, 0, 0))
end_time = get_end_time(datetime.datetime(2016, 2, 1, 0, 0, 0, 0))

# Request parameters for listing Utapi bucket metrics--passed in a JSON block.
bucketListing = {
    'buckets': [ 'utapi-test' ],
    'timeRange': [ start_time, end_time ],
}

request_parameters = json.dumps(bucketListing)

payload_hash = hashlib.sha256(request_parameters).hexdigest()

canonical_headers = \
    'content-type:{0}\nhost:{1}\nx-amz-content-sha256:{2}\nx-amz-date:{3}\n' \
    .format(content_type, host, payload_hash, amz_date)

signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'

canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
    .format(method, canonical_uri, canonical_querystring, canonical_headers,
            signed_headers, payload_hash)

credential_scope = '{0}/{1}/{2}/aws4_request' \
    .format(date_stamp, region, service)

string_to_sign = '{0}\n{1}\n{2}\n{3}' \
    .format(algorithm, amz_date, credential_scope,
            hashlib.sha256(canonical_request).hexdigest())

signing_key = getSignatureKey(secret_key, date_stamp, region, service)

signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
                     hashlib.sha256).hexdigest()

authorization_header = \
    '{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
    .format(algorithm, access_key, credential_scope, signed_headers, signature)

# The 'host' header is added automatically by the Python 'requests' library.
headers = {
    'Content-Type': content_type,
    'X-Amz-Content-Sha256': payload_hash,
    'X-Amz-Date': amz_date,
    'Authorization': authorization_header
}

endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring

r = requests.post(endpoint, data=request_parameters, headers=headers)
print (r.text)
@@ -3,10 +3,13 @@ FROM warp10io/warp10:2.6.0
 ENV S6_VERSION 2.0.0.1
 
 ENV WARP10_CONF_TEMPLATES ${WARP10_HOME}/conf.templates/standalone
+ENV SENSISION_DATA_DIR /data/sensision
 
 # Modify Warp 10 default config
 ENV standalone.host 0.0.0.0
+ENV standalone.port 4802
 ENV warpscript.repository.directory /usr/local/share/warpscript
+ENV warp.token.file /static.tokens
 ENV warpscript.extension.protobuf io.warp10.ext.protobuf.ProtobufWarpScriptExtension
 ENV warpscript.extension.macrovalueencoder 'io.warp10.continuum.ingress.MacroValueEncoder$Extension'
 # ENV warpscript.extension.debug io.warp10.script.ext.debug.DebugWarpScriptExtension
@@ -20,5 +23,5 @@ ADD https://dl.bintray.com/senx/maven/io/warp10/warp10-ext-protobuf/1.1.0-uberja
 
 ADD ./images/warp10/s6 /etc
 ADD ./warpscript /usr/local/share/warpscript
-
+ADD ./images/warp10/static.tokens /
 CMD /init
@@ -14,4 +14,9 @@ ensureDir "$WARP10_DATA_DIR/logs"
 ensureDir "$WARP10_DATA_DIR/conf"
 ensureDir "$WARP10_DATA_DIR/data/leveldb"
 ensureDir "$WARP10_DATA_DIR/data/datalog"
 ensureDir "$WARP10_DATA_DIR/data/datalog_done"
+
+ensureDir "$SENSISION_DATA_DIR"
+ensureDir "$SENSISION_DATA_DIR/logs"
+ensureDir "$SENSISION_DATA_DIR/conf"
+ensureDir "/var/run/sensision"
@@ -1,9 +1,14 @@
 #!/usr/bin/with-contenv sh
 
+echo "Installing warp 10 config"
 for path in $WARP10_CONF_TEMPLATES/*; do
     name="$(basename $path .template)"
     if [ ! -f "$WARP10_DATA_DIR/conf/$name" ]; then
         cp "$path" "$WARP10_DATA_DIR/conf/$name"
         echo "Copied $name to $WARP10_DATA_DIR/conf/$name"
     fi
 done
+
+echo "Installing sensision config"
+cp ${SENSISION_HOME}/templates/sensision.template ${SENSISION_DATA_DIR}/conf/sensision.conf
+cp ${SENSISION_HOME}/templates/log4j.properties.template ${SENSISION_DATA_DIR}/conf/log4j.properties
@@ -14,4 +14,10 @@ ensure_link "$WARP10_HOME/logs" "$WARP10_DATA_DIR/logs"
 ensure_link "$WARP10_HOME/etc/conf.d" "$WARP10_DATA_DIR/conf"
 ensure_link "$WARP10_HOME/leveldb" "$WARP10_DATA_DIR/data/leveldb"
 ensure_link "$WARP10_HOME/datalog" "$WARP10_DATA_DIR/data/datalog"
 ensure_link "$WARP10_HOME/datalog_done" "$WARP10_DATA_DIR/data/datalog_done"
+
+ensure_link "$SENSISION_HOME/etc" "${SENSISION_DATA_DIR}/conf"
+ensure_link "$SENSISION_HOME/logs" "${SENSISION_DATA_DIR}/logs"
+ensure_link /var/run/sensision/metrics ${SENSISION_HOME}/metrics
+ensure_link /var/run/sensision/targets ${SENSISION_HOME}/targets
+ensure_link /var/run/sensision/queued ${SENSISION_HOME}/queued
@@ -0,0 +1,9 @@
#!/usr/bin/with-contenv sh

chmod 1733 "$SENSISION_HOME/metrics"
chmod 1733 "$SENSISION_HOME/targets"
chmod 700 "$SENSISION_HOME/queued"

sed -i 's/@warp:WriteToken@/'"writeTokenStatic"'/' $SENSISION_DATA_DIR/conf/sensision.conf
sed -i -e "s_^sensision\.home.*_sensision\.home = ${SENSISION_HOME}_" $SENSISION_DATA_DIR/conf/sensision.conf
sed -i -e 's_^sensision\.qf\.url\.default.*_sensision\.qf\.url\.default=http://127.0.0.1:4802/api/v0/update_' $SENSISION_DATA_DIR/conf/sensision.conf
@@ -0,0 +1,26 @@
#!/usr/bin/with-contenv sh

JAVA="/usr/bin/java"
JAVA_OPTS=""

VERSION=1.0.21
SENSISION_CONFIG=${SENSISION_DATA_DIR}/conf/sensision.conf
SENSISION_JAR=${SENSISION_HOME}/bin/sensision-${VERSION}.jar
SENSISION_CP=${SENSISION_HOME}/etc:${SENSISION_JAR}
SENSISION_CLASS=io.warp10.sensision.Main
export MALLOC_ARENA_MAX=1

if [ -z "$SENSISION_HEAP" ]; then
    SENSISION_HEAP=64m
fi

SENSISION_CMD="${JAVA} ${JAVA_OPTS} -Xmx${SENSISION_HEAP} -Dsensision.server.port=0 ${SENSISION_OPTS} -Dsensision.config=${SENSISION_CONFIG} -cp ${SENSISION_CP} ${SENSISION_CLASS}"

if [ -n "$ENABLE_SENSISION" ]; then
    echo "Starting Sensision with $SENSISION_CMD ..."
    exec $SENSISION_CMD | tee -a ${SENSISION_HOME}/logs/sensision.log
else
    echo "Sensision is disabled"
    # wait indefinitely
    exec tail -f /dev/null
fi
@@ -1,13 +1,37 @@
 #!/usr/bin/with-contenv sh
 
+export SENSISIONID=warp10
+export MALLOC_ARENA_MAX=1
+
 JAVA="/usr/bin/java"
 WARP10_JAR=${WARP10_HOME}/bin/warp10-${WARP10_VERSION}.jar
 WARP10_CLASS=io.warp10.standalone.Warp
 WARP10_CP="${WARP10_HOME}/etc:${WARP10_JAR}:${WARP10_HOME}/lib/*"
 WARP10_CONFIG_DIR="$WARP10_DATA_DIR/conf"
 CONFIG_FILES="$(find ${WARP10_CONFIG_DIR} -not -path "*/\.*" -name "*.conf" | sort | tr '\n' ' ' 2> /dev/null)"
+LOG4J_CONF=${WARP10_HOME}/etc/log4j.properties
 
-WARP10_CMD="${JAVA} ${JAVA_OPTS} -cp ${WARP10_CP} ${WARP10_CLASS} ${CONFIG_FILES}"
+if [ -z "$WARP10_HEAP" ]; then
+    WARP10_HEAP=1g
+fi
+
+if [ -z "$WARP10_HEAP_MAX" ]; then
+    WARP10_HEAP_MAX=4g
+fi
+
+JAVA_OPTS="-Djava.awt.headless=true -Xms${WARP10_HEAP} -Xmx${WARP10_HEAP_MAX} -XX:+UseG1GC ${JAVA_OPTS}"
+
+SENSISION_OPTS=
+if [ -n "$ENABLE_SENSISION" ]; then
+    _SENSISION_LABELS=
+    # Expects a comma separated list of key=value ex key=value,foo=bar
+    if [ -n "$SENSISION_LABELS" ]; then
+        _SENSISION_LABELS="-Dsensision.default.labels=$SENSISION_LABELS"
+    fi
+    SENSISION_OPTS="-Dsensision.server.port=0 ${_SENSISION_LABELS} -Dsensision.events.dir=/var/run/sensision/metrics -Dfile.encoding=UTF-8"
+fi
+
+WARP10_CMD="${JAVA} -Dlog4j.configuration=file:${LOG4J_CONF} ${JAVA_OPTS} ${SENSISION_OPTS} -cp ${WARP10_CP} ${WARP10_CLASS} ${CONFIG_FILES}"
 
 echo "Starting Warp 10 with $WARP10_CMD ..."
 exec $WARP10_CMD | tee -a ${WARP10_HOME}/logs/warp10.log
@@ -0,0 +1,9 @@
token.write.0.name=writeTokenStatic
token.write.0.producer=42424242-4242-4242-4242-424242424242
token.write.0.owner=42424242-4242-4242-4242-424242424242
token.write.0.app=utapi


token.read.0.name=readTokenStatic
token.read.0.owner=42424242-4242-4242-4242-424242424242
token.read.0.app=utapi
|
1
index.js
1
index.js
|
@ -1,5 +1,4 @@
|
||||||
/* eslint-disable global-require */
|
/* eslint-disable global-require */
|
||||||
|
|
||||||
// eslint-disable-line strict
|
// eslint-disable-line strict
|
||||||
let toExport;
|
let toExport;
|
||||||
|
|
||||||
|
|
|
@@ -81,6 +81,17 @@ class Datastore {
         return this._client.call((backend, done) => backend.incr(key, done), cb);
     }
 
+    /**
+     * increment value of a key by the provided value
+     * @param {string} key - key holding the value
+     * @param {string} value - value containing the data
+     * @param {callback} cb - callback
+     * @return {undefined}
+     */
+    incrby(key, value, cb) {
+        return this._client.incrby(key, value, cb);
+    }
+
     /**
      * decrement value of a key by 1
      * @param {string} key - key holding the value
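The new `incrby` mirrors the existing `incr`/`decrby` helpers, so callers can move a counter by an arbitrary delta. A hedged usage sketch (`ds` is assumed to be a connected `Datastore` instance; the helper name is illustrative):

```javascript
// Apply a signed byte delta to a counter key: negative deltas decrement,
// positive deltas increment (Redis INCRBY/DECRBY semantics).
function adjustCounter(ds, key, delta, cb) {
    const action = delta < 0 ? 'decrby' : 'incrby';
    return ds[action](key, Math.abs(delta), cb);
}
```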
@@ -97,6 +97,7 @@ const metricObj = {
     buckets: 'bucket',
     accounts: 'accountId',
     users: 'userId',
+    location: 'location',
 };
 
 class UtapiClient {
@@ -120,13 +121,17 @@ class UtapiClient {
         const api = (config || {}).logApi || werelogs;
         this.log = new api.Logger('UtapiClient');
         // By default, we push all resource types
-        this.metrics = ['buckets', 'accounts', 'users', 'service'];
+        this.metrics = ['buckets', 'accounts', 'users', 'service', 'location'];
         this.service = 's3';
         this.disableOperationCounters = false;
         this.enabledOperationCounters = [];
         this.disableClient = true;
 
         if (config) {
+            this.disableClient = false;
+            this.expireMetrics = config.expireMetrics;
+            this.expireMetricsTTL = config.expireMetricsTTL || 0;
+
             if (config.metrics) {
                 const message = 'invalid property in UtapiClient configuration';
                 assert(Array.isArray(config.metrics), `${message}: metrics `
@@ -154,9 +159,6 @@ class UtapiClient {
             if (config.enabledOperationCounters) {
                 this.enabledOperationCounters = config.enabledOperationCounters;
             }
-            this.disableClient = false;
-            this.expireMetrics = config.expireMetrics;
-            this.expireMetricsTTL = config.expireMetricsTTL || 0;
         }
     }
 
@@ -1114,6 +1116,69 @@ class UtapiClient {
         });
     }
 
+    /**
+     *
+     * @param {string} location - name of data location
+     * @param {number} updateSize - size in bytes to update location metric by,
+     * could be negative, indicating deleted object
+     * @param {string} reqUid - Request Unique Identifier
+     * @param {function} callback - callback to call
+     * @return {undefined}
+     */
+    pushLocationMetric(location, updateSize, reqUid, callback) {
+        const log = this.log.newRequestLoggerFromSerializedUids(reqUid);
+        const params = {
+            level: 'location',
+            service: 's3',
+            location,
+        };
+        this._checkMetricTypes(params);
+        const action = (updateSize < 0) ? 'decrby' : 'incrby';
+        const size = (updateSize < 0) ? -updateSize : updateSize;
+        return this.ds[action](generateKey(params, 'locationStorage'), size,
+            err => {
+                if (err) {
+                    log.error('error pushing metric', {
+                        method: 'UtapiClient.pushLocationMetric',
+                        error: err,
+                    });
+                    return callback(errors.InternalError);
+                }
+                return callback();
+            });
+    }
+
+    /**
+     *
+     * @param {string} location - name of data backend to get metric for
+     * @param {string} reqUid - Request Unique Identifier
+     * @param {function} callback - callback to call
+     * @return {undefined}
+     */
+    getLocationMetric(location, reqUid, callback) {
+        const log = this.log.newRequestLoggerFromSerializedUids(reqUid);
+        const params = {
+            level: 'location',
+            service: 's3',
+            location,
+        };
+        const redisKey = generateKey(params, 'locationStorage');
+        return this.ds.get(redisKey, (err, bytesStored) => {
+            if (err) {
+                log.error('error getting metric', {
+                    method: 'UtapiClient: getLocationMetric',
+                    error: err,
+                });
+                return callback(errors.InternalError);
+            }
+            // if err and bytesStored are null, key does not exist yet
+            if (bytesStored === null) {
+                return callback(null, 0);
+            }
+            return callback(null, bytesStored);
+        });
+    }
+
     /**
      * Get storage used by bucket/account/user/service
      * @param {object} params - params for the metrics
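A hedged usage sketch of the two new methods (identifiers and configuration values are illustrative; `client` is assumed to be a constructed `UtapiClient`):

```javascript
// Record a 1 MiB object landing in a location, then read the counter back.
client.pushLocationMetric('us-west-1', 1024 * 1024, 'req-uid-1', err => {
    if (err) throw err;
    client.getLocationMetric('us-west-1', 'req-uid-1', (err2, bytesStored) => {
        if (err2) throw err2;
        console.log(bytesStored); // 1048576; a negative size would decrement
    });
});
```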
@@ -32,7 +32,7 @@ def get_options():
     parser.add_argument("-n", "--sentinel-cluster-name", default='scality-s3', help="Redis cluster name")
     parser.add_argument("-s", "--bucketd-addr", default='http://127.0.0.1:9000', help="URL of the bucketd server")
     parser.add_argument("-w", "--worker", default=10, help="Number of workers")
-    parser.add_argument("-b", "--bucket", default=False, help="Bucket to be processed")
+    parser.add_argument("-b", "--bucket", default=None, help="Bucket to be processed")
     return parser.parse_args()
 
 def chunks(iterable, size):
@@ -119,7 +119,7 @@ class BucketDClient:
             else:
                 is_truncated = len(payload) > 0
 
-    def list_buckets(self):
+    def list_buckets(self, name = None):
 
         def get_next_marker(p):
             if p is None:
@@ -135,8 +135,14 @@ class BucketDClient:
             buckets = []
             for result in payload['Contents']:
                 match = re.match("(\w+)..\|..(\w+.*)", result['key'])
-                buckets.append(Bucket(*match.groups()))
-            yield buckets
+                bucket = Bucket(*match.groups())
+                if name is None or bucket.name == name:
+                    buckets.append(bucket)
+            if buckets:
+                yield buckets
+                if name is not None:
+                    # Break on the first matching bucket if a name is given
+                    break
 
 
     def list_mpus(self, bucket):
@@ -328,12 +334,15 @@ def log_report(resource, name, obj_count, total_size):
 
 if __name__ == '__main__':
     options = get_options()
+    if options.bucket is not None and not options.bucket.strip():
+        print('You must provide a bucket name with the --bucket flag')
+        sys.exit(1)
     bucket_client = BucketDClient(options.bucketd_addr)
     redis_client = get_redis_client(options)
     account_reports = {}
     observed_buckets = set()
     with ThreadPoolExecutor(max_workers=options.worker) as executor:
-        for batch in bucket_client.list_buckets():
+        for batch in bucket_client.list_buckets(options.bucket):
             bucket_reports = {}
             jobs = [executor.submit(index_bucket, bucket_client, b) for b in batch]
             for job in futures.as_completed(jobs):
@@ -349,29 +358,15 @@ if __name__ == '__main__':
                 log_report('buckets', bucket, report['obj_count'], report['total_size'])
             pipeline.execute()
 
-    # Update total account reports in chunks
-    for chunk in chunks(account_reports.items(), ACCOUNT_UPDATE_CHUNKSIZE):
-        pipeline = redis_client.pipeline(transaction=False)  # No transaction to reduce redis load
-        for userid, report in chunk:
-            update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
-            log_report('accounts', userid, report['obj_count'], report['total_size'])
-        pipeline.execute()
-
-    observed_accounts = set(account_reports.keys())
-    recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts'))
     recorded_buckets = set(get_resources_from_redis(redis_client, 'buckets'))
+    if options.bucket is None:
+        stale_buckets = recorded_buckets.difference(observed_buckets)
+    elif observed_buckets and options.bucket in recorded_buckets:
+        # The provided bucket does not exist, so clean up any metrics
+        stale_buckets = { options.bucket }
+    else:
+        stale_buckets = set()
 
-    # Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
-    stale_accounts = recorded_accounts.difference(observed_accounts)
-    _log.info('Found %s stale accounts' % len(stale_accounts))
-    for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
-        pipeline = redis_client.pipeline(transaction=False)  # No transaction to reduce redis load
-        for account in chunk:
-            update_redis(pipeline, 'accounts', account, 0, 0)
-            log_report('accounts', account, 0, 0)
-        pipeline.execute()
-
-    stale_buckets = recorded_buckets.difference(observed_buckets)
     _log.info('Found %s stale buckets' % len(stale_buckets))
     for chunk in chunks(stale_buckets, ACCOUNT_UPDATE_CHUNKSIZE):
         pipeline = redis_client.pipeline(transaction=False)  # No transaction to reduce redis load
@@ -379,3 +374,26 @@ if __name__ == '__main__':
             update_redis(pipeline, 'buckets', bucket, 0, 0)
             log_report('buckets', bucket, 0, 0)
         pipeline.execute()
+
+    # Account metrics are not updated if a bucket is specified
+    if options.bucket is None:
+        # Update total account reports in chunks
+        for chunk in chunks(account_reports.items(), ACCOUNT_UPDATE_CHUNKSIZE):
+            pipeline = redis_client.pipeline(transaction=False)  # No transaction to reduce redis load
+            for userid, report in chunk:
+                update_redis(pipeline, 'accounts', userid, report['obj_count'], report['total_size'])
+                log_report('accounts', userid, report['obj_count'], report['total_size'])
+            pipeline.execute()
+
+        observed_accounts = set(account_reports.keys())
+        recorded_accounts = set(get_resources_from_redis(redis_client, 'accounts'))
+
+        # Stale accounts and buckets are ones that do not appear in the listing, but have recorded values
+        stale_accounts = recorded_accounts.difference(observed_accounts)
+        _log.info('Found %s stale accounts' % len(stale_accounts))
+        for chunk in chunks(stale_accounts, ACCOUNT_UPDATE_CHUNKSIZE):
+            pipeline = redis_client.pipeline(transaction=False)  # No transaction to reduce redis load
+            for account in chunk:
+                update_redis(pipeline, 'accounts', account, 0, 0)
+                log_report('accounts', account, 0, 0)
+            pipeline.execute()
@@ -65,10 +65,10 @@ const keys = {
 */
 function getSchemaPrefix(params, timestamp) {
     const {
-        bucket, accountId, userId, level, service,
+        bucket, accountId, userId, level, service, location,
    } = params;
     // `service` property must remain last because other objects also include it
-    const id = bucket || accountId || userId || service;
+    const id = bucket || accountId || userId || location || service;
     const prefix = timestamp ? `${service}:${level}:${timestamp}:${id}:`
         : `${service}:${level}:${id}:`;
     return prefix;
@@ -83,9 +83,13 @@ function getSchemaPrefix(params, timestamp) {
 */
 function generateKey(params, metric, timestamp) {
     const prefix = getSchemaPrefix(params, timestamp);
+    if (params.location) {
+        return `${prefix}locationStorage`;
+    }
     return keys[metric](prefix);
 }
 
 
 /**
 * Returns a list of the counters for a metric type
 * @param {object} params - object with metric type and id as a property
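For illustration (names assumed), the keys produced on each side of the new `location` branch:

```javascript
// Bucket-level metrics keep the usual `<service>:<level>:<id>:<metric>` shape:
generateKey({ level: 'buckets', service: 's3', bucket: 'demo' }, 'storageUtilized');
// -> 's3:buckets:demo:storageUtilized'

// Params carrying a location short-circuit to the single storage counter:
generateKey({ level: 'location', service: 's3', location: 'us-west-1' }, 'locationStorage');
// -> 's3:location:us-west-1:locationStorage'
```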
@@ -51,7 +51,10 @@ class RedisClient extends EventEmitter {
             Object.values(this._inFlightTimeouts)
                 .forEach(clearTimeout);
         }
-        await this._redis.quit();
+        if (this._redis !== null) {
+            await this._redis.quit();
+            this._redis = null;
+        }
     }, callback);
 }
 
@@ -103,29 +106,26 @@ class RedisClient extends EventEmitter {
         this.emit('error', error);
     }
 
     _createCommandTimeout() {
         let timer;
+        let onTimeout;
 
         const cancelTimeout = jsutil.once(() => {
             clearTimeout(timer);
+            this.off('timeout', onTimeout);
             this._inFlightTimeouts.delete(timer);
         });
 
         const timeout = new Promise((_, reject) => {
-            timer = setTimeout(
-                () => {
-                    this.emit('timeout');
-                    this._initClient();
-                },
-                COMMAND_TIMEOUT,
-            );
+            timer = setTimeout(this.emit.bind(this, 'timeout'), COMMAND_TIMEOUT);
 
             this._inFlightTimeouts.add(timer);
-            this.once('timeout', () => {
+            onTimeout = () => {
                 moduleLogger.warn('redis command timed out');
                 cancelTimeout();
+                this._initClient();
                 reject(errors.OperationTimedOut);
-            });
+            };
+            this.once('timeout', onTimeout);
         });
 
         return { timeout, cancelTimeout };
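The rewritten `_createCommandTimeout` names its one-shot listener so the cancel path can detach it; otherwise every cancelled command would leave a `'timeout'` listener behind on this long-lived emitter. A standalone sketch of the same pattern (illustrative, not the class itself):

```javascript
const { EventEmitter } = require('events');

function armTimeout(emitter, ms, onExpired) {
    let onTimeout;
    const timer = setTimeout(() => emitter.emit('timeout'), ms);
    const cancel = () => {
        clearTimeout(timer);
        emitter.off('timeout', onTimeout); // detach the listener if it never fired
    };
    onTimeout = () => onExpired();
    emitter.once('timeout', onTimeout);
    return cancel;
}
```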
@@ -26,8 +26,8 @@ async function listMetric(ctx, params) {
 
     // A separate request will be made to warp 10 per requested resource
     const results = await Promise.all(
-        resources.map(async resource => {
-            const labels = { [labelName]: resource };
+        resources.map(async ({ resource, id }) => {
+            const labels = { [labelName]: id };
             const options = {
                 params: {
                     start,
|
||||||
const config = require('../config');
|
const config = require('../config');
|
||||||
const { logger, buildRequestLogger } = require('../utils');
|
const { logger, buildRequestLogger } = require('../utils');
|
||||||
const errors = require('../errors');
|
const errors = require('../errors');
|
||||||
const { authenticateRequest, vault } = require('../vault');
|
const { translateAndAuthorize } = require('../vault');
|
||||||
|
|
||||||
const oasOptions = {
|
const oasOptions = {
|
||||||
controllers: path.join(__dirname, './API/'),
|
controllers: path.join(__dirname, './API/'),
|
||||||
|
@ -44,6 +44,17 @@ function loggerMiddleware(req, res, next) {
|
||||||
return next();
|
return next();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function responseLoggerMiddleware(req, res, next) {
|
||||||
|
const info = {
|
||||||
|
httpCode: res.statusCode,
|
||||||
|
httpMessage: res.statusMessage,
|
||||||
|
};
|
||||||
|
req.logger.end('finished handling request', info);
|
||||||
|
if (next !== undefined) {
|
||||||
|
next();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// next is purposely not called as all error responses are handled here
|
// next is purposely not called as all error responses are handled here
|
||||||
// eslint-disable-next-line no-unused-vars
|
// eslint-disable-next-line no-unused-vars
|
||||||
function errorMiddleware(err, req, res, next) {
|
function errorMiddleware(err, req, res, next) {
|
||||||
|
@ -70,15 +81,7 @@ function errorMiddleware(err, req, res, next) {
|
||||||
message,
|
message,
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
}
|
responseLoggerMiddleware(req, res);
|
||||||
|
|
||||||
function responseLoggerMiddleware(req, res, next) {
|
|
||||||
const info = {
|
|
||||||
httpCode: res.statusCode,
|
|
||||||
httpMessage: res.statusMessage,
|
|
||||||
};
|
|
||||||
req.logger.end('finished handling request', info);
|
|
||||||
return next();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// eslint-disable-next-line no-unused-vars
|
// eslint-disable-next-line no-unused-vars
|
||||||
|
@ -111,7 +114,7 @@ async function authV4Middleware(request, response, params) {
|
||||||
let authorizedResources;
|
let authorizedResources;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
[passed, authorizedResources] = await authenticateRequest(request, action, params.level, requestedResources);
|
[passed, authorizedResources] = await translateAndAuthorize(request, action, params.level, requestedResources);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
request.logger.error('error during authentication', { error });
|
request.logger.error('error during authentication', { error });
|
||||||
throw errors.InternalError;
|
throw errors.InternalError;
|
||||||
|
@ -122,17 +125,14 @@ async function authV4Middleware(request, response, params) {
|
||||||
throw errors.AccessDenied;
|
throw errors.AccessDenied;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (params.level === 'accounts') {
|
switch (request.ctx.operationId) {
|
||||||
request.logger.debug('converting account ids to canonical ids');
|
case 'listMetrics':
|
||||||
authorizedResources = await vault.getCanonicalIds(
|
|
||||||
authorizedResources,
|
|
||||||
request.logger.logger,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// authorizedResources is only defined on non-account credentials
|
|
||||||
if (request.ctx.operationId === 'listMetrics' && authorizedResources !== undefined) {
|
|
||||||
params.body[params.level] = authorizedResources;
|
params.body[params.level] = authorizedResources;
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
[params.resource] = authorizedResources;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -1,22 +1,28 @@
 const assert = require('assert');
+const async = require('async');
 const BaseTask = require('./BaseTask');
 const { UtapiMetric } = require('../models');
 const config = require('../config');
-const {
-    LoggerContext, shardFromTimestamp, convertTimestamp, InterpolatedClock,
-} = require('../utils');
 const { checkpointLagSecs } = require('../constants');
+const {
+    LoggerContext, shardFromTimestamp, convertTimestamp, InterpolatedClock, now,
+} = require('../utils');
 
 const logger = new LoggerContext({
     module: 'IngestShard',
 });
 
-const now = () => convertTimestamp(new Date().getTime());
 const checkpointLagMicroseconds = convertTimestamp(checkpointLagSecs);
 
 class IngestShardTask extends BaseTask {
     constructor(...options) {
-        super(...options);
+        super({
+            warp10: {
+                requestTimeout: 30000,
+                connectTimeout: 30000,
+            },
+            ...options,
+        });
         this._defaultSchedule = config.ingestionSchedule;
         this._defaultLag = config.ingestionLagSeconds;
     }
@@ -35,7 +41,7 @@ class IngestShardTask extends BaseTask {
             return;
         }
 
-        await Promise.all(toIngest.map(
+        await async.eachLimit(toIngest, 10,
             async shard => {
                 if (await this._cache.shardExists(shard)) {
                     const metrics = await this._cache.getMetricsForShard(shard);
@@ -68,8 +74,7 @@ class IngestShardTask extends BaseTask {
                 } else {
                     logger.warn('shard does not exist', { shard });
                 }
-            },
-        ));
+            });
     }
 }
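Switching from `Promise.all` over `toIngest.map(...)` to `async.eachLimit(toIngest, 10, ...)` caps the number of shards ingested concurrently instead of firing them all at once. A dependency-free sketch of the same bounded-concurrency idea (the limit of 10 matches the hunk; the helper name is illustrative):

```javascript
async function eachLimit(items, limit, worker) {
    const queue = [...items];
    // Start `limit` lanes; each lane pulls the next item when it finishes one.
    const lanes = Array.from({ length: limit }, async () => {
        while (queue.length > 0) {
            await worker(queue.shift());
        }
    });
    await Promise.all(lanes);
}
```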
@@ -141,7 +141,20 @@ class MigrateTask extends BaseTask {
             timestamp,
             timestamp,
         ));
-        const numberOfObjects = MigrateTask._parseMetricValue(numberOfObjectsResp[0]);
+
+        let numberOfObjects;
+        if (numberOfObjectsResp.length === 1) {
+            numberOfObjects = MigrateTask._parseMetricValue(numberOfObjectsResp[0]);
+        } else {
+            numberOfObjects = numberOfObjectsOffset;
+            logger.warn('Could not retrieve value for numberOfObjects, falling back to last seen value',
+                {
+                    metricLevel: level,
+                    resource,
+                    metricTimestamp: timestamp,
+                    lastSeen: numberOfObjectsOffset,
+                });
+        }
+
         let incomingBytes = 0;
         let outgoingBytes = 0;
@@ -183,7 +196,7 @@ class MigrateTask extends BaseTask {
             stop: -1,
         });
 
-        if (resp.result && (resp.result.length === 0 || resp.result[0] === '')) {
+        if (resp.result && (resp.result.length === 0 || resp.result[0] === '' || resp.result[0] === '[]')) {
             return null;
         }
 
@@ -20,7 +20,7 @@ class InterpolatedClock {
     }
 
     getTs() {
-        const ts = new Date().now();
+        const ts = Date.now();
         if (ts === this._now) {
             // If this is the same millisecond as the last call
             this._step += 1;
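The one-line change fixes a runtime error: `now` is a static method on the `Date` constructor, not an instance method.

```javascript
new Date().now(); // TypeError: .now is not a function on Date instances
Date.now();       // current UNIX epoch in milliseconds
```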
@ -2,6 +2,7 @@ const assert = require('assert');
|
||||||
const { auth, policies } = require('arsenal');
|
const { auth, policies } = require('arsenal');
|
||||||
const vaultclient = require('vaultclient');
|
const vaultclient = require('vaultclient');
|
||||||
const config = require('./config');
|
const config = require('./config');
|
||||||
|
const errors = require('./errors');
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@class Vault
|
@class Vault
|
||||||
|
@@ -83,7 +84,14 @@ class Vault {
                     reject(err);
                     return;
                 }
-                resolve(res);
+                if (!res.message || !res.message.body) {
+                    reject(errors.InternalError);
+                    return;
+                }
+                resolve(res.message.body.map(acc => ({
+                    resource: acc.accountId,
+                    id: acc.canonicalId,
+                })));
             }));
     }
 }

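Instead of resolving with the raw Vault response, `getCanonicalIds` now rejects malformed responses and normalizes each account entry into a `{ resource, id }` pair. The shape of the transformation in isolation (the input values here are made up):

    const body = [
        { accountId: '379763717565', canonicalId: '1fd0c0b9f1d5...' },
    ];

    const mapped = body.map(acc => ({ resource: acc.accountId, id: acc.canonicalId }));
    // => [{ resource: '379763717565', id: '1fd0c0b9f1d5...' }]
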
@@ -91,6 +99,14 @@ class Vault {
 const vault = new Vault(config);
 auth.setHandler(vault);
 
+async function translateResourceIds(level, resources, log) {
+    if (level === 'accounts') {
+        return vault.getCanonicalIds(resources, log);
+    }
+
+    return resources.map(resource => ({ resource, id: resource }));
+}
+
 async function authenticateRequest(request, action, level, resources) {
     const policyContext = new policies.RequestContext(
         request.headers,

@@ -114,10 +130,11 @@ async function authenticateRequest(request, action, level, resources) {
             return;
         }
         // Will only have res if request is from a user rather than an account
+        let authorizedResources = resources;
         if (res) {
             try {
-                const authorizedResources = (res || [])
-                    .reduce((authed, result) => {
+                authorizedResources = res.reduce(
+                    (authed, result) => {
                         if (result.isAllowed) {
                             // result.arn should be of format:
                             // arn:scality:utapi:::resourcetype/resource

@@ -128,24 +145,32 @@ async function authenticateRequest(request, action, level, resources) {
                             request.logger.trace('access granted for resource', { resource });
                         }
                         return authed;
-                    }, []);
-                resolve([
-                    authorizedResources.length !== 0,
-                    authorizedResources,
-                ]);
+                    }, [],
+                );
             } catch (err) {
                 reject(err);
             }
         } else {
             request.logger.trace('granted access to all resources');
-            resolve([true]);
         }
+
+        resolve([
+            authorizedResources.length !== 0,
+            authorizedResources,
+        ]);
         }, 's3', [policyContext]);
     });
 }
 
+async function translateAndAuthorize(request, action, level, resources) {
+    const [authed, authorizedResources] = await authenticateRequest(request, action, level, resources);
+    const translated = await translateResourceIds(level, authorizedResources, request.logger.logger);
+    return [authed, translated];
+}
+
 module.exports = {
     authenticateRequest,
+    translateAndAuthorize,
     Vault,
     vault,
 };

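Two behavioral fixes ride along with the new `translateAndAuthorize` helper: `resolve` now runs on every path (previously the account branch resolved with a bare `[true]` and no resource list), and `authorizedResources` is hoisted with the full requested list as its default, so account requests authorize everything while user requests narrow the list to what their policies allow. The resulting control flow, distilled (standalone sketch; the ARN-to-resource parsing is hypothetical here, inferred from the `arn:scality:utapi:::resourcetype/resource` format noted in the comment above):

    function buildAuthResult(res, resources) {
        // Account requests (no per-resource results) keep the full list;
        // user requests narrow it to the policy-approved subset.
        let authorizedResources = resources;
        if (res) {
            authorizedResources = res
                .filter(result => result.isAllowed)
                .map(result => result.arn.split('/')[1]); // hypothetical parse
        }
        return [authorizedResources.length !== 0, authorizedResources];
    }
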
@@ -3,7 +3,7 @@
     "engines": {
         "node": ">=10.19.0"
     },
-    "version": "7.8.0",
+    "version": "8.1.0",
     "description": "API for tracking resource utilization and reporting metrics",
     "main": "index.js",
     "repository": {

@@ -34,7 +34,7 @@
         "node-schedule": "^1.3.2",
         "oas-tools": "^2.1.8",
         "uuid": "^3.3.2",
-        "vaultclient": "scality/vaultclient#21d03b1",
+        "vaultclient": "scality/vaultclient#ff9e92f",
         "werelogs": "scality/werelogs#0a4c576"
     },
     "devDependencies": {

@@ -266,6 +266,10 @@ class Router {
      */
     _processSecurityChecks(utapiRequest, route, cb) {
         const log = utapiRequest.getLog();
+        if (process.env.UTAPI_AUTH === 'false') {
+            // Zenko route request does not need to go through Vault
+            return this._startRequest(utapiRequest, route, cb);
+        }
         return this._authSquared(utapiRequest, err => {
             if (err) {
                 log.trace('error from vault', { errors: err });

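Note the comparison against the string `'false'` rather than a boolean: environment variables are always strings in Node.js, so `=== false` would never match. A quick illustration:

    process.env.UTAPI_AUTH = 'false';

    console.log(process.env.UTAPI_AUTH === 'false'); // true
    console.log(process.env.UTAPI_AUTH === false);   // false -- env values are strings
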
@@ -21,6 +21,9 @@ const config = {
     localCache: redisLocal,
     component: 's3',
 };
+const location = 'foo-backend';
+const incrby = 100;
+const decrby = -30;
 
 function isSortedSetKey(key) {
     return key.endsWith('storageUtilized') || key.endsWith('numberOfObjects');

@@ -76,6 +79,29 @@ function setMockData(data, timestamp, cb) {
     return cb();
 }
 
+function getLocationObject(bytesValue) {
+    const obj = {};
+    obj[`s3:location:${location}:locationStorage`] = `${bytesValue}`;
+    return obj;
+}
+
+function testLocationMetric(c, params, expected, cb) {
+    const { location, updateSize } = params;
+    if (updateSize) {
+        c.pushLocationMetric(location, updateSize, REQUID, err => {
+            assert.equal(err, null);
+            assert.deepStrictEqual(memoryBackend.data, expected);
+            return cb();
+        });
+    } else {
+        c.getLocationMetric(location, REQUID, (err, bytesStored) => {
+            assert.equal(err, null);
+            assert.strictEqual(bytesStored, expected);
+            return cb();
+        });
+    }
+}
+
 describe('UtapiClient:: enable/disable client', () => {
     it('should disable client when no redis config is provided', () => {
         const c = new UtapiClient();

@@ -747,3 +773,27 @@ tests.forEach(test => {
         });
     });
 });
+
+describe('UtapiClient:: location quota metrics', () => {
+    beforeEach(function beFn() {
+        this.currentTest.c = new UtapiClient(config);
+        this.currentTest.c.setDataStore(ds);
+    });
+
+    afterEach(() => memoryBackend.flushDb());
+
+    it('should increment location metric', function itFn(done) {
+        const expected = getLocationObject(incrby);
+        testLocationMetric(this.test.c, { location, updateSize: incrby },
+            expected, done);
+    });
+    it('should decrement location metric', function itFn(done) {
+        const expected = getLocationObject(decrby);
+        testLocationMetric(this.test.c, { location, updateSize: decrby },
+            expected, done);
+    });
+    it('should list location metric', function itFn(done) {
+        const expected = 0;
+        testLocationMetric(this.test.c, { location }, expected, done);
+    });
+});

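The `function beFn()` / `function itFn()` spellings are load-bearing: Mocha exposes its test context through `this`, which arrow functions would not receive. The hook stashes a fresh client on `this.currentTest`, and each test reads the same object back as `this.test`. The pattern in isolation (standalone sketch):

    const assert = require('assert');

    describe('mocha context example', () => {
        beforeEach(function () {
            // this.currentTest is the test about to run
            this.currentTest.value = 42;
        });

        it('reads state set by the hook', function () {
            // the same object is reachable inside the test as this.test
            assert.strictEqual(this.test.value, 42);
        });
    });
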
@@ -38,14 +38,16 @@ describe('Test middleware', () => {
     });
 
     describe('test errorMiddleware', () => {
+        let req;
         let resp;
 
         beforeEach(() => {
+            req = templateRequest();
             resp = new ExpressResponseStub();
         });
 
         it('should set a default code and message', () => {
-            middleware.errorMiddleware({}, null, resp);
+            middleware.errorMiddleware({}, req, resp);
             assert.strictEqual(resp._status, 500);
             assert.deepStrictEqual(resp._body, {
                 error: {

@@ -56,7 +58,7 @@ describe('Test middleware', () => {
         });
 
         it('should set the correct info from an error', () => {
-            middleware.errorMiddleware({ code: 123, message: 'Hello World!', utapiError: true }, null, resp);
+            middleware.errorMiddleware({ code: 123, message: 'Hello World!', utapiError: true }, req, resp);
             assert.deepStrictEqual(resp._body, {
                 error: {
                     code: '123',

@@ -66,7 +68,7 @@ describe('Test middleware', () => {
         });
 
         it("should replace an error's message if it's internal and not in development mode", () => {
-            middleware.errorMiddleware({ code: 123, message: 'Hello World!' }, null, resp);
+            middleware.errorMiddleware({ code: 123, message: 'Hello World!' }, req, resp);
             assert.deepStrictEqual(resp._body, {
                 error: {
                     code: '123',

@@ -74,5 +76,16 @@ describe('Test middleware', () => {
             },
         });
     });
+
+    it('should call responseLoggerMiddleware after response', () => {
+        const spy = sinon.spy();
+        req.logger.end = spy;
+        resp.statusMessage = 'Hello World!';
+        middleware.errorMiddleware({ code: 123 }, req, resp);
+        assert(spy.calledOnceWith('finished handling request', {
+            httpCode: 123,
+            httpMessage: 'Hello World!',
+        }));
+    });
     });
 });

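The new test replaces the request logger's `end` method with a `sinon.spy()` so it can assert on the final log call without a real logger. How that assertion works in isolation (standalone sketch):

    const sinon = require('sinon');

    const spy = sinon.spy();
    spy('finished handling request', { httpCode: 123, httpMessage: 'Hello World!' });

    // calledOnceWith checks the call count and deep-compares the arguments
    spy.calledOnceWith('finished handling request', {
        httpCode: 123,
        httpMessage: 'Hello World!',
    }); // true
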
@@ -0,0 +1,38 @@
+const assert = require('assert');
+const sinon = require('sinon');
+
+const { InterpolatedClock } = require('../../../../libV2/utils');
+
+
+describe('Test InterpolatedClock', () => {
+    let fakeClock;
+    let iClock;
+
+    beforeEach(() => {
+        fakeClock = sinon.useFakeTimers();
+        iClock = new InterpolatedClock();
+    });
+
+    afterEach(() => {
+        fakeClock.restore();
+    });
+
+    it('should get the current timestamp', () => {
+        const ts = iClock.getTs();
+        assert(Number.isInteger(ts));
+        assert.strictEqual(ts, 0);
+    });
+
+    it('should interpolate microseconds if called too fast', () => {
+        const initial = iClock.getTs();
+        const second = iClock.getTs();
+        assert.strictEqual(second - initial, 1);
+    });
+
+    it('should not interpolate if last call >= 1ms ago', () => {
+        const initial = iClock.getTs();
+        fakeClock.tick(1);
+        const second = iClock.getTs();
+        assert.strictEqual(second - initial, 1000);
+    });
+});

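These tests are deterministic because `sinon.useFakeTimers()` swaps the global clock (including `Date.now`) for one frozen at epoch 0 that only moves on explicit `tick` calls. The mechanism in isolation:

    const sinon = require('sinon');

    const clock = sinon.useFakeTimers();
    Date.now(); // 0 -- frozen at the epoch

    clock.tick(5); // advance the fake clock by 5ms
    Date.now();    // 5

    clock.restore(); // put the real timers back

Note the unit the assertions imply: a 1 ms tick moves `getTs()` by 1000, so the clock evidently counts microseconds and interpolates by 1 µs per same-millisecond call.
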
@@ -8,7 +8,7 @@ function decode(type, data, includeDefaults = true) {
     if (!Type) {
         throw new Error(`Unknown type ${type}`);
     }
-    const msg = Type.decode(Buffer.from(data, 'hex'));
+    const msg = Type.decode(Buffer.from(data));
     return Type.toObject(msg, {
         longs: Number,
         defaults: includeDefaults,

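`decode` no longer assumes its input is a hex string; it expects raw bytes, which lines up with the WarpScript changes below that move hex conversion out of the macros. A caller still holding hex data would convert first (sketch; `hexString` is a made-up variable):

    // Before: decode('Event', hexString) -- hex decoding happened inside
    // After: the caller converts, then passes raw bytes
    const raw = Buffer.from(hexString, 'hex');
    const obj = decode('Event', raw);
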
@@ -7,6 +7,10 @@ class ExpressResponseStub {
         this._redirect = null;
     }
 
+    get statusCode() {
+        return this._status;
+    }
+
     status(code) {
         this._status = code;
         return this;

@@ -17,18 +17,22 @@ message Event {
 '>
 PROTOC 'proto' STORE
 <%
-  HEX-> !$proto 'Event' PB->
+  'iso8859-1' ->BYTES
+  !$proto 'Event' PB->
 %>
 'macro' STORE
 
-'0a0b6d794f7065726174696f6e32096d796163636f756e74480150ffffffffffffffffff01' @macro
+'0a0b6d794f7065726174696f6e32096d796163636f756e74480150ffffffffffffffffff01'
+HEX-> 'iso8859-1' BYTES-> @macro
 
 DUP 'op' GET 'myOperation' == ASSERT
 DUP 'acc' GET 'myaccount' == ASSERT
 DUP 'objD' GET 1 == ASSERT
 'sizeD' GET -1 == ASSERT
 
-'0a0568656c6c6f120568656c6c6f1a0568656c6c6f220568656c6c6f2a0568656c6c6f320568656c6c6f3a0568656c6c6f420568656c6c6f4801500158016000' @macro
+'0a0568656c6c6f120568656c6c6f1a0568656c6c6f220568656c6c6f2a0568656c6c6f320568656c6c6f3a0568656c6c6f420568656c6c6f4801500158016000'
+HEX-> 'iso8859-1' BYTES-> @macro
 
 DUP "op" GET "hello" == ASSERT
 DUP "id" GET "hello" == ASSERT
 DUP "bck" GET "hello" == ASSERT

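These WarpScript test edits mirror the JS `decode` change above: the macros now work on raw byte strings, so hex handling moves out to the fixtures (`HEX->` turns a hex literal into bytes, the `'iso8859-1'` `BYTES->`/`->BYTES` pair bridges bytes and WarpScript strings, and the encode tests below apply `->HEX` to the macro output just before asserting). A rough JS analogue of the same boundary shift (illustrative only):

    // Before: the macro hex-decoded its own input.
    // After: the caller decodes hex up front and passes raw bytes...
    const bytes = Buffer.from('0a0b6d794f7065726174696f6e', 'hex'); // HEX->
    // ...and hex-encodes output only where an assertion needs it.
    const hex = bytes.toString('hex'); // ->HEX
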
@@ -33,7 +33,8 @@ PROTOC 'proto' STORE
 !$info INFO
 SAVE 'context' STORE
 <%
-  HEX-> !$proto 'Record' PB->
+  'iso8859-1' ->BYTES
+  !$proto 'Record' PB->
 %>
 <% // catch any exception
   RETHROW
@@ -45,7 +46,7 @@ PROTOC 'proto' STORE
 'macro' STORE
 
 // Unit tests
-'081e101e181e20002a0d0a097075744f626a656374101e'
+'081e101e181e20002a0d0a097075744f626a656374101e' HEX-> 'iso8859-1' BYTES->
 @macro
 { 'outB' 0 'ops' { 'putObject' 30 } 'sizeD' 30 'inB' 30 'objD' 30 } == ASSERT

@@ -17,7 +17,7 @@ message Event {
 '>
 PROTOC 'proto' STORE
 <%
-  !$proto 'Event' ->PB ->HEX
+  !$proto 'Event' ->PB
 %>
 'macro' STORE
 

@@ -28,6 +28,7 @@ PROTOC 'proto' STORE
     'sizeD' -1
 } @macro
+->HEX
 '0a0b6d794f7065726174696f6e32096d796163636f756e74480150ffffffffffffffffff01' == ASSERT
 
 {

@@ -45,6 +46,7 @@ PROTOC 'proto' STORE
     "outB" 0
 } @macro
+->HEX
 '0a0568656c6c6f120568656c6c6f1a0568656c6c6f220568656c6c6f2a0568656c6c6f320568656c6c6f3a0568656c6c6f420568656c6c6f4801500158016000' == ASSERT
 
 $macro

@@ -34,7 +34,7 @@ PROTOC 'proto' STORE
 !$info INFO
 SAVE 'context' STORE
 <%
-  !$proto 'Record' ->PB ->HEX
+  !$proto 'Record' ->PB
 %>
 <% // catch any exception
   RETHROW

@@ -49,6 +49,7 @@ PROTOC 'proto' STORE
 { 'outB' 0 'ops' { 'putObject' 30 } 'sizeD' 30 'inB' 30 'objD' 30 }
 @macro
+->HEX
 '081e101e181e20002a0d0a097075744f626a656374101e' == ASSERT
 
 $macro