Compare commits

...

13 Commits

Author SHA1 Message Date
alexandre merle c371293dd2 wip 2021-02-08 07:12:11 +01:00
alexandre merle 42f3eedffc to-remove 2021-02-07 05:39:57 +01:00
alexandre merle 721ab3c882 tests: skip retention / object lock / legal hold for ceph 2021-02-07 05:39:11 +01:00
alexandre merle 151c8137c5 testfix: fix versioning state change if replication 2021-02-06 07:11:41 +01:00
alexandre merle b174d6a971 bugfix: init metadata wrapper for tests 2021-02-06 07:11:41 +01:00
alexandre merle f86d0b5f8b bugfix: use MPU testing for multiple backend 2021-02-06 06:53:44 +01:00
alexandre merle f50ddcd265 fix sanity check on complete upload 2021-02-06 06:53:44 +01:00
alexandre merle 21f0a1ee6e bugfix: considering range start 0 as valid 2021-02-06 06:53:44 +01:00
alexandre merle fc3e7787ed tests: activate aws debug logging 2021-02-06 06:53:44 +01:00
alexandre merle 33ce1a18cc bugfix: ZENKO-2352: fix aws tests 2021-02-06 06:53:44 +01:00
alexandre merle c85c0c4e3f improv: use agentkeepalive for global agent 2021-02-06 06:53:43 +01:00
alexandre merle 07f7825aa2 fix aws sdk upgrade 2021-02-06 06:48:09 +01:00
alexandre merle c58f42d9eb Remove .only() 2021-02-06 06:48:09 +01:00
34 changed files with 177 additions and 83 deletions

View File

@ -66,6 +66,7 @@ models:
- env: &multiple-backend-vars
S3BACKEND: "mem"
S3DATA: "multiple"
MPU_TESTING: "yes"
- env: &file-mem-mpu
S3BACKEND: "file"
S3VAULT: "mem"

View File

@ -17,8 +17,13 @@ function locationKeysSanityCheck(prev, curr) {
return curr.every(v => v.key !== prev);
}
const keysMap = {};
prev.forEach(v => { keysMap[v.key] = true; });
return curr.every(v => !keysMap[v.key]);
prev.forEach(v => {
if (!keysMap[v.dataStoreType]) {
keysMap[v.dataStoreType] = {};
}
keysMap[v.dataStoreType][v.key] = true;
});
return curr.every(v => !(keysMap[v.dataStoreType] && keysMap[v.dataStoreType][v.key]));
}
module.exports = locationKeysSanityCheck;

View File

@ -130,7 +130,7 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
range[1] - range[0] + 1;
responseMetaHeaders['Content-Range'] =
`bytes ${range[0]}-${range[1]}/${objLength}`;
streamingParams.rangeStart = range[0] ?
streamingParams.rangeStart = (range[0] || typeof range[0] === 'number') ?
range[0].toString() : undefined;
streamingParams.rangeEnd = range[1] ?
range[1].toString() : undefined;

View File

@ -24,6 +24,7 @@ const {
isManagementAgentUsed,
} = require('./management/agentClient');
const HttpAgent = require('agentkeepalive');
const routes = arsenal.s3routes.routes;
const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
const websiteEndpoints = _config.websiteEndpoints;
@ -65,7 +66,10 @@ class S3Server {
constructor(worker) {
this.worker = worker;
this.cluster = true;
http.globalAgent.keepAlive = true;
http.globalAgent = new HttpAgent({
keepAlive: true,
freeSocketTimeout: arsenal.constants.httpClientFreeSocketTimeout,
});
process.on('SIGINT', this.cleanUp.bind(this));
process.on('SIGHUP', this.cleanUp.bind(this));

View File

@ -20,7 +20,8 @@
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"@hapi/joi": "^17.1.0",
"arsenal": "github:scality/Arsenal#372df63",
"agentkeepalive": "^4.1.3",
"arsenal": "github:scality/Arsenal#bugfix/ZENKO-2153-fix-tests",
"async": "~2.5.0",
"aws-sdk": "2.831.0",
"azure-storage": "^2.1.0",

View File

@ -1,4 +1,6 @@
const bluebird = require('bluebird');
const AWS = require('aws-sdk');
AWS.config.logger = console;
const { S3 } = require('aws-sdk');
const projectFixture = require('../fixtures/project');
const getConfig = require('../../test/support/config');

View File

@ -302,7 +302,7 @@ describe('PUT Bucket - AWS.S3.createBucket', () => {
assert.ifError(err);
assert.strictEqual(res.Location, `/${bucketName}`);
return next();
}).promise(),
}),
next => bucketUtil.s3.getBucketLocation(
{
Bucket: bucketName,

View File

@ -30,7 +30,7 @@ describeSkipIfNotMultiple('Multiple backend delete', () => {
process.stdout.write('Creating bucket\n');
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
return s3.createBucketPromsie({ Bucket: bucket })
return s3.createBucket({ Bucket: bucket }).promise()
.catch(err => {
process.stdout.write(`Error creating bucket: ${err}\n`);
throw err;
@ -154,5 +154,42 @@ describeSkipIfNotMultiple('Multiple backend delete', () => {
});
});
});
it('should delete object while mpu in progress', () => {
let uploadId = null;
return s3.putObject({
Bucket: bucket,
Key: fileObject,
Body: body,
Metadata: {
'scal-location-constraint': fileLocation,
},
}).promise().then(() => { // eslint-disable-line arrow-body-style
return s3.createMultipartUpload({
Bucket: bucket,
Key: fileObject,
}).promise();
}).then(res => {
uploadId = res.UploadId;
return s3.deleteObject({
Bucket: bucket,
Key: fileObject,
}).promise();
}).then(() => { // eslint-disable-line arrow-body-style
return s3.abortMultipartUpload({
Bucket: bucket,
Key: fileObject,
UploadId: uploadId,
}).promise();
}).then(() => { // eslint-disable-line arrow-body-style
return s3.getObject({
Bucket: bucket,
Key: fileObject,
}).promise().catch(err => {
if (err.code !== 'NoSuchKey') {
throw err;
}
});
});
});
});
});

View File

@ -55,7 +55,7 @@ function _assertDeleteResult(result, resultType, requestVersionId) {
`did not expect version id in result, got "${result.VersionId}"`);
}
if (expectDeleteMarker) {
assert.strictEqual(result.DeleteMarker, 'true');
assert.strictEqual(result.DeleteMarker, true);
} else {
assert.strictEqual(result.DeleteMarker, undefined);
}

View File

@ -119,7 +119,7 @@ describe('Multiple backend get object', function testSuite() {
}, (err, res) => {
assert.equal(err, null, 'Expected success but got ' +
`error ${err}`);
assert.strictEqual(res.ContentLength, '10');
assert.strictEqual(res.ContentLength, 10);
assert.strictEqual(res.Body.toString(), 'helloworld');
assert.deepStrictEqual(res.Metadata,
{ 'scal-location-constraint': awsLocation });
@ -171,7 +171,7 @@ describe('Multiple backend get object', function testSuite() {
}, (err, res) => {
assert.equal(err, null, 'Expected success but got ' +
`error ${err}`);
assert.strictEqual(res.ContentLength, '10');
assert.strictEqual(res.ContentLength, 10);
assert.strictEqual(res.Body.toString(), 'helloworld');
assert.deepStrictEqual(res.Metadata,
{ 'scal-location-constraint': awsLocationMismatch });
@ -293,7 +293,7 @@ describe('Multiple backend get object', function testSuite() {
(err, res) => {
assert.equal(err, null, 'Expected success but got ' +
`error ${err}`);
assert.strictEqual(res.ContentLength, '10');
assert.strictEqual(res.ContentLength, 10);
assert.strictEqual(res.ContentRange,
`bytes 0-9/${bigBodyLen}`);
assert.strictEqual(res.ETag, `"${bigMD5}"`);

View File

@ -304,7 +304,7 @@ function testSuite() {
], done);
});
it('should return a ServiceUnavailable if trying to get an object ' +
it('should return a LocationNotFound if trying to get an object ' +
'that was deleted in AWS but exists in s3 metadata',
done => {
const key = `somekey-${genUniqID()}`;
@ -321,14 +321,14 @@ function testSuite() {
err => next(err, s3VerId)),
(s3VerId, next) => s3.getObject({ Bucket: bucket, Key: key },
err => {
assert.strictEqual(err.code, 'ServiceUnavailable');
assert.strictEqual(err.statusCode, 503);
assert.strictEqual(err.code, 'LocationNotFound');
assert.strictEqual(err.statusCode, 424);
next();
}),
], done);
});
it('should return a ServiceUnavailable if trying to get a version ' +
it('should return a LocationNotFound if trying to get a version ' +
'that was deleted in AWS but exists in s3 metadata',
done => {
const key = `somekey-${genUniqID()}`;
@ -345,8 +345,8 @@ function testSuite() {
err => next(err, s3VerId)),
(s3VerId, next) => s3.getObject({ Bucket: bucket, Key: key,
VersionId: s3VerId }, err => {
assert.strictEqual(err.code, 'ServiceUnavailable');
assert.strictEqual(err.statusCode, 503);
assert.strictEqual(err.code, 'LocationNotFound');
assert.strictEqual(err.statusCode, 424);
next();
}),
], done);

View File

@ -154,7 +154,7 @@ function testSuite() {
Bucket: azureContainerName,
Key: azureObject,
}, err => {
assert.strictEqual(err.code, 'ServiceUnavailable');
assert.strictEqual(err.code, 'LocationNotFound');
done();
});
});

View File

@ -113,7 +113,7 @@ describe('Multiple backend get object', function testSuite() {
assert.equal(err, null,
`Expected success but got error ${err}`);
if (range) {
assert.strictEqual(res.ContentLength, `${size}`);
assert.strictEqual(res.ContentLength, size);
assert.strictEqual(res.ContentRange, contentRange);
}
assert.strictEqual(res.ETag, `"${MD5}"`);

View File

@ -61,11 +61,14 @@ function mpuSetup(key, location, cb) {
Metadata: { 'scal-location-constraint': location },
};
s3.createMultipartUpload(params, (err, res) => {
if (err) {
return next(err);
}
const uploadId = res.UploadId;
assert(uploadId);
assert.strictEqual(res.Bucket, azureContainerName);
assert.strictEqual(res.Key, key);
next(err, uploadId);
return next(null, uploadId);
});
},
(uploadId, next) => {
@ -77,8 +80,11 @@ function mpuSetup(key, location, cb) {
Body: smallBody,
};
s3.uploadPart(partParams, (err, res) => {
if (err) {
return next(err);
}
partArray.push({ ETag: res.ETag, PartNumber: 1 });
next(err, uploadId);
return next(null, uploadId);
});
},
(uploadId, next) => {
@ -90,8 +96,11 @@ function mpuSetup(key, location, cb) {
Body: bigBody,
};
s3.uploadPart(partParams, (err, res) => {
if (err) {
return next(err);
}
partArray.push({ ETag: res.ETag, PartNumber: 2 });
next(err, uploadId);
return next(null, uploadId);
});
},
], (err, uploadId) => {

View File

@ -11,7 +11,7 @@ const bucket = `completempugcp${genUniqID()}`;
const smallBody = Buffer.from('I am a body', 'utf8');
const bigBody = Buffer.alloc(10485760);
const s3MD5 = 'bfb875032e51cbe2a60c5b6b99a2153f-2';
const expectedContentLength = '10485771';
const expectedContentLength = 10485771;
const gcpTimeout = 5000;
let s3;

View File

@ -31,6 +31,8 @@ const bigMD5 = '5f363e0e58a95f06cbe9bbc662c5dfb6';
const emptyMD5 = 'd41d8cd98f00b204e9800998ecf8427e';
const locMetaHeader = constants.objectLocationConstraintHeader.substring(11);
const Promise = require('bluebird');
const azureTimeout = 40000;
let bucketUtil;

View File

@ -19,6 +19,7 @@ const body = Buffer.from('I am a body', 'utf8');
const correctMD5 = 'be747eb4b75517bf6b3cf7c5fbb62f3a';
const emptyMD5 = 'd41d8cd98f00b204e9800998ecf8427e';
const locMetaHeader = constants.objectLocationConstraintHeader.substring(11);
const Promise = require('bluebird');
let bucketUtil;
let s3;

View File

@ -70,7 +70,7 @@ function awsGet(key, tagCheck, isEmpty, isMpu, callback) {
assert.strictEqual(res.ETag, `"${correctMD5}"`);
}
if (tagCheck) {
assert.strictEqual(res.TagCount, '2');
assert.strictEqual(res.TagCount, 2);
} else {
assert.strictEqual(res.TagCount, undefined);
}
@ -114,7 +114,7 @@ function getObject(key, backend, tagCheck, isEmpty, isMpu, callback) {
assert.strictEqual(res.Metadata['scal-location-constraint'],
backend);
if (tagCheck) {
assert.strictEqual(res.TagCount, '2');
assert.strictEqual(res.TagCount, 2);
} else {
assert.strictEqual(res.TagCount, undefined);
}

View File

@ -74,7 +74,7 @@ function awsGetCheck(objectKey, s3MD5, awsMD5, location, cb) {
});
}
describe.only('MultipleBackend put object', function testSuite() {
describe('MultipleBackend put object', function testSuite() {
this.timeout(250000);
withV4(sigCfg => {
beforeEach(() => {

View File

@ -2,6 +2,7 @@ const assert = require('assert');
const crypto = require('crypto');
const { errors, storage } = require('arsenal');
const AWS = require('aws-sdk');
AWS.config.logger = console;
const uuid = require('uuid/v4');
const async = require('async');
@ -279,7 +280,7 @@ utils.getAndAssertResult = (s3, params, cb) => {
if (expectedTagCount && expectedTagCount === '0') {
assert.strictEqual(data.TagCount, undefined);
} else if (expectedTagCount) {
assert.strictEqual(data.TagCount, expectedTagCount);
assert.strictEqual(data.TagCount, parseInt(expectedTagCount, 10));
}
return cb();
});

View File

@ -5,7 +5,7 @@ const BucketUtility = require('../../../lib/utility/bucket-util');
const { makeTagQuery, updateRequestContexts } =
require('../../../../../../lib/api/apiUtils/authorization/tagConditionKeys');
const { DummyRequestLogger, TaggingConfigTester, createRequestContext } = require('../../../../../unit/helpers');
const { initMetadata } = require('../../utils/init');
const taggingUtil = new TaggingConfigTester();
const log = new DummyRequestLogger();
const bucket = 'bucket2testconditionkeys';
@ -14,11 +14,16 @@ const objPutTaggingReq = taggingUtil
.createObjectTaggingRequest('PUT', bucket, object);
const requestContexts = [createRequestContext('objectPutTagging', objPutTaggingReq)];
describe('Tag condition keys updateRequestContext', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('Tag condition keys updateRequestContext', () => {
withV4(sigCfg => {
let bucketUtil;
let s3;
before(done => initMetadata(done));
beforeEach(() => {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;

View File

@ -9,6 +9,9 @@ const bucketName = 'testdeletempu';
const objectName = 'key';
const objectNameTwo = 'secondkey';
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describe('DELETE object', () => {
withV4(sigCfg => {
let uploadId;
@ -95,7 +98,7 @@ describe('DELETE object', () => {
});
});
describe('with object lock', () => {
describeSkipIfCeph('with object lock', () => {
let versionIdOne;
let versionIdTwo;
const retainDate = moment().add(10, 'days').toISOString();

View File

@ -1050,7 +1050,10 @@ describe('GET object', () => {
});
});
describe('GET object with object lock', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('GET object with object lock', () => {
withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg);
const s3 = bucketUtil.s3;

View File

@ -13,7 +13,10 @@ const unlockedBucket = 'mock-bucket-no-lock';
const key = 'mock-object-legalhold';
const keyNoHold = 'mock-object-no-legalhold';
describe('GET object legal hold', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('GET object legal hold', () => {
withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg);
const s3 = bucketUtil.s3;

View File

@ -33,7 +33,10 @@ const expectedConfig = {
RetainUntilDate: manipulateDate(),
};
describe('GET object retention', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('GET object retention', () => {
withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg);
const s3 = bucketUtil.s3;

View File

@ -278,7 +278,10 @@ describe('Multi-Object Delete Access', function access() {
});
});
describe('Multi-Object Delete with Object Lock', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('Multi-Object Delete with Object Lock', () => {
let bucketUtil;
let s3;
const versionIds = [];

View File

@ -1231,7 +1231,10 @@ describe('Object Copy', () => {
});
});
describe('Object Copy with object lock enabled on both destination ' +
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('Object Copy with object lock enabled on both destination ' +
'bucket and source bucket', () => {
withV4(sigCfg => {
let bucketUtil;

View File

@ -535,7 +535,10 @@ describe('HEAD object, conditions', () => {
});
});
describe('HEAD object with object lock', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('HEAD object with object lock', () => {
withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg);
const s3 = bucketUtil.s3;

View File

@ -270,7 +270,10 @@ describe('PUT object', () => {
});
});
describe('PUT object with object lock', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('PUT object with object lock', () => {
withV4(sigCfg => {
let bucketUtil;
let s3;

View File

@ -31,7 +31,10 @@ function createLegalHoldParams(bucket, key, status) {
};
}
describe('PUT object legal hold', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('PUT object legal hold', () => {
withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg);
const s3 = bucketUtil.s3;

View File

@ -15,7 +15,10 @@ const retentionConfig = {
RetainUntilDate: moment().add(1, 'Days').toISOString(),
};
describe('PUT object retention', () => {
const isCEPH = process.env.CI_CEPH !== undefined;
const describeSkipIfCeph = isCEPH ? describe.skip : describe;
describeSkipIfCeph('PUT object retention', () => {
withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg);
const s3 = bucketUtil.s3;

View File

@ -0,0 +1,19 @@
const metadata = require('../../../../../lib/metadata/wrapper');

// Remembers whether metadata.setup() has already succeeded, so the
// (possibly expensive) backend initialization runs at most once per process.
let metadataInit = false;

/**
 * Initialize the metadata backend, memoized across calls.
 *
 * Safe to call from multiple test files' before() hooks: the first
 * successful call performs the real setup; later calls return immediately.
 *
 * @param {function} done - error-first callback; invoked with no argument
 *   on success, or with the setup error on failure
 * @returns {undefined}
 */
function initMetadata(done) {
    if (metadataInit) {
        // Setup already completed earlier — nothing to do.
        return done();
    }
    return metadata.setup(err => {
        if (err) {
            // Leave the flag unset so a subsequent call can retry setup.
            return done(err);
        }
        metadataInit = true;
        return done();
    });
}

module.exports = { initMetadata };

View File

@ -16,7 +16,7 @@ function checkNoError(err) {
assert.ifError(err, `Expected success, got error ${JSON.stringify(err)}`);
}
function testVersioning(s3, versioningStatus, replicationStatus, cb) {
function testVersioning(s3, versioningStatus, replicationStatus, removeReplication, cb) {
const versioningParams = { Bucket: bucketName,
VersioningConfiguration: { Status: versioningStatus } };
const replicationParams = {
@ -38,6 +38,12 @@ function testVersioning(s3, versioningStatus, replicationStatus, cb) {
};
async.waterfall([
cb => s3.putBucketReplication(replicationParams, e => cb(e)),
cb => {
if (removeReplication) {
return s3.deleteBucketReplication({ Bucket: bucketName }, e => cb(e));
}
return process.nextTick(() => cb());
},
cb => s3.putBucketVersioning(versioningParams, e => cb(e)),
], cb);
}
@ -63,15 +69,22 @@ describe('Versioning on a replication source bucket', () => {
it('should not be able to disable versioning if replication enabled',
done => {
testVersioning(s3, 'Suspended', 'Enabled', err => {
testVersioning(s3, 'Suspended', 'Enabled', false, err => {
checkError(err, 'InvalidBucketState');
done();
});
});
it('should be able to disable versioning if replication disabled',
it('should not be able to disable versioning if replication disabled',
done => {
testVersioning(s3, 'Suspended', 'Disabled', err => {
testVersioning(s3, 'Suspended', 'Disabled', false, err => {
checkError(err, 'InvalidBucketState');
done();
});
});
it('should be able to disable versioning after removed replication', done => {
testVersioning(s3, 'Suspended', 'Disabled', true, err => {
checkNoError(err);
done();
});

View File

@ -657,9 +657,9 @@ arraybuffer.slice@~0.0.7:
resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz#3bbc4275dd584cc1b10809b89d4e8b63a69e7675"
integrity sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog==
"arsenal@github:scality/Arsenal#372df63":
"arsenal@github:scality/Arsenal#bugfix/ZENKO-2153-fix-tests":
version "8.2.1"
resolved "https://codeload.github.com/scality/Arsenal/tar.gz/372df634c4cf983f26b8a9439ecde90aedeb0645"
resolved "https://codeload.github.com/scality/Arsenal/tar.gz/b169bfc4566bc0df3701db41df78dc35a0b5d7d0"
dependencies:
"@hapi/joi" "^15.1.0"
JSONStream "^1.0.0"
@ -667,43 +667,7 @@ arraybuffer.slice@~0.0.7:
ajv "6.12.2"
async "~2.6.1"
aws-sdk "2.80.0"
azure-storage "^2.1.0"
backo "^1.1.0"
bson "4.0.0"
debug "~4.1.0"
diskusage "^1.1.1"
fcntl "github:scality/node-fcntl"
hdclient scality/hdclient#5145e04e5ed33e85106765b1caa90cd245ef482b
https-proxy-agent "^2.2.0"
ioredis "4.9.5"
ipaddr.js "1.9.1"
level "~5.0.1"
level-sublevel "~6.6.5"
mongodb "^3.0.1"
node-forge "^0.7.1"
prom-client "10.2.3"
simple-glob "^0.2.0"
socket.io "~2.3.0"
socket.io-client "~2.3.0"
sproxydclient "github:scality/sproxydclient#30e7115"
utf8 "3.0.0"
uuid "^3.0.1"
werelogs scality/werelogs#0ff7ec82
xml2js "~0.4.23"
optionalDependencies:
ioctl "2.0.1"
"arsenal@github:scality/Arsenal#5d100645aaa7083ce4195939bf968119e118b93a":
version "8.2.1"
resolved "https://codeload.github.com/scality/Arsenal/tar.gz/5d100645aaa7083ce4195939bf968119e118b93a"
dependencies:
"@hapi/joi" "^15.1.0"
JSONStream "^1.0.0"
agentkeepalive "^4.1.3"
ajv "6.12.2"
async "~2.6.1"
aws-sdk "2.80.0"
azure-storage "^2.1.0"
azure-storage "2.10.3"
backo "^1.1.0"
bson "4.0.0"
debug "~4.1.0"
@ -985,7 +949,7 @@ axios@^0.18.0:
follow-redirects "1.5.10"
is-buffer "^2.0.2"
azure-storage@^2.1.0:
azure-storage@2.10.3, azure-storage@^2.1.0:
version "2.10.3"
resolved "https://registry.yarnpkg.com/azure-storage/-/azure-storage-2.10.3.tgz#c5966bf929d87587d78f6847040ea9a4b1d4a50a"
integrity sha512-IGLs5Xj6kO8Ii90KerQrrwuJKexLgSwYC4oLWmc11mzKe7Jt2E5IVg+ZQ8K53YWZACtVTMBNO3iGuA+4ipjJxQ==