Compare commits

...

2 Commits

Author SHA1 Message Date
Nicolas Humbert ec4d525db7 CLDSRV-461 Backbeat API supports non versioned bucket 2023-10-24 12:49:59 +02:00
Bennett Buchanan 6a5e2a0704 feature: ZENKO-733 Lifecycle non-versioned buckets (cherry picked from commit d47b71d612) 2023-10-24 11:47:36 +02:00
4 changed files with 328 additions and 48 deletions

View File

@@ -19,6 +19,10 @@ const invalidBucketStateMessage = 'A replication configuration is present on ' +
const objectLockErrorMessage = 'An Object Lock configuration is present on ' +
'this bucket, so the versioning state cannot be changed.';
const replicationVersioningErrorMessage = 'A replication configuration is ' +
'present on this bucket, so you cannot change the versioning state. To ' +
'change the versioning state, first delete the replication configuration.';
/**
* Format of xml request:
@@ -72,6 +76,18 @@ function _checkBackendVersioningImplemented(bucket) {
return true;
}
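/**
* _isValidReplicationVersioning - reject suspending versioning while a
* replication configuration is present on the bucket
* @param {object} result - parsed VersioningConfiguration from the request
* @param {object} bucket - bucket metadata
* @return {boolean} false if the request attempts to suspend versioning on
* a bucket that has a replication configuration, true otherwise
*/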
function _isValidReplicationVersioning(result, bucket) {
if (result.VersioningConfiguration &&
result.VersioningConfiguration.Status) {
// Is there a replication configuration set on the bucket and is the
// user attempting to suspend versioning?
if (bucket.getReplicationConfiguration()) {
return result.VersioningConfiguration.Status[0] !== 'Suspended';
}
}
return true;
}
/**
* Bucket Put Versioning - Create or update bucket Versioning
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@@ -110,6 +126,15 @@ function bucketPutVersioning(authInfo, request, log, callback) {
externalVersioningErrorMessage);
return next(error, bucket);
}
if (!_isValidReplicationVersioning(result, bucket)) {
log.debug(replicationVersioningErrorMessage, {
method: 'bucketPutVersioning',
error: errors.InvalidBucketState,
});
const error = errors.InvalidBucketState
.customizeDescription(replicationVersioningErrorMessage);
return next(error, bucket);
}
const versioningConfiguration = {};
if (result.VersioningConfiguration.Status) {
versioningConfiguration.Status =

View File

@@ -1280,16 +1280,6 @@ function routeBackbeat(clientIP, request, response, log) {
return backbeatRoutes[request.method][request.resourceType]
[request.query.operation](request, response, log, next);
}
const versioningConfig = bucketInfo.getVersioningConfiguration();
if (!versioningConfig || versioningConfig.Status !== 'Enabled') {
log.debug('bucket versioning is not enabled', {
method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey,
resourceType: request.resourceType,
});
return next(errors.InvalidBucketState);
}
return backbeatRoutes[request.method][request.resourceType](
request, response, bucketInfo, objMd, log, next);
}],

View File

@@ -19,7 +19,7 @@ const backbeatAuthCredentials = {
const TEST_BUCKET = 'backbeatbucket';
const TEST_ENCRYPTED_BUCKET = 'backbeatbucket-encrypted';
const TEST_KEY = 'fookey';
-const NONVERSIONED_BUCKET = 'backbeatbucket-non-versioned';
+const NONVERSIONED_BUCKET = 'backbeatbucket-non-versioned3';
const BUCKET_FOR_NULL_VERSION = 'backbeatbucket-null-version';
const testArn = 'aws::iam:123456789012:user/bart';
@@ -63,9 +63,49 @@ const testMd = {
},
};
-function checkObjectData(s3, objectKey, dataValue, done) {
+const nonVersionedTestMd = {
'owner-display-name': 'Bart',
'owner-id': ('79a59df900b949e55d96a1e698fbaced' +
'fd6e09d98eacf8f8d5218e7cd47ef2be'),
'content-length': testData.length,
'content-md5': testDataMd5,
'x-amz-version-id': 'null',
'x-amz-server-version-id': '',
'x-amz-storage-class': 'awsbackend',
'x-amz-server-side-encryption': '',
'x-amz-server-side-encryption-aws-kms-key-id': '',
'x-amz-server-side-encryption-customer-algorithm': '',
'acl': {
Canned: 'private',
FULL_CONTROL: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
'location': null,
'isNull': '',
'nullVersionId': '',
'isDeleteMarker': false,
'tags': {},
'replicationInfo': {
status: '',
backends: [],
content: [],
destination: '',
storageClass: '',
role: '',
storageType: '',
dataStoreVersionId: '',
isNFS: null,
},
'dataStoreName': 'us-east-1',
'last-modified': '2018-12-18T01:22:15.986Z',
'md-model-version': 3,
};
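// Reads the object back from the given bucket and checks its data.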
function checkObjectData(s3, bucket, objectKey, dataValue, done) {
s3.getObject({
-Bucket: TEST_BUCKET,
+Bucket: bucket,
Key: objectKey,
}, (err, data) => {
assert.ifError(err);
@@ -197,6 +237,7 @@ describeSkipIfAWS('backbeat routes', () => {
.then(() => s3.deleteBucket({ Bucket: TEST_BUCKET }).promise())
.then(() => bucketUtil.empty(TEST_ENCRYPTED_BUCKET))
.then(() => s3.deleteBucket({ Bucket: TEST_ENCRYPTED_BUCKET }).promise())
.then(() => bucketUtil.empty(NONVERSIONED_BUCKET))
.then(() =>
s3.deleteBucket({ Bucket: NONVERSIONED_BUCKET }).promise())
.then(() => done(), err => done(err))
@@ -1284,6 +1325,60 @@ describeSkipIfAWS('backbeat routes', () => {
});
});
it('should PUT data and metadata for a non-versioned bucket', done => {
const bucket = NONVERSIONED_BUCKET;
const objectKey = 'non-versioned-key';
async.waterfall([
next =>
makeBackbeatRequest({
method: 'PUT',
bucket,
objectKey,
resourceType: 'data',
queryObj: { v2: '' },
headers: {
'content-length': testData.length,
'content-md5': testDataMd5,
'x-scal-canonical-id': testArn,
},
authCredentials: backbeatAuthCredentials,
requestBody: testData,
}, (err, response) => {
assert.ifError(err);
const metadata = Object.assign({}, nonVersionedTestMd, {
location: JSON.parse(response.body),
});
return next(null, metadata);
}),
(metadata, next) =>
makeBackbeatRequest({
method: 'PUT',
bucket,
objectKey,
resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(metadata),
}, (err, response) => {
assert.ifError(err);
assert.strictEqual(response.statusCode, 200);
next();
}),
next =>
s3.headObject({
Bucket: bucket,
Key: objectKey,
}, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.StorageClass, 'awsbackend');
next();
}),
next => checkObjectData(s3, bucket, objectKey, testData, next),
], done);
});
it('PUT metadata with "x-scal-replication-content: METADATA"' +
'header should replicate metadata only', done => {
async.waterfall([next => {
@@ -1347,39 +1442,6 @@ describeSkipIfAWS('backbeat routes', () => {
});
});
it('should refuse PUT data if bucket is not versioned',
done => makeBackbeatRequest({
method: 'PUT', bucket: NONVERSIONED_BUCKET,
objectKey: testKey, resourceType: 'data',
queryObj: { v2: '' },
headers: {
'content-length': testData.length,
'content-md5': testDataMd5,
'x-scal-canonical-id': testArn,
},
authCredentials: backbeatAuthCredentials,
requestBody: testData,
},
err => {
assert.strictEqual(err.code, 'InvalidBucketState');
done();
}));
it('should refuse PUT metadata if bucket is not versioned',
done => makeBackbeatRequest({
method: 'PUT', bucket: NONVERSIONED_BUCKET,
objectKey: testKey, resourceType: 'metadata',
queryObj: {
versionId: versionIdUtils.encode(testMd.versionId),
},
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(testMd),
},
err => {
assert.strictEqual(err.code, 'InvalidBucketState');
done();
}));
it('should refuse PUT data if no x-scal-canonical-id header ' +
'is provided', done => makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
@@ -1516,7 +1578,7 @@ describeSkipIfAWS('backbeat routes', () => {
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// give some time for the async deletes to complete
-setTimeout(() => checkObjectData(s3, testKey, testData, next),
+setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, testData, next),
1000);
}, next => {
// check that the object copy referencing the old data
@@ -1604,7 +1666,7 @@ describeSkipIfAWS('backbeat routes', () => {
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// give some time for the async deletes to complete
-setTimeout(() => checkObjectData(s3, testKey, '', next),
+setTimeout(() => checkObjectData(s3, TEST_BUCKET, testKey, '', next),
1000);
}, next => {
// check that the object copy referencing the old data
@@ -1904,6 +1966,28 @@ describeSkipIfAWS('backbeat routes', () => {
});
});
});
describe('GET Metadata route for non-versioned bucket', () => {
beforeEach(done => s3.putObject({
Bucket: NONVERSIONED_BUCKET,
Key: TEST_KEY,
Body: Buffer.from('hello'),
}, done));
it('should return metadata blob for a versionId', done => {
makeBackbeatRequest({
method: 'GET', bucket: NONVERSIONED_BUCKET,
objectKey: TEST_KEY, resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
}, (err, data) => {
const parsedBody = JSON.parse(JSON.parse(data.body).Body);
assert.strictEqual(data.statusCode, 200);
assert.deepStrictEqual(parsedBody['content-length'], 5);
done();
});
});
});
describe('Batch Delete Route', () => {
it('should batch delete a location', done => {
let versionId;

View File

@@ -0,0 +1,181 @@
const assert = require('assert');
const async = require('async');
const { errors } = require('arsenal');
const { bucketPut } = require('../../../lib/api/bucketPut');
const bucketPutVersioning = require('../../../lib/api/bucketPutVersioning');
const bucketPutReplication = require('../../../lib/api/bucketPutReplication');
const { cleanup,
DummyRequestLogger,
makeAuthInfo } = require('../helpers');
const metadata = require('../../../lib/metadata/wrapper');
const xmlEnableVersioning =
'<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<Status>Enabled</Status>' +
'</VersioningConfiguration>';
const xmlSuspendVersioning =
'<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<Status>Suspended</Status>' +
'</VersioningConfiguration>';
const locConstraintVersioned =
'<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<LocationConstraint>awsbackend</LocationConstraint>' +
'</CreateBucketConfiguration>';
const locConstraintNonVersioned =
'<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<LocationConstraint>azurebackend</LocationConstraint>' +
'</CreateBucketConfiguration>';
const xmlReplicationConfiguration =
'<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
'<Role>arn:aws:iam::account-id:role/src-resource,arn:aws:iam::account-id:role/dest-resource</Role>' +
'<Rule>' +
'<Prefix></Prefix>' +
'<Status>Enabled</Status>' +
'<Destination>' +
'<Bucket>arn:aws:s3:::destination-bucket</Bucket>' +
'<StorageClass>STANDARD</StorageClass>' +
'</Destination>' +
'</Rule>' +
'</ReplicationConfiguration>';
const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure or GCP.';
const log = new DummyRequestLogger();
const bucketName = 'bucketname';
const authInfo = makeAuthInfo('accessKey1');
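// Helpers building minimal request objects (bucketName, headers, url,
// query, post) for the API handlers under test.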
function _getPutBucketRequest(xml) {
const request = {
bucketName,
headers: { host: `${bucketName}.s3.amazonaws.com` },
url: '/',
};
request.post = xml;
return request;
}
function _putReplicationRequest(xml) {
const request = {
bucketName,
headers: { host: `${bucketName}.s3.amazonaws.com` },
url: '/?replication',
};
request.post = xml;
return request;
}
function _putVersioningRequest(xml) {
const request = {
bucketName,
headers: { host: `${bucketName}.s3.amazonaws.com` },
url: '/?versioning',
query: { versioning: '' },
};
request.post = xml;
return request;
}
describe('bucketPutVersioning API', () => {
before(() => cleanup());
afterEach(() => cleanup());
describe('with version enabled location constraint', () => {
beforeEach(done => {
const request = _getPutBucketRequest(locConstraintVersioned);
bucketPut(authInfo, request, log, done);
});
const tests = [
{
msg: 'should successfully enable versioning on location ' +
'constraint with supportsVersioning set to true',
input: xmlEnableVersioning,
output: { Status: 'Enabled' },
},
{
msg: 'should successfully suspend versioning on location ' +
'constraint with supportsVersioning set to true',
input: xmlSuspendVersioning,
output: { Status: 'Suspended' },
},
];
tests.forEach(test => it(test.msg, done => {
const request = _putVersioningRequest(test.input);
bucketPutVersioning(authInfo, request, log, err => {
assert.ifError(err,
`Expected success, but got err: ${err}`);
metadata.getBucket(bucketName, log, (err, bucket) => {
assert.ifError(err,
`Expected success, but got err: ${err}`);
assert.deepStrictEqual(bucket._versioningConfiguration,
test.output);
done();
});
});
}));
it('should not suspend versioning on bucket with replication', done => {
async.series([
// Enable versioning to allow putting a replication config.
next => {
const request = _putVersioningRequest(xmlEnableVersioning);
bucketPutVersioning(authInfo, request, log, next);
},
// Put the replication config on the bucket.
next => {
const request =
_putReplicationRequest(xmlReplicationConfiguration);
bucketPutReplication(authInfo, request, log, next);
},
// Attempt to suspend versioning.
next => {
const request = _putVersioningRequest(xmlSuspendVersioning);
bucketPutVersioning(authInfo, request, log, err => {
assert(err.InvalidBucketState);
next();
});
},
], done);
});
});
describe('with version disabled location constraint', () => {
beforeEach(done => {
const request = _getPutBucketRequest(locConstraintNonVersioned);
bucketPut(authInfo, request, log, done);
});
const tests = [
{
msg: 'should return error if enabling versioning on location ' +
'constraint with supportsVersioning set to false',
input: xmlEnableVersioning,
output: { error: errors.NotImplemented.customizeDescription(
externalVersioningErrorMessage) },
},
{
msg: 'should return error if suspending versioning on ' +
'location constraint with supportsVersioning set to false',
input: xmlSuspendVersioning,
output: { error: errors.NotImplemented.customizeDescription(
externalVersioningErrorMessage) },
},
];
tests.forEach(test => it(test.msg, done => {
const putBucketVersioningRequest =
_putVersioningRequest(test.input);
bucketPutVersioning(authInfo, putBucketVersioningRequest, log,
err => {
assert.deepStrictEqual(err, test.output.error);
done();
});
}));
});
});