Compare commits
1 Commit
developmen...ft/improve
Author | SHA1 | Date
---|---|---
alexandremerle | 6fe661f7ef |

@@ -22,9 +22,10 @@ const routeMap = {
 const utapi = new UtapiClient(_config.utapi);

 function checkUnsuportedRoutes(req, res, log) {
-    if (req.query.policy !== undefined ||
-        req.query.cors !== undefined ||
-        req.query.tagging !== undefined) {
+    const query = req.query;
+    if (query.policy !== undefined ||
+        query.cors !== undefined ||
+        query.tagging !== undefined) {
         return routesUtils.responseXMLBody(
             errors.NotImplemented, null, res, log);
     }

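The pattern in this hunk, reading `req.query` once into a local instead of dereferencing it in every condition, recurs throughout the commit. A minimal standalone sketch of the same guard, using a hypothetical plain request object rather than the real route handler:

// Sketch only: the same guard on a hypothetical plain request object.
function hasUnsupportedQuery(req) {
    const query = req.query;   // read req.query once, not in every condition
    return query.policy !== undefined ||
        query.cors !== undefined ||
        query.tagging !== undefined;
}

// A request carrying ?tagging would be answered with NotImplemented.
console.log(hasUnsupportedQuery({ query: {} }));               // false
console.log(hasUnsupportedQuery({ query: { tagging: '' } }));  // true
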
@@ -72,6 +73,16 @@ function checkIP(clientIP) {
         _config.healthChecks.allowFrom, clientIP);
 }

+function safeNormalize(req) {
+    try {
+        utils.normalizeRequest(req);
+        return true;
+    } catch (err) {
+        log.trace('could not normalize request', { error: err.stack || err });
+        return false;
+    }
+}
+
 export default function routes(req, res, logger) {
     const clientInfo = {
         clientIP: req.socket.remoteAddress,

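A self-contained sketch of the boolean-returning wrapper added above. The real helper depends on `utils.normalizeRequest` and a `log` object from the surrounding module; both are stubbed here as assumptions so the snippet runs on its own:

// Stand-ins for the module-level dependencies (assumptions, not the real ones).
const utils = {
    normalizeRequest(req) {
        if (typeof req.url !== 'string') {
            throw new Error('malformed request');
        }
    },
};
const log = { trace: (msg, fields) => console.log(msg, fields) };

// Same shape as the helper in the hunk: swallow the error, report success/failure.
function safeNormalize(req) {
    try {
        utils.normalizeRequest(req);
        return true;
    } catch (err) {
        log.trace('could not normalize request', { error: err.stack || err });
        return false;
    }
}

console.log(safeNormalize({ url: '/bucket/key' }));  // true
console.log(safeNormalize({}));                      // false (plus a trace line)
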
@@ -91,30 +102,26 @@ export default function routes(req, res, logger) {
         }
         return healthcheckRouteHandler(req, res, log);
     }

-    try {
-        utils.normalizeRequest(req);
-    } catch (err) {
-        log.trace('could not normalize request', { error: err });
-        return routesUtils.responseXMLBody(
-            errors.InvalidURI, undefined, res, log);
+    if (!safeNormalize(req)) {
+        return routesUtils.responseXMLBody(errors.InvalidURI, undefined, res,
+            log);
     }

     log.addDefaultFields({
         bucketName: req.bucketName,
         objectKey: req.objectKey,
     });
+    const bucketName = req.bucketName;
     // if empty name and request not a list Buckets
-    if (!req.bucketName &&
+    if (!bucketName &&
         !(req.method.toUpperCase() === 'GET' && !req.objectKey)) {
         log.warn('empty bucket name', { method: 'routes' });
         return routesUtils.responseXMLBody(errors.MethodNotAllowed,
             undefined, res, log);
     }

-    if (req.bucketName !== undefined &&
-        utils.isValidBucketName(req.bucketName) === false) {
-        log.warn('invalid bucket name', { bucketName: req.bucketName });
+    if (bucketName !== undefined &&
+        utils.isValidBucketName(bucketName) === false) {
+        log.warn('invalid bucket name', { bucketName });
         return routesUtils.responseXMLBody(errors.InvalidBucketName,
             undefined, res, log);
     }

@@ -13,6 +13,7 @@ const encryptionHeaders = [
     'x-amz-server-side-encryption-customer-key',
     'x-amz-server-side-encryption-customer-key-md5',
 ];
+const encryptionHeadersLen = encryptionHeaders.length;

 const validStatuses = ['Enabled', 'Suspended'];
 const validMfaDeletes = [undefined, 'Enabled', 'Disabled'];

@@ -145,10 +146,13 @@ export default function routePUT(request, response, log, utapi) {
                 log);
         }
     }
+    const headers = request.headers;
     // object level encryption
-    if (encryptionHeaders.some(i => request.headers[i] !== undefined)) {
-        return routesUtils.responseXMLBody(errors.NotImplemented, null,
-            response, log);
+    for (let i = 0; i < encryptionHeadersLen; i++) {
+        if (headers[encryptionHeaders[i]] !== undefined) {
+            return routesUtils.responseXMLBody(errors.NotImplemented, null,
+                response, log);
+        }
     }
     if (request.query.partNumber) {
         if (request.headers['x-amz-copy-source']) {

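Both forms of the check answer the same question (is any of the SSE headers present?); the indexed loop with a cached length just avoids allocating a callback on every PUT. A sketch showing the two agree, trimmed to the headers visible in this diff:

// Trimmed to the two headers visible above; the module defines the full list.
const encryptionHeaders = [
    'x-amz-server-side-encryption-customer-key',
    'x-amz-server-side-encryption-customer-key-md5',
];
const encryptionHeadersLen = encryptionHeaders.length;

// Before: callback-based scan.
const hasSseSome = headers =>
    encryptionHeaders.some(h => headers[h] !== undefined);

// After: indexed loop with the length hoisted out of the condition.
function hasSseLoop(headers) {
    for (let i = 0; i < encryptionHeadersLen; i++) {
        if (headers[encryptionHeaders[i]] !== undefined) {
            return true;
        }
    }
    return false;
}

const sample = { 'x-amz-server-side-encryption-customer-key': 'abc' };
console.log(hasSseSome(sample), hasSseLoop(sample));  // true true
console.log(hasSseSome({}), hasSseLoop({}));          // false false
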
@@ -12,16 +12,21 @@ import data from '../data/wrapper';
 function setCommonResponseHeaders(headers, response, log) {
     if (headers && typeof headers === 'object') {
         log.trace('setting response headers', { headers });
-        Object.keys(headers).forEach(key => {
-            if (headers[key] !== undefined) {
-                response.setHeader(key, headers[key]);
+        const keys = Object.keys(headers);
+        const keysLen = keys.length;
+        for (let i = 0; i < keysLen; i++) {
+            const key = keys[i];
+            const header = headers[key];
+            if (header !== undefined) {
+                response.setHeader(key, header);
             }
-        });
+        }
     }
+    const uids = log.getSerializedUids();
     response.setHeader('server', 'AmazonS3');
     // to be expanded in further implementation of logging of requests
-    response.setHeader('x-amz-id-2', log.getSerializedUids());
-    response.setHeader('x-amz-request-id', log.getSerializedUids());
+    response.setHeader('x-amz-id-2', uids);
+    response.setHeader('x-amz-request-id', uids);
     return response;
 }
 /**

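`log.getSerializedUids()` is now called once per response and reused for both Amazon-style id headers. A sketch with stubbed response and logger objects (assumptions, not the real classes) showing the effect:

// Stubs standing in for the real response and logger.
const response = {
    headers: {},
    setHeader(name, value) { this.headers[name] = value; },
};
const log = {
    calls: 0,
    getSerializedUids() { this.calls += 1; return `uid-${this.calls}`; },
};

// After the change: one lookup, reused for both id headers.
const uids = log.getSerializedUids();
response.setHeader('x-amz-id-2', uids);
response.setHeader('x-amz-request-id', uids);

console.log(response.headers);   // both headers share the same serialized uid
console.log(log.calls);          // 1 (previously this would have been 2)
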
@@ -33,9 +38,10 @@ function setCommonResponseHeaders(headers, response, log) {
  * @return {object} response - response object with additional headers
  */
 function okHeaderResponse(headers, response, httpCode, log) {
-    log.trace('sending success header response');
+    log.debug('sending success header response', {
+        httpCode,
+    });
     setCommonResponseHeaders(headers, response, log);
-    log.debug('response http code', { httpCode });
     response.writeHead(httpCode);
     return response.end(() => {
         log.end().info('responded to request', {

@@ -53,11 +59,12 @@ function okHeaderResponse(headers, response, httpCode, log) {
  * @return {object} response - response object with additional headers
  */
 function okXMLResponse(xml, response, log, additionalHeaders) {
-    log.trace('sending success xml response');
+    log.debug('sending success xml response', {
+        xml,
+        httpCode: 200,
+    });
     setCommonResponseHeaders(additionalHeaders, response, log);
     response.writeHead(200, { 'Content-type': 'application/xml' });
-    log.debug('response http code', { httpCode: 200 });
-    log.trace('xml response', { xml });
     return response.end(xml, 'utf8', () => {
         log.end().info('responded with XML', {
             httpCode: response.statusCode,

@@ -76,8 +83,7 @@ function errorXMLResponse(errCode, response, log) {
     <RequestId>4442587FB7D0A2F9</RequestId>
     </Error>
     */
-    const xml = [];
-    xml.push(
+    const xml = [
         '<?xml version="1.0" encoding="UTF-8"?>',
         '<Error>',
         `<Code>${errCode.message}</Code>`,

@@ -85,7 +91,7 @@ function errorXMLResponse(errCode, response, log) {
         '<Resource></Resource>',
         `<RequestId>${log.getSerializedUids()}</RequestId>`,
         '</Error>'
-    );
+    ];
     setCommonResponseHeaders(null, response, log);
     response.writeHead(errCode.code, { 'Content-type': 'application/xml' });
     return response.end(xml.join(''), 'utf8', () => {

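These two hunks replace push-into-an-empty-array with a single array literal; the joined output is identical. An illustrative run with a stand-in error object (the elements between `<Code>` and `<Resource>` fall outside the diff, so they are elided here as well):

// Stand-in error object; the real one comes from the errors module.
const errCode = { code: 400, message: 'InvalidBucketName' };
const requestId = 'demo-serialized-uids';   // stands in for log.getSerializedUids()

const xml = [
    '<?xml version="1.0" encoding="UTF-8"?>',
    '<Error>',
    `<Code>${errCode.message}</Code>`,
    // ... elements elided in the diff ...
    '<Resource></Resource>',
    `<RequestId>${requestId}</RequestId>`,
    '</Error>',
];
console.log(xml.join(''));
// <?xml version="1.0" encoding="UTF-8"?><Error><Code>InvalidBucketName</Code>...
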
@@ -238,38 +244,40 @@ const routesUtils = {
         // Range is inclusive of endpoint so need plus 1
         const max = end - begin + 1;
         let total = 0;
-        for (let i = 0; i < dataLocations.length; i++) {
+        const dataLocationsLen = dataLocations.length;
+        for (let i = 0; i < dataLocationsLen; i++) {
             if (total >= max) {
                 break;
             }
-            const partStart = parseInt(dataLocations[i].start, 10);
-            const partSize = parseInt(dataLocations[i].size, 10);
+            const current = dataLocations[i];
+            const partStart = parseInt(current.start, 10);
+            const partSize = parseInt(current.size, 10);
             if (partStart + partSize < begin) {
                 continue;
             }
             if (partStart >= begin) {
                 // If the whole part is in the range, just include it
                 if (partSize + total <= max) {
-                    const partWithoutRange = dataLocations[i];
+                    const partWithoutRange = current;
                     partWithoutRange.size = partSize.toString();
                     parsedLocations.push(partWithoutRange);
                     total += partSize;
                 // Otherwise set a range limit on the part end
                 // and we're done
                 } else {
-                    const partWithRange = dataLocations[i];
+                    const partWithRange = current;
                     // Need to subtract one from endPart since range
                     // includes endPart in byte count
                     const endPart = Math.min(partSize - 1, max - total - 1);
                     partWithRange.range = [0, endPart];
                     // modify size to be stored for object put part copy
                     partWithRange.size = (endPart + 1).toString();
-                    parsedLocations.push(dataLocations[i]);
+                    parsedLocations.push(current);
                     break;
                 }
             } else {
                 // Offset start (and end if necessary)
-                const partWithRange = dataLocations[i];
+                const partWithRange = current;
                 const startOffset = begin - partStart;
                 // Use full remaining part if remaining partSize is less
                 // than byte range we need to satisfy. Or use byte range

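The arithmetic in the `partStart >= begin` branch is easier to follow with concrete numbers. A worked example under assumed values (two 64-byte parts, a request for bytes 0-99):

// Assumed values, not taken from the diff.
const begin = 0;
const end = 99;
const max = end - begin + 1;     // 100 bytes requested (range is inclusive)

let total = 64;                  // the whole 64-byte first part was pushed as-is
const partSize = 64;             // second part is also 64 bytes

// Only part of the second part is needed; cap it at the remaining bytes.
const endPart = Math.min(partSize - 1, max - total - 1);   // min(63, 35) = 35
const range = [0, endPart];                                // keep bytes 0..35
const size = (endPart + 1).toString();                     // "36"; 64 + 36 = 100

console.log({ endPart, range, size });
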
@@ -48,7 +48,7 @@ class S3Server {
                 rejectUnauthorized: true,
             }, (req, res) => {
                 // disable nagle algorithm
-                req.socket.setNoDelay();
+                req.socket.setNoDelay(true);
                 routes(req, res, logger);
             });
             logger.info('Https server configuration', {

@@ -57,7 +57,7 @@ class S3Server {
         } else {
             this.server = http.createServer((req, res) => {
                 // disable nagle algorithm
-                req.socket.setNoDelay();
+                req.socket.setNoDelay(true);
                 routes(req, res, logger);
             });
             logger.info('Http server configuration', {

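In Node, `socket.setNoDelay([noDelay])` defaults the flag to `true`, so the old call already disabled Nagle's algorithm; passing `true` only makes the intent explicit. A minimal sketch of the same per-request setting on a bare HTTP server (port and handler are placeholders):

import http from 'http';

const server = http.createServer((req, res) => {
    // Disable Nagle's algorithm for this connection, as in the hunks above.
    req.socket.setNoDelay(true);
    res.end('ok');
});

// Placeholder port for the sketch.
server.listen(8000, () => console.log('listening on 8000'));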