S3: Fine tuning, supporting bucket per file/dir and single bucket.

master
Julian M. Kunkel 2020-06-30 20:02:36 +01:00
parent 87c9906d70
commit fcae6ff0b9
2 changed files with 102 additions and 47 deletions

View File

@ -58,9 +58,9 @@ static option_help * S3_options(aiori_mod_opt_t ** init_backend_options, aiori_m
o->bucket_prefix = "ior";
option_help h [] = {
{0, "S3.bucket-per-file", "Use one bucket to map one file, otherwise only one bucket is used to store all files.", OPTION_FLAG, 'd', & o->bucket_per_file},
{0, "S3.bucket-name-prefix", "The name of the bucket (when using without -b), otherwise it is used as prefix.", OPTION_OPTIONAL_ARGUMENT, 's', & o->bucket_prefix},
{0, "S3.dont-suffix-bucket", "If not selected, then a hash will be added to the bucket name to increase uniqueness.", OPTION_FLAG, 'd', & o->dont_suffix },
{0, "S3.bucket-per-file", "Use one bucket to map one file/directory, otherwise one bucket is used to store all dirs/files.", OPTION_FLAG, 'd', & o->bucket_per_file},
{0, "S3.bucket-name-prefix", "The prefix of the bucket(s).", OPTION_OPTIONAL_ARGUMENT, 's', & o->bucket_prefix},
{0, "S3.dont-suffix-bucket", "By default a hash will be added to the bucket name to increase uniqueness, this disables the option.", OPTION_FLAG, 'd', & o->dont_suffix },
{0, "S3.s3-compatible", "to be selected when using S3 compatible storage", OPTION_FLAG, 'd', & o->s3_compatible },
{0, "S3.use-ssl", "used to specify that SSL is needed for the connection", OPTION_FLAG, 'd', & o->use_ssl },
{0, "S3.host", "The host optionally followed by:port.", OPTION_OPTIONAL_ARGUMENT, 's', & o->host},
@ -74,12 +74,18 @@ static option_help * S3_options(aiori_mod_opt_t ** init_backend_options, aiori_m
}
static void def_file_name(s3_options_t * o, char * out_name, char const * path){
if(o->bucket_per_file){
out_name += sprintf(out_name, "%s-", o->bucket_prefix_cur);
}
// duplicate path except "/"
while(*path != 0){
char c = *path;
if(((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') )){
if(((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') )){
*out_name = *path;
out_name++;
}else if(c >= 'A' && c <= 'Z'){
*out_name = *path + ('a' - 'A');
out_name++;
}
path++;
}
@ -88,16 +94,16 @@ static void def_file_name(s3_options_t * o, char * out_name, char const * path){
static void def_bucket_name(s3_options_t * o, char * out_name, char const * path){
// S3_MAX_BUCKET_NAME_SIZE
if (o->bucket_per_file){
out_name += sprintf(out_name, "%s-", o->bucket_prefix_cur);
}
out_name += sprintf(out_name, "%s-", o->bucket_prefix_cur);
// duplicate path except "/"
while(*path != 0){
char c = *path;
if(((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') )){
if(((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') )){
*out_name = *path;
out_name++;
}else if(c >= 'A' && c <= 'Z'){
*out_name = *path + ('a' - 'A');
out_name++;
}
path++;
}
@ -129,8 +135,9 @@ static void responseCompleteCallback(S3Status status, const S3ErrorDetails *erro
return;
}
/*
 * Warn (via EWARNF) when the last libs3 operation left a non-OK status in the
 * module-global s3status, including the offending path/bucket name p and any
 * error detail libs3 recorded in s3error.
 * Wrapped in do{...}while(0) so the macro behaves as a single statement
 * (safe after a bare if/else without braces).
 * NOTE(review): the stale parameterless pre-change definition that the diff
 * still showed above this one has been dropped — only one definition may exist.
 */
#define CHECK_ERROR(p) \
  do { \
    if (s3status != S3StatusOK){ \
      EWARNF("S3 %s:%d (path:%s) \"%s\": %s %s", __FUNCTION__, __LINE__, p, S3_get_status_name(s3status), s3error.message, s3error.furtherDetails ? s3error.furtherDetails : ""); \
    } \
  } while (0)
@ -173,7 +180,7 @@ static int S3_statfs (const char * path, ior_aiori_statfs_t * stat, aiori_mod_op
S3_list_service(o->s3_protocol, o->access_key, o->secret_key, NULL, o->host,
o->authRegion, NULL, o->timeout, & listhandler, & buckets);
stat->f_files = buckets;
CHECK_ERROR
CHECK_ERROR(o->authRegion);
return 0;
}
@ -208,15 +215,22 @@ static aiori_fd_t *S3_Create(char *path, int iorflags, aiori_mod_opt_t * options
s3_options_t * o = (s3_options_t*) options;
char p[FILENAME_MAX];
def_file_name(o, p, path);
S3_fd_t * fd = malloc(sizeof(S3_fd_t));
fd->object = strdup(p);
if(iorflags & IOR_CREAT){
struct data_handling dh = { .buf = NULL, .size = 0 };
S3_put_object(& o->bucket_context, p, 0, NULL, NULL, o->timeout, &putObjectHandler, & dh);
CHECK_ERROR
if(o->bucket_per_file){
S3_create_bucket(o->s3_protocol, o->access_key, o->secret_key, NULL, o->host, p, o->authRegion, S3CannedAclPrivate, o->locationConstraint, NULL, o->timeout, & responseHandler, NULL);
}else{
struct data_handling dh = { .buf = NULL, .size = 0 };
S3_put_object(& o->bucket_context, p, 0, NULL, NULL, o->timeout, &putObjectHandler, & dh);
}
if (s3status != S3StatusOK){
CHECK_ERROR(p);
return NULL;
}
}
S3_fd_t * fd = malloc(sizeof(S3_fd_t));
fd->object = strdup(p);
return (aiori_fd_t*) fd;
}
@ -250,15 +264,21 @@ static aiori_fd_t *S3_Open(char *path, int flags, aiori_mod_opt_t * options)
char p[FILENAME_MAX];
def_file_name(o, p, path);
struct stat buf;
S3_head_object(& o->bucket_context, p, NULL, o->timeout, & statResponseHandler, & buf);
if (o->bucket_per_file){
S3_test_bucket(o->s3_protocol, S3UriStylePath, o->access_key, o->secret_key,
NULL, o->host, p, o->authRegion, 0, NULL,
NULL, o->timeout, & responseHandler, NULL);
}else{
struct stat buf;
S3_head_object(& o->bucket_context, p, NULL, o->timeout, & statResponseHandler, & buf);
}
if (s3status != S3StatusOK){
CHECK_ERROR(p);
return NULL;
}
S3_fd_t * fd = malloc(sizeof(S3_fd_t));
fd->object = strdup(p);
return (aiori_fd_t*) fd;
}
@ -280,14 +300,29 @@ static IOR_offset_t S3_Xfer(int access, aiori_fd_t * afd, IOR_size_t * buffer, I
s3_options_t * o = (s3_options_t*) options;
char p[FILENAME_MAX];
sprintf(p, "%s-%ld-%ld", fd->object, (long) offset, (long) length);
if(o->bucket_per_file){
o->bucket_context.bucketName = fd->object;
if(offset != 0){
sprintf(p, "%ld-%ld", (long) offset, (long) length);
}else{
sprintf(p, "0");
}
}else{
if(offset != 0){
sprintf(p, "%s-%ld-%ld", fd->object, (long) offset, (long) length);
}else{
sprintf(p, "%s", fd->object);
}
}
if(access == WRITE){
S3_put_object(& o->bucket_context, p, length, NULL, NULL, o->timeout, &putObjectHandler, & dh);
}else{
S3_get_object(& o->bucket_context, p, NULL, 0, length, NULL, o->timeout, &getObjectHandler, & dh);
}
if (! o->s3_compatible){
CHECK_ERROR
CHECK_ERROR(p);
}
return length;
}
@ -300,13 +335,30 @@ static void S3_Close(aiori_fd_t * afd, aiori_mod_opt_t * options)
free(afd);
}
/*
 * S3_list_bucket callback used while emptying a bucket before deletion:
 * deletes every object reported in this batch of listing results.
 *
 * callbackData is the s3_options_t* passed to S3_list_bucket, which provides
 * the bucket_context, timeout and response handler.
 * Returns S3StatusOK so libs3 keeps delivering further batches.
 *
 * Fix: the previous version deleted only contents->key (the first entry),
 * silently leaving the remaining contentsCount-1 objects behind; we must
 * iterate over the whole batch.
 * NOTE(review): isTruncated/nextMarker are not followed up here — presumably
 * the caller's INT_MAX maxkeys makes truncation unlikely; confirm for very
 * large buckets.
 */
S3Status list_delete_cb(int isTruncated, const char *nextMarker, int contentsCount, const S3ListBucketContent *contents, int commonPrefixesCount, const char **commonPrefixes, void *callbackData){
  s3_options_t * o = (s3_options_t*) callbackData;
  for (int i = 0; i < contentsCount; i++){
    S3_delete_object(& o->bucket_context, contents[i].key, NULL, o->timeout, & responseHandler, NULL);
  }
  return S3StatusOK;
}
static S3ListBucketHandler list_delete_handler = {{&responsePropertiesCallback, &responseCompleteCallback }, list_delete_cb};
/*
 * Delete the object (or, in bucket-per-file mode, the whole bucket) that
 * backs the given path.
 *
 * bucket-per-file mode: the path maps to its own bucket, which must be
 * emptied first (S3 refuses to delete non-empty buckets), so list all
 * objects with list_delete_handler deleting each, then delete the bucket.
 * single-bucket mode: the path maps to one object in the shared bucket.
 *
 * Fix: the diff rendering had retained the pre-change unconditional
 * S3_delete_object + CHECK_ERROR lines in front of the new conditional
 * block, which would delete the object twice (and with the wrong name in
 * bucket-per-file mode); the stale lines are removed.
 */
static void S3_Delete(char *path, aiori_mod_opt_t * options)
{
  s3_options_t * o = (s3_options_t*) options;
  char p[FILENAME_MAX];
  def_file_name(o, p, path);
  if(o->bucket_per_file){
    o->bucket_context.bucketName = p;
    /* empty the bucket before removing it */
    S3_list_bucket(& o->bucket_context, NULL, NULL, NULL, INT_MAX, NULL, o->timeout, & list_delete_handler, o);
    S3_delete_bucket(o->s3_protocol, S3UriStylePath, o->access_key, o->secret_key, NULL, o->host, p, o->authRegion, NULL, o->timeout, & responseHandler, NULL);
  }else{
    S3_delete_object(& o->bucket_context, p, NULL, o->timeout, & responseHandler, NULL);
  }
  CHECK_ERROR(p);
}
static int S3_mkdir (const char *path, mode_t mode, aiori_mod_opt_t * options){
@ -316,13 +368,13 @@ static int S3_mkdir (const char *path, mode_t mode, aiori_mod_opt_t * options){
def_bucket_name(o, p, path);
if (o->bucket_per_file){
S3_create_bucket(o->s3_protocol, o->access_key, o->secret_key, NULL, o->host, p, o->authRegion, S3CannedAclPrivate, o->locationConstraint, NULL, o->timeout, & responseHandler, NULL);
CHECK_ERROR
CHECK_ERROR(p);
return 0;
}else{
struct data_handling dh = { .buf = NULL, .size = 0 };
S3_put_object(& o->bucket_context, p, 0, NULL, NULL, o->timeout, & putObjectHandler, & dh);
if (! o->s3_compatible){
CHECK_ERROR
CHECK_ERROR(p);
}
return 0;
}
@ -335,40 +387,39 @@ static int S3_rmdir (const char *path, aiori_mod_opt_t * options){
def_bucket_name(o, p, path);
if (o->bucket_per_file){
S3_delete_bucket(o->s3_protocol, S3UriStylePath, o->access_key, o->secret_key, NULL, o->host, p, o->authRegion, NULL, o->timeout, & responseHandler, NULL);
CHECK_ERROR
CHECK_ERROR(p);
return 0;
}else{
S3_delete_object(& o->bucket_context, p, NULL, o->timeout, & responseHandler, NULL);
CHECK_ERROR
CHECK_ERROR(p);
return 0;
}
}
/*
 * Stat the given path: zero-fill *buf, then probe existence/metadata.
 *
 * bucket-per-file mode: the path is a bucket, so S3_test_bucket checks it
 * (statResponseHandler cannot fill buf from a bucket probe).
 * single-bucket mode: HEAD the object and let statResponseHandler populate
 * buf from the response properties.
 *
 * Returns 0 on success, -1 if the last operation left a non-OK s3status.
 *
 * Fix: the diff rendering kept two stale fragments — the pre-change
 * S3_access definition (now reimplemented further below on top of S3_stat)
 * and the old unconditional S3_head_object line — both removed here so the
 * file has a single coherent definition of each function.
 */
static int S3_stat(const char *path, struct stat *buf, aiori_mod_opt_t * options){
  s3_options_t * o = (s3_options_t*) options;
  char p[FILENAME_MAX];
  def_file_name(o, p, path);
  memset(buf, 0, sizeof(struct stat));
  // TODO count the individual file fragment sizes together
  if (o->bucket_per_file){
    S3_test_bucket(o->s3_protocol, S3UriStylePath, o->access_key, o->secret_key,
                   NULL, o->host, p, o->authRegion, 0, NULL,
                   NULL, o->timeout, & responseHandler, NULL);
  }else{
    S3_head_object(& o->bucket_context, p, NULL, o->timeout, & statResponseHandler, buf);
  }
  if (s3status != S3StatusOK){
    return -1;
  }
  return 0;
}
/*
 * Existence/access check for a path, expressed as a thin wrapper over
 * S3_stat: the stat result itself is discarded, only its 0/-1 return
 * matters. The mode argument is accepted for interface compatibility but
 * not inspected — any object that can be stat'ed is considered accessible.
 */
static int S3_access (const char *path, int mode, aiori_mod_opt_t * options){
  struct stat discarded;
  int rc = S3_stat(path, & discarded, options);
  return rc;
}
static IOR_offset_t S3_GetFileSize(aiori_mod_opt_t * options, char *testFileName)
{
struct stat buf;
@ -410,11 +461,6 @@ static void S3_init(aiori_mod_opt_t * options){
memset(& o->bucket_context, 0, sizeof(o->bucket_context));
o->bucket_context.hostName = o->host;
o->bucket_context.bucketName = o->bucket_prefix_cur;
if (! o->bucket_per_file){
S3_create_bucket(o->s3_protocol, o->access_key, o->secret_key, NULL, o->host, o->bucket_context.bucketName, o->authRegion, S3CannedAclPrivate, o->locationConstraint, NULL, o->timeout, & responseHandler, NULL);
CHECK_ERROR
}
if (o->use_ssl){
o->s3_protocol = S3ProtocolHTTPS;
}else{
@ -425,6 +471,11 @@ static void S3_init(aiori_mod_opt_t * options){
o->bucket_context.accessKeyId = o->access_key;
o->bucket_context.secretAccessKey = o->secret_key;
if (! o->bucket_per_file){
S3_create_bucket(o->s3_protocol, o->access_key, o->secret_key, NULL, o->host, o->bucket_context.bucketName, o->authRegion, S3CannedAclPrivate, o->locationConstraint, NULL, o->timeout, & responseHandler, NULL);
CHECK_ERROR(o->bucket_context.bucketName);
}
if ( ret != S3StatusOK ){
FAIL("S3 error %s", S3_get_status_name(ret));
}

View File

@ -24,4 +24,8 @@ I=100 # Start with this ID
IOR 2 -a S3-libs3 --S3.host=localhost:9000 --S3.secret-key=secretkey --S3.access-key=accesskey -b $((10*1024*1024)) -t $((10*1024*1024))
MDTEST 2 -a S3-libs3 --S3.host=localhost:9000 --S3.secret-key=secretkey --S3.access-key=accesskey
IOR 1 -a S3-libs3 --S3.host=localhost:9000 --S3.secret-key=secretkey --S3.access-key=accesskey -b $((10*1024)) -t $((10*1024)) --S3.bucket-per-file
MDTEST 1 -a S3-libs3 --S3.host=localhost:9000 --S3.secret-key=secretkey --S3.access-key=accesskey --S3.bucket-per-file
kill -9 %1