Basic API converted.

master
Julian M. Kunkel 2020-11-02 20:12:15 +00:00
parent 654b797788
commit 82d20f2744
4 changed files with 23 additions and 13 deletions

View File

@ -100,7 +100,7 @@ option_help * POSIX_options(aiori_mod_opt_t ** init_backend_options, aiori_mod_o
#endif
#ifdef HAVE_LUSTRE_USER
{0, "posix.lustre.stripecount", "", OPTION_OPTIONAL_ARGUMENT, 'd', & o->lustre_stripe_count},
{0, "posix.lustre.stripecount", "", OPTION_OPTIONAL_ARGUMENT, 'd', & o->lustre_stripe_count},
{0, "posix.lustre.stripesize", "", OPTION_OPTIONAL_ARGUMENT, 'd', & o->lustre_stripe_size},
{0, "posix.lustre.startost", "", OPTION_OPTIONAL_ARGUMENT, 'd', & o->lustre_start_ost},
{0, "posix.lustre.ignorelocks", "", OPTION_FLAG, 'd', & o->lustre_ignore_locks},

View File

@ -4,7 +4,10 @@
int main(int argc, char ** argv){
MPI_Init(& argc, & argv);
int ret = md_workbench_run(argc, argv, MPI_COMM_WORLD, stdout);
//phase_stat_t* results =
md_workbench_run(argc, argv, MPI_COMM_WORLD, stdout);
// API check, access the results of the first phase which is precrate.
//printf("Max op runtime: %f\n", results->max_op_time);
MPI_Finalize();
return ret;
return 0;
}

View File

@ -38,6 +38,8 @@ struct benchmark_options{
int precreate;
int dset_count;
int result_position; // in the global structure
int offset;
int iterations;
int global_iteration;
@ -339,7 +341,7 @@ static void compute_histogram(const char * name, time_result_t * times, time_sta
stats->max = times[repeats - 1].runtime;
}
static void end_phase(const char * name, phase_stat_t * p){
static void end_phase(const char * name, phase_stat_t * p, phase_stat_t * result){
int ret;
char buff[MAX_PATHLEN];
@ -449,6 +451,10 @@ static void end_phase(const char * name, phase_stat_t * p){
free(g_stat.time_delete);
}
// copy the result back for the API
memcpy(& result[o.result_position], & g_stat, sizeof(g_stat));
o.result_position++;
// allocate memory if necessary
// ret = mem_preallocate(& limit_memory_P, o.limit_memory_between_phases, o.verbosity >= 3);
// if( ret != 0){
@ -777,7 +783,7 @@ static void store_position(int position){
fclose(f);
}
int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logfile){
phase_stat_t* md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logfile){
int ret;
int printhelp = 0;
char * limit_memory_P = NULL;
@ -867,6 +873,7 @@ int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logf
double bench_start;
bench_start = GetTimeStamp();
phase_stat_t phase_stats;
phase_stat_t* all_phases_stats = malloc(sizeof(phase_stat_t) * (2 + o.iterations));
if(o.rank == 0 && o.print_detailed_stats && ! o.quiet_output){
print_detailed_stat_header();
@ -885,7 +892,7 @@ int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logf
phase_stats.phase_start_timer = GetTimeStamp();
run_precreate(& phase_stats, current_index);
phase_stats.t = GetTimeStamp() - phase_stats.phase_start_timer;
end_phase("precreate", & phase_stats);
end_phase("precreate", & phase_stats, all_phases_stats);
}
if (o.phase_benchmark){
@ -898,7 +905,7 @@ int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logf
MPI_Barrier(o.com);
phase_stats.phase_start_timer = GetTimeStamp();
run_benchmark(& phase_stats, & current_index);
end_phase("benchmark", & phase_stats);
end_phase("benchmark", & phase_stats, all_phases_stats);
if(o.adaptive_waiting_mode){
o.relative_waiting_factor = 0.0625;
@ -907,7 +914,7 @@ int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logf
MPI_Barrier(o.com);
phase_stats.phase_start_timer = GetTimeStamp();
run_benchmark(& phase_stats, & current_index);
end_phase("benchmark", & phase_stats);
end_phase("benchmark", & phase_stats, all_phases_stats);
o.relative_waiting_factor *= 2;
}
}
@ -920,7 +927,7 @@ int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logf
phase_stats.phase_start_timer = GetTimeStamp();
run_cleanup(& phase_stats, current_index);
phase_stats.t = GetTimeStamp() - phase_stats.phase_start_timer;
end_phase("cleanup", & phase_stats);
end_phase("cleanup", & phase_stats, all_phases_stats);
if (o.rank == 0){
if (o.backend->rmdir(o.prefix, o.backend_options) != 0) {
@ -939,7 +946,6 @@ int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logf
oprintf("Total runtime: %.0fs time: ", t_all);
printTime();
}
//mem_free_preallocated(& limit_memory_P);
return 0;
return all_phases_stats;
}

View File

@ -40,7 +40,7 @@ typedef struct{ // NOTE: if this type is changed, adjust end_phase() !!!
op_stat_t obj_stat;
op_stat_t obj_delete;
// time measurements individual runs
// time measurements of individual runs, these are not returned for now by the API!
uint64_t repeats;
time_result_t * time_create;
time_result_t * time_read;
@ -58,6 +58,7 @@ typedef struct{ // NOTE: if this type is changed, adjust end_phase() !!!
int stonewall_iterations;
} phase_stat_t;
int md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logfile);
// @Return The first statistics returned are precreate, then iteration many benchmark runs, the last is cleanup
phase_stat_t* md_workbench_run(int argc, char ** argv, MPI_Comm world_com, FILE * out_logfile);
#endif