diff --git a/COPYRIGHT b/COPYRIGHT old mode 100644 new mode 100755 diff --git a/ChangeLog b/ChangeLog old mode 100644 new mode 100755 diff --git a/META b/META old mode 100644 new mode 100755 diff --git a/Makefile.am b/Makefile.am old mode 100644 new mode 100755 diff --git a/README b/README old mode 100644 new mode 100755 index 258beae..f51aef3 --- a/README +++ b/README @@ -1,3 +1,5 @@ +[See also NOTES.txt] + Building -------- diff --git a/README_S3 b/README_S3 new file mode 100644 index 0000000..fff7ec0 --- /dev/null +++ b/README_S3 @@ -0,0 +1,82 @@ +[This is some help for building with experimental S3 support.] + + +--- BUILDING + +module load openmpi-gnu + +./bootstrap + + +# configure must be able to find libaws4c, libxml2, libcurl, libcrypto, etc. +# On some machines, the libxml2 include files are in: +# +# /usr/include/libxml2 +# +# which is not a standard include location, so we must define CPPFLAGS +# to put that path in the include path. Do this: +# +# setenv CPPFLAGS -I/usr/include/libxml2 +# [or add "CPPFLAGS=-I/usr/include/libxml2" to the end of the configure command] +# +# Use the --with-aws4c=DIR to point to the path where your libaws4c library +# sits. +# +# Use the --prefix=iorInstallDir to point to the path where you want IOR +# to install all the files it builds during the "make" process. + +./configure --prefix=iorInstallDir --with-S3 [ --with-aws4c=DIR ] + + +# Here is an example configure-command invocation, for building with the +# S3-targeting extensions, which require access to additional libraries +# mentioned above. This will probably not work verbatim on your +# installation, because libraries will be installed in different locations, +# but it could help you to understand what is needed. 
+ +./configure --prefix=`pwd`/installed \ + --with-S3 \ + --with-aws4c=`pwd`/aws4c \ + LDFLAGS="-L/usr/lib64 -L`pwd`/libxml2-2.9.1/installed/lib" \ + CFLAGS="-I`pwd`/libxml2-2.9.1/installed/include/libxml2" + + +# 14-May-2015: +# To change the target of the experiment, there is an #if block from line +# 284-309. The "if" portion is activated by putting a 1 as the argument +# instead of a 0. In that case, the experiment will use the four ECS nodes +# directly, splitting the load up between all four. +# +# If the #if argument is 0, then the "#else" portion is executed. In this +# case you can use the load balancer, haproxy.ccstar.lanl.gov, by using +# the IP 10.143.0.1:80. If you want to use one of the ECS nodes directly +# use the IP 10.140.0.[15-17]:9020. +# +# To specify the bucket where the experiment file(s) will go, you need +# to set that with the "const char* bucket_name" declaration. There are +# a couple options at lines 207-208. + + +make +make install + + +-- RUNNING (various options ...) + +# llogin -np 4 +msub -IX -l nodes=4:ppn=4,walltime=11:00:00 + + + +# For debugging, run on 1 node, -vvvv turns on detailed curl debugging +mpirun -np 1 MY_INSTALL_DIR/bin/ior -a S3 -o test_`date +"%Y%m%d_%H%M%S"` -vvvv -t1k -b1k + +# this defaults the number of processors +mpirun MY_INSTALL_DIR/bin/ior -a S3 -C -o test_`date +"%Y%m%d_%H%M%S"` + +# this does one parallel run, putting a heavy load on the server [assumes bash] +mpirun -npernode 8 MY_INSTALL_DIR/bin/ior -a S3_EMC -C -o test_`date +"%Y%m%d_%H%M%S"` \ + -b $(( 128 * 1024 * 1024 )) \ + -t $(( 128 * 1024 * 1024 )) \ + -i 1 + diff --git a/TBD.txt b/TBD.txt deleted file mode 100644 index e32b1f9..0000000 --- a/TBD.txt +++ /dev/null @@ -1,90 +0,0 @@ --- fix indentation in all source files - - Do not use some fixed-width spacing. Use tabs. Someday, editors will - distinguish between "indentation" (which should be done with tabs), and - "alignment" which should be done with spaces. 
That way, you choose your - own indentation, and everything just works. - - --- change "HDFS" everywhere to "HDFs" (or something) - - for easier distinction from "HDF5" - - --- maybe parse_options.c should just parse options, not create tests. - - The latter brings an ugly mix of calls across files. Fixing involves - having parse routines just set flags in params, then, in ior.c, someone - checks flags and calls test-creation stuff. Should also move - test-creation to ior.c [Results creation, too.] - - --- collect various special-purpose parameter-fixing into one place. - - see e.g. CheckRunSettings(). Should each aiori have a "fix the - parameters" generic function, as well? This could be called at a known - time, allowing incompatibilities to be handled in a regular way. - - --- does MPIIO_GetFileSize() have to be in aiori.h? - - --- change integers to 0x'..', in aiori.h - - --- add hdfs-related command-line options - - (e.g. HDFS user-name, replica-count, etc.) - see ior.h - see init_IOR_Param_t(), in ior.c - - --- is the getenv() I added to ior.c a problem for Win support? - - --- see "TBD" notes in src/Makefile.am - - (1) for HDFS: - - # TBD: figure out how to find this from the corresponding bin/ dir in $PATH - # or pick an environment var to use (and set it in modulefiles) - # or provide a config-flag, to set a variable we use here - - - (2) for PLFS: - - # TBD: modulefiles don't add PLFS_VER to env, but you can find it embedded in - # $PLFS_HOME. configure.ac should extract this for us. Then we should - # add -D... to ior_CPPFLAGS, here. Then aiori-PLFS.c can conditionalize - # compilation of calls that vary according to PLFS version. - - - - - ---------------------------------------------------------------------------- -S3 ---------------------------------------------------------------------------- - - --- RemoveFile (ior.c) calls POSIX "access" to see whether file exists - - This is called between iterations, when iterations > 1. 
Should use - backend->GetFileSize(), or something, and catch errors. Or the aiori - struct could be extended to include a lower-level "stat" that could - return 0 for non-existent files. - - --- add 'init' and 'exit' functions to aiori struct - - This would allow HDFS/S3/etc to do their one-time initializations and - cleanups. Call from top-level in ior.c - - Something also needed in AioriBind(). - - --- ior should assert the IOR_TRUNC flag in params, when opening? - - The CREAT flag does not reset a file, if it already exists, when opening - for write. [But ... wait. If you're opening for WRITE, you should be - resetting it anyhow. D'oh.] diff --git a/aws4c-0.5.1.tgz b/aws4c-0.5.1.tgz deleted file mode 100644 index 75ddd99..0000000 Binary files a/aws4c-0.5.1.tgz and /dev/null differ diff --git a/config/ax_prog_cc_mpi.m4 b/config/ax_prog_cc_mpi.m4 old mode 100644 new mode 100755 diff --git a/config/x_ac_meta.m4 b/config/x_ac_meta.m4 old mode 100644 new mode 100755 diff --git a/configure.ac b/configure.ac old mode 100644 new mode 100755 index 323876e..ea90471 --- a/configure.ac +++ b/configure.ac @@ -112,17 +112,81 @@ AM_COND_IF([USE_POSIX_AIORI],[ AC_DEFINE([USE_POSIX_AIORI], [], [Build POSIX backend AIORI]) ]) -# Amazon S3 support + + + +# aws4c is needed for the S3 backend (see --with-S3, below). 
+# Version 0.5.2 of aws4c is available at https://github.com/jti-lanl/aws4c.git +# Install it something like this: +# +# cd $my_install_dir +# git clone https://github.com/jti-lanl/aws4c.git +# cd aws4c +# make +# +# Then: +# --with-S3 --with-aws4c=$my_install_dir/aws4c + +aws4c_dir= +AC_ARG_WITH([aws4c], + [AS_HELP_STRING([--with-aws4c=DIR], + [aws4c library is needed for Amazon S3 backend])], + [aws4c_dir="$withval"]) +AM_CONDITIONAL([AWS4C_DIR], [test x$aws4c_dir != x]) + +# AC_SUBST([AWS4C_DIR],[$aws4c_dir]) +AM_COND_IF([AWS4C_DIR],[ + AC_SUBST([AWS4C_CPPFLAGS],[-I$aws4c_dir]) + AC_SUBST([AWS4C_LDFLAGS], [-L$aws4c_dir]) +]) + + +# Amazon S3 support [see also: --with-aws4c] AC_ARG_WITH([S3], [AS_HELP_STRING([--with-S3], - [support IO with Amazon S3 backend @<:@default=yes@:>@])], + [support IO with Amazon S3 backend @<:@default=no@:>@])], [], - [with_S3=yes]) + [with_S3=no]) AM_CONDITIONAL([USE_S3_AIORI], [test x$with_S3 = xyes]) AM_COND_IF([USE_S3_AIORI],[ AC_DEFINE([USE_S3_AIORI], [], [Build Amazon-S3 backend AIORI]) ]) +err=0 +AS_IF([test "x$with_S3" != xno], [ + AC_MSG_NOTICE([beginning of S3-related checks]) + + # save user's values, while we use AC_CHECK_HEADERS with $AWS4C_DIR + ORIG_CPPFLAGS=$CPPFLAGS + ORIG_LDFLAGS=$LDFLAGS + + CPPFLAGS="$CPPFLAGS $AWS4C_CPPFLAGS" + LDFLAGS=" $LDFLAGS $AWS4C_LDFLAGS" + + AC_CHECK_HEADERS([aws4c.h], [], [err=1]) + AC_CHECK_HEADERS([libxml/parser.h], [], [err=1]) + + # Autotools thinks searching for a library means I want it added to LIBS + ORIG_LIBS=$LIBS + AC_CHECK_LIB([curl], [curl_easy_init], [], [err=1]) + AC_CHECK_LIB([xml2], [xmlDocGetRootElement], [], [err=1]) + AC_CHECK_LIB([aws4c], [s3_get], [], [err=1], [-lcurl -lxml2 -lcrypto]) + LIBS=$ORIG_LIBS + + AC_MSG_NOTICE([end of S3-related checks]) + if test "$err" == 1; then + AC_MSG_FAILURE([S3 support is missing. dnl +Make sure you have access to libaws4c, libcurl, libxml2, and libcrypto. 
dnl +Consider --with-aws4c=, CPPFLAGS, LDFLAGS, etc]) + fi + + # restore user's values + CPPFLAGS=$ORIG_CPPFLAGS + LDFLAGS=$ORIG_LDFLAGS +]) + + + diff --git a/contrib/Makefile.am b/contrib/Makefile.am old mode 100644 new mode 100755 diff --git a/contrib/cbif.c b/contrib/cbif.c old mode 100644 new mode 100755 diff --git a/doc/Makefile.am b/doc/Makefile.am old mode 100644 new mode 100755 diff --git a/doc/USER_GUIDE b/doc/USER_GUIDE old mode 100644 new mode 100755 diff --git a/scripts/exampleScript b/scripts/exampleScript old mode 100644 new mode 100755 diff --git a/scripts/run_script.cnl b/scripts/run_script.cnl old mode 100644 new mode 100755 diff --git a/scripts/run_script.linux b/scripts/run_script.linux old mode 100644 new mode 100755 diff --git a/src/Makefile.am b/src/Makefile.am old mode 100644 new mode 100755 index c4ed72c..6a77dab --- a/src/Makefile.am +++ b/src/Makefile.am @@ -52,12 +52,11 @@ endif if USE_S3_AIORI -# TBD: Find the aws4c and libxml installations programmatically -AWS4C = /users/jti/projects/ecs_hobo/tools/aws4c-0.5 ior_SOURCES += aiori-S3.c -ior_CPPFLAGS += -I/usr/include/libxml2 -ior_CPPFLAGS += -I$(AWS4C) -ior_LDFLAGS += -L$(AWS4C) +if AWS4C_DIR + ior_CPPFLAGS += $(AWS4C_CPPFLAGS) + ior_LDFLAGS += $(AWS4C_LDFLAGS) +endif ior_LDADD += -lcurl ior_LDADD += -lxml2 ior_LDADD += -laws4c diff --git a/src/aiori-HDF5.c b/src/aiori-HDF5.c old mode 100644 new mode 100755 diff --git a/src/aiori-HDFS.c b/src/aiori-HDFS.c old mode 100644 new mode 100755 diff --git a/src/aiori-MPIIO.c b/src/aiori-MPIIO.c old mode 100644 new mode 100755 diff --git a/src/aiori-NCMPI.c b/src/aiori-NCMPI.c old mode 100644 new mode 100755 diff --git a/src/aiori-PLFS.c b/src/aiori-PLFS.c old mode 100644 new mode 100755 diff --git a/src/aiori-POSIX.c b/src/aiori-POSIX.c old mode 100644 new mode 100755 diff --git a/src/aiori-S3.c b/src/aiori-S3.c old mode 100644 new mode 100755 index 1784e62..ed646c0 --- a/src/aiori-S3.c +++ b/src/aiori-S3.c @@ -205,6 +205,7 @@ CURLcode rc; /* 
Any objects we create or delete will be under this bucket */ const char* bucket_name = "ior"; +//const char* bucket_name = "brettk"; /***************************** F U N C T I O N S ******************************/ @@ -259,9 +260,9 @@ s3_connect( IOR_param_t* param ) { // NOTE: These inits could be done in init_IORParam_t(), in ior.c, but // would require conditional compilation, there. + aws_set_debug(param->verbose >= 4); aws_read_config(getenv("USER")); // requires ~/.awsAuth aws_reuse_connections(1); - aws_set_debug(param->verbose >= 4); // initalize IOBufs. These are basically dynamically-extensible // linked-lists. "growth size" controls the increment of new memory @@ -272,24 +273,59 @@ s3_connect( IOR_param_t* param ) { param->etags = aws_iobuf_new(); aws_iobuf_growth_size(param->etags, 1024*1024*8); + // WARNING: if you have http_proxy set in your environment, you may need + // to override it here. TBD: add a command-line variable to + // allow you to define a proxy. + // // our hosts are currently 10.140.0.15 - 10.140 0.18 // TBD: Try DNS-round-robin server at vi-lb.ccstar.lanl.gov - snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4)); - s3_set_host(buff); + // TBD: try HAProxy round-robin at 10.143.0.1 + +#if 1 + // snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4)); + // s3_set_proxy(buff); + // + // snprintf(buff, BUFF_SIZE, "10.140.0.%d", 15 + (rank % 4)); + // s3_set_host(buff); + + snprintf(buff, BUFF_SIZE, "10.140.0.%d:9020", 15 + (rank % 4)); + s3_set_host(buff); + +#else +/* + * If you just want to go to one of the ECS nodes, put that IP + * address in here directly with port 9020. + * + */ +// s3_set_host("10.140.0.15:9020"); + +/* + * If you want to go to haproxy.ccstar.lanl.gov, this is its IP + * address. 
+ * + */ +// s3_set_proxy("10.143.0.1:80"); +// s3_set_host( "10.143.0.1:80"); +#endif // make sure test-bucket exists s3_set_bucket((char*)bucket_name); - AWS4C_CHECK( s3_head(param->io_buf, "") ); - if ( param->io_buf->code == 404 ) { // "404 Not Found" - printf(" bucket '%s' doesn't exist\n", bucket_name); - AWS4C_CHECK( s3_put(param->io_buf, "") ); /* creates URL as bucket + obj */ - AWS4C_CHECK_OK( param->io_buf ); // assure "200 OK" - printf("created bucket '%s'\n", bucket_name); - } - else { // assure "200 OK" - AWS4C_CHECK_OK( param->io_buf ); - } + if (rank == 0) { + AWS4C_CHECK( s3_head(param->io_buf, "") ); + if ( param->io_buf->code == 404 ) { // "404 Not Found" + printf(" bucket '%s' doesn't exist\n", bucket_name); + + AWS4C_CHECK( s3_put(param->io_buf, "") ); /* creates URL as bucket + obj */ + AWS4C_CHECK_OK( param->io_buf ); // assure "200 OK" + printf("created bucket '%s'\n", bucket_name); + } + else { // assure "200 OK" + AWS4C_CHECK_OK( param->io_buf ); + } + } + MPI_CHECK(MPI_Barrier(param->testComm), "barrier error"); + // Maybe allow EMC extensions to S3 s3_enable_EMC_extensions(param->curl_flags & IOR_CURL_S3_EMC_EXT); @@ -821,7 +857,6 @@ S3_Xfer_internal(int access, // we're "extending" rather than "appending". That means the // buffer represents empty storage, which will be filled by the // libcurl writefunction, invoked via aws4c. 
- aws_iobuf_reset(param->io_buf); aws_iobuf_extend_static(param->io_buf, data_ptr, remaining); AWS4C_CHECK( s3_get(param->io_buf, file) ); diff --git a/src/aiori.h b/src/aiori.h old mode 100644 new mode 100755 diff --git a/src/ior.c b/src/ior.c old mode 100644 new mode 100755 index 3775586..c150ac2 --- a/src/ior.c +++ b/src/ior.c @@ -92,7 +92,7 @@ static void ShowSetup(IOR_param_t *params); static void ShowTest(IOR_param_t *); static void PrintLongSummaryAllTests(IOR_test_t *tests_head); static void TestIoSys(IOR_test_t *); -static void ValidTests(IOR_param_t *); +static void ValidateTests(IOR_param_t *); static IOR_offset_t WriteOrRead(IOR_param_t *, void *, int); static void WriteTimes(IOR_param_t *, double **, int, int); @@ -140,12 +140,12 @@ int main(int argc, char **argv) "Run 'configure --with-', and recompile."); } - /* setup tests before verifying test validity */ + /* setup tests, and validate parameters */ tests_head = SetupTests(argc, argv); verbose = tests_head->params.verbose; tests_head->params.testComm = MPI_COMM_WORLD; - /* check for commandline usage */ + /* check for commandline 'help' request */ if (rank == 0 && tests_head->params.showHelp == TRUE) { DisplayUsage(argv); } @@ -860,6 +860,17 @@ FillBuffer(void *buffer, unsigned long long hi, lo; unsigned long long *buf = (unsigned long long *)buffer; +/* + * Consider adding a parameter to use incompressible data or what is here now. + * The way to get incompressible data would be to use some random transfer + * buffer content. In Linux we can read from /dev/urandom. In C we can use + * the rand() function in stdlib.h. 
+ * + * # include + * + * hi = (( unsigned long long )rand() ) << 32; + * lo = (( unsigned long long )rand() ); + */ hi = ((unsigned long long)fillrank) << 32; lo = (unsigned long long)test->timeStampSignatureValue; for (i = 0; i < test->transferSize / sizeof(unsigned long long); i++) { @@ -1373,6 +1384,7 @@ static double TimeDeviation(void) /* * Setup tests by parsing commandline and creating test script. + * Perform a sanity-check on the configured parameters. */ static IOR_test_t *SetupTests(int argc, char **argv) { @@ -1391,7 +1403,7 @@ static IOR_test_t *SetupTests(int argc, char **argv) /* check validity of tests and create test queue */ while (tests != NULL) { - ValidTests(&tests->params); + ValidateTests(&tests->params); tests = tests->next; } @@ -2323,11 +2335,11 @@ static void TestIoSys(IOR_test_t *test) /* * Determine if valid tests from parameters. */ -static void ValidTests(IOR_param_t * test) +static void ValidateTests(IOR_param_t * test) { IOR_param_t defaults; - init_IOR_Param_t(&defaults); + /* get the version of the tests */ AioriBind(test->api, test); backend->set_version(test); @@ -2366,6 +2378,8 @@ static void ValidTests(IOR_param_t * test) } if (test->blockSize < test->transferSize) ERR("block size must not be smaller than transfer size"); + + /* specific APIs */ if ((strcmp(test->api, "MPIIO") == 0) && (test->blockSize < sizeof(IOR_size_t) || test->transferSize < sizeof(IOR_size_t))) @@ -2418,6 +2432,8 @@ static void ValidTests(IOR_param_t * test) if ((strcmp(test->api, "POSIX") == 0) && test->collective) WARN_RESET("collective not available in POSIX", test, &defaults, collective); + + /* parameter consistency */ if (test->reorderTasks == TRUE && test->reorderTasksRandom == TRUE) ERR("Both Constant and Random task re-ordering specified. 
Choose one and resubmit"); if (test->randomOffset && test->reorderTasksRandom @@ -2430,6 +2446,8 @@ static void ValidTests(IOR_param_t * test) ERR("random offset not available with read check option (use write check)"); if (test->randomOffset && test->storeFileOffset) ERR("random offset not available with store file offset option)"); + + if ((strcmp(test->api, "MPIIO") == 0) && test->randomOffset && test->collective) ERR("random offset not available with collective MPIIO"); @@ -2471,6 +2489,17 @@ static void ValidTests(IOR_param_t * test) } if (test->useExistingTestFile && test->lustre_set_striping) ERR("Lustre stripe options are incompatible with useExistingTestFile"); + + /* N:1 and N:N */ + IOR_offset_t NtoN = test->filePerProc; + IOR_offset_t Nto1 = ! NtoN; + IOR_offset_t s = test->segmentCount; + IOR_offset_t t = test->transferSize; + IOR_offset_t b = test->blockSize; + + if (Nto1 && (s != 1) && (b != t)) { + ERR("N:1 (strided) requires xfer-size == block-size"); + } } static IOR_offset_t *GetOffsetArraySequential(IOR_param_t * test, diff --git a/src/ior.h b/src/ior.h old mode 100644 new mode 100755 diff --git a/src/iordef.h b/src/iordef.h old mode 100644 new mode 100755 diff --git a/src/parse_options.c b/src/parse_options.c old mode 100644 new mode 100755 index 6c93bae..fd69372 --- a/src/parse_options.c +++ b/src/parse_options.c @@ -114,6 +114,7 @@ static void CheckRunSettings(IOR_test_t *tests) /* If only read or write is requested, then fix the default * openFlags to not be open RDWR. It matters in the case * of HDFS, which doesn't support opening RDWR. + * (We assume int-valued params are exclusively 0 or 1.) 
*/ if ((params->openFlags & IOR_RDWR) && ((params->readFile | params->checkRead) @@ -441,12 +442,12 @@ IOR_test_t *ParseCommandLine(int argc, char **argv) while ((c = getopt(argc, argv, opts)) != -1) { switch (c) { - case 'A': - initialTestParams.referenceNumber = atoi(optarg); - break; case 'a': strcpy(initialTestParams.api, optarg); break; + case 'A': + initialTestParams.referenceNumber = atoi(optarg); + break; case 'b': initialTestParams.blockSize = StringToBytes(optarg); RecalculateExpectedFileSize(&initialTestParams); @@ -460,15 +461,6 @@ IOR_test_t *ParseCommandLine(int argc, char **argv) case 'C': initialTestParams.reorderTasks = TRUE; break; - case 'Q': - initialTestParams.taskPerNodeOffset = atoi(optarg); - break; - case 'Z': - initialTestParams.reorderTasksRandom = TRUE; - break; - case 'X': - initialTestParams.reorderTasksRandomSeed = atoi(optarg); - break; case 'd': initialTestParams.interTestDelay = atoi(optarg); break; @@ -476,9 +468,6 @@ IOR_test_t *ParseCommandLine(int argc, char **argv) initialTestParams.deadlineForStonewalling = atoi(optarg); break; - case 'Y': - initialTestParams.fsyncPerWrite = TRUE; - break; case 'e': initialTestParams.fsync = TRUE; break; @@ -524,13 +513,13 @@ IOR_test_t *ParseCommandLine(int argc, char **argv) case 'l': initialTestParams.storeFileOffset = TRUE; break; + case 'm': + initialTestParams.multiFile = TRUE; + break; case 'M': initialTestParams.memoryPerNode = NodeMemoryStringToBytes(optarg); break; - case 'm': - initialTestParams.multiFile = TRUE; - break; case 'n': initialTestParams.noFill = TRUE; break; @@ -553,6 +542,9 @@ IOR_test_t *ParseCommandLine(int argc, char **argv) case 'q': initialTestParams.quitOnError = TRUE; break; + case 'Q': + initialTestParams.taskPerNodeOffset = atoi(optarg); + break; case 'r': initialTestParams.readFile = TRUE; break; @@ -593,9 +585,18 @@ IOR_test_t *ParseCommandLine(int argc, char **argv) case 'x': initialTestParams.singleXferAttempt = TRUE; break; + case 'X': + 
initialTestParams.reorderTasksRandomSeed = atoi(optarg); + break; + case 'Y': + initialTestParams.fsyncPerWrite = TRUE; + break; case 'z': initialTestParams.randomOffset = TRUE; break; + case 'Z': + initialTestParams.reorderTasksRandom = TRUE; + break; default: fprintf(stdout, "ParseCommandLine: unknown option `-%c'.\n", diff --git a/src/parse_options.h b/src/parse_options.h old mode 100644 new mode 100755 diff --git a/src/utilities.c b/src/utilities.c old mode 100644 new mode 100755 diff --git a/src/utilities.h b/src/utilities.h old mode 100644 new mode 100755 diff --git a/src/win/getopt.c b/src/win/getopt.c old mode 100644 new mode 100755 diff --git a/src/win/getopt.h b/src/win/getopt.h old mode 100644 new mode 100755 diff --git a/testing/IOR-tester.README b/testing/IOR-tester.README old mode 100644 new mode 100755 diff --git a/testing/hintsFile b/testing/hintsFile old mode 100644 new mode 100755