# Supported and current values of the ini file:
[global]
# The directory where the IO500 runs
datadir = /work/projects/hpcbenchs/IO500/datafiles
# The data directory is suffixed by a timestamp. Useful for running several IO500 tests concurrently.
timestamp-datadir = TRUE
# The result directory.
resultdir = /mnt/irisgpfs/users/svarrette/git/gitlab.uni.lu/svarrette/devel/benchmarks/IO500/runs/IO500-isc21_2021-09-28/N64-128clients.openmpi
# The result directory is suffixed by a timestamp. Useful for running several IO500 tests concurrently.
timestamp-resultdir = TRUE
# The general API for the tests (to create/delete the datadir; extra options will be passed to IOR/mdtest)
api = POSIX
# Purge the caches; this is useful for testing and needed for single-node runs
drop-caches = FALSE
# Cache purging command, invoked before each I/O phase
drop-caches-cmd = sudo -n bash -c "echo 3 > /proc/sys/vm/drop_caches"
# Allocate the I/O buffers on the GPU
io-buffers-on-gpu = FALSE
# The verbosity level between 1 and 10
verbosity = 1
# Use the rules for the Student Cluster Competition
scc = FALSE

[debug]
# For a valid result, the stonewall timer must be set to the value required by the rules; it can be smaller for testing
stonewall-time = 300

[ior-easy]
# The API to be used
API =
# Transfer size
transferSize = 16m
# Block size; must be a multiple of transferSize
blockSize = 48g
# Create one file per process
filePerProc = TRUE
# Use a unique directory per file per process
uniqueDir = FALSE
# Run this phase
run = TRUE
# The verbosity level
verbosity =

[ior-easy-write]
# The API to be used
API =

[ior-rnd]
# The API to be used
API =
# Size of a random block; change only if explicitly allowed
blockSize = 1073741824
# Run this phase
run = FALSE
# The verbosity level
verbosity =
# Prefill the file with this blocksize in bytes, e.g., 2097152
randomPrefill = 0

[ior-rnd-write]
# The API to be used
API =

[mdtest-easy]
# The API to be used
API =
# Files per proc
n = 240000
# Run this phase
run = TRUE

[mdtest-easy-write]
# The API to be used
API =
# Run this phase
run = TRUE

[mdworkbench]
# The API to be used
API =
# Waiting time of an I/O operation relative to runtime (1.0 is 100%)
waitingTime = 0.0
# Files to precreate per set (always 10 sets); this is normally determined dynamically
precreatePerSet =
# Files to run per iteration and set (always 10 sets); this is normally determined dynamically
filesPerProc =
# Run this phase
run = FALSE
# The verbosity level
verbosity =

[mdworkbench-create]
# Run this phase
run = FALSE

[timestamp]

[find-easy]
# Set to an external script to perform the find phase
external-script =
# Startup arguments for external scripts; some MPIs may not support this!
external-mpi-args =
# Extra arguments for the external scripts
external-extra-args =
# Set the number of processes for pfind/the external script
nproc =
# Run this phase
run = FALSE
# Pfind queue length
pfind-queue-length = 10000
# Pfind: steal work from the next process
pfind-steal-next = FALSE
# Parallelize the readdir by using hashing. Your system must support this!
pfind-parallelize-single-dir-access-using-hashing = FALSE

[ior-hard]
# The API to be used
API =
# Number of segments
segmentCount = 7500
# Collective operation (for supported backends)
collective =
# Run this phase
run = TRUE
# The verbosity level
verbosity =

[ior-hard-write]
# The API to be used
API =
# Collective operation (for supported backends)
collective =

[mdtest-hard]
# The API to be used
API =
# Files per proc
n = 48000
# File limit per directory (mdtest -I flag), to overcome file system limitations
files-per-dir =
# Run this phase
run = TRUE

[mdtest-hard-write]
# The API to be used
API =
# Run this phase
run = TRUE

[find]
# Set to an external script to perform the find phase
external-script =
# Startup arguments for external scripts; some MPIs may not support this!
external-mpi-args =
# Extra arguments for the external scripts
external-extra-args =
# Set the number of processes for pfind/the external script
nproc =
# Run this phase
run = TRUE
# Pfind queue length
pfind-queue-length = 10000
# Pfind: steal work from the next process
pfind-steal-next = FALSE
# Parallelize the readdir by using hashing. Your system must support this!
pfind-parallelize-single-dir-access-using-hashing = FALSE

[ior-rnd-read]
# The API to be used
API =

[find-hard]
# Set to an external script to perform the find phase
external-script =
# Startup arguments for external scripts; some MPIs may not support this!
external-mpi-args =
# Extra arguments for the external scripts
external-extra-args =
# Set the number of processes for pfind/the external script
nproc =
# Run this phase
run = FALSE
# Pfind queue length
pfind-queue-length = 10000
# Pfind: steal work from the next process
pfind-steal-next = FALSE
# Parallelize the readdir by using hashing. Your system must support this!
pfind-parallelize-single-dir-access-using-hashing = FALSE

[mdworkbench-bench]
# Run this phase
run = FALSE

[ior-easy-read]
# The API to be used
API =

[mdtest-easy-stat]
# The API to be used
API =
# Run this phase
run = TRUE

[ior-hard-read]
# The API to be used
API =
# Collective operation (for supported backends)
collective =

[mdtest-hard-stat]
# The API to be used
API =
# Run this phase
run = TRUE

[mdworkbench-delete]
# Run this phase
run = FALSE

[mdtest-easy-delete]
# The API to be used
API =
# Run this phase
run = TRUE

[mdtest-hard-read]
# The API to be used
API =
# Run this phase
run = TRUE

[mdtest-hard-delete]
# The API to be used
API =
# Run this phase
run = TRUE
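
#
# Usage sketch (not part of the listing above; paths, filenames, and process
# counts are placeholders): an ini file containing these sections is typically
# passed to the io500 executable under MPI, e.g.
#
#   mpiexec -np 128 ./io500 config.ini
#
# Adapt the process count, the path to the io500 binary, and the ini filename
# to your own site and job script.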