Shaheen II

Institution KAUST
Client Procs Per Node 8
Client Operating System SUSE
Client Operating System Version 12.3
Client Kernel Version

DATA SERVER

Storage Type SSD
Volatile Memory
Storage Interface PCIe 3.0 x8
Network Cray Aries
Software Version
OS Version CLE 6.05

INFORMATION

Client Nodes 10
Client Total Procs 80

METADATA

Easy Write 16.21 kIOP/s
Easy Stat 19.95 kIOP/s
Easy Delete 23.30 kIOP/s
Hard Write 3.54 kIOP/s
Hard Read 21.49 kIOP/s
Hard Stat 16.61 kIOP/s
Hard Delete 16.68 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This file started from the simplified single-node example and has been edited to run across multiple nodes with srun.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set here are needed by io500_fixed.sh, which is sourced at the end of this script.
# Please also edit the 'extra_description' function.

set -euo pipefail  # better error handling

# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the creat phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.

# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # set the directories where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, then this is a good place to do it.
  timestamp=`date +%Y.%m.%d-%H.%M.%S`           # create a uniquifier
  io500_workdir=$PWD/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=$PWD/results/$timestamp      # the directory where the output results will be kept
  mkdir -p $io500_workdir $io500_result_dir
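  # Nothing extra was needed here for this run: the DataWarp allocation is already
  # striped at request time (note the "_striped_scratch" mount path in the logs below).
  # Purely as an illustrative sketch, on a Lustre target one might instead widen the
  # striping of the work directory at this point, e.g.:
  #   lfs setstripe -c -1 $io500_workdir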
}

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, then the binaries are in ./bin/.
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  io500_mpirun="srun -m block --hint=nomultithread"
#  io500_mpirun="srun --export=MPICH_MPIIO_HINTS --hint=nomultithread"
  io500_mpiargs="-n 80 --ntasks-per-node=8"
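  # The commented srun line above would forward MPICH_MPIIO_HINTS to the ranks.
  # As an illustrative sketch only (not used in this run; the hint name below is an
  # example, so check the Cray MPI documentation for what your system supports),
  # one could export an MPI-IO hint string in the batch script first, e.g.:
  #   export MPICH_MPIIO_HINTS="*:romio_cb_write=enable"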
}

function setup_ior_easy {
  io500_ior_easy_params="-t 2m -b 192616m -F" # 2 MiB transfers, 192616 MiB (~188 GiB) per proc, file per proc
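  # sanity check on sizing: 80 procs x 192616 MiB each = 15,409,280 MiB, which is the
  # 15048.12 GiB aggregate file size reported in the ior_easy output below.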
}

function setup_mdt_easy {
  io500_mdtest_easy_params="-v -u -L" # unique dir per thread, files only at leaves
  io500_mdtest_easy_files_per_proc=67800
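  # 80 ranks x 67800 files each = 5,424,000 files, matching the "80 tasks, 5424000 files"
  # line in the mdtest_easy output below.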
}

function setup_ior_hard {
  io500_ior_hard_writes_per_proc=344787 #2
  io500_ior_hard_other_options="" #e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
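  # sanity check on sizing, using the fixed 47008-byte records visible in the ior_hard
  # command lines below: 344787 writes/proc x 47008 bytes x 80 procs
  # = 1,296,619,783,680 bytes, i.e. the 1207.57 GiB aggregate reported by ior.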
}

function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=16630 #0
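  # 80 ranks x 16630 files each = 1,330,400 files, matching the "80 tasks, 1330400 files"
  # line in the mdtest_hard output below.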
}

function setup_find {
  #
  # setup the find command. This is an area where innovation is allowed.
  #    Three default options are provided: a serial find, a parallel Python
  #    version, and a parallel C version. The current default is the serial
  #    find, but it is very slow; we recommend either customizing it or using
  #    the parallel C version. For GPFS, we recommend the provided mmfind
  #    wrapper described below. Instructions are below.
  #    If a custom approach is used, please provide enough info so others can reproduce.

  # the serial version that should run (SLOWLY) without modification
  io500_find_mpi="False"
  io500_find_cmd=$PWD/bin/sfind.sh
  io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it at 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/
  #   If you use io500_find_mpi="True", then this will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer another number (fewer might be better here), then you
  #   can set io500_find_mpi to "False" and write a wrapper script that
  #   sets up MPI as you would like; then change io500_find_cmd to point
  #   to your wrapper script.
  io500_find_mpi="False"
  io500_find_cmd="srun --hint=nomultithread -n 10 --ntasks-per-node=1 $PWD/bin/pfind"
  
#  io500_find_cmd_args="$io500_workdir -s 60 -name \*01*\ -D rates -C -r /project/k01/markomg/datawarp_regression/io500/io-500-dev/res/ -size 3000c"

  # for GPFS systems, you should probably use the provided mmfind wrapper
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss
  source ./utilities/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='ShaheenII'      # e.g. Oakforest-PACS
  io500_info_institute_name='KAUST'   # e.g. JCAHPC
  io500_info_storage_age_in_months='03/18' # not install date but age since last refresh
  io500_info_storage_install_date='09/15'  # MM/YY
  io500_info_filesystem='DataWarp'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='xxx'
  io500_info_filesystem_vendor='Intel'
  # client side info
  io500_info_num_client_nodes='4096'
  io500_info_procs_per_node='8'
  # server side info
  io500_info_num_metadata_server_nodes='xxx'
  io500_info_num_data_server_nodes='268'
  io500_info_num_data_storage_devices='536'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='xxx'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='Intel SSD P3608' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='xxx' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='Cray Aries' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='PCIe 3.0 x8' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='CLE 6.05, MPI 7.7.0, GNU 7.2.0'
}

main
ior_easy_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Mon Aug 20 19:00:41 2018
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 2m -b 192616m -F -o /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_easy/ior_file_easy
Machine: Linux nid00143

Test 0 started: Mon Aug 20 19:00:41 2018
Summary:
	api                = MPIIO (version=3, subversion=1)
	test filename      = /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 80 (8 per node)
	repetitions        = 1
	xfersize           = 2 MiB
	blocksize          = 188.10 GiB
	aggregate filesize = 15048.12 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      41491      197238784  2048.00    0.049488   371.32     0.012948   371.38     0   

Max Read:  41491.46 MiB/sec (43506.95 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read        41491.46   41491.46   41491.46       0.00   20745.73   20745.73   20745.73       0.00  371.38441 0 80 8 1 1 1 1 0 0 1 201972514816 2097152 16157801185280 MPIIO 0

Finished: Mon Aug 20 19:06:53 2018
ior_easy_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Mon Aug 20 18:34:02 2018
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 2m -b 192616m -F -o /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_easy/ior_file_easy
Machine: Linux nid00143

Test 0 started: Mon Aug 20 18:34:02 2018
Summary:
	api                = MPIIO (version=3, subversion=1)
	test filename      = /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 80 (8 per node)
	repetitions        = 1
	xfersize           = 2 MiB
	blocksize          = 188.10 GiB
	aggregate filesize = 15048.12 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     42431      197238784  2048.00    0.050585   363.09     0.023651   363.16     0   

Max Write: 42430.99 MiB/sec (44492.11 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write       42430.99   42430.99   42430.99       0.00   21215.49   21215.49   21215.49       0.00  363.16102 0 80 8 1 1 1 1 0 0 1 201972514816 2097152 16157801185280 MPIIO 0

Finished: Mon Aug 20 18:40:05 2018
ior_hard_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Mon Aug 20 19:11:31 2018
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 344787 -o /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_hard/IOR_file
Machine: Linux nid00143

Test 0 started: Mon Aug 20 19:11:31 2018
Summary:
	api                = MPIIO (version=3, subversion=1)
	test filename      = /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 80 (8 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 1207.57 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      7163       45.91      45.91      0.006607   172.62     0.011041   172.64     0   

Max Read:  7162.59 MiB/sec (7510.52 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read         7162.59    7162.59    7162.59       0.00  159771.12  159771.12  159771.12       0.00  172.64046 0 80 8 1 0 1 1 0 0 344787 47008 47008 1296619783680 MPIIO 0

Finished: Mon Aug 20 19:14:24 2018
ior_hard_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Mon Aug 20 18:45:46 2018
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 344787 -o /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_hard/IOR_file
Machine: Linux nid00143

Test 0 started: Mon Aug 20 18:45:46 2018
Summary:
	api                = MPIIO (version=3, subversion=1)
	test filename      = /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 80 (8 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 1207.57 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     3803       45.91      45.91      0.006104   325.10     0.024568   325.13     0   

Max Write: 3803.26 MiB/sec (3988.01 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write        3803.26    3803.26    3803.26       0.00   84836.80   84836.80   84836.80       0.00  325.12965 0 80 8 1 0 1 1 0 0 344787 47008 47008 1296619783680 MPIIO 0

Finished: Mon Aug 20 18:51:12 2018
mdtest_easy_delete
-- started at 08/20/2018 19:15:52 --

mdtest-1.9.4-rc1 was launched with 80 total task(s) on 10 node(s)
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/mdtest -r -F -d -u /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/mdt_easy -n 67800 -v -u -L
V-1: Entering valid_tests...
barriers                : True
collective_creates      : False
create_only             : False
dirpath(s):
	-u
dirs_only               : False
read_bytes              : 0
read_only               : False
first                   : 1
files_only              : True
iterations              : 1
items_per_dir           : 0
last                    : 0
leaf_only               : True
items                   : 67800
nstride                 : 0
pre_delay               : 0
remove_only             : True
random_seed             : 0
stride                  : 1
shared_file             : False
time_unique_dir_overhead: False
stat_only               : False
unique_dir_per_task     : True
write_bytes             : 0
sync_file               : False
depth                   : 0
V-1: Entering display_freespace...
V-1: Entering show_file_system_size...
Path: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch
FS: 23.0 TiB   Used FS: 69.6%   Inodes: 23551.9 Mi   Used Inodes: 0.1%

80 tasks, 5424000 files

   Operation               Duration              Rate
   ---------               --------              ----
V-1: Entering timestamp...
V-1: main: * iteration 1 08/20/2018 19:15:52 *
V-1: Entering file_test...
V-1: Entering unique_dir_access...
V-1: Entering offset_timers...
V-1: Entering create_remove_items, currDepth = 0...
V-1: Entering create_remove_items_helper...
V-1: Entering unique_dir_access...
V-1: Entering offset_timers...
V-1:   File creation     :          0.000 sec,          0.000 ops/sec
V-1:   File stat         :          0.000 sec,          0.000 ops/sec
V-1:   File read         :          0.000 sec,          0.000 ops/sec
V-1:   File removal      :        232.801 sec,      23298.911 ops/sec
V-1: Entering create_remove_directory_tree, currDepth = 0...
V-1: Entering create_remove_directory_tree, currDepth = 1...
V-1: main   Tree removal      :          0.208 sec,          4.809 ops/sec
V-1: Entering summarize_results...

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      23298.911      23298.911      23298.911          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          4.809          4.809          4.809          0.000
V-1: Entering timestamp...

-- finished at 08/20/2018 19:19:45 --
mdtest_easy_stat
-- started at 08/20/2018 19:06:55 --

mdtest-1.9.4-rc1 was launched with 80 total task(s) on 10 node(s)
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/mdtest -T -F -d -u /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/mdt_easy -n 67800 -v -u -L
V-1: Entering valid_tests...
barriers                : True
collective_creates      : False
create_only             : False
dirpath(s):
	-u
dirs_only               : False
read_bytes              : 0
read_only               : False
first                   : 1
files_only              : True
iterations              : 1
items_per_dir           : 0
last                    : 0
leaf_only               : True
items                   : 67800
nstride                 : 0
pre_delay               : 0
remove_only             : True
random_seed             : 0
stride                  : 1
shared_file             : False
time_unique_dir_overhead: False
stat_only               : True
unique_dir_per_task     : True
write_bytes             : 0
sync_file               : False
depth                   : 0
V-1: Entering display_freespace...
V-1: Entering show_file_system_size...
Path: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch
FS: 23.0 TiB   Used FS: 69.6%   Inodes: 23551.9 Mi   Used Inodes: 0.1%

80 tasks, 5424000 files

   Operation               Duration              Rate
   ---------               --------              ----
V-1: Entering timestamp...
V-1: main: * iteration 1 08/20/2018 19:06:55 *
V-1: Entering file_test...
V-1: Entering unique_dir_access...
V-1: Entering offset_timers...
V-1: Entering mdtest_stat...
V-1: Entering offset_timers...
V-1:   File creation     :          0.000 sec,          0.000 ops/sec
V-1:   File stat         :        271.844 sec,      19952.618 ops/sec
V-1:   File read         :          0.000 sec,          0.000 ops/sec
V-1:   File removal      :          0.000 sec,          0.000 ops/sec
V-1: Entering summarize_results...

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      19952.618      19952.618      19952.618          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000
V-1: Entering timestamp...

-- finished at 08/20/2018 19:11:27 --
mdtest_easy_write
-- started at 08/20/2018 18:40:08 --

mdtest-1.9.4-rc1 was launched with 80 total task(s) on 10 node(s)
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/mdtest -C -F -d -u /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/mdt_easy -n 67800 -v -u -L
V-1: Entering valid_tests...
barriers                : True
collective_creates      : False
create_only             : True
dirpath(s):
	-u
dirs_only               : False
read_bytes              : 0
read_only               : False
first                   : 1
files_only              : True
iterations              : 1
items_per_dir           : 0
last                    : 0
leaf_only               : True
items                   : 67800
nstride                 : 0
pre_delay               : 0
remove_only             : True
random_seed             : 0
stride                  : 1
shared_file             : False
time_unique_dir_overhead: False
stat_only               : False
unique_dir_per_task     : True
write_bytes             : 0
sync_file               : False
depth                   : 0
V-1: Entering display_freespace...
V-1: Entering show_file_system_size...
Path: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch
FS: 23.0 TiB   Used FS: 63.9%   Inodes: 23551.9 Mi   Used Inodes: 0.0%

80 tasks, 5424000 files

   Operation               Duration              Rate
   ---------               --------              ----
V-1: Entering timestamp...
V-1: main: * iteration 1 08/20/2018 18:40:08 *
V-1: Entering create_remove_directory_tree, currDepth = 0...
V-1: Entering create_remove_directory_tree, currDepth = 1...
V-1: main:   Tree creation     :          0.004 sec,        224.162 ops/sec
V-1: Entering file_test...
V-1: Entering unique_dir_access...
V-1: Entering offset_timers...
V-1: Entering create_remove_items, currDepth = 0...
V-1: Entering create_remove_items_helper...
V-1: Entering offset_timers...
V-1:   File creation     :        334.598 sec,      16210.486 ops/sec
V-1:   File stat         :          0.000 sec,          0.000 ops/sec
V-1:   File read         :          0.000 sec,          0.000 ops/sec
V-1:   File removal      :          0.000 sec,          0.000 ops/sec
V-1: Entering summarize_results...

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      16210.486      16210.486      16210.486          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :        224.162        224.162        224.162          0.000
   Tree removal      :          0.000          0.000          0.000          0.000
V-1: Entering timestamp...

-- finished at 08/20/2018 18:45:42 --
mdtest_hard_delete
-- started at 08/20/2018 19:20:53 --

mdtest-1.9.4-rc1 was launched with 80 total task(s) on 10 node(s)
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/mdtest -r -t -F -w 3901 -e 3901 -d /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/mdt_hard -n 16630
Path: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55
FS: 23.0 TiB   Used FS: 69.2%   Inodes: 23551.9 Mi   Used Inodes: 0.0%

80 tasks, 1330400 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      16675.954      16675.954      16675.954          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :         22.728         22.728         22.728          0.000

-- finished at 08/20/2018 19:22:13 --
mdtest_hard_read
-- started at 08/20/2018 19:19:48 --

mdtest-1.9.4-rc1 was launched with 80 total task(s) on 10 node(s)
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/mdtest -E -t -F -w 3901 -e 3901 -d /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/mdt_hard -n 16630
Path: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55
FS: 23.0 TiB   Used FS: 69.2%   Inodes: 23551.9 Mi   Used Inodes: 0.0%

80 tasks, 1330400 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      21491.211      21491.211      21491.211          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/20/2018 19:20:50 --
mdtest_hard_stat
-- started at 08/20/2018 19:14:29 --

mdtest-1.9.4-rc1 was launched with 80 total task(s) on 10 node(s)
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/mdtest -T -t -F -w 3901 -e 3901 -d /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/mdt_hard -n 16630
Path: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55
FS: 23.0 TiB   Used FS: 69.6%   Inodes: 23551.9 Mi   Used Inodes: 0.1%

80 tasks, 1330400 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      16613.947      16613.947      16613.947          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/20/2018 19:15:49 --
mdtest_hard_write
-- started at 08/20/2018 18:51:14 --

mdtest-1.9.4-rc1 was launched with 80 total task(s) on 10 node(s)
Command line used: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/bin/mdtest -C -t -F -w 3901 -e 3901 -d /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55/mdt_hard -n 16630
Path: /var/opt/cray/dws/mounts/batch/6335677_striped_scratch/datafiles/io500.2018.08.20-18.33.55
FS: 23.0 TiB   Used FS: 69.5%   Inodes: 23551.9 Mi   Used Inodes: 0.0%

80 tasks, 1330400 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :       3542.701       3542.701       3542.701          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :       3077.259       3077.259       3077.259          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/20/2018 18:57:30 --
result_summary
[RESULT] BW   phase 1            ior_easy_write               41.436 GB/s : time 363.16 seconds
[RESULT] IOPS phase 1         mdtest_easy_write               16.210 kiops : time 338.43 seconds
[RESULT] BW   phase 2            ior_hard_write                3.714 GB/s : time 325.13 seconds
[RESULT] IOPS phase 2         mdtest_hard_write                3.543 kiops : time 378.68 seconds
[RESULT] IOPS phase 3                      find                7.080 kiops : time 187.90 seconds
[RESULT] BW   phase 3             ior_easy_read               40.519 GB/s : time 371.38 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat               19.953 kiops : time 275.67 seconds
[RESULT] BW   phase 4             ior_hard_read                6.995 GB/s : time 172.64 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat               16.614 kiops : time  85.60 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete               23.299 kiops : time 235.85 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               21.491 kiops : time  65.04 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete               16.676 kiops : time  82.65 seconds
[SCORE] Bandwidth 14.4516 GB/s : IOPS 13.5337 kiops : TOTAL 13.9851
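
As a cross-check of the [SCORE] line, assuming the standard IO500 scoring rule (geometric mean of the four bandwidth results, geometric mean of the eight IOPS results, and the square root of their product), the totals can be reproduced from the [RESULT] lines with a short awk sketch like the one below; small differences arise from rounding in the printed per-phase values.

awk 'BEGIN {
  # per-phase values copied from the [RESULT] lines above
  nbw   = split("41.436 3.714 40.519 6.995", bw)                                # GB/s
  niops = split("16.210 3.543 7.080 19.953 16.614 23.299 21.491 16.676", iops)  # kiops
  pbw = 1; for (i = 1; i <= nbw;   i++) pbw *= bw[i]
  pio = 1; for (i = 1; i <= niops; i++) pio *= iops[i]
  gbw = pbw ^ (1 / nbw); gio = pio ^ (1 / niops)                                # geometric means
  printf "Bandwidth %.4f GB/s : IOPS %.4f kiops : TOTAL %.4f\n", gbw, gio, sqrt(gbw * gio)
}'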