ofsdev

Institution Clemson University
Client Procs Per Node 4
Client Operating System Oracle Linux
Client Operating System Version 7.5
Client Kernel Version 3.10.0-862.14.4.el7.x86_64

DATA SERVER

Storage Type HDD
Volatile Memory 16GB
Storage Interface SAS
Network InfiniBand FDR
Software Version 2.10.5
OS Version 7.5

INFORMATION

Client Nodes 16
Client Total Procs 64
Metadata Nodes 16
Metadata Storage Devices 2
Data Nodes 16
Data Storage Devices 12

METADATA

Easy Write 20.34 kIOP/s
Easy Stat 81.42 kIOP/s
Easy Delete 20.97 kIOP/s
Hard Write 15.49 kIOP/s
Hard Read 28.89 kIOP/s
Hard Stat 37.20 kIOP/s
Hard Delete 11.14 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set here are needed by io500_fixed.sh, which gets sourced at the end of this script.
# Please also edit the 'extra_description' function.
#set -x

if [ "$1" == "" ]
then
	SCALE=1
else
	SCALE=$1
fi


NP=$(( $SCALE * 16 ))

echo "$SCALE processes per node for $NP processes."

set -euo pipefail  # better error handling

export OFS_MOUNT=/mnt/lustre/jburto2

# Set these to True one at a time while you debug and tune this benchmark.
# For each one that you set to True, go and edit the appropriate function;
# to find the function name, see the 'main' function.
# These are listed in the order in which they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the creat phase and enables the subsequent read
io500_run_find="True"     
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"  
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="True"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # stonewalling timer: stop the write phases with wearout after 300s; set to 0 if you never want to abort


# To run this benchmark, find and edit each of these functions.
# Please also edit the 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths    
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # Set the directories where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it.
  timestamp=$(date +%Y.%m.%d-%H.%M.%S)          # create a uniquifier
  io500_workdir=$OFS_MOUNT/io500/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=$PWD/results/$timestamp      # the directory where the output results will be kept

  mkdir -p $io500_workdir $io500_result_dir
  mkdir -p ${io500_workdir}/ior_easy ${io500_workdir}/ior_hard 
  #mkdir -p ${io500_workdir}/mdt_easy ${io500_workdir}/mdt_hard

# For ior_easy: large chunks, as few targets as will allow the files to be evenly spread.
  lfs setstripe -c 1 ${io500_workdir}/ior_easy  # a single stripe per file for ior_easy
# Stripe across all OSTs for ior_hard with a 256k chunk size.
  lfs setstripe -c -1 -S 256k ${io500_workdir}/ior_hard
#  lfs setstripe -c -1 -S 64k ${io500_workdir}/ior_hard
# Enable DNE2
# https://lustre.ornl.gov/ecosystem-2016/documents/papers/LustreEco2016-Simmons-DNE.pdf 
  lfs setdirstripe -c 16 ${io500_workdir}/mdt_easy
  lfs setdirstripe -c 16 ${io500_workdir}/mdt_hard
  echo "Stripes set"
}
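# Manual layout check (a sketch; these commands are not executed by the benchmark):
#   lfs getstripe -c ${io500_workdir}/ior_easy    # expect a stripe count of 1
#   lfs getstripe ${io500_workdir}/ior_hard       # expect stripe count -1 (all OSTs), 256k stripe size
#   lfs getdirstripe ${io500_workdir}/mdt_easy    # expect 16 MDT stripes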

function setup_paths {
  # Set the paths to the binaries. If you ran ./utilities/prepare.sh successfully, the binaries are in ./bin/.
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  io500_mpi_prefix="/usr/lib64/openmpi"
  #io500_mpi_prefix="/home/jburto2/openmpi/1.10.7"
  io500_mpirun="$io500_mpi_prefix/bin/mpirun"

  # Run MPI over TCP on the ethernet network (openib disabled, ib0 excluded from
  # TCP) to keep the InfiniBand fabric clear for file system traffic. Map by node
  # to balance processes. The IO-500 benchmarks are not heavy on interprocess communication.
  io500_mpiargs="-np $NP --mca btl_tcp_if_exclude ib0 --mca btl ^openib --map-by node --machinefile /home/jburto2/pvfsnodelistmpi --prefix $io500_mpi_prefix"
}
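# Optional placement check (a manual sketch, not part of the benchmark): confirm
# that --map-by node gives each host $SCALE ranks:
#   $io500_mpirun $io500_mpiargs hostname | sort | uniq -c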

function setup_ior_easy {
# 4 MiB transfers, (300 GiB / SCALE) per proc, file per proc.
  io500_ior_easy_size=$((300 * 1024 / $SCALE))
  io500_ior_easy_params="-t 4m -b ${io500_ior_easy_size}m -F -a MPIIO"
   
}
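# Worked example: with SCALE=4, io500_ior_easy_size = 300*1024/4 = 76800 MiB,
# i.e. 75 GiB per process -- matching '-b 76800m' and 'blocksize: 75 GiB' in
# the ior_easy logs below.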

function setup_mdt_easy {
# Unique directory per task (-u), files only at the leaves (-L).
# The commented-out parameters (-z 1 -b 6) create one level of six directories;
# BeeGFS doesn't have distributed directories, so more directories = better distribution.
#  io500_mdtest_easy_params="-z 1 -b 6 -u -L"
  io500_mdtest_easy_params="-u -L"
  io500_mdtest_easy_files_per_proc=$((1280000 / $SCALE ))
}
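# Worked example: with SCALE=4, 1280000/4 = 320000 files per process
# ('-n 320000' in the mdtest_easy logs), or 20480000 files across 64 tasks.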

function setup_ior_hard {
  if [ "$SCALE" == "1" ] 
  then
  	io500_ior_hard_writes_per_proc=128000
  else	
  	io500_ior_hard_writes_per_proc=$(( 128000 / $SCALE ))
  fi

  io500_ior_hard_other_options=" -a MPIIO"

}
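# Worked example: with SCALE=4, 128000/4 = 32000 segments per process
# ('-s 32000' in the ior_hard logs); 64 tasks * 32000 * 47008 bytes gives the
# 89.66 GiB aggregate file size reported there.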

function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc="$(( 500000 / $SCALE ))"
  io500_mdtest_files_per_proc=$(( 500000 / $SCALE )) 
  io500_mdtest_hard_other_options=""
}
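# Worked example: with SCALE=4, 500000/4 = 125000 files per process
# ('-n 125000' in the mdtest_hard logs), or 8000000 files across 64 tasks.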

function setup_find {
  #
  # Set up the find command. This is an area where innovation is allowed.
  #    Three default options are provided: a serial find, a parallel version in
  #    Python, and a parallel version in C. The current default is the serial
  #    version, but it is very slow; we recommend either customizing it or
  #    using the C parallel version. For GPFS, we recommend the provided mmfind
  #    wrapper described below. Instructions follow.
  #    If a custom approach is used, please provide enough info so others can reproduce it.

  # the serial version that should run (SLOWLY) without modification
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""

  # a parallel version in C; the -s option adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it to 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/
  #   If you use io500_find_mpi="True", this will run with the same number of
  #   MPI nodes and ranks as the other phases. If you prefer another number
  #   (and fewer might be better here), set io500_find_mpi to "False" and
  #   write a wrapper script that sets up MPI as you would like, then point
  #   io500_find_cmd at your wrapper script.
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  io500_find_cmd_args="-s 10000 -r $io500_result_dir/pfind_results"
  
  # for GPFS systems, you should probably use the provided mmfind wrapper 
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss
  source ./bin/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system 
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='Palmetto ofstest'      # e.g. Oakforest-PACS
  io500_info_institute_name='Clemson University'   # e.g. JCAHPC
  io500_info_storage_age_in_months='0' # not install date but age since last refresh
  io500_info_storage_install_date='4/12'  # MM/YY
  io500_info_filesystem='Lustre'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='2.10.5'
  # client side info
  io500_info_num_client_nodes='16'
  io500_info_procs_per_node="$SCALE"
  # server side info
  io500_info_num_metadata_server_nodes='16'
  io500_info_num_data_server_nodes='16'
  io500_info_num_data_storage_devices='160'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='32'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='infiniband' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='SAS' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='infiniband'
}

main
ior_easy_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Thu Nov  8 15:02:04 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 4m -b 76800m -F -a MPIIO -o /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy/ior_file_easy -O stoneWallingStatusFile=/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy/stonewall
Machine             : Linux pvfs017-ib0.palmetto.clemson.edu
TestID              : 0
StartTime           : Thu Nov  8 15:02:04 2018
Path                : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy
FS                  : 144.2 TiB   Used FS: 12.2%   Inodes: 290.9 Mi   Used Inodes: 4.2%

Options: 
api                 : MPIIO
apiVersion          : (3.0)
test filename       : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 64
clients per node    : 4
repetitions         : 1
xfersize            : 4 MiB
blocksize           : 75 GiB
aggregate filesize  : 4.69 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      7471       78643200   4096       0.010009   657.90     0.021376   657.91     0   
Max Read:  7470.90 MiB/sec (7833.81 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read         7470.90    7470.90    7470.90       0.00    1867.73    1867.73    1867.73       0.00  657.91244     0     64   4    1   1     1        1         0    0      1 80530636800  4194304 4915200.0 MPIIO      0
Finished            : Thu Nov  8 15:13:02 2018
ior_easy_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Thu Nov  8 14:22:09 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 4m -b 76800m -F -a MPIIO -o /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy/ior_file_easy -O stoneWallingStatusFile=/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux pvfs017-ib0.palmetto.clemson.edu
TestID              : 0
StartTime           : Thu Nov  8 14:22:09 2018
Path                : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy
FS                  : 144.2 TiB   Used FS: 8.8%   Inodes: 287.4 Mi   Used Inodes: 0.0%

Options: 
api                 : MPIIO
apiVersion          : (3.0)
test filename       : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 64
clients per node    : 4
repetitions         : 1
xfersize            : 4 MiB
blocksize           : 75 GiB
aggregate filesize  : 4.69 TiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 11848 max: 19200 -- min data: 46.3 GiB mean data: 67.3 GiB time: 302.9s
write     13361      78643200   4096       0.005825   367.79     0.078086   367.87     0   
Max Write: 13361.22 MiB/sec (14010.26 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write       13361.22   13361.22   13361.22       0.00    3340.31    3340.31    3340.31       0.00  367.87058     0     64   4    1   1     1        1         0    0      1 80530636800  4194304 4915200.0 MPIIO      0
Finished            : Thu Nov  8 14:28:17 2018
ior_hard_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Thu Nov  8 15:14:43 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 32000 -a MPIIO -o /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard/IOR_file -O stoneWallingStatusFile=/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard/stonewall
Machine             : Linux pvfs017-ib0.palmetto.clemson.edu
TestID              : 0
StartTime           : Thu Nov  8 15:14:43 2018
Path                : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard
FS                  : 144.2 TiB   Used FS: 12.2%   Inodes: 290.9 Mi   Used Inodes: 4.2%

Options: 
api                 : MPIIO
apiVersion          : (3.0)
test filename       : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 32000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 64
clients per node    : 4
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 89.66 GiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      1228.18    45.91      45.91      0.023325   74.73      0.001193   74.76      0   
Max Read:  1228.18 MiB/sec (1287.84 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read         1228.18    1228.18    1228.18       0.00   27396.11   27396.11   27396.11       0.00   74.75513     0     64   4    1   0     1        1         0    0  32000    47008    47008   91812.5 MPIIO      0
Finished            : Thu Nov  8 15:15:58 2018
ior_hard_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Thu Nov  8 14:43:25 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 32000 -a MPIIO -o /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard/IOR_file -O stoneWallingStatusFile=/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux pvfs017-ib0.palmetto.clemson.edu
TestID              : 0
StartTime           : Thu Nov  8 14:43:25 2018
Path                : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard
FS                  : 144.2 TiB   Used FS: 12.1%   Inodes: 290.9 Mi   Used Inodes: 2.6%

Options: 
api                 : MPIIO
apiVersion          : (3.0)
test filename       : /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 32000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 64
clients per node    : 4
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 89.66 GiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 11468 max: 32000 -- min data: 0.5 GiB mean data: 1.0 GiB time: 300.1s
write     176.56     45.91      45.91      0.222565   519.79     0.025582   520.02     0   
Max Write: 176.56 MiB/sec (185.13 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write         176.56     176.56     176.56       0.00    3938.32    3938.32    3938.32       0.00  520.01813     0     64   4    1   0     1        1         0    0  32000    47008    47008   91812.5 MPIIO      0
Finished            : Thu Nov  8 14:52:05 2018
mdtest_easy_delete
-- started at 11/08/2018 15:18:14 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-F" "-d" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_easy" "-n" "320000" "-u" "-L" "-x" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_easy-stonewall"
Path: /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06
FS: 144.2 TiB   Used FS: 12.2%   Inodes: 290.9 Mi   Used Inodes: 4.2%

64 tasks, 20480000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      20967.129      20967.129      20967.129          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          2.000          2.000          2.000          0.000

-- finished at 11/08/2018 15:24:29 --
mdtest_easy_stat
-- started at 11/08/2018 15:13:04 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-F" "-d" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_easy" "-n" "320000" "-u" "-L" "-x" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_easy-stonewall"
Path: /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06
FS: 144.2 TiB   Used FS: 12.2%   Inodes: 290.9 Mi   Used Inodes: 4.2%

64 tasks, 20480000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      81417.707      81417.707      81417.707          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/08/2018 15:14:41 --
mdtest_easy_write
-- started at 11/08/2018 14:28:19 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-F" "-d" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_easy" "-n" "320000" "-u" "-L" "-x" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_easy-stonewall" "-W" "300"
Path: /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06
FS: 144.2 TiB   Used FS: 12.1%   Inodes: 290.3 Mi   Used Inodes: 0.0%

64 tasks, 20480000 files
stonewall rank 32: 110007 of 122648 
Continue stonewall hit min: 89951 max: 122648 avg: 104278.6 
stonewall rank 0: 110448 of 122648 
stonewall rank 16: 109202 of 122648 
stonewall rank 48: 109202 of 122648 
stonewall rank 20: 94081 of 122648 
stonewall rank 56: 118174 of 122648 
stonewall rank 36: 103487 of 122648 
stonewall rank 24: 118778 of 122648 
stonewall rank 52: 94340 of 122648 
stonewall rank 40: 122350 of 122648 
stonewall rank 4: 103913 of 122648 
stonewall rank 60: 90216 of 122648 
stonewall rank 12: 90164 of 122648 
stonewall rank 28: 89951 of 122648 
stonewall rank 44: 90812 of 122648 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      53046.466      53046.466      53046.466          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :         96.446         96.446         96.446          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/08/2018 14:34:45 --
stonewall rank 18: 119028 of 122648 
stonewall rank 2: 121061 of 122648 
stonewall rank 50: 119192 of 122648 
stonewall rank 34: 120755 of 122648 
stonewall rank 54: 91388 of 122648 
stonewall rank 6: 92967 of 122648 
stonewall rank 38: 93111 of 122648 
stonewall rank 22: 91026 of 122648 
stonewall rank 58: 111201 of 122648 
stonewall rank 42: 112754 of 122648 
stonewall rank 26: 111047 of 122648 
stonewall rank 10: 112912 of 122648 
stonewall rank 46: 91883 of 122648 
stonewall rank 30: 92143 of 122648 
stonewall rank 62: 92292 of 122648 
stonewall rank 14: 92391 of 122648 
stonewall rank 33: 97192 of 122648 
stonewall rank 1: 97770 of 122648 
stonewall rank 17: 96749 of 122648 
stonewall rank 49: 96673 of 122648 
stonewall rank 41: 91023 of 122648 
stonewall rank 57: 90272 of 122648 
stonewall rank 9: 91514 of 122648 
stonewall rank 25: 90300 of 122648 
stonewall rank 21: 112337 of 122648 
stonewall rank 37: 112824 of 122648 
stonewall rank 53: 112062 of 122648 
stonewall rank 5: 113251 of 122648 
stonewall rank 13: 113007 of 122648 
stonewall rank 45: 113052 of 122648 
stonewall rank 29: 109713 of 122648 
stonewall rank 61: 109390 of 122648 
stonewall rank 3: 108307 of 122648 
stonewall rank 35: 108527 of 122648 
stonewall rank 19: 108556 of 122648 
stonewall rank 51: 108705 of 122648 
stonewall rank 23: 95078 of 122648 
stonewall rank 43: 96714 of 122648 
stonewall rank 7: 98345 of 122648 
stonewall rank 11: 96912 of 122648 
stonewall rank 55: 95560 of 122648 
stonewall rank 59: 96227 of 122648 
stonewall rank 39: 97835 of 122648 
stonewall rank 27: 96187 of 122648 
stonewall rank 47: 120736 of 122648 
stonewall rank 31: 117720 of 122648 
stonewall rank 63: 117440 of 122648 
stonewall rank 15: 120930 of 122648 
mdtest_hard_delete
-- started at 11/08/2018 15:27:24 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard" "-n" "125000" "-x" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard-stonewall"
Path: /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06
FS: 144.2 TiB   Used FS: 12.2%   Inodes: 286.8 Mi   Used Inodes: 1.6%

64 tasks, 8000000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      11140.783      11140.783      11140.783          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          7.563          7.563          7.563          0.000

-- finished at 11/08/2018 15:34:45 --
mdtest_hard_read
-- started at 11/08/2018 15:24:31 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard" "-n" "125000" "-x" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard-stonewall"
Path: /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06
FS: 144.2 TiB   Used FS: 12.2%   Inodes: 285.7 Mi   Used Inodes: 1.6%

64 tasks, 8000000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      28893.552      28893.552      28893.552          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/08/2018 15:27:21 --
mdtest_hard_stat
-- started at 11/08/2018 15:16:00 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard" "-n" "125000" "-x" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard-stonewall"
Path: /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06
FS: 144.2 TiB   Used FS: 12.2%   Inodes: 290.9 Mi   Used Inodes: 4.2%

64 tasks, 8000000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      37196.551      37196.551      37196.551          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/08/2018 15:18:12 --
mdtest_hard_write
-- started at 11/08/2018 14:52:07 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard" "-n" "125000" "-x" "/mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06/mdt_hard-stonewall" "-W" "300"
Path: /mnt/lustre/jburto2/io500/datafiles/io500.2018.11.08-14.22.06
FS: 144.2 TiB   Used FS: 12.1%   Inodes: 290.9 Mi   Used Inodes: 2.6%

64 tasks, 8000000 files
stonewall rank 16: 75685 of 76731 
stonewall rank 48: 75611 of 76731 
Continue stonewall hit min: 71226 max: 76731 avg: 73717.0 
stonewall rank 0: 76005 of 76731 
stonewall rank 32: 75753 of 76731 
stonewall rank 36: 75892 of 76731 
stonewall rank 52: 73018 of 76731 
stonewall rank 8: 74487 of 76731 
stonewall rank 4: 75833 of 76731 
stonewall rank 20: 72986 of 76731 
stonewall rank 56: 74017 of 76731 
stonewall rank 60: 72815 of 76731 
stonewall rank 28: 72693 of 76731 
stonewall rank 24: 74093 of 76731 
stonewall rank 44: 72803 of 76731 
stonewall rank 40: 74330 of 76731 
stonewall rank 12: 72773 of 76731 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      25188.896      25188.896      25188.896          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :        880.597        880.597        880.597          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/08/2018 14:57:24 --
stonewall rank 34: 75411 of 76731 
stonewall rank 18: 75109 of 76731 
stonewall rank 50: 74928 of 76731 
stonewall rank 2: 75439 of 76731 
stonewall rank 54: 71784 of 76731 
stonewall rank 42: 74902 of 76731 
stonewall rank 38: 71893 of 76731 
stonewall rank 22: 71911 of 76731 
stonewall rank 26: 75328 of 76731 
stonewall rank 6: 71870 of 76731 
stonewall rank 10: 74894 of 76731 
stonewall rank 46: 71618 of 76731 
stonewall rank 58: 75288 of 76731 
stonewall rank 14: 71481 of 76731 
stonewall rank 30: 71329 of 76731 
stonewall rank 62: 71293 of 76731 
stonewall rank 17: 73587 of 76731 
stonewall rank 1: 73619 of 76731 
stonewall rank 49: 73495 of 76731 
stonewall rank 33: 73485 of 76731 
stonewall rank 9: 71400 of 76731 
stonewall rank 21: 75195 of 76731 
stonewall rank 57: 71261 of 76731 
stonewall rank 37: 75037 of 76731 
stonewall rank 41: 71269 of 76731 
stonewall rank 53: 75240 of 76731 
stonewall rank 25: 71226 of 76731 
stonewall rank 5: 75156 of 76731 
stonewall rank 13: 74810 of 76731 
stonewall rank 45: 74843 of 76731 
stonewall rank 29: 73861 of 76731 
stonewall rank 61: 73856 of 76731 
stonewall rank 19: 76336 of 76731 
stonewall rank 51: 76367 of 76731 
stonewall rank 3: 76619 of 76731 
stonewall rank 43: 72383 of 76731 
stonewall rank 55: 72186 of 76731 
stonewall rank 11: 72452 of 76731 
stonewall rank 23: 72068 of 76731 
stonewall rank 59: 72437 of 76731 
stonewall rank 39: 72165 of 76731 
stonewall rank 7: 72202 of 76731 
stonewall rank 27: 72378 of 76731 
stonewall rank 63: 73149 of 76731 
stonewall rank 47: 73358 of 76731 
stonewall rank 31: 73131 of 76731 
stonewall rank 15: 73314 of 76731 
result_summary
[RESULT] BW   phase 1            ior_easy_write               13.048 GB/s : time 367.87 seconds
[RESULT] IOPS phase 1         mdtest_easy_write               53.046 kiops : time 903.74 seconds
[RESULT] BW   phase 2            ior_hard_write                0.172 GB/s : time 520.02 seconds
[RESULT] IOPS phase 2         mdtest_hard_write               25.189 kiops : time 563.16 seconds
[RESULT] IOPS phase 3                      find              380.350 kiops : time  33.55 seconds
[RESULT] BW   phase 3             ior_easy_read                7.296 GB/s : time 657.91 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat               81.418 kiops : time  98.56 seconds
[RESULT] BW   phase 4             ior_hard_read                1.199 GB/s : time  74.76 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat               37.197 kiops : time 134.21 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete               20.967 kiops : time 377.03 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               28.894 kiops : time 172.29 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete               11.141 kiops : time 443.27 seconds
[SCORE] Bandwidth 2.10641 GB/s : IOPS 42.3707 kiops : TOTAL 9.44723
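
For reference, the TOTAL score is the geometric mean of the bandwidth and IOPS scores (each itself the geometric mean of its phase results): sqrt(2.10641 * 42.3707) = sqrt(89.25) ~= 9.447, matching the reported TOTAL.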