Palmetto

Institution Clemson University
Client Procs Per Node 1
Client Operating System Oracle Linux
Client Operating System Version 7.5
Client Kernel Version 3.10.0-862.9.1.el7.x86_64

DATA SERVER

Storage Type HDD
Volatile Memory 256GB
Storage Interface SATA
Network InfiniBand FDR
Software Version 7.1
OS Version 7.5

INFORMATION

Client Nodes 32
Client Total Procs 32
Metadata Nodes 6
Metadata Storage Devices 2
Data Nodes 16
Data Storage Devices 10

METADATA

Easy Write 24.54 kIOP/s
Easy Stat 170.20 kIOP/s
Easy Delete 24.70 kIOP/s
Hard Write 5.32 kIOP/s
Hard Read 13.69 kIOP/s
Hard Stat 88.67 kIOP/s
Hard Delete 6.22 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of the site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit 'extra_description' function.
#set -x

if [ "$1" == "" ]
then
	SCALE=1
else
	SCALE=$1
fi


NP=$(( $SCALE * 16 ))

echo "$SCALE processes per node for $NP processes."

set -euo pipefail  # better error handling

export OFS_MOUNT=/scratch4/jburto2

# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the create phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the create phase and enables the subsequent read
io500_run_find="True"     
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"  
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="True"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # Stonewalling timer: stop the write phases (with wear-out) after 300s; set to 0 to never abort.


# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths    
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # set directories for where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, then this is a good place to do it.
  timestamp=`date +%Y.%m.%d-%H.%M.%S`           # create a uniquifier
  io500_workdir=$OFS_MOUNT/io500/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=$PWD/results/$timestamp      # the directory where the output results will be kept

  mkdir -p $io500_workdir $io500_result_dir
  mkdir -p ${io500_workdir}/ior_easy ${io500_workdir}/ior_hard 
  mkdir -p ${io500_workdir}/mdt_easy ${io500_workdir}/mdt_hard 
# for ior_easy, large chunks, as few targets as will allow the files to be evenly spread. 
  beegfs-ctl --setpattern --numtargets=3 --chunksize=4m --mount=/scratch4 ${io500_workdir}/ior_easy
# stripe across all storage targets for ior_hard, 64k chunksize
# best pattern is minimal chunksize to fit one I/O in, regardless of RAID stripe.
  beegfs-ctl --setpattern --numtargets=6 --chunksize=64k --mount=/scratch4 ${io500_workdir}/ior_hard 
# turn off striping and use small chunks for mdtest
  beegfs-ctl --setpattern --numtargets=1 --chunksize=64k --mount=/scratch4 ${io500_workdir}/mdt_easy 
  beegfs-ctl --setpattern --numtargets=1 --chunksize=64k --mount=/scratch4 ${io500_workdir}/mdt_hard 
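# (Optional sanity check, assuming beegfs-ctl is available on the clients: the applied
#  pattern can be inspected with, e.g.,
#    beegfs-ctl --getentryinfo --mount=/scratch4 ${io500_workdir}/ior_easy
#  which should report the chunksize and number of targets set above.)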
}

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, then binaries are in ./bin/
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  io500_mpi_prefix="/usr/lib64/openmpi"
  #io500_mpi_prefix="/home/jburto2/openmpi/1.10.7"
  io500_mpirun="$io500_mpi_prefix/bin/mpirun"

  # Run OpenMPI over ethernet to keep the IB network clear for data. Map by node to balance processes.
  # The I/O 500 benchmarks are not heavy on interprocess communication.
  io500_mpiargs="-np $NP --mca btl_tcp_if_exclude ib0 --mca btl ^openib --map-by node --machinefile /home/jburto2/hpccnodelistmpi --prefix $io500_mpi_prefix"
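  # (The MCA flags above are assumed to match the OpenMPI build under /usr/lib64/openmpi;
  #  dropping the two btl exclusions would allow MPI traffic to use the openib/IB transport instead of TCP.)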
}

function setup_ior_easy {
# 4m transfers, 120 GiB per proc (divided by SCALE), file per proc.
  io500_ior_easy_size=$((120 * 1024 / $SCALE))
  io500_ior_easy_params="-t 4m -b ${io500_ior_easy_size}m -F -a POSIX"
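  # e.g. SCALE=1 -> 122880 MiB (120 GiB) per process; SCALE=2 -> 61440 MiB (60 GiB) per process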
   
}

function setup_mdt_easy {
# unique dir per thread, files only at leaves (the commented-out params below would add one level of 6 subdirectories).
# BeeGFS places each whole directory on a single metadata server, so more directories give better metadata distribution.
#  io500_mdtest_easy_params="-z 1 -b 6 -u -L" 
  io500_mdtest_easy_params="-u -L" 
  io500_mdtest_easy_files_per_proc=800000
}

function setup_ior_hard {
  if [ "$SCALE" == "1" ] 
  then
	# One process per node is significantly faster because of buffering.
  	io500_ior_hard_writes_per_proc=2200000
  else	
  	io500_ior_hard_writes_per_proc=$(( 2200000 / $SCALE ))
  fi
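  # e.g. SCALE=2 -> 1100000 writes per process, which matches the "-s 1100000"
  # seen in the submitted ior_hard command lines below.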

  io500_ior_hard_other_options=" -a POSIX"

}

function setup_mdt_hard {
# Multiple directories might improve mdt_hard slightly, but this test is storage bound, not md bound.
  io500_mdtest_hard_files_per_proc="$(( 150000 / $SCALE ))"
  io500_mdtest_files_per_proc=$(( 150000 / $SCALE )) 
  io500_mdtest_hard_other_options=""
}

function setup_find {
  #
  # setup the find command. This is an area where innovation is allowed.
  #    Three default options are provided: a serial find, a parallel Python
  #    version, and a parallel C version. The current default is the serial
  #    version, but it is very slow; we recommend either customizing it or
  #    using the parallel C version.
  #    For GPFS, we recommend using the provided mmfind wrapper described below.
  #    Instructions below.
  #    If a custom approach is used, please provide enough info so others can reproduce it.

  # the serial version that should run (SLOWLY) without modification
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it at 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/ 
  #   If you use io500_find_mpi="True", then this will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer another number (and fewer might be better here), then
  #   you can set io500_find_mpi to "False" and write a wrapper
  #   script for this which sets up MPI as you would like.  Then change
  #   io500_find_cmd to point to your wrapper script. 
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  io500_find_cmd_args="-s 10000 -r $io500_result_dir/pfind_results"
  
  # for GPFS systems, you should probably use the provided mmfind wrapper 
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss
  source ./bin/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system 
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='Palmetto scratch4'      # e.g. Oakforest-PACS
  io500_info_institute_name='Clemson University'   # e.g. JCAHPC
  io500_info_storage_age_in_months='0' # not install date but age since last refresh
  io500_info_storage_install_date='01/15'  # MM/YY
  io500_info_storage_refresh_date='10/18'  # MM/YY
  io500_info_filesystem='BeeGFS'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='7.1'
  # client side info
  io500_info_num_client_nodes="$(( ${SCALE} * 16 ))"
  io500_info_procs_per_node="1"
  # server side info
  io500_info_num_metadata_server_nodes='6'
  io500_info_num_data_server_nodes='6'
  io500_info_num_data_storage_devices='60'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='12'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='infiniband' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='SATA' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='infiniband'
}

main
ior_easy_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Wed Oct 24 11:20:54 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 4m -b 65536m -F -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy/ior_file_easy -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy/stonewall
Machine             : Linux ofstest008.ofsdev.clemson.edu
TestID              : 0
StartTime           : Wed Oct 24 11:20:54 2018
Path                : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy
FS                  : 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 32
clients per node    : 1
repetitions         : 1
xfersize            : 4 MiB
blocksize           : 64 GiB
aggregate filesize  : 2 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      4536       67108864   4096       0.003099   462.27     0.059596   462.28     0   
Max Read:  4536.49 MiB/sec (4756.86 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read         4536.49    4536.49    4536.49       0.00    1134.12    1134.12    1134.12       0.00  462.28489     0     32   1    1   1     1        1         0    0      1 68719476736  4194304 2097152.0 POSIX      0
Finished            : Wed Oct 24 11:28:36 2018
ior_easy_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Wed Oct 24 10:38:57 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 4m -b 65536m -F -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy/ior_file_easy -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux ofstest008.ofsdev.clemson.edu
TestID              : 0
StartTime           : Wed Oct 24 10:38:57 2018
Path                : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy
FS                  : 174.6 TiB   Used FS: 17.2%   Inodes: 0.0 Mi   Used Inodes: -nan%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 32
clients per node    : 1
repetitions         : 1
xfersize            : 4 MiB
blocksize           : 64 GiB
aggregate filesize  : 2 TiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 12283 max: 16384 -- min data: 48.0 GiB mean data: 54.9 GiB time: 300.1s
write     3816       67108864   4096       0.004014   549.59     0.038325   549.61     0   
Max Write: 3815.68 MiB/sec (4001.03 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write        3815.68    3815.68    3815.68       0.00     953.92     953.92     953.92       0.00  549.61371     0     32   1    1   1     1        1         0    0      1 68719476736  4194304 2097152.0 POSIX      0
Finished            : Wed Oct 24 10:48:06 2018
ior_hard_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Wed Oct 24 11:30:30 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 1100000 -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard/IOR_file -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard/stonewall
Machine             : Linux ofstest008.ofsdev.clemson.edu
TestID              : 0
StartTime           : Wed Oct 24 11:30:30 2018
Path                : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard
FS                  : 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 1100000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 32
clients per node    : 1
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 1.50 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
WARNING: Expected aggregate file size       = 1654681600000.
WARNING: Stat() of aggregate file size      = 1247393758208.
WARNING: Using actual aggregate bytes moved = 1247393758208.
read      3008.45    45.91      45.91      0.140964   395.27     0.045751   395.42     0   
Max Read:  3008.45 MiB/sec (3154.59 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read         3008.45    3008.45    3008.45       0.00   67107.56   67107.56   67107.56       0.00  395.42157     0     32   1    1   0     1        1         0    0 1100000    47008    47008 1189607.4 POSIX      0
Finished            : Wed Oct 24 11:37:05 2018
ior_hard_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Wed Oct 24 11:00:50 2018
Command line        : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 1100000 -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard/IOR_file -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux ofstest008.ofsdev.clemson.edu
TestID              : 0
StartTime           : Wed Oct 24 11:00:50 2018
Path                : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard
FS                  : 174.6 TiB   Used FS: 18.4%   Inodes: 0.0 Mi   Used Inodes: -nan%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 1100000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 32
clients per node    : 1
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 1.50 TiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 665645 max: 829243 -- min data: 29.1 GiB mean data: 35.5 GiB time: 300.0s
WARNING: Expected aggregate file size       = 1654681600000.
WARNING: Stat() of aggregate file size      = 1247393758208.
WARNING: Using actual aggregate bytes moved = 1247393758208.
WARNING: maybe caused by deadlineForStonewalling
write     1577.19    45.91      45.91      0.068406   754.18     0.019051   754.26     0   
Max Write: 1577.19 MiB/sec (1653.81 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write        1577.19    1577.19    1577.19       0.00   35181.42   35181.42   35181.42       0.00  754.25538     0     32   1    1   0     1        1         0    0 1100000    47008    47008 1189607.4 POSIX      0
Finished            : Wed Oct 24 11:13:24 2018
mdtest_easy_delete
-- started at 10/24/2018 11:37:28 --

mdtest-1.9.3 was launched with 32 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-F" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_easy" "-n" "800000" "-u" "-L" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_easy-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54
FS: 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

32 tasks, 25600000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      24695.309      24695.309      24695.309          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.135          0.135          0.135          0.000

-- finished at 10/24/2018 11:50:11 --
mdtest_easy_stat
-- started at 10/24/2018 11:28:38 --

mdtest-1.9.3 was launched with 32 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-F" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_easy" "-n" "800000" "-u" "-L" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_easy-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54
FS: 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

32 tasks, 25600000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :     170203.960     170203.960     170203.960          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/24/2018 11:30:28 --
mdtest_easy_write
-- started at 10/24/2018 10:48:08 --

mdtest-1.9.3 was launched with 32 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-F" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_easy" "-n" "800000" "-u" "-L" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_easy-stonewall" "-W" "300"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54
FS: 174.6 TiB   Used FS: 18.4%   Inodes: 0.0 Mi   Used Inodes: -nan%

32 tasks, 25600000 files
stonewall rank 3: 395904 of 582851 
stonewall rank 14: 453835 of 582851 
stonewall rank 7: 453803 of 582851 
stonewall rank 21: 546914 of 582851 
stonewall rank 1: 397282 of 582851 
stonewall rank 11: 397805 of 582851 
stonewall rank 5: 451399 of 582851 
stonewall rank 17: 561917 of 582851 
stonewall rank 16: 234976 of 582851 
stonewall rank 13: 445054 of 582851 
stonewall rank 20: 552788 of 582851 
stonewall rank 24: 466296 of 582851 
stonewall rank 19: 527409 of 582851 
stonewall rank 18: 534814 of 582851 
stonewall rank 28: 473291 of 582851 
stonewall rank 29: 459121 of 582851 
stonewall rank 9: 450904 of 582851 
stonewall rank 2: 413374 of 582851 
stonewall rank 22: 462959 of 582851 
stonewall rank 4: 409897 of 582851 
stonewall rank 26: 534072 of 582851 
Continue stonewall hit min: 234976 max: 582851 avg: 471852.2 
stonewall rank 0: 537076 of 582851 
stonewall rank 30: 422478 of 582851 
stonewall rank 6: 541025 of 582851 
stonewall rank 27: 516681 of 582851 
stonewall rank 8: 445442 of 582851 
stonewall rank 23: 560747 of 582851 
stonewall rank 10: 392658 of 582851 
stonewall rank 12: 462022 of 582851 
stonewall rank 15: 473671 of 582851 
stonewall rank 31: 540805 of 582851 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      33707.315      33707.315      33707.315          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          6.350          6.350          6.350          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/24/2018 11:00:48 --
mdtest_hard_delete
-- started at 10/24/2018 11:52:21 --

mdtest-1.9.3 was launched with 32 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard" "-n" "75000" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54
FS: 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

32 tasks, 2400000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :       6222.655       6222.655       6222.655          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.442          0.442          0.442          0.000

-- finished at 10/24/2018 11:57:00 --
mdtest_hard_read
-- started at 10/24/2018 11:50:13 --

mdtest-1.9.3 was launched with 32 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard" "-n" "75000" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54
FS: 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

32 tasks, 2400000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      13691.847      13691.847      13691.847          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/24/2018 11:52:19 --
mdtest_hard_stat
-- started at 10/24/2018 11:37:07 --

mdtest-1.9.3 was launched with 32 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard" "-n" "75000" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54
FS: 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

32 tasks, 2400000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      88674.533      88674.533      88674.533          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/24/2018 11:37:27 --
mdtest_hard_write
-- started at 10/24/2018 11:13:26 --

mdtest-1.9.3 was launched with 32 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard" "-n" "75000" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54/mdt_hard-stonewall" "-W" "300"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-10.38.54
FS: 174.6 TiB   Used FS: 19.0%   Inodes: 0.0 Mi   Used Inodes: -nan%

32 tasks, 2400000 files
stonewall rank 14: 53631 of 53867 
stonewall rank 3: 53644 of 53867 
stonewall rank 7: 53810 of 53867 
stonewall rank 1: 53786 of 53867 
stonewall rank 11: 53620 of 53867 
stonewall rank 17: 50230 of 53867 
stonewall rank 5: 53662 of 53867 
stonewall rank 18: 49868 of 53867 
stonewall rank 19: 50068 of 53867 
stonewall rank 28: 50373 of 53867 
stonewall rank 16: 46108 of 53867 
stonewall rank 24: 50354 of 53867 
stonewall rank 26: 50452 of 53867 
stonewall rank 20: 50157 of 53867 
stonewall rank 21: 50008 of 53867 
stonewall rank 9: 53688 of 53867 
stonewall rank 4: 53765 of 53867 
stonewall rank 13: 53753 of 53867 
stonewall rank 25: 50186 of 53867 
stonewall rank 22: 50386 of 53867 
stonewall rank 30: 50279 of 53867 
stonewall rank 8: 53593 of 53867 
stonewall rank 15: 50191 of 53867 
Continue stonewall hit min: 46108 max: 53867 avg: 51721.8 
stonewall rank 0: 53509 of 53867 
stonewall rank 23: 50365 of 53867 
stonewall rank 29: 50049 of 53867 
stonewall rank 12: 53727 of 53867 
stonewall rank 27: 50138 of 53867 
stonewall rank 6: 53649 of 53867 
stonewall rank 10: 53825 of 53867 
stonewall rank 31: 50355 of 53867 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :       7406.617       7406.617       7406.617          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :         23.903         23.903         23.903          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/24/2018 11:18:50 --
result_summary
[RESULT] BW   phase 1            ior_easy_write                3.727 GB/s : time 549.61 seconds
[RESULT] IOPS phase 1         mdtest_easy_write               33.707 kiops : time 761.51 seconds
[RESULT] BW   phase 2            ior_hard_write                1.540 GB/s : time 754.26 seconds
[RESULT] IOPS phase 2         mdtest_hard_write                7.407 kiops : time 325.95 seconds
[RESULT] IOPS phase 3                      find              166.520 kiops : time 122.35 seconds
[RESULT] BW   phase 3             ior_easy_read                4.430 GB/s : time 462.28 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat              170.204 kiops : time 111.45 seconds
[RESULT] BW   phase 4             ior_hard_read                2.938 GB/s : time 395.42 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat               88.675 kiops : time  21.30 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete               24.695 kiops : time 764.53 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               13.692 kiops : time 127.78 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete                6.223 kiops : time 281.12 seconds
[SCORE] Bandwidth 2.93986 GB/s : IOPS 32.74 kiops : TOTAL 9.81076
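
Consistent with the IO500 scoring formula, the bandwidth and IOPS figures above are geometric means of the individual phase results, and the total is the geometric mean of those two sub-scores: sqrt(2.93986 × 32.74) ≈ 9.81, matching the reported TOTAL of 9.81076.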