Raijin

Institution National Computational Infrastructure Australia
Client Procs Per Node 18
Client Operating System CentOS
Client Operating System Version 7.6.1810
Client Kernel Version Linux 3.10.0-957.5.1.el7.x86_64

DATA SERVER

Storage Type NL-SAS, 7.2K RPM HDD
Volatile Memory 384GB
Storage Interface InfiniBand EDR iSER
Network InfiniBand EDR
Software Version 2.10.7
OS Version CentOS 7.6.1810

INFORMATION

Client Nodes 10
Client Total Procs 180
Metadata Nodes 2
Metadata Storage Devices 12
Data Nodes 12
Data Storage Devices 150

METADATA

Easy Write 136.10 kIOP/s
Easy Stat 433.13 kIOP/s
Easy Delete 55.78 kIOP/s
Hard Write 42.37 kIOP/s
Hard Read 58.43 kIOP/s
Hard Stat 93.45 kIOP/s
Hard Delete 30.65 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set here are needed by io500_fixed.sh, which gets sourced at the end of this script.
# Please also edit the 'extra_description' function.

#set -euo pipefail  # better error handling

# Turn these to "True" successively while you debug and tune this benchmark.
# For each one that you turn to "True", go and edit the appropriate function.
# To find the function name, see the 'main' function.
# These are listed in the order in which they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the creat phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=0 # Stonewalling timer: the write phases stop with wearout after this many seconds (300 for a default test); set it to 0 if you never want them to abort early.

# To run this benchmark, find and edit each of these functions.
# Please also edit the 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {

  # Set the directories where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it.
  timestamp=`date +%Y.%m.%d-%H.%M.%S`       # create a uniquifier
  io500_workdir=/g/data4/io500/datafiles    # directory where the data will be stored
  io500_result_dir=$PWD/results  # the directory where the output results will be kept
  mkdir -p $io500_workdir $io500_result_dir

  lfs mkdir --mdt-count 2 ${io500_workdir}/ior_easy

  mkdir ${io500_workdir}/ior_hard
  mkdir ${io500_workdir}/mdt_easy

  lfs mkdir --mdt-count 1 --mdt-index 0 ${io500_workdir}/mdt_easy/mdt0
  lfs mkdir --mdt-count 1 --mdt-index 0 --default ${io500_workdir}/mdt_easy/mdt0
  lfs mkdir --mdt-count 1 --mdt-index 1 ${io500_workdir}/mdt_easy/mdt1
  lfs mkdir --mdt-count 1 --mdt-index 1 --default ${io500_workdir}/mdt_easy/mdt1

  lfs mkdir --mdt-count 2 --mdt-hash all_char ${io500_workdir}/mdt_hard
  lfs mkdir --mdt-count 2 --mdt-hash all_char --default ${io500_workdir}/mdt_hard

  for i in {0..359}; do
    lfs setstripe --stripe-count 1 --stripe-size 4194304 --stripe-index $((i % 180)) ${io500_workdir}/ior_easy/ior_file_easy.$(printf %0.8d $i)
  done

  lfs setstripe --stripe-count 180 --stripe-size 4194304 ${io500_workdir}/ior_hard/IOR_file
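  # (Not part of the submitted run: as a hedged example, the layouts created
  # above could be verified with the standard Lustre tools before launching, e.g.:)
  # lfs getdirstripe ${io500_workdir}/mdt_hard        # should report 2 MDT stripes
  # lfs getstripe ${io500_workdir}/ior_hard/IOR_file  # should report a 180-OST stripe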

}

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, the binaries are in ./bin/
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  io500_mpirun="/g/data4/builds/openmpi/bin/mpirun"
  io500_mpirun="mpirun"
  io500_mpiargs="--allow-run-as-root -hostfile hosts.10 -map-by ppr:9:socket -rank-by slot -bind-to core -np 180 /g/data4/io500/io-500-dev/set-hca.sh"
}

function setup_ior_easy {
  # io500_ior_easy_size is the amount of data written per rank in MiB units,
  # but it can be any number as long as it is somehow used to scale the IOR
  # runtime as part of io500_ior_easy_params
  io500_ior_easy_size=131072
  # 2 MiB transfers, 128 GiB per proc, file per proc
  io500_ior_easy_params="-t 2048k -b ${io500_ior_easy_size}m -F -E"
}

function setup_mdt_easy {
  io500_mdtest_easy_params="-u -L -d ${io500_workdir}/mdt_easy/mdt0@${io500_workdir}/mdt_easy/mdt1" # unique dir per thread, files only at leaves
  io500_mdtest_easy_files_per_proc=262144
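  # Sanity check (comment only): 180 ranks * 262144 files = 47,185,920 files,
  # matching the mdtest_easy logs below.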
}

function setup_ior_hard {
  io500_ior_hard_writes_per_proc=180224
  io500_ior_hard_other_options="-E" #e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
}

function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=92256
  io500_mdtest_hard_other_options=""
}

function setup_find {
  #
  # Set up the find command. This is an area where innovation is allowed.
  #    Three default options are provided: a serial find, a parallel Python
  #    version, and a parallel C version. The current default is the serial
  #    version, but it is very slow; we recommend either customizing it or
  #    using the parallel C version. For GPFS, we recommend the provided
  #    mmfind wrapper described below.
  #    If a custom approach is used, please provide enough info so others can reproduce it.

  # the serial version that should run (SLOWLY) without modification
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it at 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/
  #   If you use io500_find_mpi="True", then this will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer another number (and fewer ranks might be better here),
  #   you can set io500_find_mpi to "False" and write a wrapper script
  #   which sets up MPI as you would like, then change io500_find_cmd to
  #   point to your wrapper script (a hypothetical sketch is included at the end of this function).
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  # run pfind, passing the stonewall timer set above
  io500_find_cmd_args="-s $io500_stonewall_timer -r ${io500_result_dir}/pfind_results"

  # for GPFS systems, you should probably use the provided mmfind wrapper
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss
  source ./utilities/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='/g/data4'      # e.g. Oakforest-PACS
  io500_info_institute_name='NCI Australia'   # e.g. JCAHPC
  io500_info_storage_age_in_months='0' # not install date but age since last refresh
  io500_info_storage_install_date='03/19'  # MM/YY
  io500_info_filesystem='Lustre'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='2.10.7'
  io500_info_filesystem_vendor='Fujitsu / NetApp'
  # client side info
  io500_info_num_client_nodes='10'
  io500_info_procs_per_node='18'
  # server side info
  io500_info_num_metadata_server_nodes='2'
  io500_info_num_data_server_nodes='12'
  io500_info_num_data_storage_devices='1800'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='24'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='InfiniBand' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='SAS / iSER' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='OSTs on NetApp E5670, MDTs on NetApp EF570'
}

main
ior_easy_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Fri Jun  7 10:23:19 2019
Command line        : /g/data4/io500/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 2048k -b 131072m -F -E -o /g/data4/io500/datafiles/ior_easy/ior_file_easy -O stoneWallingStatusFile=/g/data4/io500/datafiles/ior_easy/stonewall
Machine             : Linux g4-lnet01
TestID              : 0
StartTime           : Fri Jun  7 10:23:19 2019
Path                : /g/data4/io500/datafiles/ior_easy
FS                  : 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 2.2%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /g/data4/io500/datafiles/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 180
clients per node    : 18
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 128 GiB
aggregate filesize  : 22.50 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      106178     134217728  2048.00    0.003579   222.20     0.002185   222.20     0   
Max Read:  106177.92 MiB/sec (111335.62 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read       106177.92  106177.92  106177.92       0.00   53088.96   53088.96   53088.96       0.00  222.20214     0    180  18    1   1     1        1         0    0      1 137438953472  2097152 23592960.0 POSIX      0
Finished            : Fri Jun  7 10:27:01 2019
ior_easy_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Fri Jun  7 09:57:19 2019
Command line        : /g/data4/io500/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 2048k -b 131072m -F -E -o /g/data4/io500/datafiles/ior_easy/ior_file_easy -O stoneWallingStatusFile=/g/data4/io500/datafiles/ior_easy/stonewall -O stoneWallingWearOut=1 -D 0
Machine             : Linux g4-lnet01
TestID              : 0
StartTime           : Fri Jun  7 09:57:19 2019
Path                : /g/data4/io500/datafiles/ior_easy
FS                  : 15260.4 TiB   Used FS: 31.8%   Inodes: 4750.9 Mi   Used Inodes: 0.9%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /g/data4/io500/datafiles/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 180
clients per node    : 18
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 128 GiB
aggregate filesize  : 22.50 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 65536 max: 65536 -- min data: 128.0 GiB mean data: 128.0 GiB time: 368.5s
write     63952      134217728  2048.00    0.003494   368.91     0.001652   368.92     0   
Max Write: 63952.19 MiB/sec (67058.73 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write       63952.19   63952.19   63952.19       0.00   31976.09   31976.09   31976.09       0.00  368.91560     0    180  18    1   1     1        1         0    0      1 137438953472  2097152 23592960.0 POSIX      0
Finished            : Fri Jun  7 10:03:28 2019
ior_hard_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Fri Jun  7 10:28:53 2019
Command line        : /g/data4/io500/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 180224 -E -o /g/data4/io500/datafiles/ior_hard/IOR_file -O stoneWallingStatusFile=/g/data4/io500/datafiles/ior_hard/stonewall
Machine             : Linux g4-lnet01
TestID              : 0
StartTime           : Fri Jun  7 10:28:53 2019
Path                : /g/data4/io500/datafiles/ior_hard
FS                  : 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 2.2%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /g/data4/io500/datafiles/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 180224
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 180
clients per node    : 18
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 1.39 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      1142.86    45.91      45.91      0.000905   1272.51    0.001867   1272.52    0   
Max Read:  1142.86 MiB/sec (1198.38 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read         1142.86    1142.86    1142.86       0.00   25493.05   25493.05   25493.05       0.00 1272.51614     0    180  18    1   0     1        1         0    0 180224    47008    47008 1454310.0 POSIX      0
Finished            : Fri Jun  7 10:50:05 2019
ior_hard_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Fri Jun  7 10:09:17 2019
Command line        : /g/data4/io500/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 180224 -E -o /g/data4/io500/datafiles/ior_hard/IOR_file -O stoneWallingStatusFile=/g/data4/io500/datafiles/ior_hard/stonewall -O stoneWallingWearOut=1 -D 0
Machine             : Linux g4-lnet01
TestID              : 0
StartTime           : Fri Jun  7 10:09:17 2019
Path                : /g/data4/io500/datafiles/ior_hard
FS                  : 15260.4 TiB   Used FS: 31.9%   Inodes: 4750.9 Mi   Used Inodes: 1.8%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /g/data4/io500/datafiles/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 180224
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 180
clients per node    : 18
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 1.39 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 180224 max: 180224 -- min data: 7.9 GiB mean data: 7.9 GiB time: 379.5s
write     3831       45.91      45.91      0.002503   379.65     0.000798   379.65     0   
Max Write: 3830.62 MiB/sec (4016.70 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write        3830.62    3830.62    3830.62       0.00   85447.05   85447.05   85447.05       0.00  379.65405     0    180  18    1   0     1        1         0    0 180224    47008    47008 1454310.0 POSIX      0
Finished            : Fri Jun  7 10:15:38 2019
mdtest_easy_delete
-- started at 06/07/2019 10:53:05 --

mdtest-1.9.3 was launched with 180 total task(s) on 10 node(s)
Command line used: /g/data4/io500/io-500-dev/bin/mdtest "-r" "-F" "-d" "/g/data4/io500/datafiles/mdt_easy" "-n" "262144" "-u" "-L" "-d" "/g/data4/io500/datafiles/mdt_easy/mdt0@/g/data4/io500/datafiles/mdt_easy/mdt1" "-x" "/g/data4/io500/datafiles/mdt_easy-stonewall"
Path: /g/data4/io500/datafiles/mdt_easy
FS: 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 2.2%

180 tasks, 47185920 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      55780.322      55780.313      55780.317          0.002
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.507          0.507          0.507          0.000

-- finished at 06/07/2019 11:07:13 --
mdtest_easy_stat
-- started at 06/07/2019 10:27:03 --

mdtest-1.9.3 was launched with 180 total task(s) on 10 node(s)
Command line used: /g/data4/io500/io-500-dev/bin/mdtest "-T" "-F" "-d" "/g/data4/io500/datafiles/mdt_easy" "-n" "262144" "-u" "-L" "-d" "/g/data4/io500/datafiles/mdt_easy/mdt0@/g/data4/io500/datafiles/mdt_easy/mdt1" "-x" "/g/data4/io500/datafiles/mdt_easy-stonewall"
Path: /g/data4/io500/datafiles/mdt_easy
FS: 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 2.2%

180 tasks, 47185920 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :     433134.029     433131.694     433132.504          0.742
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/07/2019 10:28:52 --
mdtest_easy_write
-- started at 06/07/2019 10:03:29 --

mdtest-1.9.3 was launched with 180 total task(s) on 10 node(s)
Command line used: /g/data4/io500/io-500-dev/bin/mdtest "-C" "-F" "-d" "/g/data4/io500/datafiles/mdt_easy" "-n" "262144" "-u" "-L" "-d" "/g/data4/io500/datafiles/mdt_easy/mdt0@/g/data4/io500/datafiles/mdt_easy/mdt1" "-x" "/g/data4/io500/datafiles/mdt_easy-stonewall" "-W" "0"
Path: /g/data4/io500/datafiles/mdt_easy
FS: 15260.4 TiB   Used FS: 31.9%   Inodes: 4750.9 Mi   Used Inodes: 0.9%

180 tasks, 47185920 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :     136095.783     136095.718     136095.752          0.013
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :        401.865        401.865        401.865          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/07/2019 10:09:16 --
mdtest_hard_delete
-- started at 06/07/2019 11:12:00 --

mdtest-1.9.3 was launched with 180 total task(s) on 10 node(s)
Command line used: /g/data4/io500/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/g/data4/io500/datafiles/mdt_hard" "-n" "92256" "-x" "/g/data4/io500/datafiles/mdt_hard-stonewall"
Path: /g/data4/io500/datafiles
FS: 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 1.2%

180 tasks, 16606080 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      30645.582      30645.573      30645.577          0.002
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          1.865          1.865          1.865          0.000

-- finished at 06/07/2019 11:21:02 --
mdtest_hard_read
-- started at 06/07/2019 11:07:14 --

mdtest-1.9.3 was launched with 180 total task(s) on 10 node(s)
Command line used: /g/data4/io500/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/g/data4/io500/datafiles/mdt_hard" "-n" "92256" "-x" "/g/data4/io500/datafiles/mdt_hard-stonewall"
Path: /g/data4/io500/datafiles
FS: 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 1.2%

180 tasks, 16606080 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      58432.919      58432.894      58432.907          0.006
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/07/2019 11:11:58 --
mdtest_hard_stat
-- started at 06/07/2019 10:50:06 --

mdtest-1.9.3 was launched with 180 total task(s) on 10 node(s)
Command line used: /g/data4/io500/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/g/data4/io500/datafiles/mdt_hard" "-n" "92256" "-x" "/g/data4/io500/datafiles/mdt_hard-stonewall"
Path: /g/data4/io500/datafiles
FS: 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 2.2%

180 tasks, 16606080 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      93455.105      93454.993      93455.057          0.027
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/07/2019 10:53:04 --
mdtest_hard_write
-- started at 06/07/2019 10:15:39 --

mdtest-1.9.3 was launched with 180 total task(s) on 10 node(s)
Command line used: /g/data4/io500/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/g/data4/io500/datafiles/mdt_hard" "-n" "92256" "-x" "/g/data4/io500/datafiles/mdt_hard-stonewall" "-W" "0"
Path: /g/data4/io500/datafiles
FS: 15260.4 TiB   Used FS: 32.0%   Inodes: 4750.9 Mi   Used Inodes: 1.8%

180 tasks, 16606080 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      42371.480      42371.417      42371.444          0.019
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :        859.754        859.754        859.754          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/07/2019 10:22:11 --
result_summary
[RESULT] BW   phase 1            ior_easy_write               62.453 GB/s : time 368.92 seconds
[RESULT] IOPS phase 1         mdtest_easy_write              136.096 kiops : time 347.83 seconds
[RESULT] BW   phase 2            ior_hard_write                3.741 GB/s : time 379.65 seconds
[RESULT] IOPS phase 2         mdtest_hard_write               42.372 kiops : time 393.10 seconds
[RESULT] IOPS phase 3                      find              944.550 kiops : time  67.54 seconds
[RESULT] BW   phase 3             ior_easy_read              103.689 GB/s : time 222.20 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat              433.134 kiops : time 110.18 seconds
[RESULT] BW   phase 4             ior_hard_read                1.116 GB/s : time 1272.52 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat               93.455 kiops : time 178.71 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete               55.780 kiops : time 849.27 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               58.433 kiops : time 285.26 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete               30.646 kiops : time 543.55 seconds
[SCORE] Bandwidth 12.8232 GB/s : IOPS 110.372 kiops : TOTAL 37.6208
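
The TOTAL above is the geometric mean of the bandwidth and IOPS scores, which are themselves geometric means of the individual BW and IOPS phase results. A quick check reproduces the reported value:

echo "scale=4; sqrt(12.8232 * 110.372)" | bc -l    # prints roughly 37.62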