BEAR-AI

Institution University of Birmingham
Client Procs Per Node 2
Client Operating System RHEL
Client Operating System Version 7.5
Client Kernel Version 4.14.0-49.13.1.el7a.ppc64le

DATA SERVER

Storage Type SSD
Volatile Memory 14GB
Storage Interface NVMe
Network InfiniBand EDR
Software Version 5.0.2.3
OS Version GRIDScaler

INFORMATION

Client Nodes 10
Client Total Procs 20

METADATA

Easy Write 82.17 kIOP/s
Easy Stat 55.14 kIOP/s
Easy Delete 36.00 kIOP/s
Hard Write 15.95 kIOP/s
Hard Read 51.36 kIOP/s
Hard Stat 56.62 kIOP/s
Hard Delete 20.67 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh, which gets sourced at the end of this script.
# Please also edit 'extra_description' function.

set -euo pipefail  # better error handling

# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the creat phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # stonewalling timer: with the default test, stop (with wearout) after 300s; set to 0 if you never want to abort early
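# NOTE (editorial): the command lines recorded in the logs below pass
# -O stoneWallingWearOut=1, so ranks that hit the 300s timer keep issuing I/Os
# until every rank has matched the fastest rank's progress at the timer; the
# "stonewalling pairs accessed min/max" lines in the ior and mdtest output
# show the resulting spread.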

# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # set directories for where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, then this is a good place to do it.
  timestamp=$(date +%Y.%m.%d-%H.%M.%S)          # create a uniquifier
  io500_workdir=$PWD/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=$PWD/results/$timestamp      # the directory where the output results will be kept
  mkdir -p "$io500_workdir" "$io500_result_dir"
}

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, then binaries are in ./bin/
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
#  io500_mpirun="/rds/bear-apps/2019a/EL7-power9/software/OpenMPI/3.1.3-GCC-8.2.0-2.31.1/bin/mpirun"
  io500_mpirun="/usr/mpi/gcc/openmpi-4.0.0rc5/bin/mpirun"
  io500_mpiargs="-np 20 --map-by node -machinefile 10nodes --allow-run-as-root" # --mca btl openib,self,vader
}
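
# The "-machinefile 10nodes" argument above expects a plain-text host list,
# one hostname per line. The actual 10nodes file was not part of the
# submission; a hypothetical example for this run (only the first host is
# confirmed by the logs):
#   bear-pg0305u03a.bear.cluster
#   bear-pg0305u03b.bear.cluster
#   ... (8 more client hostnames)
# With "-np 20 --map-by node", mpirun places the 20 ranks round-robin across
# the 10 hosts, giving the 2 procs per node reported in extra_description.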

function setup_ior_easy {
  # io500_ior_easy_size is the amount of data written per rank in MiB units,
  # but it can be any number as long as it is somehow used to scale the IOR
  # runtime as part of io500_ior_easy_params
  io500_ior_easy_size=301620
  # 2 MiB writes, ~294.6 GiB per proc, file per proc
  io500_ior_easy_params="-t 2048k -b ${io500_ior_easy_size}m -F"
}
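
# Sanity check (editorial, assuming the values above): 20 ranks * 301620 MiB
# each gives the expected aggregate size seen in the ior_easy logs below:
#   awk 'BEGIN { printf "%.2f TiB\n", 20 * 301620 / 1024^2 }'   # -> 5.75 TiB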

function setup_mdt_easy {
  io500_mdtest_easy_params="-u -L" # unique dir per thread, files only at leaves
  io500_mdtest_easy_files_per_proc=1300000
}
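
# Sanity check: 20 ranks * 1300000 files/proc = 26000000 files, matching the
# "20 tasks, 26000000 files" lines in the mdtest_easy logs below; the
# stonewall stopped creates between ~1.23M and 1.30M files per rank.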

function setup_ior_hard {
  #io500_ior_hard_writes_per_proc=500000
  io500_ior_hard_writes_per_proc=280000
  io500_ior_hard_other_options="" #e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
}

function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=347086
  io500_mdtest_hard_other_options=""
}
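
# Sanity check: 20 ranks * 347086 files/proc = 6941720 files, matching the
# mdtest_hard logs below. The 3901-byte write/read per file ("-w 3901 -e 3901"
# in the logged command lines) comes from io500_fixed.sh, not from this file.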

function setup_find {
  #
  # setup the find command. This is an area where innovation is allowed.
  #    There are three default options provided: a serial find, a parallel
  #    version in Python, and a parallel version in C. The current default is
  #    the serial one, but it is very slow; we recommend either customizing it
  #    or using the C parallel version.
  #    For GPFS, we recommend the provided mmfind wrapper described below.
  #    Instructions below.
  #    If a custom approach is used, please provide enough info so others can reproduce.

  # the serial version that should run (SLOWLY) without modification
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it to 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/
  #   If you use io500_find_mpi="True", then this will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer another number (fewer might be better here), you can set
  #   io500_find_mpi to "False" and write a wrapper script that sets up MPI
  #   as you would like, then change io500_find_cmd to point to your wrapper
  #   script (see the sketch after this function).
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  # run pfind with stonewalling enabled
  io500_find_cmd_args="-s $io500_stonewall_timer -r $io500_result_dir/pfind_results"

  # for GPFS systems, you should probably use the provided mmfind wrapper
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}
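
# Hypothetical wrapper for the io500_find_mpi="False" route described above
# (not used in this run; paths assumed from this script and the logs):
#
#   #!/bin/bash
#   # pfind_wrapper.sh -- run pfind under its own, smaller MPI job
#   exec /usr/mpi/gcc/openmpi-4.0.0rc5/bin/mpirun -np 4 --map-by node \
#        -machinefile 10nodes /ai/io-500-dev/bin/pfind "$@"
#
# then set io500_find_mpi="False" and io500_find_cmd="$PWD/pfind_wrapper.sh".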

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss
  source ./utilities/io500_fixed.sh 2>&1 | tee "$io500_result_dir/io-500-summary.$timestamp.txt"
}

# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='BEAR-AI'      # e.g. Oakforest-PACS
  io500_info_institute_name='University of Birmingham'   # e.g. JCAHPC
  io500_info_storage_age_in_months='1' # not install date but age since last refresh
  io500_info_storage_install_date='04/19'  # MM/YY
  io500_info_filesystem='Spectrum Scale'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='5.0.2.3'
  io500_info_filesystem_vendor='DDN'
  # client side info
  io500_info_num_client_nodes='10'
  io500_info_procs_per_node='2'
  # server side info
  io500_info_num_metadata_server_nodes='xxx'
  io500_info_num_data_server_nodes='xxx'
  io500_info_num_data_storage_devices='xxx'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='xxx'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='infiniband' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='NVMe' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='DDN GS200nv storage system, Client hosts IBM AC922, Mellanox EDR InfiniBand'
}

main
ior_easy_read
IOR-3.3.0+dev: MPI Coordinated Test of Parallel I/O
Began               : Thu May 23 22:25:15 2019
Command line        : /ai/io-500-dev/bin/ior -r -R -t 2048k -b 301620m -F -i 1 -C -Q 1 -g -G 27 -k -e -o /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy/ior_file_easy -O stoneWallingStatusFile=/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy/stonewall
Machine             : Linux bear-pg0305u03a.bear.cluster
TestID              : 0
StartTime           : Thu May 23 22:25:15 2019
Path                : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy
FS                  : 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 50.1%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 20
clients per node    : 2
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 294.55 GiB
aggregate filesize  : 5.75 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
WARNING: Expected aggregate file size       = 6325429862400.
WARNING: Stat() of aggregate file size      = 4620864716800.
WARNING: Using actual aggregate bytes moved = 4620864716800.
read      13090      308858880  2048.00    0.002598   336.65     0.000230   336.65     0   
Max Read:  13090.20 MiB/sec (13726.07 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read        13090.20   13090.20   13090.20       0.00    6545.10    6545.10    6545.10       0.00  336.64879     0     20   2    1   1     1        1         0    0      1 316271493120  2097152 4406800.0 POSIX      0
Finished            : Thu May 23 22:30:52 2019
ior_easy_write
IOR-3.3.0+dev: MPI Coordinated Test of Parallel I/O
Began               : Thu May 23 22:00:15 2019
Command line        : /ai/io-500-dev/bin/ior -w -t 2048k -b 301620m -F -i 1 -C -Q 1 -g -G 27 -k -e -o /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy/ior_file_easy -O stoneWallingStatusFile=/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux bear-pg0305u03a.bear.cluster
TestID              : 0
StartTime           : Thu May 23 22:00:15 2019
Path                : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy
FS                  : 109.5 TiB   Used FS: 15.8%   Inodes: 147.6 Mi   Used Inodes: 29.5%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 20
clients per node    : 2
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 294.55 GiB
aggregate filesize  : 5.75 TiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 82073 max: 110170 -- min data: 160.3 GiB mean data: 191.2 GiB time: 300.3s
WARNING: Expected aggregate file size       = 6325429862400.
WARNING: Stat() of aggregate file size      = 4620864716800.
WARNING: Using actual aggregate bytes moved = 4620864716800.
WARNING: maybe caused by deadlineForStonewalling
write     12495      308858880  2048.00    0.444568   352.25     0.000586   352.69     0   
Max Write: 12494.82 MiB/sec (13101.76 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write       12494.82   12494.82   12494.82       0.00    6247.41    6247.41    6247.41       0.00  352.69025     0     20   2    1   1     1        1         0    0      1 316271493120  2097152 4406800.0 POSIX      0
Finished            : Thu May 23 22:06:08 2019
ior_hard_read
IOR-3.3.0+dev: MPI Coordinated Test of Parallel I/O
Began               : Thu May 23 22:38:47 2019
Command line        : /ai/io-500-dev/bin/ior -r -R -s 280000 -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard/IOR_file -O stoneWallingStatusFile=/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard/stonewall
Machine             : Linux bear-pg0305u03a.bear.cluster
TestID              : 0
StartTime           : Thu May 23 22:38:47 2019
Path                : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard
FS                  : 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 50.1%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 280000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 20
clients per node    : 2
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 245.17 GiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      764.89     45.91      45.91      0.001879   328.22     0.000368   328.22     0   
Max Read:  764.89 MiB/sec (802.04 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read          764.89     764.89     764.89       0.00   17061.83   17061.83   17061.83       0.00  328.21798     0     20   2    1   0     1        1         0    0 280000    47008    47008  251049.8 POSIX      0
Finished            : Thu May 23 22:44:16 2019
ior_hard_write
IOR-3.3.0+dev: MPI Coordinated Test of Parallel I/O
Began               : Thu May 23 22:11:29 2019
Command line        : /ai/io-500-dev/bin/ior -w -s 280000 -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard/IOR_file -O stoneWallingStatusFile=/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux bear-pg0305u03a.bear.cluster
TestID              : 0
StartTime           : Thu May 23 22:11:29 2019
Path                : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard
FS                  : 109.5 TiB   Used FS: 19.6%   Inodes: 147.6 Mi   Used Inodes: 43.0%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 280000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 20
clients per node    : 2
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 245.17 GiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 217497 max: 280000 -- min data: 9.5 GiB mean data: 10.5 GiB time: 300.0s
write     687.20     45.91      45.91      0.010299   365.31     0.000278   365.32     0   
Max Write: 687.20 MiB/sec (720.59 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write         687.20     687.20     687.20       0.00   15328.99   15328.99   15328.99       0.00  365.32081     0     20   2    1   0     1        1         0    0 280000    47008    47008  251049.8 POSIX      0
Finished            : Thu May 23 22:17:34 2019
mdtest_easy_delete
-- started at 05/23/2019 22:46:05 --

mdtest-3.3.0+dev was launched with 20 total task(s) on 10 node(s)
Command line used: /ai/io-500-dev/bin/mdtest "-r" "-F" "-d" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_easy" "-n" "1300000" "-u" "-L" "-x" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_easy-stonewall"
Path: /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12
FS: 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 50.1%

20 tasks, 26000000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      36002.228      36002.220      36002.225          0.002
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.328          0.328          0.328          0.000

-- finished at 05/23/2019 22:58:11 --
mdtest_easy_stat
-- started at 05/23/2019 22:30:54 --

mdtest-3.3.0+dev was launched with 20 total task(s) on 10 node(s)
Command line used: /ai/io-500-dev/bin/mdtest "-T" "-F" "-d" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_easy" "-n" "1300000" "-u" "-L" "-x" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_easy-stonewall"
Path: /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12
FS: 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 50.1%

20 tasks, 26000000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      55143.005      55143.000      55143.002          0.001
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/23/2019 22:38:45 --
mdtest_easy_write
-- started at 05/23/2019 22:06:10 --

mdtest-3.3.0+dev was launched with 20 total task(s) on 10 node(s)
Command line used: /ai/io-500-dev/bin/mdtest "-C" "-F" "-d" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_easy" "-n" "1300000" "-u" "-L" "-x" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_easy-stonewall" "-W" "300"
Path: /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12
FS: 109.5 TiB   Used FS: 19.1%   Inodes: 147.6 Mi   Used Inodes: 29.5%

20 tasks, 26000000 files
Continue stonewall hit min: 1227479 max: 1300000 avg: 1284303.1 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      82172.600      82172.583      82172.593          0.005
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          7.202          7.202          7.202          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/23/2019 22:11:27 --
mdtest_hard_delete
-- started at 05/23/2019 23:00:13 --

mdtest-3.3.0+dev was launched with 20 total task(s) on 10 node(s)
Command line used: /ai/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard" "-n" "347086" "-x" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard-stonewall"
Path: /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12
FS: 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 33.3%

20 tasks, 6941720 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      20670.970      20670.964      20670.967          0.002
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.154          0.154          0.154          0.000

-- finished at 05/23/2019 23:05:08 --
mdtest_hard_read
-- started at 05/23/2019 22:58:13 --

mdtest-3.3.0+dev was launched with 20 total task(s) on 10 node(s)
Command line used: /ai/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard" "-n" "347086" "-x" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard-stonewall"
Path: /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12
FS: 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 34.6%

20 tasks, 6941720 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      51357.825      51357.809      51357.820          0.005
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/23/2019 23:00:09 --
mdtest_hard_stat
-- started at 05/23/2019 22:44:18 --

mdtest-3.3.0+dev was launched with 20 total task(s) on 10 node(s)
Command line used: /ai/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard" "-n" "347086" "-x" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard-stonewall"
Path: /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12
FS: 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 50.1%

20 tasks, 6941720 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      56617.282      56617.265      56617.272          0.005
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/23/2019 22:46:03 --
mdtest_hard_write
-- started at 05/23/2019 22:17:36 --

mdtest-3.3.0+dev was launched with 20 total task(s) on 10 node(s)
Command line used: /ai/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard" "-n" "347086" "-x" "/ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12/mdt_hard-stonewall" "-W" "300"
Path: /ai/io-500-dev/datafiles/io500.2019.05.23-22.00.12
FS: 109.5 TiB   Used FS: 19.8%   Inodes: 147.6 Mi   Used Inodes: 46.3%

20 tasks, 6941720 files
Continue stonewall hit min: 228318 max: 298409 avg: 237466.6 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      15946.279      15946.274      15946.276          0.001
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :       4471.212       4471.212       4471.212          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/23/2019 22:23:51 --
result_summary
[RESULT] BW   phase 1            ior_easy_write               12.202 GB/s : time 352.69 seconds
[RESULT] IOPS phase 1         mdtest_easy_write               82.173 kiops : time 318.87 seconds
[RESULT] BW   phase 2            ior_hard_write                0.671 GB/s : time 365.32 seconds
[RESULT] IOPS phase 2         mdtest_hard_write               15.946 kiops : time 376.47 seconds
[RESULT] IOPS phase 3                      find              389.490 kiops : time  82.08 seconds
[RESULT] BW   phase 3             ior_easy_read               12.783 GB/s : time 336.65 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat               55.143 kiops : time 473.60 seconds
[RESULT] BW   phase 4             ior_hard_read                0.747 GB/s : time 328.22 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat               56.617 kiops : time 107.58 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete               36.002 kiops : time 727.60 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               51.358 kiops : time 118.36 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete               20.671 kiops : time 298.71 seconds
[SCORE] Bandwidth 2.97365 GB/s : IOPS 52.854 kiops : TOTAL 12.5367
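
How the [SCORE] line is derived (a minimal sketch, not part of the submitted
files): the bandwidth score is the geometric mean of the four BW phase results
in GB/s, the IOPS score is the geometric mean of the eight IOPS phase results
in kiops, and TOTAL is the geometric mean of those two scores. The snippet
below reproduces the numbers from the [RESULT] lines above:

#!/bin/bash
# geometric mean of whitespace-separated values read from stdin
geomean() { awk '{ p = 1; for (i = 1; i <= NF; i++) p *= $i; print p^(1/NF) }'; }

bw=$(echo "12.202 0.671 12.783 0.747" | geomean)                                   # BW phases, GB/s
iops=$(echo "82.173 15.946 389.490 55.143 56.617 36.002 51.358 20.671" | geomean)  # IOPS phases, kiops
total=$(echo "$bw $iops" | awk '{ printf "%.4f\n", sqrt($1 * $2) }')
echo "[SCORE] Bandwidth $bw GB/s : IOPS $iops kiops : TOTAL $total"
# prints approximately: [SCORE] Bandwidth 2.97 GB/s : IOPS 52.85 kiops : TOTAL 12.5367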