AI400

Institution DDN
Client Procs Per Node 16
Client Operating System CentOS
Client Operating System Version 7.5
Client Kernel Version 3.10.0-862.el7.x86_64

DATA SERVER

Storage Type SSD
Volatile Memory 64 GB
Storage Interface NVMe
Network InfiniBand
Software Version 2.12.53
OS Version CentOS 7.6

INFORMATION

Client Nodes 10
Client Total Procs 160
Metadata Nodes 4
Metadata Storage Devices 0
Data Nodes 4
Data Storage Devices 12

METADATA

Easy Write 253.93 kIOP/s
Easy Stat 2,765.04 kIOP/s
Easy Delete 160.12 kIOP/s
Hard Write 40.22 kIOP/s
Hard Read 4,247.80 kIOP/s
Hard Stat 3,062.62 kIOP/s
Hard Delete 45.00 kIOP/s

Submitted Files

io500
#!/bin/bash

#LUSTRE_MDS=mds[11-14]
LUSTRE_MDS=sv16[0-3],sv19[0-3]
LUSTRE_OSS=sv16[0-3],sv19[0-3]
LUSTRE_CLIENT=c0[82-83,85-87],c2[10,12-14],c208
MGS=172.16.254.190@o2ib
FSNAME=/es90
MNT=/es90
DSH=pdsh

#PATH=/usr/mpi/gcc/openmpi-4.0.2a1/bin:$PATH
#PATH=/work/bin/benchmark_bin/openmpi-3.1.3/bin:$PATH

# Lustre Server Setting
${DSH} -w ${LUSTRE_MDS} "echo 128 > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
${DSH} -w ${LUSTRE_MDS} lctl set_param \
mdt.*.dom_lock=trylock

${DSH} -w ${LUSTRE_OSS} "sysctl -w vm.min_free_kbytes=524288"
${DSH} -w ${LUSTRE_OSS} lctl set_param \
osd-ldiskfs.*.read_cache_enable=0 \
obdfilter.*.writethrough_cache_enable=0 \
obdfilter.*.brw_size=16 \
obdfilter.*.precreate_batch=1024
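
# Optional: read the parameters back to confirm the OSS settings took
# effect (verification sketch; not part of the timed run):
#${DSH} -w ${LUSTRE_OSS} lctl get_param \
#    osd-ldiskfs.*.read_cache_enable \
#    obdfilter.*.writethrough_cache_enable \
#    obdfilter.*.brw_size \
#    obdfilter.*.precreate_batch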

# ReMount Lustre Client
${DSH} -w ${LUSTRE_CLIENT} umount -t lustre -a
${DSH} -w ${LUSTRE_CLIENT} mount -t lustre ${MGS}:${FSNAME} ${MNT}
sleep 2

# Lustre Client Setting
${DSH} -w ${LUSTRE_CLIENT} lctl set_param \
osc.*.max_pages_per_rpc=16M \
osc.*.max_rpcs_in_flight=16 \
osc.*.max_dirty_mb=512 \
osc.*.checksums=0 \
llite.*.max_read_ahead_mb=2048 \
ldlm.namespaces.*.lru_size=4000000 \
mdc.*.max_rpcs_in_flight=128 \
mdc.*.max_mod_rpcs_in_flight=127 \
llite.*.max_read_ahead_per_file_mb=256
sleep 2
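
# Optional: read the client tunables back to verify (sketch; not run here):
#${DSH} -w ${LUSTRE_CLIENT} lctl get_param \
#    osc.*.max_rpcs_in_flight osc.*.max_dirty_mb \
#    mdc.*.max_rpcs_in_flight llite.*.max_read_ahead_mb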

# Cleanup & TRIM on all OSTs
${DSH} -w ${LUSTRE_CLIENT} lctl set_param ldlm.namespaces.*.lru_size=clear
${DSH} -w ${LUSTRE_OSS} fstrim -av
${DSH} -w ${LUSTRE_MDS},${LUSTRE_OSS} "echo 3 > /proc/sys/vm/drop_caches"
${DSH} -w ${LUSTRE_CLIENT} "cpupower frequency-set -g performance"

#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set here are needed by io500_fixed.sh, which gets sourced at the end of this script.
# Please also edit the 'extra_description' function.

set -euo pipefail  # better error handling

# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the creat phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # Stonewalling timer: stop with wear-out after 300s in the default test; set to 0 if you never want to abort.

# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # set directories for where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it.
  timestamp=$(date +%Y.%m.%d-%H.%M.%S)          # create a uniquifier
  #io500_workdir=$PWD/datafiles/io500.$timestamp # directory where the data will be stored
  io500_workdir=${MNT}/io500.out
  io500_result_dir=$PWD/results/$timestamp      # the directory where the output results will be kept
  mkdir -p $io500_workdir $io500_result_dir
  mkdir -p $io500_workdir/ior_hard $io500_workdir/ior_easy
  lfs setdirstripe -c 8 $io500_workdir/mdt_easy
  lfs setdirstripe -c 8 $io500_workdir/mdt_hard
  lfs setdirstripe -c 8 -D $io500_workdir/mdt_easy
  lfs setdirstripe -c 8 -D $io500_workdir/mdt_hard

  lfs setstripe -L mdt -E 1M $io500_workdir/mdt_easy
  #lfs setstripe -L mdt -E 1M $io500_workdir/mdt_hard
  lfs setstripe -C 320 -S 16M $io500_workdir/ior_hard
}
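
# Optional: inspect the layouts created above (sketch; not part of the run).
# 'lfs getdirstripe' shows the 8-way directory striping across MDTs, and
# 'lfs getstripe -d' shows the Data-on-MDT and overstriped OST layouts:
#   lfs getdirstripe ${MNT}/io500.out/mdt_easy
#   lfs getstripe -d ${MNT}/io500.out/mdt_easy
#   lfs getstripe -d ${MNT}/io500.out/ior_hard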

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, then binaries are in ./bin/
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  io500_mpirun="/work/home/sihara/mpi/mpich-3.3/bin/mpirun"
  io500_mpiargs="-np 160 -hostfile hostfile -ppn 16"
}
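
# The hostfile given to mpirun above is assumed to simply list the ten client
# nodes expanded from LUSTRE_CLIENT, one per line (c082 c083 c085 c086 c087
# c208 c210 c212 c213 c214); with "-ppn 16" that yields the 160 ranks used in
# every phase.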

function setup_ior_easy {
  # io500_ior_easy_size is the amount of data written per rank in MiB units,
  # but it can be any number as long as it is somehow used to scale the IOR
  # runtime as part of io500_ior_easy_params
  io500_ior_easy_size=120000
  # 2 MiB transfers, 120000 MiB (~117 GiB) per proc, file per proc
  io500_ior_easy_params="-t 2048k -b ${io500_ior_easy_size}m -F"
}
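
# Sanity math (sketch): 160 ranks writing 120000 MiB each targets
#   echo $(( 160 * 120000 / 1024 / 1024 ))   # -> 18 (TiB; ~18.31 TiB exactly)
# which matches the "aggregate filesize : 18.31 TiB" reported by IOR below.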

function setup_mdt_easy {
  io500_mdtest_easy_params="-u -L" # unique dir per thread, files only at leaves
  io500_mdtest_easy_files_per_proc=500000
}

function setup_ior_hard {
  io500_ior_hard_writes_per_proc=230000
  io500_ior_hard_other_options="" #e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
}

function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=81500
  io500_mdtest_hard_other_options=""
}
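
# Note: the 3901-byte writes/reads ("-w 3901 -e 3901") visible in the
# mdtest_hard command lines below are not set here; they are fixed by the
# mdtest-hard definition in io500_fixed.sh.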

function setup_find {
  #
  # setup the find command. This is an area where innovation is allowed.
  #    There are three default options provided: a serial find, a parallel
  #    version in Python, and a parallel version in C. The current default is
  #    the serial one, but it is very slow; we recommend either customizing it
  #    or using the C parallel version. For GPFS, we recommend the provided
  #    mmfind wrapper described below.
  #    If a custom approach is used, please provide enough info so others can reproduce it.

  # the serial version that should run (SLOWLY) without modification
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it at 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/
  #   If you use io500_find_mpi="True", then this will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer another number (and fewer might be better here),
  #   then you can set io500_find_mpi to "False" and write a wrapper
  #   script for this which sets up MPI as you would like.  Then change
  #   io500_find_cmd to point to your wrapper script.
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  # run pfind with the stonewall timer
  io500_find_cmd_args="-q 200000 -s $io500_stonewall_timer -r $io500_result_dir/pfind_results"

  # for GPFS systems, you should probably use the provided mmfind wrapper
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script. Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss.
  source ./utilities/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='Bancholab'      # e.g. Oakforest-PACS
  io500_info_institute_name='DDN'   # e.g. JCAHPC
  io500_info_storage_age_in_months='2' # not install date but age since last refresh
  io500_info_storage_install_date='05/19'  # MM/YY
  io500_info_filesystem='Lustre'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='2.13'
  io500_info_filesystem_vendor='DDN'
  # client side info
  io500_info_num_client_nodes='10'
  io500_info_procs_per_node='16'
  # server side info
  io500_info_num_metadata_server_nodes='8'
  io500_info_num_data_server_nodes='8'
  io500_info_num_data_storage_devices='40'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='8'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='infiniband' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='NVMe' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='WhateverElseYouThinkRelevant'
}

main
ior_easy_read
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began               : Sun Jun  9 13:03:00 2019
Command line        : /work/home/sihara/io-500-dev/bin/ior -r -R -t 2048k -b 120000m -F -i 1 -C -Q 1 -g -G 27 -k -e -o /es90/io500.out/ior_easy/ior_file_easy -O stoneWallingStatusFile=/es90/io500.out/ior_easy/stonewall
Machine             : Linux c082
TestID              : 0
StartTime           : Sun Jun  9 13:03:00 2019
Path                : /es90/io500.out/ior_easy
FS                  : 106.7 TiB   Used FS: 18.6%   Inodes: 952.3 Mi   Used Inodes: 9.3%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /es90/io500.out/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 160
clients per node    : 16
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 117.19 GiB
aggregate filesize  : 18.31 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      75918      122880000  2048.00    0.012357   252.87     0.013549   252.90     0   
Max Read:  75917.93 MiB/sec (79605.71 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read        75917.93   75917.93   75917.93       0.00   37958.96   37958.96   37958.96       0.00  252.90470     0    160  16    1   1     1        1         0    0      1 125829120000  2097152 19200000.0 POSIX      0
Finished            : Sun Jun  9 13:07:13 2019
ior_easy_write
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began               : Sun Jun  9 12:40:56 2019
Command line        : /work/home/sihara/io-500-dev/bin/ior -w -t 2048k -b 120000m -F -i 1 -C -Q 1 -g -G 27 -k -e -o /es90/io500.out/ior_easy/ior_file_easy -O stoneWallingStatusFile=/es90/io500.out/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux c082
TestID              : 0
StartTime           : Sun Jun  9 12:40:56 2019
Path                : /es90/io500.out/ior_easy
FS                  : 106.7 TiB   Used FS: 0.0%   Inodes: 864.0 Mi   Used Inodes: 0.0%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /es90/io500.out/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 160
clients per node    : 16
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 117.19 GiB
aggregate filesize  : 18.31 TiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 54440 max: 60000 -- min data: 106.3 GiB mean data: 117.0 GiB time: 300.0s
write     61523      122880000  2048.00    0.010937   312.05     0.014708   312.08     0   
Max Write: 61522.69 MiB/sec (64511.21 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write       61522.69   61522.69   61522.69       0.00   30761.34   30761.34   30761.34       0.00  312.07999     0    160  16    1   1     1        1         0    0      1 125829120000  2097152 19200000.0 POSIX      0
Finished            : Sun Jun  9 12:46:08 2019
ior_hard_read
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began               : Sun Jun  9 13:07:45 2019
Command line        : /work/home/sihara/io-500-dev/bin/ior -r -R -s 230000 -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /es90/io500.out/ior_hard/IOR_file -O stoneWallingStatusFile=/es90/io500.out/ior_hard/stonewall
Machine             : Linux c082
TestID              : 0
StartTime           : Sun Jun  9 13:07:45 2019
Path                : /es90/io500.out/ior_hard
FS                  : 106.7 TiB   Used FS: 18.6%   Inodes: 952.3 Mi   Used Inodes: 9.3%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /es90/io500.out/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 230000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 160
clients per node    : 16
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 1.57 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      7482       45.91      45.91      0.007307   220.47     0.025692   220.50     0   
Max Read:  7481.79 MiB/sec (7845.23 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read         7481.79    7481.79    7481.79       0.00  166891.33  166891.33  166891.33       0.00  220.50276     0    160  16    1   0     1        1         0    0 230000    47008    47008 1649755.9 POSIX      0
Finished            : Sun Jun  9 13:11:29 2019
ior_hard_write
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began               : Sun Jun  9 12:51:25 2019
Command line        : /work/home/sihara/io-500-dev/bin/ior -w -s 230000 -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /es90/io500.out/ior_hard/IOR_file -O stoneWallingStatusFile=/es90/io500.out/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine             : Linux c082
TestID              : 0
StartTime           : Sun Jun  9 12:51:25 2019
Path                : /es90/io500.out/ior_hard
FS                  : 106.7 TiB   Used FS: 17.2%   Inodes: 940.3 Mi   Used Inodes: 8.1%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /es90/io500.out/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 230000
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 160
clients per node    : 16
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 1.57 TiB
stonewallingTime    : 300
stoneWallingWearOut : 1

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 183350 max: 230000 -- min data: 8.0 GiB mean data: 9.2 GiB time: 300.0s
write     4694       45.91      45.91      0.032990   351.39     0.000876   351.43     0   
Max Write: 4694.46 MiB/sec (4922.50 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write        4694.46    4694.46    4694.46       0.00  104716.18  104716.18  104716.18       0.00  351.42611     0    160  16    1   0     1        1         0    0 230000    47008    47008 1649755.9 POSIX      0
Finished            : Sun Jun  9 12:57:17 2019
mdtest_easy_delete
-- started at 06/09/2019 13:11:35 --

mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-r" "-F" "-d" "/es90/io500.out/mdt_easy" "-n" "500000" "-u" "-L" "-x" "/es90/io500.out/mdt_easy-stonewall"
Path: /es90/io500.out
FS: 106.7 TiB   Used FS: 18.6%   Inodes: 952.3 Mi   Used Inodes: 9.3%

160 tasks, 80000000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :     160116.491     160109.803     160116.224          1.031
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          1.210          1.210          1.210          0.000

-- finished at 06/09/2019 13:19:55 --
mdtest_easy_stat
-- started at 06/09/2019 13:07:15 --

mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-T" "-F" "-d" "/es90/io500.out/mdt_easy" "-n" "500000" "-u" "-L" "-x" "/es90/io500.out/mdt_easy-stonewall"
Path: /es90/io500.out
FS: 106.7 TiB   Used FS: 18.6%   Inodes: 952.3 Mi   Used Inodes: 9.3%

160 tasks, 80000000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :    2765039.815    2763796.622    2763825.720         97.337
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/09/2019 13:07:44 --
mdtest_easy_write
-- started at 06/09/2019 12:46:09 --

mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-C" "-F" "-d" "/es90/io500.out/mdt_easy" "-n" "500000" "-u" "-L" "-x" "/es90/io500.out/mdt_easy-stonewall" "-W" "300"
Path: /es90/io500.out
FS: 106.7 TiB   Used FS: 17.2%   Inodes: 864.0 Mi   Used Inodes: 0.0%

160 tasks, 80000000 files
Continue stonewall hit min: 472777 max: 500000 avg: 489367.7 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :     253930.470     253929.732     253930.029          0.195
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          7.442          7.442          7.442          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/09/2019 12:51:24 --
mdtest_hard_delete
-- started at 06/09/2019 13:20:01 --

mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall"
Path: /es90/io500.out
FS: 106.7 TiB   Used FS: 18.6%   Inodes: 876.0 Mi   Used Inodes: 1.4%

160 tasks, 13040000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      44998.688      44996.624      44998.645          0.162
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          5.042          5.042          5.042          0.000

-- finished at 06/09/2019 13:24:41 --
mdtest_hard_read
-- started at 06/09/2019 13:19:57 --

mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall"
Path: /es90/io500.out
FS: 106.7 TiB   Used FS: 18.6%   Inodes: 876.0 Mi   Used Inodes: 1.4%

160 tasks, 13040000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :    4247798.599    4247280.303    4247591.098        185.900
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/09/2019 13:20:00 --
mdtest_hard_stat
-- started at 06/09/2019 13:11:30 --

mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall"
Path: /es90/io500.out
FS: 106.7 TiB   Used FS: 18.6%   Inodes: 952.3 Mi   Used Inodes: 9.3%

160 tasks, 13040000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :    3062624.471    3062341.895    3062521.931         85.892
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/09/2019 13:11:34 --
mdtest_hard_write
-- started at 06/09/2019 12:57:19 --

mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall" "-W" "300"
Path: /es90/io500.out
FS: 106.7 TiB   Used FS: 18.6%   Inodes: 940.3 Mi   Used Inodes: 8.1%

160 tasks, 13040000 files
Continue stonewall hit min: 75304 max: 78687 avg: 77149.5 

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      40215.107      40213.540      40215.052          0.171
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :         84.438         84.438         84.438          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 06/09/2019 13:02:32 --
result_summary
[RESULT] BW   phase 1            ior_easy_write               60.081 GB/s : time 312.08 seconds
[RESULT] IOPS phase 1         mdtest_easy_write              253.930 kiops : time 316.20 seconds
[RESULT] BW   phase 2            ior_hard_write                4.584 GB/s : time 351.43 seconds
[RESULT] IOPS phase 2         mdtest_hard_write               40.215 kiops : time 314.32 seconds
[RESULT] IOPS phase 3                      find             3351.390 kiops : time  27.63 seconds
[RESULT] BW   phase 3             ior_easy_read               74.139 GB/s : time 252.90 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat             2765.040 kiops : time  31.09 seconds
[RESULT] BW   phase 4             ior_hard_read                7.307 GB/s : time 220.50 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat             3062.620 kiops : time   4.95 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete              160.116 kiops : time 501.45 seconds
[RESULT] IOPS phase 7          mdtest_hard_read             4247.800 kiops : time   5.15 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete               44.999 kiops : time 280.96 seconds
[SCORE] Bandwidth 19.6533 GB/s : IOPS 553.975 kiops : TOTAL 104.343
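
The [SCORE] line follows the IO500 scoring rule: the bandwidth score is the
geometric mean of the four BW phase results (GB/s), the IOPS score is the
geometric mean of the eight IOPS phase results (kiops), and TOTAL is the
square root of their product. A minimal sketch that recomputes it from the
[RESULT] lines above (values copied from this run):

#!/bin/bash
# Recompute the IO500 score from the per-phase [RESULT] values.
awk 'BEGIN {
  split("60.081 4.584 74.139 7.307", bw)                 # BW phases, GB/s
  split("253.930 40.215 3351.390 2765.040 3062.620 160.116 4247.800 44.999", iops)  # kiops
  for (i = 1; i <= 4; i++) b += log(bw[i]);   b = exp(b / 4)
  for (i = 1; i <= 8; i++) o += log(iops[i]); o = exp(o / 8)
  printf "Bandwidth %.4f GB/s : IOPS %.3f kiops : TOTAL %.3f\n", b, o, sqrt(b * o)
}'
# Prints approximately 19.653 GB/s, 553.97 kiops, TOTAL 104.34; tiny drift
# from the [SCORE] line is expected because the [RESULT] values are rounded.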