- io500
-
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit 'extra_description' function.
#SBATCH -A SUPPORT-CPU
#SBATCH -p skylake
#SBATCH -o slurm-out/io_500_out_%J
#SBATCH -e slurm-err/io_500_err_%J
#SBATCH -t 08:00:00
#SBATCH --exclusive
#SBATCH --job-name=io500
#SBATCH --dependency=singleton
#SBATCH --reservation=dac_o
module purge
module load rhel7/default-peta4 ior/3.1.0 hdf5-1.10.1-intel-17.0.4-nsuex4z
ROOT=$PWD
workdir=/dac/fs1001
set -xeuo pipefail # better error handling
# Turn these to "True" successively while you debug and tune this benchmark.
# For each one that you turn to "True", go and edit the appropriate function.
# To find the function name, see the 'main' function.
# These are listed in the order in which they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True" # does the create phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the create phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False" # this one is optional
io500_cleanup_workdir=/dac/fs1001
io500_stonewall_timer=0 # Stonewalling timer: stop with wearout after 300 s in a default run; set to 0 if you never want to abort early.
# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
setup_directories
setup_paths
setup_ior_easy # required if you want a complete score
setup_ior_hard # required if you want a complete score
setup_mdt_easy # required if you want a complete score
setup_mdt_hard # required if you want a complete score
setup_find # required if you want a complete score
setup_mdreal # optional
run_benchmarks
}
function setup_directories {
# Set the directories where the benchmark files are created and where the results will go.
# If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it (see the striping sketch after this function).
timestamp=`date +%Y.%m.%d-%H.%M.%S` # create a uniquifier
tag=24n-10nv
io500_workdir=/dac/fs1001
io500_result_dir=$PWD/results.${tag} # the directory where the output results will be kept
mkdir -p $io500_workdir $io500_result_dir
# precreate directories for lustre with the appropriate striping
mkdir -p ${io500_workdir}/ior_easy
#lfs setstripe --stripe-count 2 ${io500_workdir}/ior_easy
# mkdir -p ${io500_workdir}/ior_hard
#lfs setstripe --stripe-count 100 ${io500_workdir}/ior_hard
}
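# A minimal striping sketch, assuming the BeeGFS target reported in extra_description:
# BeeGFS uses beegfs-ctl rather than lfs setstripe for per-directory striping. The
# target counts and chunk size below are illustrative values, not tuned settings
# taken from this run.
#beegfs-ctl --setpattern --numtargets=4 --chunksize=1m ${io500_workdir}/ior_easy
#beegfs-ctl --setpattern --numtargets=36 --chunksize=1m ${io500_workdir}/ior_hard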
function setup_paths {
# Set the paths to the binaries. If you ran ./utilities/prepare.sh successfully, then binaries are in ./bin/
io500_ior_cmd=$PWD/bin/ior
io500_mdtest_cmd=$PWD/bin/mdtest
io500_mdreal_cmd=$PWD/bin/md-real-io
io500_mpirun="mpirun"
io500_mpiargs=""
}
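# A hedged sketch for the MPI launch settings, not the values used in this run: with
# mpirun under a SLURM allocation, the rank layout seen in the logs below (184 nodes x
# 32 ranks per node = 5888 ranks) could be requested explicitly, for example:
#io500_mpiargs="-np 5888 -ppn 32"               # Intel MPI style flags (assumption)
#io500_mpiargs="-np 5888 --map-by ppr:32:node"  # Open MPI style flags (assumption)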
function setup_ior_easy {
# io500_ior_easy_size is the amount of data written per rank (here in GiB, via the
# "g" suffix below), but it can be any number as long as it is somehow used to scale
# the IOR runtime as part of io500_ior_easy_params.
io500_ior_easy_size=8
# 1 MiB transfers, 8 GiB per proc, file per proc
io500_ior_easy_params="-i2 -B -a POSIX -t 1m -b ${io500_ior_easy_size}g -F"
}
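# Sanity-check sketch (assumption: 184 nodes x 32 ranks per node = 5888 ranks, as in
# the logs below): the aggregate ior_easy file size is ranks * io500_ior_easy_size GiB,
# i.e. 5888 * 8 GiB = 47104 GiB, the ~46 TiB "aggregate filesize" reported by IOR.
#echo "ior_easy aggregate GiB: $(( 5888 * 8 ))"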
function setup_mdt_easy {
io500_mdtest_easy_params="-i1 -u -L -b 8" # unique dir per thread, files only at leaves
io500_mdtest_easy_files_per_proc=25000
}
function setup_ior_hard {
io500_ior_hard_writes_per_proc=10000
io500_ior_hard_other_options="-i1" #e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
}
function setup_mdt_hard {
io500_mdtest_hard_files_per_proc=2000
io500_mdtest_hard_other_options=""
}
function setup_find {
#
# Set up the find command. This is an area where innovation is allowed.
# Three default options are provided: a serial find, a parallel Python version,
# and a parallel C version. The current default is the serial version, but it is
# very slow, so we recommend either customizing it or using the C parallel version.
# For GPFS, we recommend the provided mmfind wrapper described below.
# Instructions follow.
# If a custom approach is used, please provide enough info so others can reproduce it.
# the serial version that should run (SLOWLY) without modification
#io500_find_mpi="False"
#io500_find_cmd=$PWD/bin/sfind.sh
#io500_find_cmd_args=""
# a parallel version in C, the -s adds a stonewall
# for a real run, turn -s (stonewall) off or set it at 300 or more
# to prepare this (assuming you've run ./utilities/prepare.sh already):
# > cd build/pfind
# > ./prepare.sh
# > ./compile.sh
# > cp pfind ../../bin/
# If you use io500_find_mpi="True", then this will run with the same
# number of MPI nodes and ranks as the other phases.
# If you prefer another number (fewer may be better here), set
# io500_find_mpi to "False" and write a wrapper script that sets up MPI
# as you would like, then change io500_find_cmd to point to your wrapper
# script (a sketch of such a wrapper is given at the end of this function).
io500_find_mpi="True"
io500_find_cmd="$PWD/bin/pfind"
# uses stonewalling, run pfind
io500_find_cmd_args="-s $io500_stonewall_timer -r $io500_result_dir/pfind_results"
# for GPFS systems, you should probably use the provided mmfind wrapper
# if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
#io500_find_mpi="False"
#io500_find_cmd="$PWD/bin/mmfind.sh"
#io500_find_cmd_args=""
}
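# A hedged sketch of such a wrapper (hypothetical file bin/pfind_wrapper.sh), for the
# io500_find_mpi="False" case described above; it runs pfind on a fixed, smaller rank
# count regardless of the job size:
#   #!/bin/bash
#   # launch pfind on 64 ranks, forwarding the arguments supplied via io500_find_cmd_args
#   mpirun -np 64 "$PWD/bin/pfind" "$@"
# With this in place, set io500_find_mpi="False" and io500_find_cmd=$PWD/bin/pfind_wrapper.sh.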
function setup_mdreal {
io500_mdreal_params="-P=5000 -I=1000"
}
function run_benchmarks {
# Important: source the io500_fixed.sh script. Do not change it. If you discover
# a need to change it, please email the mailing list to discuss it.
source ./utilities/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}
# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
# top level info
io500_info_system_name='Cumulus' # e.g. Oakforest-PACS
io500_info_institute_name='UoC-RCS' # e.g. JCAHPC
io500_info_storage_age_in_months='4' # not install date but age since last refresh
io500_info_storage_install_date='07/2018' # MM/YY
io500_info_filesystem='BeeGFS' # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
io500_info_filesystem_version='7.1'
io500_info_filesystem_vendor='xxx'
# client side info
io500_info_num_client_nodes='184'
io500_info_procs_per_node='32'
# server side info
io500_info_num_metadata_server_nodes='8'
io500_info_num_data_server_nodes='36'
io500_info_num_data_storage_devices='180' # if you have 5 data servers, and each has 5 drives, then this number is 25
io500_info_num_metadata_storage_devices='8' # if you have 2 metadata servers, and each has 5 drives, then this number is 10
io500_info_data_storage_type='Intel SSD P4600' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_metadata_storage_type='Intel SSD P4600' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_storage_network='omnipath' # infiniband, omnipath, ethernet, etc
io500_info_storage_interface='NVMe' # SAS, SATA, NVMe, etc
# miscellaneous
io500_info_whatever='Data Accelerator system for commodity per job ephemeral file systems (BurstBuffer)'
}
main
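# Usage sketch (assumptions: this file is saved as io500.sh, the slurm-out/ and
# slurm-err/ directories named in the #SBATCH -o/-e lines already exist, and the
# 184-node allocation seen in the logs below is requested on the sbatch command line):
#   mkdir -p slurm-out slurm-err
#   sbatch -N 184 io500.sh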
- ior_easy_read
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Wed Oct 24 17:26:03 2018
Command line : /home/ajk203/io-500-dev-2/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -i2 -B -a POSIX -t 1m -b 8g -F -o /dac/fs1001/ior_easy/ior_file_easy -O stoneWallingStatusFile=/dac/fs1001/ior_easy/stonewall
Machine : Linux cpu-e-1
TestID : 0
StartTime : Wed Oct 24 17:26:03 2018
Path : /dac/fs1001/ior_easy
FS : 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /dac/fs1001/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 5888
clients per node : 32
repetitions : 2
xfersize : 1 MiB
blocksize : 8 GiB
aggregate filesize : 46 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
read 412684 8388608 1024.00 0.152808 116.62 0.110663 116.88 0
read 411929 8388608 1024.00 0.132364 116.85 0.112586 117.09 1
Max Read: 412683.97 MiB/sec (432730.51 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 412683.97 411929.11 412306.54 377.43 412683.97 411929.11 412306.54 377.43 116.98707 0 5888 32 2 1 1 1 0 0 1 8589934592 1048576 48234496.0 POSIX 0
Finished : Wed Oct 24 17:29:57 2018
- ior_easy_write
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Wed Oct 24 16:27:57 2018
Command line : /home/ajk203/io-500-dev-2/bin/ior -w -C -Q 1 -g -G 27 -k -e -i2 -B -a POSIX -t 1m -b 8g -F -o /dac/fs1001/ior_easy/ior_file_easy -O stoneWallingStatusFile=/dac/fs1001/ior_easy/stonewall -O stoneWallingWearOut=1 -D 0
Machine : Linux cpu-e-1
TestID : 0
StartTime : Wed Oct 24 16:27:57 2018
Path : /dac/fs1001/ior_easy
FS : 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /dac/fs1001/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 5888
clients per node : 32
repetitions : 2
xfersize : 1 MiB
blocksize : 8 GiB
aggregate filesize : 46 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 8192 max: 8192 -- min data: 8.0 GiB mean data: 8.0 GiB time: 219.1s
write 219714 8388608 1024.00 0.317764 219.11 0.106886 219.53 0
stonewalling pairs accessed min: 8192 max: 8192 -- min data: 8.0 GiB mean data: 8.0 GiB time: 434.6s
write 110861 8388608 1024.00 0.362499 434.62 0.109312 435.09 1
Max Write: 219713.95 MiB/sec (230386.78 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 219713.95 110861.44 165287.70 54426.26 219713.95 110861.44 165287.70 54426.26 327.31063 0 5888 32 2 1 1 1 0 0 1 8589934592 1048576 48234496.0 POSIX 0
Finished : Wed Oct 24 16:38:53 2018
- ior_hard_read
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Wed Oct 24 17:36:22 2018
Command line : /home/ajk203/io-500-dev-2/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 10000 -i1 -o /dac/fs1001/ior_hard/IOR_file -O stoneWallingStatusFile=/dac/fs1001/ior_hard/stonewall
Machine : Linux cpu-e-1
TestID : 0
StartTime : Wed Oct 24 17:36:22 2018
Path : /dac/fs1001/ior_hard
FS : 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /dac/fs1001/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 10000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 5888
clients per node : 32
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 2.52 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
read 28530 45.91 45.91 0.727596 91.67 0.124579 92.52 0
Max Read: 28529.92 MiB/sec (29915.79 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 28529.92 28529.92 28529.92 0.00 636397.87 636397.87 636397.87 0.00 92.52074 0 5888 32 1 0 1 1 0 0 10000 47008 47008 2639609.5 POSIX 0
Finished : Wed Oct 24 17:37:55 2018
- ior_hard_write
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Wed Oct 24 16:54:22 2018
Command line : /home/ajk203/io-500-dev-2/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 10000 -i1 -o /dac/fs1001/ior_hard/IOR_file -O stoneWallingStatusFile=/dac/fs1001/ior_hard/stonewall -O stoneWallingWearOut=1 -D 0
Machine : Linux cpu-e-1
TestID : 0
StartTime : Wed Oct 24 16:54:22 2018
Path : /dac/fs1001/ior_hard
FS : 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /dac/fs1001/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 10000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 5888
clients per node : 32
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 2.52 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 10000 max: 10000 -- min data: 0.4 GiB mean data: 0.4 GiB time: 309.9s
write 7165 45.91 45.91 0.570468 367.79 0.026006 368.39 0
Max Write: 7165.33 MiB/sec (7513.39 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 7165.33 7165.33 7165.33 0.00 159832.26 159832.26 159832.26 0.00 368.38621 0 5888 32 1 0 1 1 0 0 10000 47008 47008 2639609.5 POSIX 0
Finished : Wed Oct 24 17:00:30 2018
- mdtest_easy_delete
-
-- started at 10/24/2018 17:42:45 --
mdtest-1.9.3 was launched with 5888 total task(s) on 184 node(s)
Command line used: /home/ajk203/io-500-dev-2/bin/mdtest "-r" "-F" "-d" "/dac/fs1001/mdt_easy" "-n" "25000" "-i1" "-u" "-L" "-b" "8" "-x" "/dac/fs1001/mdt_easy-stonewall"
Path: /dac/fs1001
FS: 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
5888 tasks, 147200000 files
WARNING: could not read stonewall status file
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 221394.079 221394.079 221394.079 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.048 0.048 0.048 0.000
-- finished at 10/24/2018 17:54:10 --
- mdtest_easy_stat
-
-- started at 10/24/2018 17:32:17 --
mdtest-1.9.3 was launched with 5888 total task(s) on 184 node(s)
Command line used: /home/ajk203/io-500-dev-2/bin/mdtest "-T" "-F" "-d" "/dac/fs1001/mdt_easy" "-n" "25000" "-i1" "-u" "-L" "-b" "8" "-x" "/dac/fs1001/mdt_easy-stonewall"
Path: /dac/fs1001
FS: 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
5888 tasks, 147200000 files
WARNING: could not read stonewall status file
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 823402.850 823402.850 823402.850 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/24/2018 17:35:16 --
- mdtest_easy_write
-
-- started at 10/24/2018 16:41:05 --
mdtest-1.9.3 was launched with 5888 total task(s) on 184 node(s)
Command line used: /home/ajk203/io-500-dev-2/bin/mdtest "-C" "-F" "-d" "/dac/fs1001/mdt_easy" "-n" "25000" "-i1" "-u" "-L" "-b" "8" "-x" "/dac/fs1001/mdt_easy-stonewall" "-W" "0"
Path: /dac/fs1001
FS: 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
5888 tasks, 147200000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 221527.480 221527.480 221527.480 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 3.015 3.015 3.015 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/24/2018 16:52:09 --
- mdtest_hard_delete
-
-- started at 10/24/2018 18:06:39 --
mdtest-1.9.3 was launched with 5888 total task(s) on 184 node(s)
Command line used: /home/ajk203/io-500-dev-2/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/dac/fs1001/mdt_hard" "-n" "2000" "-x" "/dac/fs1001/mdt_hard-stonewall"
Path: /dac/fs1001
FS: 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
5888 tasks, 11776000 files
WARNING: could not read stonewall status file
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 12508.055 12508.055 12508.055 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.097 0.097 0.097 0.000
-- finished at 10/24/2018 18:22:31 --
- mdtest_hard_read
-
-- started at 10/24/2018 17:56:22 --
mdtest-1.9.3 was launched with 5888 total task(s) on 184 node(s)
Command line used: /home/ajk203/io-500-dev-2/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/dac/fs1001/mdt_hard" "-n" "2000" "-x" "/dac/fs1001/mdt_hard-stonewall"
Path: /dac/fs1001
FS: 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
5888 tasks, 11776000 files
WARNING: could not read stonewall status file
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 24240.251 24240.251 24240.251 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/24/2018 18:04:28 --
- mdtest_hard_stat
-
-- started at 10/24/2018 17:40:02 --
mdtest-1.9.3 was launched with 5888 total task(s) on 184 node(s)
Command line used: /home/ajk203/io-500-dev-2/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/dac/fs1001/mdt_hard" "-n" "2000" "-x" "/dac/fs1001/mdt_hard-stonewall"
Path: /dac/fs1001
FS: 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
5888 tasks, 11776000 files
WARNING: could not read stonewall status file
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 80203.794 80203.794 80203.794 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/24/2018 17:42:29 --
- mdtest_hard_write
-
-- started at 10/24/2018 17:02:47 --
mdtest-1.9.3 was launched with 5888 total task(s) on 184 node(s)
Command line used: /home/ajk203/io-500-dev-2/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/dac/fs1001/mdt_hard" "-n" "2000" "-x" "/dac/fs1001/mdt_hard-stonewall" "-W" "0"
Path: /dac/fs1001
FS: 270.1 TiB Used FS: 18.0% Inodes: 0.0 Mi Used Inodes: -nan%
5888 tasks, 11776000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 10471.146 10471.146 10471.146 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 503.035 503.035 503.035 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/24/2018 17:21:32 --
- result_summary
-
[RESULT] BW phase 1 ior_easy_write 214.564 GB/s : time 219.53 seconds
[RESULT] IOPS phase 1 mdtest_easy_write 221.527 kiops : time 797.40 seconds
[RESULT] BW phase 2 ior_hard_write 6.997 GB/s : time 368.39 seconds
[RESULT] IOPS phase 2 mdtest_hard_write 10.471 kiops : time 1261.71 seconds
[RESULT] IOPS phase 3 find 622.250 kiops : time 255.49 seconds
[RESULT] BW phase 3 ior_easy_read 403.012 GB/s : time 116.88 seconds
[RESULT] IOPS phase 4 mdtest_easy_stat 823.403 kiops : time 318.08 seconds
[RESULT] BW phase 4 ior_hard_read 27.861 GB/s : time 92.52 seconds
[RESULT] IOPS phase 5 mdtest_hard_stat 80.204 kiops : time 272.86 seconds
[RESULT] IOPS phase 6 mdtest_easy_delete 221.394 kiops : time 702.14 seconds
[RESULT] IOPS phase 7 mdtest_hard_read 24.240 kiops : time 617.01 seconds
[RESULT] IOPS phase 8 mdtest_hard_delete 12.508 kiops : time 1083.24 seconds
[SCORE] Bandwidth 64.0764 GB/s : IOPS 94.5716 kiops : TOTAL 77.8448
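For reference, the TOTAL score is the geometric mean of the Bandwidth and IOPS scores, which are themselves the geometric means of the four BW and eight IOPS [RESULT] values above. A minimal recomputation sketch, assuming the summary above is saved as result_summary.txt:
  awk '/\[RESULT\]/ {
         v = $6                                  # the reported value (GB/s or kiops)
         if ($2 == "BW") { bw += log(v); nbw++ } else { io += log(v); nio++ }
       }
       END {
         bw = exp(bw / nbw); io = exp(io / nio)  # geometric means of the two groups
         printf "Bandwidth %.4f GB/s : IOPS %.4f kiops : TOTAL %.4f\n", bw, io, sqrt(bw * io)
       }' result_summary.txt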