- io500
-
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit the 'extra_description' function.
#set -x
if [ "$1" == "" ]
then
SCALE=1
else
SCALE=$1
fi
NP=$(( $SCALE * 16 ))
echo "$SCALE processes per node for $NP processes."
set -euo pipefail # better error handling
export OFS_MOUNT=/scratch4/jburto2
# Turn these to "True" successively while you debug and tune this benchmark.
# For each one that you turn to "True", go and edit the appropriate function;
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True" # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the creat phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="True" # this one is optional
io500_cleanup_workdir="False" # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # Stonewalling timer: stop the write phases (with wear-out) after 300 s; set to 0 if you never want to abort early.
# To run this benchmark, find and edit each of these functions.
# Please also edit the 'extra_description' function to help us collect the required data.
function main {
setup_directories
setup_paths
setup_ior_easy # required if you want a complete score
setup_ior_hard # required if you want a complete score
setup_mdt_easy # required if you want a complete score
setup_mdt_hard # required if you want a complete score
setup_find # required if you want a complete score
setup_mdreal # optional
run_benchmarks
}
function setup_directories {
# set directories for where the benchmark files are created and where the results will go.
# If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it.
timestamp=$(date +%Y.%m.%d-%H.%M.%S) # create a uniquifier
io500_workdir=$OFS_MOUNT/io500/datafiles/io500.$timestamp # directory where the data will be stored
io500_result_dir=$PWD/results/$timestamp # the directory where the output results will be kept
mkdir -p $io500_workdir $io500_result_dir
mkdir -p ${io500_workdir}/ior_easy ${io500_workdir}/ior_hard
mkdir -p ${io500_workdir}/mdt_easy ${io500_workdir}/mdt_hard
# For ior_easy: large chunks, and as few targets as still allows the files to be spread evenly.
beegfs-ctl --setpattern --numtargets=3 --chunksize=4m --mount=/scratch4 ${io500_workdir}/ior_easy
# Stripe across all storage targets for ior_hard, 64k chunk size.
# The best pattern uses the minimal chunk size that fits one I/O, regardless of RAID stripe.
beegfs-ctl --setpattern --numtargets=6 --chunksize=64k --mount=/scratch4 ${io500_workdir}/ior_hard
# turn off striping and use small chunks for mdtest
beegfs-ctl --setpattern --numtargets=1 --chunksize=64k --mount=/scratch4 ${io500_workdir}/mdt_easy
beegfs-ctl --setpattern --numtargets=1 --chunksize=64k --mount=/scratch4 ${io500_workdir}/mdt_hard
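# Optional sanity check: beegfs-ctl --getentryinfo prints the stripe pattern
# actually applied to a directory, e.g.:
# beegfs-ctl --getentryinfo --mount=/scratch4 ${io500_workdir}/ior_easy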
}
function setup_paths {
# Set the paths to the binaries. If you ran ./utilities/prepare.sh successfully, the binaries are in ./bin/.
io500_ior_cmd=$PWD/bin/ior
io500_mdtest_cmd=$PWD/bin/mdtest
io500_mdreal_cmd=$PWD/bin/md-real-io
io500_mpi_prefix="/usr/lib64/openmpi"
#io500_mpi_prefix="/home/jburto2/openmpi/1.10.7"
io500_mpirun="$io500_mpi_prefix/bin/mpirun"
# Run OpenMPI over ethernet to keep the IB network clear for data. Map by node to balance processes.
# The I/O 500 benchmarks are not heavy on interprocess communication.
io500_mpiargs="-np $NP --mca btl_tcp_if_exclude ib0 --mca btl ^openib --map-by node --machinefile /home/jburto2/hpccnodelistmpi --prefix $io500_mpi_prefix"
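# Optional smoke test before a multi-hour run: launch a trivial command with
# the exact same MPI arguments and check that every expected host answers, e.g.:
# $io500_mpirun $io500_mpiargs hostname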
}
function setup_ior_easy {
# 4 MiB transfers, 120 GiB per process at SCALE=1 (shrinking with SCALE), file per process.
io500_ior_easy_size=$((120 * 1024 / $SCALE))
io500_ior_easy_params="-t 4m -b ${io500_ior_easy_size}m -F -a POSIX"
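# At SCALE=1 this expands to "-t 4m -b 122880m -F -a POSIX" (120 GiB per process);
# at SCALE=8 the block size shrinks to 15360m, matching the "blocksize : 15 GiB"
# line in the ior_easy output below.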
}
function setup_mdt_easy {
# Unique directory per task (-u), files only at leaf level (-L).
# BeeGFS doesn't have distributed directories, so more directories = better distribution.
# A deeper tree is also possible, e.g.:
# io500_mdtest_easy_params="-z 1 -b 6 -u -L"
io500_mdtest_easy_params="-u -L"
io500_mdtest_easy_files_per_proc=800000
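# Note: this count is not divided by SCALE, so the total grows with rank count:
# 128 ranks * 800000 = 102,400,000 files, matching the "128 tasks, 102400000 files"
# line in the mdtest_easy output below.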
}
function setup_ior_hard {
if [ "$SCALE" == "1" ]
then
# One process per node is significantly faster because of buffering.
io500_ior_hard_writes_per_proc=2200000
else
io500_ior_hard_writes_per_proc=$(( 2200000 / $SCALE ))
fi
io500_ior_hard_other_options=" -a POSIX"
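# Each write is the fixed 47008-byte ior_hard record, so at SCALE=8 each rank
# issues 275000 writes (~12 GiB); the stonewall timer normally stops the phase
# well before that, as the warnings in the ior_hard_write output below show.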
}
function setup_mdt_hard {
# Multiple directories might improve mdt_hard slightly, but this test is storage bound, not md bound.
io500_mdtest_hard_files_per_proc="$(( 150000 / $SCALE ))"
io500_mdtest_hard_other_options=""
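# At SCALE=8 this is 18750 files per rank, or 2,400,000 files across 128 ranks,
# matching the "128 tasks, 2400000 files" line in the mdtest_hard output below.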
}
function setup_find {
#
# Set up the find command. This is an area where innovation is allowed.
# Three default options are provided: a serial find, a parallel Python
# version, and a parallel C version. The upstream default is the serial
# version, but it is very slow; we recommend either customizing it or
# using the C parallel version (pfind), which is what this script does.
# For GPFS, we recommend the provided mmfind wrapper described below.
# If a custom approach is used, please provide enough info so others can reproduce it.
# The serial version that should run (SLOWLY) without modification:
#io500_find_mpi="False"
#io500_find_cmd=$PWD/bin/sfind.sh
#io500_find_cmd_args=""
# A parallel version in C; the -s flag adds a stonewall.
# For a real run, turn -s (stonewall) off or set it to 300 or more.
# to prepare this (assuming you've run ./utilities/prepare.sh already):
# > cd build/pfind
# > ./prepare.sh
# > ./compile.sh
# > cp pfind ../../bin/
# If you use io500_find_mpi="True", then this will run with the same
# number of MPI nodes and ranks as the other phases.
# If you prefer a different number (fewer ranks might be better here),
# set io500_find_mpi to "False" and write a wrapper script that sets up
# MPI as you would like, then point io500_find_cmd at your wrapper script.
io500_find_mpi="True"
io500_find_cmd="$PWD/bin/pfind"
io500_find_cmd_args="-s 10000 -r $io500_result_dir/pfind_results"
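# Note: "-s 10000" sets a 10000-second stonewall, effectively unlimited here
# (the find phase finished in ~112 s; see result_summary below).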
# for GPFS systems, you should probably use the provided mmfind wrapper
# if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
#io500_find_mpi="False"
#io500_find_cmd="$PWD/bin/mmfind.sh"
#io500_find_cmd_args=""
}
function setup_mdreal {
io500_mdreal_params="-P=5000 -I=1000"
}
function run_benchmarks {
# Important: source the io500_fixed.sh script. Do not change it. If you discover
# a need to change it, please email the mailing list to discuss it first.
source ./bin/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}
# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
# top level info
io500_info_system_name='Palmetto scratch4' # e.g. Oakforest-PACS
io500_info_institute_name='Clemson University' # e.g. JCAHPC
io500_info_storage_age_in_months='0' # not install date but age since last refresh
io500_info_storage_install_date='01/15' # MM/YY
io500_info_storage_refresh_date='10/18' # MM/YY
io500_info_filesystem='BeeGFS' # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
io500_info_filesystem_version='7.1'
# client side info
io500_info_num_client_nodes="$(( ${SCALE} * 16 ))"
io500_info_procs_per_node="1"
# server side info
io500_info_num_metadata_server_nodes='6'
io500_info_num_data_server_nodes='6'
io500_info_num_data_storage_devices='60' # if you have 5 data servers, and each has 5 drives, then this number is 25
io500_info_num_metadata_storage_devices='12' # if you have 2 metadata servers, and each has 5 drives, then this number is 10
io500_info_data_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_storage_network='infiniband' # infiniband, omnipath, ethernet, etc
io500_info_storage_interface='SATA' # SAS, SATA, NVMe, etc
# miscellaneous
io500_info_whatever='infiniband'
}
main
- ior_easy_read
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Thu Oct 25 00:53:35 2018
Command line : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 4m -b 15360m -F -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy/ior_file_easy -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy/stonewall
Machine : Linux ofstest008.ofsdev.clemson.edu
TestID : 0
StartTime : Thu Oct 25 00:53:35 2018
Path : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy
FS : 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 128
clients per node : 4
repetitions : 1
xfersize : 4 MiB
blocksize : 15 GiB
aggregate filesize : 1.88 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
WARNING: Expected aggregate file size = 2061584302080.
WARNING: Stat() of aggregate file size = 2001454759936.
WARNING: Using actual aggregate bytes moved = 2001454759936.
read 3192.70 15728640 4096 0.014651 597.80 0.148690 597.84 0
Max Read: 3192.70 MiB/sec (3347.79 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 3192.70 3192.70 3192.70 0.00 798.17 798.17 798.17 0.00 597.84395 0 128 4 1 1 1 1 0 0 1 16106127360 4194304 1908736.0 POSIX 0
Finished : Thu Oct 25 01:03:33 2018
- ior_easy_write
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Wed Oct 24 23:58:57 2018
Command line : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 4m -b 15360m -F -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy/ior_file_easy -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux ofstest008.ofsdev.clemson.edu
TestID : 0
StartTime : Wed Oct 24 23:58:57 2018
Path : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy
FS : 174.6 TiB Used FS: 26.6% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 128
clients per node : 4
repetitions : 1
xfersize : 4 MiB
blocksize : 15 GiB
aggregate filesize : 1.88 TiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 2415 max: 3728 -- min data: 9.4 GiB mean data: 12.2 GiB time: 300.8s
WARNING: Expected aggregate file size = 2061584302080.
WARNING: Stat() of aggregate file size = 2001454759936.
WARNING: Using actual aggregate bytes moved = 2001454759936.
WARNING: maybe caused by deadlineForStonewalling
write 3102.43 15728640 4096 0.064518 614.92 0.337736 615.24 0
Max Write: 3102.43 MiB/sec (3253.13 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 3102.43 3102.43 3102.43 0.00 775.61 775.61 775.61 0.00 615.23993 0 128 4 1 1 1 1 0 0 1 16106127360 4194304 1908736.0 POSIX 0
Finished : Thu Oct 25 00:09:12 2018
- ior_hard_read
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Thu Oct 25 01:05:06 2018
Command line : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 275000 -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard/IOR_file -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard/stonewall
Machine : Linux ofstest008.ofsdev.clemson.edu
TestID : 0
StartTime : Thu Oct 25 01:05:06 2018
Path : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard
FS : 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 275000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 128
clients per node : 4
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 1.50 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
WARNING: Expected aggregate file size = 1654681600000.
WARNING: Stat() of aggregate file size = 313565171712.
WARNING: Using actual aggregate bytes moved = 313565171712.
read 932.33 45.91 45.91 0.038815 320.70 0.075992 320.75 0
Max Read: 932.33 MiB/sec (977.61 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 932.33 932.33 932.33 0.00 20796.76 20796.76 20796.76 0.00 320.74528 0 128 4 1 0 1 1 0 0 275000 47008 47008 299039.1 POSIX 0
Finished : Thu Oct 25 01:10:27 2018
- ior_hard_write
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Thu Oct 25 00:28:56 2018
Command line : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 275000 -a POSIX -o /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard/IOR_file -O stoneWallingStatusFile=/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux ofstest008.ofsdev.clemson.edu
TestID : 0
StartTime : Thu Oct 25 00:28:56 2018
Path : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard
FS : 174.6 TiB Used FS: 27.6% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 275000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 128
clients per node : 4
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 1.50 TiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 223 max: 52113 -- min data: 0.0 GiB mean data: 0.8 GiB time: 301.8s
WARNING: Expected aggregate file size = 1654681600000.
WARNING: Stat() of aggregate file size = 313565171712.
WARNING: Using actual aggregate bytes moved = 313565171712.
WARNING: maybe caused by deadlineForStonewalling
write 287.82 45.91 45.91 0.016902 1038.87 0.198334 1038.99 0
Max Write: 287.82 MiB/sec (301.80 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 287.82 287.82 287.82 0.00 6420.15 6420.15 6420.15 0.00 1038.98822 0 128 4 1 0 1 1 0 0 275000 47008 47008 299039.1 POSIX 0
Finished : Thu Oct 25 00:46:15 2018
- mdtest_easy_delete
-
-- started at 10/25/2018 01:10:59 --
mdtest-1.9.3 was launched with 128 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-F" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_easy" "-n" "800000" "-u" "-L" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_easy-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53
FS: 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
128 tasks, 102400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 13941.461 13941.461 13941.461 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.059 0.059 0.059 0.000
-- finished at 10/25/2018 01:54:16 --
- mdtest_easy_stat
-
-- started at 10/25/2018 01:03:36 --
mdtest-1.9.3 was launched with 128 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-F" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_easy" "-n" "800000" "-u" "-L" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_easy-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53
FS: 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
128 tasks, 102400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 408518.008 408518.008 408518.008 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/25/2018 01:05:04 --
- mdtest_easy_write
-
-- started at 10/25/2018 00:09:15 --
mdtest-1.9.3 was launched with 128 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-F" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_easy" "-n" "800000" "-u" "-L" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_easy-stonewall" "-W" "300"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53
FS: 174.6 TiB Used FS: 27.6% Inodes: 0.0 Mi Used Inodes: -nan%
128 tasks, 102400000 files
stonewall rank 95: 101292 of 280962
stonewall rank 21: 253562 of 280962
stonewall rank 58: 101389 of 280962
stonewall rank 111: 206451 of 280962
stonewall rank 7: 150334 of 280962
stonewall rank 77: 150258 of 280962
stonewall rank 15: 143364 of 280962
stonewall rank 69: 153933 of 280962
stonewall rank 1: 202534 of 280962
stonewall rank 39: 99897 of 280962
stonewall rank 106: 84071 of 280962
stonewall rank 81: 137405 of 280962
stonewall rank 3: 194977 of 280962
stonewall rank 38: 207976 of 280962
stonewall rank 52: 101699 of 280962
stonewall rank 118: 123735 of 280962
stonewall rank 37: 268703 of 280962
stonewall rank 40: 109253 of 280962
stonewall rank 32: 177992 of 280962
stonewall rank 75: 84083 of 280962
stonewall rank 44: 141074 of 280962
stonewall rank 74: 154964 of 280962
stonewall rank 36: 83889 of 280962
stonewall rank 89: 83689 of 280962
stonewall rank 61: 182699 of 280962
stonewall rank 71: 116368 of 280962
stonewall rank 33: 143234 of 280962
stonewall rank 11: 150508 of 280962
stonewall rank 113: 143833 of 280962
stonewall rank 126: 100254 of 280962
stonewall rank 98: 196038 of 280962
stonewall rank 34: 218437 of 280962
stonewall rank 73: 140934 of 280962
stonewall rank 127: 83803 of 280962
stonewall rank 24: 194284 of 280962
stonewall rank 9: 119037 of 280962
stonewall rank 85: 124581 of 280962
stonewall rank 119: 123803 of 280962
stonewall rank 19: 143222 of 280962
stonewall rank 46: 118638 of 280962
stonewall rank 41: 152712 of 280962
stonewall rank 110: 155094 of 280962
stonewall rank 42: 258534 of 280962
stonewall rank 53: 94838 of 280962
stonewall rank 122: 194567 of 280962
stonewall rank 107: 141312 of 280962
stonewall rank 93: 96632 of 280962
stonewall rank 112: 208016 of 280962
stonewall rank 65: 138958 of 280962
stonewall rank 45: 161376 of 280962
stonewall rank 102: 246653 of 280962
stonewall rank 79: 107307 of 280962
stonewall rank 28: 144196 of 280962
stonewall rank 108: 101354 of 280962
stonewall rank 48: 124064 of 280962
stonewall rank 55: 102995 of 280962
stonewall rank 56: 117042 of 280962
stonewall rank 83: 117327 of 280962
stonewall rank 115: 155051 of 280962
stonewall rank 5: 123204 of 280962
stonewall rank 90: 87474 of 280962
stonewall rank 17: 213448 of 280962
stonewall rank 70: 142693 of 280962
stonewall rank 63: 144182 of 280962
stonewall rank 82: 126170 of 280962
stonewall rank 87: 126282 of 280962
stonewall rank 2: 186387 of 280962
stonewall rank 78: 148653 of 280962
stonewall rank 54: 83353 of 280962
stonewall rank 99: 156607 of 280962
stonewall rank 100: 216527 of 280962
stonewall rank 76: 145081 of 280962
stonewall rank 59: 116491 of 280962
stonewall rank 4: 201200 of 280962
stonewall rank 91: 85565 of 280962
Continue stonewall hit min: 83314 max: 280962 avg: 148253.4
stonewall rank 0: 245446 of 280962
stonewall rank 26: 100241 of 280962
stonewall rank 25: 133998 of 280962
stonewall rank 116: 258585 of 280962
stonewall rank 43: 198699 of 280962
stonewall rank 123: 203893 of 280962
stonewall rank 120: 83667 of 280962
stonewall rank 8: 159974 of 280962
stonewall rank 13: 127025 of 280962
stonewall rank 47: 126680 of 280962
stonewall rank 22: 170604 of 280962
stonewall rank 49: 203972 of 280962
stonewall rank 50: 156079 of 280962
stonewall rank 16: 124310 of 280962
stonewall rank 67: 116284 of 280962
stonewall rank 62: 164762 of 280962
stonewall rank 124: 152438 of 280962
stonewall rank 18: 83493 of 280962
stonewall rank 30: 115005 of 280962
stonewall rank 57: 84326 of 280962
stonewall rank 104: 183884 of 280962
stonewall rank 96: 168809 of 280962
stonewall rank 117: 104564 of 280962
stonewall rank 86: 150607 of 280962
stonewall rank 94: 172695 of 280962
stonewall rank 12: 189464 of 280962
stonewall rank 20: 85713 of 280962
stonewall rank 121: 203635 of 280962
stonewall rank 103: 150540 of 280962
stonewall rank 35: 205012 of 280962
stonewall rank 29: 83808 of 280962
stonewall rank 6: 105368 of 280962
stonewall rank 66: 139983 of 280962
stonewall rank 10: 101271 of 280962
stonewall rank 92: 151996 of 280962
stonewall rank 109: 205174 of 280962
stonewall rank 84: 83595 of 280962
stonewall rank 72: 152764 of 280962
stonewall rank 51: 151006 of 280962
stonewall rank 31: 83736 of 280962
stonewall rank 101: 147106 of 280962
stonewall rank 125: 155824 of 280962
stonewall rank 27: 188372 of 280962
stonewall rank 14: 189758 of 280962
stonewall rank 105: 156140 of 280962
stonewall rank 80: 124398 of 280962
stonewall rank 68: 219926 of 280962
stonewall rank 23: 83314 of 280962
stonewall rank 88: 223230 of 280962
stonewall rank 97: 83543 of 280962
stonewall rank 60: 184950 of 280962
stonewall rank 64: 118264 of 280962
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 86981.522 86981.522 86981.522 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 4.228 4.228 4.228 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/25/2018 00:28:53 --
- mdtest_hard_delete
-
-- started at 10/25/2018 01:56:16 --
mdtest-1.9.3 was launched with 128 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard" "-n" "18750" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53
FS: 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
128 tasks, 2400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 3420.738 3420.738 3420.738 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.498 0.498 0.498 0.000
-- finished at 10/25/2018 02:04:09 --
- mdtest_hard_read
-
-- started at 10/25/2018 01:54:18 --
mdtest-1.9.3 was launched with 128 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard" "-n" "18750" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53
FS: 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
128 tasks, 2400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 14004.612 14004.612 14004.612 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/25/2018 01:56:13 --
- mdtest_hard_stat
-
-- started at 10/25/2018 01:10:33 --
mdtest-1.9.3 was launched with 128 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard" "-n" "18750" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard-stonewall"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53
FS: 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
128 tasks, 2400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 68237.775 68237.775 68237.775 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/25/2018 01:10:56 --
- mdtest_hard_write
-
-- started at 10/25/2018 00:46:17 --
mdtest-1.9.3 was launched with 128 total task(s) on 32 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard" "-n" "18750" "-x" "/scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53/mdt_hard-stonewall" "-W" "300"
Path: /scratch4/jburto2/io500/datafiles/io500.2018.10.24-23.58.53
FS: 174.6 TiB Used FS: 27.8% Inodes: 0.0 Mi Used Inodes: -nan%
128 tasks, 2400000 files
stonewall rank 21: 12484 of 12574
stonewall rank 95: 12435 of 12574
stonewall rank 58: 12434 of 12574
stonewall rank 89: 11006 of 12574
stonewall rank 77: 11284 of 12574
stonewall rank 37: 11302 of 12574
stonewall rank 1: 11321 of 12574
stonewall rank 7: 11192 of 12574
stonewall rank 69: 12466 of 12574
stonewall rank 40: 11304 of 12574
stonewall rank 111: 11303 of 12574
stonewall rank 106: 12484 of 12574
stonewall rank 15: 10868 of 12574
stonewall rank 38: 11364 of 12574
stonewall rank 44: 11326 of 12574
stonewall rank 32: 12478 of 12574
stonewall rank 114: 11347 of 12574
stonewall rank 45: 11346 of 12574
stonewall rank 75: 11312 of 12574
stonewall rank 81: 11360 of 12574
stonewall rank 126: 10954 of 12574
stonewall rank 3: 11337 of 12574
stonewall rank 119: 11345 of 12574
stonewall rank 85: 11297 of 12574
stonewall rank 17: 12349 of 12574
stonewall rank 41: 11226 of 12574
stonewall rank 118: 11352 of 12574
stonewall rank 93: 12321 of 12574
stonewall rank 53: 10729 of 12574
stonewall rank 52: 11005 of 12574
stonewall rank 9: 11280 of 12574
stonewall rank 25: 12500 of 12574
stonewall rank 11: 11354 of 12574
stonewall rank 61: 12457 of 12574
stonewall rank 79: 11277 of 12574
stonewall rank 127: 10776 of 12574
stonewall rank 19: 12364 of 12574
stonewall rank 46: 11322 of 12574
stonewall rank 74: 11330 of 12574
stonewall rank 24: 12424 of 12574
stonewall rank 122: 11315 of 12574
stonewall rank 116: 11310 of 12574
stonewall rank 39: 11306 of 12574
stonewall rank 36: 12492 of 12574
stonewall rank 71: 12425 of 12574
stonewall rank 78: 11387 of 12574
stonewall rank 73: 12441 of 12574
stonewall rank 113: 11284 of 12574
stonewall rank 110: 12490 of 12574
stonewall rank 83: 11381 of 12574
stonewall rank 82: 11231 of 12574
stonewall rank 48: 11317 of 12574
stonewall rank 5: 11354 of 12574
stonewall rank 33: 12508 of 12574
stonewall rank 108: 12484 of 12574
stonewall rank 56: 12346 of 12574
stonewall rank 62: 12314 of 12574
stonewall rank 28: 12484 of 12574
stonewall rank 112: 11336 of 12574
stonewall rank 65: 12422 of 12574
stonewall rank 70: 12433 of 12574
stonewall rank 102: 12448 of 12574
stonewall rank 54: 12366 of 12574
stonewall rank 121: 11348 of 12574
stonewall rank 120: 11307 of 12574
stonewall rank 8: 11356 of 12574
stonewall rank 42: 11350 of 12574
stonewall rank 115: 11318 of 12574
stonewall rank 34: 12438 of 12574
stonewall rank 107: 12497 of 12574
stonewall rank 91: 12383 of 12574
stonewall rank 76: 11324 of 12574
stonewall rank 63: 12449 of 12574
stonewall rank 99: 12433 of 12574
stonewall rank 59: 12426 of 12574
stonewall rank 4: 11308 of 12574
stonewall rank 26: 12471 of 12574
stonewall rank 117: 11298 of 12574
stonewall rank 13: 11313 of 12574
stonewall rank 57: 12293 of 12574
stonewall rank 100: 12519 of 12574
stonewall rank 90: 10753 of 12574
stonewall rank 50: 11286 of 12574
Continue stonewall hit min: 10720 max: 12574 avg: 11812.5
stonewall rank 0: 11355 of 12574
stonewall rank 47: 11262 of 12574
stonewall rank 22: 12484 of 12574
stonewall rank 49: 11354 of 12574
stonewall rank 87: 11323 of 12574
stonewall rank 16: 10720 of 12574
stonewall rank 55: 12293 of 12574
stonewall rank 124: 11344 of 12574
stonewall rank 94: 12361 of 12574
stonewall rank 30: 12482 of 12574
stonewall rank 2: 11296 of 12574
stonewall rank 67: 12405 of 12574
stonewall rank 123: 11310 of 12574
stonewall rank 96: 12534 of 12574
stonewall rank 43: 11342 of 12574
stonewall rank 29: 12489 of 12574
stonewall rank 104: 12448 of 12574
stonewall rank 12: 11289 of 12574
stonewall rank 10: 11331 of 12574
stonewall rank 109: 12441 of 12574
stonewall rank 86: 11316 of 12574
stonewall rank 84: 11283 of 12574
stonewall rank 103: 12476 of 12574
stonewall rank 92: 12361 of 12574
stonewall rank 20: 12290 of 12574
stonewall rank 35: 12481 of 12574
stonewall rank 66: 12440 of 12574
stonewall rank 125: 11460 of 12574
stonewall rank 72: 12424 of 12574
stonewall rank 6: 11283 of 12574
stonewall rank 18: 12273 of 12574
stonewall rank 14: 11360 of 12574
stonewall rank 51: 11292 of 12574
stonewall rank 27: 12508 of 12574
stonewall rank 23: 12462 of 12574
stonewall rank 101: 12472 of 12574
stonewall rank 80: 11337 of 12574
stonewall rank 31: 12455 of 12574
stonewall rank 97: 12349 of 12574
stonewall rank 88: 11305 of 12574
stonewall rank 60: 12471 of 12574
stonewall rank 105: 12433 of 12574
stonewall rank 68: 12411 of 12574
stonewall rank 64: 12456 of 12574
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 7435.257 7435.257 7435.257 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 178.396 178.396 178.396 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 10/25/2018 00:51:40 --
- result_summary
-
[RESULT] BW phase 1 ior_easy_write 3.030 GB/s : time 615.24 seconds
[RESULT] IOPS phase 1 mdtest_easy_write 86.981 kiops : time 1180.41 seconds
[RESULT] BW phase 2 ior_hard_write 0.281 GB/s : time 1038.99 seconds
[RESULT] IOPS phase 2 mdtest_hard_write 7.435 kiops : time 325.70 seconds
[RESULT] IOPS phase 3 find 335.410 kiops : time 112.02 seconds
[RESULT] BW phase 3 ior_easy_read 3.118 GB/s : time 597.84 seconds
[RESULT] IOPS phase 4 mdtest_easy_stat 408.518 kiops : time 90.87 seconds
[RESULT] BW phase 4 ior_hard_read 0.910 GB/s : time 320.75 seconds
[RESULT] IOPS phase 5 mdtest_hard_stat 68.238 kiops : time 29.08 seconds
[RESULT] IOPS phase 6 mdtest_easy_delete 13.942 kiops : time 2599.44 seconds
[RESULT] IOPS phase 7 mdtest_hard_read 14.005 kiops : time 117.66 seconds
[RESULT] IOPS phase 8 mdtest_hard_delete 3.421 kiops : time 475.33 seconds
[SCORE] Bandwidth 1.24692 GB/s : IOPS 37.6513 kiops : TOTAL 6.85187