- io500
-
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This version is adapted from the simplified single-node template; here it launches across the nodes in the machinefile below.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit 'extra_description' function.
#set -x
if [ "$1" == "" ]
then
SCALE=1
else
SCALE=$1
fi
NP=$(( SCALE * 10 )) # total ranks: SCALE processes on each of the 10 machinefile nodes
echo "$SCALE processes per node for a total of $NP processes."
set -euo pipefail # better error handling
export OFS_MOUNT=/mnt/beegfs/jburto2
# Set these to "True" one at a time while you debug and tune this benchmark.
# For each one you set to True, edit the corresponding function;
# to find the function name, see the 'main' function below.
# These are listed in the order in which they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True" # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the creat phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="True" # this one is optional
io500_cleanup_workdir="False" # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # stonewalling timer: with the default test, write phases wear out and stop after 300 s; set to 0 if you never want to abort early
# To run this benchmark, find and edit each of these functions.
# Please also edit the 'extra_description' function to help us collect the required data.
function main {
setup_directories
setup_paths
setup_ior_easy # required if you want a complete score
setup_ior_hard # required if you want a complete score
setup_mdt_easy # required if you want a complete score
setup_mdt_hard # required if you want a complete score
setup_find # required if you want a complete score
setup_mdreal # optional
run_benchmarks
}
function setup_directories {
# Set the directories where the benchmark files are created and where the results will go.
# If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it.
timestamp=$(date +%Y.%m.%d-%H.%M.%S) # create a unique suffix for this run
io500_workdir=$OFS_MOUNT/io500/datafiles/io500.$timestamp # directory where the data will be stored
io500_result_dir=$PWD/results/$timestamp # the directory where the output results will be kept
mkdir -p $io500_workdir $io500_result_dir
mkdir -p ${io500_workdir}/ior_easy ${io500_workdir}/ior_hard
mkdir -p ${io500_workdir}/mdt_easy ${io500_workdir}/mdt_hard
# For ior_easy: large chunks, and as few targets as still let the per-process files spread evenly.
beegfs-ctl --setpattern --numtargets=$(( 8 / $SCALE )) --chunksize=4m --mount=/mnt/beegfs ${io500_workdir}/ior_easy
# Stripe across all 16 storage targets for ior_hard with a 64k chunk size.
# The best pattern uses the smallest chunk size that fits one 47008-byte I/O, regardless of the RAID stripe.
beegfs-ctl --setpattern --numtargets=16 --chunksize=64k --mount=/mnt/beegfs ${io500_workdir}/ior_hard
# turn off striping and use small chunks for mdtest
beegfs-ctl --setpattern --numtargets=1 --chunksize=64k --mount=/mnt/beegfs ${io500_workdir}/mdt_easy
beegfs-ctl --setpattern --numtargets=1 --chunksize=64k --mount=/mnt/beegfs ${io500_workdir}/mdt_hard
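# To verify the patterns took effect, each directory's entry info can be
# queried, e.g. (a sketch; beegfs-ctl --getentryinfo is the standard query,
# though output details vary by BeeGFS release):
# beegfs-ctl --getentryinfo --mount=/mnt/beegfs ${io500_workdir}/ior_easy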
}
function setup_paths {
# Set the paths to the binaries. If you ran ./utilities/prepare.sh successfully, the binaries are in ./bin/.
io500_ior_cmd=$PWD/bin/ior
io500_mdtest_cmd=$PWD/bin/mdtest
io500_mdreal_cmd=$PWD/bin/md-real-io
io500_mpi_prefix="/usr/lib64/openmpi"
#io500_mpi_prefix="/home/jburto2/openmpi/1.10.7"
io500_mpirun="$io500_mpi_prefix/bin/mpirun"
# Run Open MPI over ethernet (TCP, excluding the ib0 interface and disabling the openib BTL)
# so the InfiniBand network stays clear for storage traffic. Map by node to balance processes;
# the IO-500 benchmarks are not heavy on interprocess communication.
io500_mpiargs="-np $NP --mca btl_tcp_if_exclude ib0 --mca btl ^openib --map-by node --machinefile /home/jburto2/pvfs10nodelistmpi --prefix $io500_mpi_prefix"
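# A quick sanity check of the launcher and rank placement before a full run
# (a sketch; prints each hostname with the number of ranks placed on it):
# $io500_mpirun $io500_mpiargs hostname | sort | uniq -c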
}
function setup_ior_easy {
# 4 MiB transfers, file per process; 400 GiB per process at SCALE=1, scaled down as the process count grows.
io500_ior_easy_size=$((400 * 1024 / $SCALE))
io500_ior_easy_params="-t 4m -b ${io500_ior_easy_size}m -F -a POSIX"
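# Worked example at SCALE=8: block size = 400*1024/8 = 51200m (50 GiB) per
# process and NP = 80 ranks, so the aggregate file size is 80 * 50 GiB =
# 3.91 TiB, which matches the ior_easy output below.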
}
function setup_mdt_easy {
# -u gives each task a unique working directory; -L creates files only at leaf level.
# BeeGFS doesn't distribute a single directory across metadata servers, so more directories = better distribution.
# io500_mdtest_easy_params="-z 1 -b 6 -u -L" # alternative: a one-level tree with six branches
io500_mdtest_easy_params="-u -L"
io500_mdtest_easy_files_per_proc=1500000
}
function setup_ior_hard {
if [ "$SCALE" == "1" ]
then
# One process per node is significantly faster because of buffering.
io500_ior_hard_writes_per_proc=2500000
else
io500_ior_hard_writes_per_proc=$(( 1000000 / $SCALE ))
fi
io500_ior_hard_other_options=" -a POSIX"
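# Worked example at SCALE=8: 1000000/8 = 125000 segments per process at
# 47008 bytes each, so the expected aggregate is 80 * 125000 * 47008 B =
# 470,080,000,000 B (437.8 GiB), which matches the ior_hard output below.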
}
function setup_mdt_hard {
# Multiple directories might improve mdt_hard slightly, but this test is storage bound, not md bound.
io500_mdtest_hard_files_per_proc="$(( 400000 / $SCALE ))"
io500_mdtest_hard_other_options=""
}
function setup_find {
#
# Set up the find command. This is an area where innovation is allowed.
# Three default options are provided: a serial find, a parallel Python
# version, and a parallel C version. The current default is the serial
# version, but it is very slow; we recommend either customizing it or using
# the C parallel version.
# For GPFS, we recommend the provided mmfind wrapper described below.
# If a custom approach is used, please provide enough information for others to reproduce it.
# The serial version should run (SLOWLY) without modification:
#io500_find_mpi="False"
#io500_find_cmd=$PWD/bin/sfind.sh
#io500_find_cmd_args=""
# A parallel version in C; -s adds a stonewall.
# For a real run, turn -s (stonewall) off or set it to 300 or more.
# to prepare this (assuming you've run ./utilities/prepare.sh already):
# > cd build/pfind
# > ./prepare.sh
# > ./compile.sh
# > cp pfind ../../bin/
# If you use io500_find_mpi="True", this will run with the same number of
# MPI nodes and ranks as the other phases. If you prefer another number
# (fewer might be better here), set io500_find_mpi to "False" and write a
# wrapper script that sets up MPI as you would like, then change
# io500_find_cmd to point to your wrapper script.
io500_find_mpi="True"
io500_find_cmd="$PWD/bin/pfind"
io500_find_cmd_args="-s 10000 -r $io500_result_dir/pfind_results"
# for GPFS systems, you should probably use the provided mmfind wrapper
# if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
#io500_find_mpi="False"
#io500_find_cmd="$PWD/bin/mmfind.sh"
#io500_find_cmd_args=""
}
function setup_mdreal {
io500_mdreal_params="-P=5000 -I=1000"
}
function run_benchmarks {
# Important: source the io500_fixed.sh script. Do not change it. If you discover
# a need to change it, please email the mailing list to discuss
source ./bin/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}
# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
# top level info
io500_info_system_name='Palmetto ofstest' # e.g. Oakforest-PACS
io500_info_institute_name='Clemson University' # e.g. JCAHPC
io500_info_storage_age_in_months='0' # not install date but age since last refresh
io500_info_storage_install_date='4/12' # MM/YY
io500_info_filesystem='BeeGFS' # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
io500_info_filesystem_version='7.1'
# client side info
io500_info_num_client_nodes="10"
io500_info_procs_per_node=${SCALE}
# server side info
io500_info_num_metadata_server_nodes='16'
io500_info_num_data_server_nodes='16'
io500_info_num_data_storage_devices='160' # if you have 5 data servers, and each has 5 drives, then this number is 25
io500_info_num_metadata_storage_devices='32' # if you have 2 metadata servers, and each has 5 drives, then this number is 10
io500_info_data_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_storage_network='infiniband' # infiniband, omnipath, ethernet, etc
io500_info_storage_interface='SAS' # SAS, SATA, NVMe, etc
# miscellaneous
io500_info_whatever='infiniband'
}
main
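# Usage sketch (the script filename here is an assumption):
#   ./io500.sh 8
# runs SCALE=8 processes on each of the 10 machinefile nodes, i.e. NP=80
# ranks, which matches "tasks : 80" and "clients per node : 8" in the ior
# sections that follow.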
- ior_easy_read
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Sun Nov 4 21:01:57 2018
Command line : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 4m -b 51200m -F -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy/ior_file_easy -O stoneWallingStatusFile=/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy/stonewall
Machine : Linux pvfs017.ofsdev.clemson.edu
TestID : 0
StartTime : Sun Nov 4 21:01:57 2018
Path : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy
FS : 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 80
clients per node : 8
repetitions : 1
xfersize : 4 MiB
blocksize : 50 GiB
aggregate filesize : 3.91 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
read 10197 52428800 4096 0.017184 401.45 0.222987 401.69 0
Max Read: 10196.83 MiB/sec (10692.16 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 10196.83 10196.83 10196.83 0.00 2549.21 2549.21 2549.21 0.00 401.69329 0 80 8 1 1 1 1 0 0 1 53687091200 4194304 4096000.0 POSIX 0
Finished : Sun Nov 4 21:08:39 2018
- ior_easy_write
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Sun Nov 4 20:11:52 2018
Command line : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 4m -b 51200m -F -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy/ior_file_easy -O stoneWallingStatusFile=/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux pvfs017.ofsdev.clemson.edu
TestID : 0
StartTime : Sun Nov 4 20:11:52 2018
Path : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy
FS : 145.4 TiB Used FS: 74.1% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 80
clients per node : 8
repetitions : 1
xfersize : 4 MiB
blocksize : 50 GiB
aggregate filesize : 3.91 TiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 10533 max: 12800 -- min data: 41.1 GiB mean data: 44.1 GiB time: 300.1s
write 11234 52428800 4096 0.007055 363.55 1.04 364.59 0
Max Write: 11234.39 MiB/sec (11780.11 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 11234.39 11234.39 11234.39 0.00 2808.60 2808.60 2808.60 0.00 364.59487 0 80 8 1 1 1 1 0 0 1 53687091200 4194304 4096000.0 POSIX 0
Finished : Sun Nov 4 20:17:58 2018
- ior_hard_read
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Sun Nov 4 21:10:13 2018
Command line : /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 125000 -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard/IOR_file -O stoneWallingStatusFile=/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard/stonewall
Machine : Linux pvfs017.ofsdev.clemson.edu
TestID : 0
StartTime : Sun Nov 4 21:10:13 2018
Path : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard
FS : 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 125000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 80
clients per node : 8
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 437.80 GiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
WARNING: Expected aggregate file size = 470080000000.
WARNING: Stat() of aggregate file size = 114252003840.
WARNING: Using actual aggregate bytes moved = 114252003840.
read 4580 45.91 45.91 1.03 22.76 0.009938 23.79 0
Max Read: 4579.90 MiB/sec (4802.37 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 4579.90 4579.90 4579.90 0.00 102160.71 102160.71 102160.71 0.00 23.79075 0 80 8 1 0 1 1 0 0 125000 47008 47008 108959.2 POSIX 0
Finished : Sun Nov 4 21:10:36 2018
- ior_hard_write
-
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Sun Nov 4 20:24:37 2018
Command line : /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 125000 -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard/IOR_file -O stoneWallingStatusFile=/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux pvfs017.ofsdev.clemson.edu
TestID : 0
StartTime : Sun Nov 4 20:24:37 2018
Path : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard
FS : 145.4 TiB Used FS: 76.8% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 125000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 80
clients per node : 8
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 437.80 GiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 13 max: 30381 -- min data: 0.0 GiB mean data: 0.3 GiB time: 300.1s
WARNING: Expected aggregate file size = 470080000000.
WARNING: Stat() of aggregate file size = 114252003840.
WARNING: Using actual aggregate bytes moved = 114252003840.
WARNING: maybe caused by deadlineForStonewalling
write 60.19 45.91 45.91 0.045277 1810.14 0.223003 1810.40 0
Max Write: 60.19 MiB/sec (63.11 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 60.19 60.19 60.19 0.00 1342.51 1342.51 1342.51 0.00 1810.39714 0 80 8 1 0 1 1 0 0 125000 47008 47008 108959.2 POSIX 0
Finished : Sun Nov 4 20:54:48 2018
- mdtest_easy_delete
-
-- started at 11/04/2018 21:11:01 --
mdtest-1.9.3 was launched with 80 total task(s) on 10 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-F" "-d" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_easy" "-n" "1500000" "-u" "-L" "-x" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_easy-stonewall"
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50
FS: 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
80 tasks, 120000000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 101046.805 101046.805 101046.805 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.135 0.135 0.135 0.000
-- finished at 11/04/2018 21:16:40 --
- mdtest_easy_stat
-
-- started at 11/04/2018 21:08:41 --
mdtest-1.9.3 was launched with 80 total task(s) on 10 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-F" "-d" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_easy" "-n" "1500000" "-u" "-L" "-x" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_easy-stonewall"
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50
FS: 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
80 tasks, 120000000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 373567.201 373567.201 373567.201 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 11/04/2018 21:10:11 --
- mdtest_easy_write
-
-- started at 11/04/2018 20:18:00 --
mdtest-1.9.3 was launched with 80 total task(s) on 10 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-F" "-d" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_easy" "-n" "1500000" "-u" "-L" "-x" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_easy-stonewall" "-W" "300"
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50
FS: 145.4 TiB Used FS: 76.8% Inodes: 0.0 Mi Used Inodes: -nan%
80 tasks, 120000000 files
stonewall rank 10: 404750 of 418694
stonewall rank 30: 325866 of 418694
stonewall rank 50: 389763 of 418694
stonewall rank 20: 388833 of 418694
stonewall rank 40: 339866 of 418694
stonewall rank 60: 340057 of 418694
stonewall rank 70: 363605 of 418694
stonewall rank 42: 375882 of 418694
stonewall rank 58: 375666 of 418694
stonewall rank 21: 349520 of 418694
Continue stonewall hit min: 325866 max: 418694 avg: 371444.5
stonewall rank 0: 356039 of 418694
stonewall rank 62: 348610 of 418694
stonewall rank 18: 397700 of 418694
stonewall rank 14: 374025 of 418694
stonewall rank 31: 346642 of 418694
stonewall rank 12: 360976 of 418694
stonewall rank 28: 398917 of 418694
stonewall rank 34: 388817 of 418694
stonewall rank 1: 392251 of 418694
stonewall rank 22: 385282 of 418694
stonewall rank 38: 355159 of 418694
stonewall rank 54: 375802 of 418694
stonewall rank 11: 352484 of 418694
stonewall rank 52: 380324 of 418694
stonewall rank 78: 375411 of 418694
stonewall rank 74: 373474 of 418694
stonewall rank 41: 359582 of 418694
stonewall rank 72: 351894 of 418694
stonewall rank 48: 337025 of 418694
stonewall rank 44: 381231 of 418694
stonewall rank 32: 356942 of 418694
stonewall rank 68: 372674 of 418694
stonewall rank 51: 364618 of 418694
stonewall rank 26: 376830 of 418694
stonewall rank 8: 380731 of 418694
stonewall rank 24: 391593 of 418694
stonewall rank 61: 350647 of 418694
stonewall rank 36: 355682 of 418694
stonewall rank 64: 391450 of 418694
stonewall rank 71: 377575 of 418694
stonewall rank 46: 347039 of 418694
stonewall rank 4: 368690 of 418694
stonewall rank 39: 350763 of 418694
stonewall rank 66: 382238 of 418694
stonewall rank 45: 356930 of 418694
stonewall rank 76: 363457 of 418694
stonewall rank 13: 396380 of 418694
stonewall rank 56: 328541 of 418694
stonewall rank 55: 382927 of 418694
stonewall rank 6: 418486 of 418694
stonewall rank 23: 386522 of 418694
stonewall rank 16: 354680 of 418694
stonewall rank 49: 348106 of 418694
stonewall rank 65: 355460 of 418694
stonewall rank 33: 374097 of 418694
stonewall rank 59: 382283 of 418694
stonewall rank 75: 362833 of 418694
stonewall rank 43: 359477 of 418694
stonewall rank 79: 378738 of 418694
stonewall rank 5: 389388 of 418694
stonewall rank 63: 375543 of 418694
stonewall rank 19: 392033 of 418694
stonewall rank 15: 362329 of 418694
stonewall rank 73: 381056 of 418694
stonewall rank 9: 373529 of 418694
stonewall rank 35: 381982 of 418694
stonewall rank 3: 378216 of 418694
stonewall rank 25: 386159 of 418694
stonewall rank 53: 377174 of 418694
stonewall rank 47: 389129 of 418694
stonewall rank 29: 385629 of 418694
stonewall rank 57: 368029 of 418694
stonewall rank 69: 409422 of 418694
stonewall rank 67: 367592 of 418694
stonewall rank 7: 350085 of 418694
stonewall rank 17: 361788 of 418694
stonewall rank 37: 361823 of 418694
stonewall rank 77: 356834 of 418694
stonewall rank 27: 387285 of 418694
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 303450.546 303450.546 303450.546 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 25.077 25.077 25.077 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 11/04/2018 20:24:35 --
- mdtest_hard_delete
-
-- started at 11/04/2018 21:22:09 --
mdtest-1.9.3 was launched with 80 total task(s) on 10 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard" "-n" "50000" "-x" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard-stonewall"
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50
FS: 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
80 tasks, 4000000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 5765.905 5765.905 5765.905 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.574 0.574 0.574 0.000
-- finished at 11/04/2018 21:27:23 --
- mdtest_hard_read
-
-- started at 11/04/2018 21:16:42 --
mdtest-1.9.3 was launched with 80 total task(s) on 10 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard" "-n" "50000" "-x" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard-stonewall"
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50
FS: 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
80 tasks, 4000000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 5527.286 5527.286 5527.286 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 11/04/2018 21:22:07 --
- mdtest_hard_stat
-
-- started at 11/04/2018 21:10:38 --
mdtest-1.9.3 was launched with 80 total task(s) on 10 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard" "-n" "50000" "-x" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard-stonewall"
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50
FS: 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
80 tasks, 4000000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 88411.302 88411.302 88411.302 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 11/04/2018 21:10:59 --
- mdtest_hard_write
-
-- started at 11/04/2018 20:54:49 --
mdtest-1.9.3 was launched with 80 total task(s) on 10 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard" "-n" "50000" "-x" "/mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50/mdt_hard-stonewall" "-W" "300"
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.11.04-20.11.50
FS: 145.4 TiB Used FS: 76.9% Inodes: 0.0 Mi Used Inodes: -nan%
80 tasks, 4000000 files
stonewall rank 60: 21733 of 22482
stonewall rank 70: 21826 of 22482
stonewall rank 30: 21954 of 22482
stonewall rank 50: 21718 of 22482
stonewall rank 40: 22160 of 22482
stonewall rank 20: 21960 of 22482
stonewall rank 1: 21708 of 22482
Continue stonewall hit min: 21500 max: 22482 avg: 21949.5
stonewall rank 0: 22149 of 22482
stonewall rank 10: 22100 of 22482
stonewall rank 21: 21800 of 22482
stonewall rank 58: 21659 of 22482
stonewall rank 62: 22193 of 22482
stonewall rank 14: 21688 of 22482
stonewall rank 11: 21681 of 22482
stonewall rank 78: 21689 of 22482
stonewall rank 72: 22031 of 22482
stonewall rank 31: 21851 of 22482
stonewall rank 68: 22438 of 22482
stonewall rank 42: 22121 of 22482
stonewall rank 54: 21682 of 22482
stonewall rank 41: 22084 of 22482
stonewall rank 18: 21821 of 22482
stonewall rank 52: 21971 of 22482
stonewall rank 74: 21863 of 22482
stonewall rank 51: 21820 of 22482
stonewall rank 28: 21822 of 22482
stonewall rank 2: 22097 of 22482
stonewall rank 24: 21934 of 22482
stonewall rank 71: 21841 of 22482
stonewall rank 12: 21628 of 22482
stonewall rank 34: 21632 of 22482
stonewall rank 25: 22235 of 22482
stonewall rank 38: 21762 of 22482
stonewall rank 22: 22172 of 22482
stonewall rank 44: 22108 of 22482
stonewall rank 13: 21916 of 22482
stonewall rank 48: 22256 of 22482
stonewall rank 46: 21919 of 22482
stonewall rank 64: 22167 of 22482
stonewall rank 29: 22275 of 22482
stonewall rank 8: 21714 of 22482
stonewall rank 32: 21744 of 22482
stonewall rank 75: 21755 of 22482
stonewall rank 56: 21794 of 22482
stonewall rank 4: 22184 of 22482
stonewall rank 39: 21832 of 22482
stonewall rank 66: 22192 of 22482
stonewall rank 5: 21898 of 22482
stonewall rank 76: 21500 of 22482
stonewall rank 49: 22384 of 22482
stonewall rank 6: 22051 of 22482
stonewall rank 35: 22036 of 22482
stonewall rank 16: 21844 of 22482
stonewall rank 63: 22031 of 22482
stonewall rank 26: 22146 of 22482
stonewall rank 59: 21846 of 22482
stonewall rank 36: 21901 of 22482
stonewall rank 45: 22338 of 22482
stonewall rank 73: 21599 of 22482
stonewall rank 69: 22206 of 22482
stonewall rank 55: 21766 of 22482
stonewall rank 57: 21640 of 22482
stonewall rank 79: 21768 of 22482
stonewall rank 65: 22083 of 22482
stonewall rank 3: 22322 of 22482
stonewall rank 19: 21677 of 22482
stonewall rank 15: 21790 of 22482
stonewall rank 23: 22065 of 22482
stonewall rank 9: 22356 of 22482
stonewall rank 33: 21800 of 22482
stonewall rank 43: 22012 of 22482
stonewall rank 53: 21926 of 22482
stonewall rank 17: 21818 of 22482
stonewall rank 47: 22212 of 22482
stonewall rank 67: 22126 of 22482
stonewall rank 77: 21593 of 22482
stonewall rank 7: 22225 of 22482
stonewall rank 27: 22174 of 22482
stonewall rank 37: 21668 of 22482
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 12955.113 12955.113 12955.113 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 261.966 261.966 261.966 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 11/04/2018 20:59:58 --
- result_summary
-
[RESULT] BW phase 1 ior_easy_write 10.971 GB/s : time 364.59 seconds
[RESULT] IOPS phase 1 mdtest_easy_write 303.451 kiops : time 397.41 seconds
[RESULT] BW phase 2 ior_hard_write 0.059 GB/s : time 1810.40 seconds
[RESULT] IOPS phase 2 mdtest_hard_write 12.955 kiops : time 310.67 seconds
[RESULT] IOPS phase 3 find 301.830 kiops : time 116.93 seconds
[RESULT] BW phase 3 ior_easy_read 9.958 GB/s : time 401.69 seconds
[RESULT] IOPS phase 4 mdtest_easy_stat 373.567 kiops : time 91.73 seconds
[RESULT] BW phase 4 ior_hard_read 4.473 GB/s : time 23.79 seconds
[RESULT] IOPS phase 5 mdtest_hard_stat 88.411 kiops : time 22.40 seconds
[RESULT] IOPS phase 6 mdtest_easy_delete 101.047 kiops : time 341.05 seconds
[RESULT] IOPS phase 7 mdtest_hard_read 5.527 kiops : time 327.34 seconds
[RESULT] IOPS phase 8 mdtest_hard_delete 5.766 kiops : time 315.87 seconds
[SCORE] Bandwidth 2.31499 GB/s : IOPS 57.8941 kiops : TOTAL 11.5769
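The [SCORE] line can be reproduced from the [RESULT] lines above. A minimal
shell sketch, assuming the IO-500 scoring rule (geometric mean of the four BW
phases, geometric mean of the eight IOPS phases, and the square root of their
product), run against the summary file this script tees out:

  awk '$1 == "[RESULT]" && $2 == "BW"   { bw += log($6); nbw++ }  # $6 = GB/s
       $1 == "[RESULT]" && $2 == "IOPS" { io += log($6); nio++ }  # $6 = kiops
       END {
         bw = exp(bw / nbw); io = exp(io / nio)                   # geometric means
         printf "Bandwidth %.5f GB/s : IOPS %.4f kiops : TOTAL %.4f\n",
                bw, io, sqrt(bw * io)
       }' results/2018.11.04-20.11.50/io-500-summary.2018.11.04-20.11.50.txt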