- io500
-
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set in here are needed by io500_fixed.sh, which gets sourced at the end of this script.
# Please also edit the 'extra_description' function.
set -euo pipefail # better error handling
# Turn these to True successively while you debug and tune this benchmark.
# For each one that you turn to True, go and edit the appropriate function.
# To find the function name, see the 'main' function.
# These are listed in the order in which they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True" # does the create phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the create phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False" # this one is optional
io500_cleanup_workdir="False" # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # Stonewalling timer: with the default setting the write/create phases stop (with wear-out) after 300 s; set it to 0 if you never want them aborted.
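# For reference, this timer is what shows up as "-O stoneWallingWearOut=1 -D 300" in the
# ior_easy_write/ior_hard_write command lines and as "-W 300" in the mdtest_*_write command
# lines in the output below. With wear-out enabled, once the 300 s deadline is reached each
# rank keeps going until it has matched the work done by the fastest rank at the deadline,
# which is why the actual write-phase times can exceed 300 s.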
# To run this benchmark, find and edit each of these functions.
# Please also edit the 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths
  setup_ior_easy  # required if you want a complete score
  setup_ior_hard  # required if you want a complete score
  setup_mdt_easy  # required if you want a complete score
  setup_mdt_hard  # required if you want a complete score
  setup_find      # required if you want a complete score
  setup_mdreal    # optional
  run_benchmarks
}
function setup_directories {
  # Set the directories where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it.
  timestamp=`date +%Y.%m.%d-%H.%M.%S` # create a uniquifier
  io500_workdir=$PWD/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=$PWD/results/$timestamp # directory where the output results will be kept
  mkdir -p $io500_workdir $io500_result_dir
  # HPE XFS tuning
  export EXTSIZE=32m
  /usr/sbin/sgi_xfs_io -c "extsize ${EXTSIZE}" $io500_workdir # set default allocation (extent size) hint on the work directory
}
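# A quick way to confirm the extent-size hint took effect (a sketch; it uses the stock
# xfs_io tool rather than the HPE sgi_xfs_io wrapper above):
#   xfs_io -c "extsize" $io500_workdir   # should report the 32 MiB (33554432-byte) hint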
function setup_paths {
  # Set the paths to the binaries. If you ran ./utilities/prepare.sh successfully, the binaries are in ./bin/
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  module load mpt
  MHN="jp1,jp2,jp3,jp4,gto51,gto52,gto53,gto54,twinsh01,twinsh02"
  io500_mpirun="/opt/hpe/hpc/mpt/mpt-2.20/bin/mpirun"
  io500_mpiargs="${MHN} -np 20"
  # HPE MPI parameters
  export MPI_XPMEM_ENABLED=disabled
  # tuning
  export MPI_SHEPHERD=true
  $io500_mpirun ${MHN} -np 1 \
    sudo idbg sgitrcoff > \
    $io500_result_dir/idbg_sgitrcoff 2>&1 # disable client tracing
  export DCVN_TIMEOUT=3600
  $io500_mpirun ${MHN} -np 1 \
    sudo sysctl fs.cxfs.cxfs_dcvn_timeout=${DCVN_TIMEOUT} > \
    $io500_result_dir/cxfs_dcvn_timeout 2>&1 # set client metadata cache timeout
  unset MPI_SHEPHERD
}
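# For context (not executed here): io500_fixed.sh prepends "$io500_mpirun $io500_mpiargs"
# to each benchmark command, so the launches below expand to something like
#   /opt/hpe/hpc/mpt/mpt-2.20/bin/mpirun ${MHN} -np 20 $PWD/bin/ior <ior args>
# With HPE MPT, a host list followed by "-np 20" appears to start 20 ranks per listed host,
# giving the 200 tasks / 20 clients per node reported in the IOR output below.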
function setup_ior_easy {
  # io500_ior_easy_size is the amount of data written per rank in MiB units,
  # but it can be any number as long as it is somehow used to scale the IOR
  # runtime as part of io500_ior_easy_params.
  io500_ior_easy_size=150000
  # 2 MiB transfers, 150000 MiB (~146.5 GiB) per proc, one file per proc, POSIX with O_DIRECT
  io500_ior_easy_params="-t 2048k -b ${io500_ior_easy_size}m -a=POSIX --posix.odirect -F"
}
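# Sanity check on the data volume (arithmetic only): 150000 MiB per rank x 200 ranks
# = 30,000,000 MiB ~= 28.61 TiB, which matches the "aggregate filesize : 28.61 TiB"
# reported in the ior_easy output below.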
function setup_mdt_easy {
  io500_mdtest_easy_params="-u -L" # unique dir per thread, files only at leaves
  io500_mdtest_easy_files_per_proc=100000
}
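# 100000 files per proc x 200 ranks = 20,000,000 files in total, matching the
# "200 tasks, 20000000 files" line in the mdtest_easy output below.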
function setup_ior_hard {
  io500_ior_hard_writes_per_proc=100000
  # -a MPIIO collective I/O (-c) with preallocation (-p), hint reporting (-H), and ROMIO hints from ${PWD}/romio-hints
  io500_ior_hard_other_options="-a MPIIO -c -p -H -U ${PWD}/romio-hints" # e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
}
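# The ior_hard phases read MPI-IO hints from ${PWD}/romio-hints via "ior -U".
# Based on the "hints passed to MPI_File_open()" block echoed in the ior_hard output
# below, that file most likely contains key/value pairs along these lines (a
# reconstruction, not the verbatim file):
#   direct_read true
#   direct_write true
#   romio_cb_read disable
#   romio_cb_write enable
#   cb_buffer_size 1073741824
#   cb_nodes 1
#   cb_config_list *:*
#   romio_ds_write enable
#   ind_wr_buffer_size 1073741824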
function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=2000
  io500_mdtest_hard_other_options=""
}
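# Likewise, 2000 files per proc x 200 ranks = 400,000 files in total, matching the
# "200 tasks, 400000 files" line in the mdtest_hard output below.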
function setup_find {
  #
  # Set up the find command. This is an area where innovation is allowed.
  # There are three default options provided: a serial find, a parallel Python
  # version, and a parallel C version. The current default is the serial one,
  # but it is very slow; we recommend either customizing it or using the parallel
  # C version. For GPFS, we recommend the provided mmfind wrapper described below.
  # Instructions are below.
  # If a custom approach is used, please provide enough info so others can reproduce it.
  #
  # The serial version that should run (SLOWLY) without modification:
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""
  #
  # A parallel version in C; the -s adds a stonewall.
  # For a real run, turn -s (stonewall) off or set it to 300 or more.
  # To prepare this (assuming you've run ./utilities/prepare.sh already):
  # > cd build/pfind
  # > ./prepare.sh
  # > ./compile.sh
  # > cp pfind ../../bin/
  # If you use io500_find_mpi="True", then this will run with the same
  # number of MPI nodes and ranks as the other phases.
  # If you prefer another number (and fewer might be better here),
  # then you can set io500_find_mpi to "False" and write a wrapper
  # script for this which sets up MPI as you would like. Then change
  # io500_find_cmd to point to your wrapper script.
  #io500_find_mpi="True"
  #io500_find_cmd="$PWD/bin/pfind"
  # uses stonewalling, run pfind
  #io500_find_cmd_args="-s $io500_stonewall_timer -r $io500_result_dir/pfind_results"
  #
  # For GPFS systems, you should probably use the provided mmfind wrapper.
  # If you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
  #
  # HPE XFS find
  io500_find_mpi="False"
  io500_find_cmd=$PWD/bin/rfind.sh
  io500_find_cmd_args=""
}
function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}
function run_benchmarks {
  # Important: source the io500_fixed.sh script. Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss.
  source ./utilities/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}
# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='GTO' # e.g. Oakforest-PACS
  io500_info_institute_name='Hewlett Packard Enterprise' # e.g. JCAHPC
  io500_info_storage_age_in_months='N/A' # not install date but age since last refresh
  io500_info_storage_install_date='N/A' # MM/YY
  io500_info_filesystem='HPE XFS' # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='8.1.1'
  io500_info_filesystem_vendor='Hewlett Packard Enterprise'
  # client side info
  io500_info_num_client_nodes="10"
  io500_info_procs_per_node="20"
  # server side info
  io500_info_num_metadata_server_nodes='1'
  io500_info_num_data_server_nodes="5"
  io500_info_num_data_storage_devices="45" # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices="45" # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='NVMe' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='NVMe' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='InfiniBand (EDR)' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='IB/SRP' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever="DCVN_TIMEOUT=${DCVN_TIMEOUT},extsize=${EXTSIZE},MPI_XPMEM_ENABLED=${MPI_XPMEM_ENABLED}"
  io500_mount_options="$(findmnt -no OPTIONS $(stat -c %m .))"
}
main
- ior_easy_read
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 15:38:13 2019
Command line : /mnt/rpool/io-500-dev/bin/ior -r -R -t 2048k -b 150000m -a=POSIX --posix.odirect -F -i 1 -C -Q 1 -g -G 27 -k -e -o /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy/ior_file_easy -O stoneWallingStatusFile=/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy/stonewall
Machine : Linux jp1.eag.rdlabs.hpecorp.net
TestID : 0
StartTime : Sun Jun 9 15:38:13 2019
Path : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy
FS : 130.6 TiB Used FS: 26.6% Inodes: 5350.0 Mi Used Inodes: 0.3%
Options:
api : POSIX
apiVersion :
test filename : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 200
clients per node : 20
repetitions : 1
xfersize : 2 MiB
blocksize : 146.48 GiB
aggregate filesize : 28.61 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
WARNING: Expected aggregate file size = 31457280000000.
WARNING: Stat() of aggregate file size = 26403143680000.
WARNING: Using actual aggregate bytes moved = 26403143680000.
read 85978 153600000 2048.00 0.209555 292.65 0.001189 292.86 0
Max Read: 85978.50 MiB/sec (90154.99 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 85978.50 85978.50 85978.50 0.00 42989.25 42989.25 42989.25 0.00 292.86393 0 200 20 1 1 1 1 0 0 1 157286400000 2097152 25180000.0 POSIX 0
Finished : Sun Jun 9 15:43:06 2019
- ior_easy_write
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 15:16:43 2019
Command line : /mnt/rpool/io-500-dev/bin/ior -w -t 2048k -b 150000m -a=POSIX --posix.odirect -F -i 1 -C -Q 1 -g -G 27 -k -e -o /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy/ior_file_easy -O stoneWallingStatusFile=/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux jp1.eag.rdlabs.hpecorp.net
TestID : 0
StartTime : Sun Jun 9 15:16:43 2019
Path : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy
FS : 130.6 TiB Used FS: 0.0% Inodes: 5350.0 Mi Used Inodes: 0.0%
Options:
api : POSIX
apiVersion :
test filename : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 200
clients per node : 20
repetitions : 1
xfersize : 2 MiB
blocksize : 146.48 GiB
aggregate filesize : 28.61 TiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 56056 max: 62950 -- min data: 109.5 GiB mean data: 117.3 GiB time: 300.0s
WARNING: Expected aggregate file size = 31457280000000.
WARNING: Stat() of aggregate file size = 26403143680000.
WARNING: Using actual aggregate bytes moved = 26403143680000.
WARNING: maybe caused by deadlineForStonewalling
write 78909 153600000 2048.00 0.265080 318.84 0.000791 319.10 0
Max Write: 78908.77 MiB/sec (82741.84 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 78908.77 78908.77 78908.77 0.00 39454.39 39454.39 39454.39 0.00 319.10268 0 200 20 1 1 1 1 0 0 1 157286400000 2097152 25180000.0 POSIX 0
Finished : Sun Jun 9 15:22:02 2019
- ior_hard_read
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 15:48:28 2019
Command line : /mnt/rpool/io-500-dev/bin/ior -r -R -s 100000 -a MPIIO -c -p -H -U /mnt/rpool/io-500-dev/romio-hints -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard/IOR_file -O stoneWallingStatusFile=/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard/stonewall
Machine : Linux jp1.eag.rdlabs.hpecorp.net
TestID : 0
StartTime : Sun Jun 9 15:48:28 2019
Path : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard
FS : 130.6 TiB Used FS: 26.6% Inodes: 5350.0 Mi Used Inodes: 0.3%
Options:
api : MPIIO
apiVersion : (3.1)
test filename : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard/IOR_file
access : single-shared-file
type : collective
segments : 100000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 200
clients per node : 20
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 875.59 GiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
hints passed to MPI_File_open() {
direct_read = true
direct_write = true
romio_cb_read = disable
romio_cb_write = enable
cb_buffer_size = 1073741824
cb_nodes = 1
cb_config_list = *:*
romio_ds_write = enable
ind_wr_buffer_size = 1073741824
}
hints returned from opened file {
direct_read = true
direct_write = true
cb_buffer_size = 1073741824
romio_cb_read = disable
romio_cb_write = enable
cb_nodes = 1
romio_no_indep_rw = false
romio_cb_pfr = disable
romio_cb_fr_types = aar
romio_cb_fr_alignment = 1
romio_cb_ds_threshold = 0
romio_cb_alltoall = automatic
ind_rd_buffer_size = 4194304
ind_wr_buffer_size = 1073741824
romio_ds_read = automatic
romio_ds_write = enable
cb_config_list = *:*
romio_filesystem_type = XFS: SGI XFS
romio_aggregator_list = 0
}
WARNING: Expected aggregate file size = 940160000000.
WARNING: Stat() of aggregate file size = 940160000000.
WARNING: Using actual aggregate bytes moved = 443182022400.
read 16874 45.91 45.91 0.032566 25.01 0.000839 25.05 0
Max Read: 16874.47 MiB/sec (17694.16 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 16874.47 16874.47 16874.47 0.00 376407.45 376407.45 376407.45 0.00 25.04679 0 200 20 1 0 1 1 0 0 100000 47008 47008 422651.3 MPIIO 0
Finished : Sun Jun 9 15:48:53 2019
- ior_hard_write
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 15:27:22 2019
Command line : /mnt/rpool/io-500-dev/bin/ior -w -s 100000 -a MPIIO -c -p -H -U /mnt/rpool/io-500-dev/romio-hints -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard/IOR_file -O stoneWallingStatusFile=/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux jp1.eag.rdlabs.hpecorp.net
TestID : 0
StartTime : Sun Jun 9 15:27:22 2019
Path : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard
FS : 130.6 TiB Used FS: 18.4% Inodes: 5350.0 Mi Used Inodes: 0.3%
Options:
api : MPIIO
apiVersion : (3.1)
test filename : /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/ior_hard/IOR_file
access : single-shared-file
type : collective
segments : 100000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 200
clients per node : 20
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 875.59 GiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
hints passed to MPI_File_open() {
direct_read = true
direct_write = true
romio_cb_read = disable
romio_cb_write = enable
cb_buffer_size = 1073741824
cb_nodes = 1
cb_config_list = *:*
romio_ds_write = enable
ind_wr_buffer_size = 1073741824
}
hints returned from opened file {
direct_read = true
direct_write = true
cb_buffer_size = 1073741824
romio_cb_read = disable
romio_cb_write = enable
cb_nodes = 1
romio_no_indep_rw = false
romio_cb_pfr = disable
romio_cb_fr_types = aar
romio_cb_fr_alignment = 1
romio_cb_ds_threshold = 0
romio_cb_alltoall = automatic
ind_rd_buffer_size = 4194304
ind_wr_buffer_size = 1073741824
romio_ds_read = automatic
romio_ds_write = enable
cb_config_list = *:*
romio_filesystem_type = XFS: SGI XFS
romio_aggregator_list = 0
}
stonewalling pairs accessed min: 47139 max: 47139 -- min data: 2.1 GiB mean data: 2.1 GiB time: 300.0s
WARNING: Expected aggregate file size = 940160000000.
WARNING: Stat() of aggregate file size = 940160000000.
WARNING: Using actual aggregate bytes moved = 443182022400.
WARNING: maybe caused by deadlineForStonewalling
write 1407.58 45.91 45.91 0.057031 300.21 0.000589 300.27 0
Max Write: 1407.58 MiB/sec (1475.96 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 1407.58 1407.58 1407.58 0.00 31397.97 31397.97 31397.97 0.00 300.26780 0 200 20 1 0 1 1 0 0 100000 47008 47008 422651.3 MPIIO 0
Finished : Sun Jun 9 15:32:23 2019
- mdtest_easy_delete
-
-- started at 06/09/2019 15:49:01 --
mdtest-3.3alpha1 was launched with 200 total task(s) on 10 node(s)
Command line used: /mnt/rpool/io-500-dev/bin/mdtest "-r" "-F" "-d" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_easy" "-n" "100000" "-u" "-L" "-x" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_easy-stonewall"
Path: /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42
FS: 130.6 TiB Used FS: 26.6% Inodes: 5350.0 Mi Used Inodes: 0.3%
200 tasks, 20000000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 24146.718 24146.675 24146.698 0.013
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 2.310 2.310 2.310 0.000
-- finished at 06/09/2019 16:02:07 --
- mdtest_easy_stat
-
-- started at 06/09/2019 15:43:07 --
mdtest-3.3alpha1 was launched with 200 total task(s) on 10 node(s)
Command line used: /mnt/rpool/io-500-dev/bin/mdtest "-T" "-F" "-d" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_easy" "-n" "100000" "-u" "-L" "-x" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_easy-stonewall"
Path: /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42
FS: 130.6 TiB Used FS: 26.6% Inodes: 5350.0 Mi Used Inodes: 0.3%
200 tasks, 20000000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 59260.788 59260.600 59260.709 0.041
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 15:48:27 --
- mdtest_easy_write
-
-- started at 06/09/2019 15:22:03 --
mdtest-3.3alpha1 was launched with 200 total task(s) on 10 node(s)
Command line used: /mnt/rpool/io-500-dev/bin/mdtest "-C" "-F" "-d" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_easy" "-n" "100000" "-u" "-L" "-x" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_easy-stonewall" "-W" "300"
Path: /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42
FS: 130.6 TiB Used FS: 18.4% Inodes: 5350.0 Mi Used Inodes: 0.0%
200 tasks, 20000000 files
Continue stonewall hit min: 84699 max: 94877 avg: 90064.4
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 59751.427 59751.334 59751.368 0.022
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 5.274 5.274 5.274 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 15:27:21 --
- mdtest_hard_delete
-
-- started at 06/09/2019 16:02:09 --
mdtest-3.3alpha1 was launched with 200 total task(s) on 10 node(s)
Command line used: /mnt/rpool/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard" "-n" "2000" "-x" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard-stonewall"
Path: /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42
FS: 130.6 TiB Used FS: 26.6% Inodes: 5350.0 Mi Used Inodes: 0.0%
200 tasks, 400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 492.822 492.821 492.822 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 41.680 41.680 41.680 0.000
-- finished at 06/09/2019 16:13:06 --
- mdtest_hard_read
-
-- started at 06/09/2019 16:02:08 --
mdtest-3.3alpha1 was launched with 200 total task(s) on 10 node(s)
Command line used: /mnt/rpool/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard" "-n" "2000" "-x" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard-stonewall"
Path: /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42
FS: 130.6 TiB Used FS: 26.6% Inodes: 5350.0 Mi Used Inodes: 0.0%
200 tasks, 400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 1250726.603 1250616.990 1250678.280 19.246
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 16:02:08 --
- mdtest_hard_stat
-
-- started at 06/09/2019 15:48:54 --
mdtest-3.3alpha1 was launched with 200 total task(s) on 10 node(s)
Command line used: /mnt/rpool/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard" "-n" "2000" "-x" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard-stonewall"
Path: /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42
FS: 130.6 TiB Used FS: 26.6% Inodes: 5350.0 Mi Used Inodes: 0.3%
200 tasks, 400000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 57517.134 57513.058 57516.463 0.566
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 15:49:00 --
- mdtest_hard_write
-
-- started at 06/09/2019 15:32:23 --
mdtest-3.3alpha1 was launched with 200 total task(s) on 10 node(s)
Command line used: /mnt/rpool/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard" "-n" "2000" "-x" "/mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42/mdt_hard-stonewall" "-W" "300"
Path: /mnt/rpool/io-500-dev/datafiles/io500.2019.06.09-15.16.42
FS: 130.6 TiB Used FS: 19.0% Inodes: 5350.0 Mi Used Inodes: 0.3%
200 tasks, 400000 files
Continue stonewall hit min: 1459 max: 1619 avg: 1500.1
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 1004.273 1004.269 1004.273 0.001
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 1343.122 1343.122 1343.122 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 15:37:46 --
- result_summary
-
[RESULT] BW phase 1 ior_easy_write 77.060 GB/s : time 319.10 seconds
[RESULT] IOPS phase 1 mdtest_easy_write 59.751 kiops : time 318.75 seconds
[RESULT] BW phase 2 ior_hard_write 1.375 GB/s : time 300.27 seconds
[RESULT] IOPS phase 2 mdtest_hard_write 1.004 kiops : time 323.42 seconds
[RESULT] IOPS phase 3 find 758.480 kiops : time 25.45 seconds
[RESULT] BW phase 3 ior_easy_read 83.963 GB/s : time 292.86 seconds
[RESULT] IOPS phase 4 mdtest_easy_stat 59.261 kiops : time 321.20 seconds
[RESULT] BW phase 4 ior_hard_read 16.478 GB/s : time 25.05 seconds
[RESULT] IOPS phase 5 mdtest_hard_stat 57.517 kiops : time 6.60 seconds
[RESULT] IOPS phase 6 mdtest_easy_delete 24.147 kiops : time 787.31 seconds
[RESULT] IOPS phase 7 mdtest_hard_read 1250.730 kiops : time 1.29 seconds
[RESULT] IOPS phase 8 mdtest_hard_delete 0.493 kiops : time 658.04 seconds
[SCORE] Bandwidth 19.566 GB/s : IOPS 35.1097 kiops : TOTAL 26.2099
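Note: the TOTAL appears to be the geometric mean of the bandwidth and IOPS scores.
A quick sanity check against the reported numbers (not part of the official io500 output):

  awk 'BEGIN { printf "%.4f\n", sqrt(19.566 * 35.1097) }'   # ~26.21, consistent with the reported TOTAL 26.2099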