- io500
-
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of the site-configs/sandia/startup.sh which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit 'extra_description' function.
set -euo pipefail # better error handling: abort on errors, unset variables, and pipeline failures
# Phase toggles: turn these to True successively while you debug and tune this benchmark.
# For each one that you turn to true, go and edit the appropriate setup function.
# To find the function name, see the 'main' function below.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True" # does the create phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the create phase and enables the subsequent stat/read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False" # this one is optional
io500_cleanup_workdir="False" # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=0 # Stonewalling timer: stop with wearout after N seconds (300 with the default test); set to 0 if you never want to abort early.
# To run this benchmark, find and edit each of these setup functions.
# Please also edit the 'extra_description' function to help us collect the required data.
# Orchestrates the whole run: configure every phase, then launch the
# benchmark driver. Order matters here -- directories and binary paths must
# be set before the per-phase setup functions that reference them, and
# run_benchmarks must come last.
function main {
setup_directories
setup_paths
setup_ior_easy # required if you want a complete score
setup_ior_hard # required if you want a complete score
setup_mdt_easy # required if you want a complete score
setup_mdt_hard # required if you want a complete score
setup_find # required if you want a complete score
setup_mdreal # optional
run_benchmarks
}
function setup_directories {
# Create the per-run data and result directories and apply BeeGFS stripe
# patterns. Sets the globals $timestamp, $io500_workdir and
# $io500_result_dir, which are consumed by the other setup functions and
# by io500_fixed.sh.
# If you want to set up stripe tuning on your output directories or
# anything similar, then this is a good place to do it.
timestamp=$(date +%Y.%m.%d-%H.%M.%S) # create a uniquifier
io500_workdir="$PWD/datafiles/io500.$timestamp" # directory where the data will be stored
io500_result_dir="$PWD/results/$timestamp" # the directory where the output results will be kept
# quote paths: $PWD may contain spaces
mkdir -p "$io500_workdir" "$io500_result_dir"
mkdir -p "${io500_workdir}/ior_easy" "${io500_workdir}/ior_hard"
mkdir -p "${io500_workdir}/mdt_easy" "${io500_workdir}/mdt_hard"
# moderate striping for ior_easy (file per process): 4 targets, 1 MiB chunks
beegfs-ctl --setpattern --numtargets=4 --chunksize=1024k "${io500_workdir}/ior_easy"
# stripe across all 96 targets for ior_hard (shared file), 256k chunksize
beegfs-ctl --setpattern --numtargets=96 --chunksize=256k "${io500_workdir}/ior_hard"
# turn off striping and use small chunks for mdtest
beegfs-ctl --setpattern --numtargets=1 --chunksize=64k "${io500_workdir}/mdt_easy"
beegfs-ctl --setpattern --numtargets=1 --chunksize=64k "${io500_workdir}/mdt_hard"
}
function setup_paths {
# Locations of the benchmark binaries and the MPI launcher.
# If you ran ./utilities/prepare.sh successfully, the binaries are in ./bin/.
io500_mpirun="mpirun"
io500_mpiargs="--hostfile host.io500 "
#io500_mpiargs="-x LD_LIBRARY_PATH -np 184 --hostfile hosts_mpich.io500 "
io500_ior_cmd=$PWD/bin/ior
io500_mdtest_cmd=$PWD/bin/mdtest
io500_mdreal_cmd=$PWD/bin/md-real-io
}
function setup_ior_easy {
# Data volume written per rank for the ior_easy phase. The value is
# interpolated into io500_ior_easy_params below with a 'g' suffix, so each
# process writes 330 GiB as 2 MiB transfers into its own file (-F), with
# fsync on close (-B presumably bypasses the page cache via O_DIRECT --
# verify against the IOR docs for this build).
io500_ior_easy_size=330
io500_ior_easy_params="-t 2048k -b ${io500_ior_easy_size}g -F -B "
}
function setup_mdt_easy {
# mdtest easy phase: unique directory per thread (-u), shallow tree
# (-z 1 -b 12), files only at the leaves (-L).
io500_mdtest_easy_params="-z 1 -b 12 -u -L"
# number of files each rank creates/stats/deletes
io500_mdtest_easy_files_per_proc=210000
}
function setup_ior_hard {
# Number of small writes each rank issues into the shared ior_hard file.
io500_ior_hard_writes_per_proc=150000
# Extra IOR flags, e.g. -E to keep precreated files using lfs setstripe, or -a MPIIO
io500_ior_hard_other_options=""
}
function setup_mdt_hard {
# Number of files each rank handles in the mdtest hard phase.
io500_mdtest_hard_files_per_proc=11500
# Extra mdtest flags for the hard phase, if any.
io500_mdtest_hard_other_options=""
}
function setup_find {
# Configure the find phase. This is an area where innovation is allowed;
# if a custom approach is used, please provide enough info so others can
# reproduce it. The harness ships three options: a serial shell find
# (slow), a parallel python version, and a parallel C version (pfind).
# This config uses the C pfind.
#
# To build pfind (assuming you've run ./utilities/prepare.sh already):
#   cd build/pfind && ./prepare.sh && ./compile.sh && cp pfind ../../bin/
#
# With io500_find_mpi="True", the command runs with the same number of MPI
# nodes and ranks as the other phases. If you prefer a different (often
# smaller) rank count, set io500_find_mpi="False" and point io500_find_cmd
# at a wrapper script that sets up MPI however you like.
#
# Serial fallback (runs SLOWLY but needs no modification):
#io500_find_mpi="False"
#io500_find_cmd=$PWD/bin/sfind.sh
#io500_find_cmd_args=""
io500_find_mpi="True"
io500_find_cmd="$PWD/bin/pfind"
# -s adds a stonewall (for a real run, turn it off or set it to 300 or
# more); -r names the pfind results file.
io500_find_cmd_args="-s 20000 -r $io500_result_dir/pfind_results"
# For GPFS systems, you should probably use the provided mmfind wrapper;
# if you used ./utilities/prepare.sh, it is in ./bin/mmfind.sh:
#io500_find_mpi="False"
#io500_find_cmd="$PWD/bin/mmfind.sh"
#io500_find_cmd_args=""
}
function setup_mdreal {
# Arguments passed to the optional md-real-io binary.
io500_mdreal_params="-P=5000 -I=1000"
}
function run_benchmarks {
# Important: source the io500_fixed.sh script. Do not change it. If you discover
# a need to change it, please email the mailing list to discuss.
# NOTE(review): because of the pipe into tee, the sourced script runs in a
# subshell, so variables it sets are not visible afterwards, and under
# pipefail the function's status reflects the pipeline -- confirm this is
# the intended upstream pattern.
source ./utilities/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}
# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
# Key/value pairs describing this system for the IO500 submission.
# Feel free to add extra ones if you'd like.
# -- top level info --
io500_info_system_name='bracewell' # e.g. Oakforest-PACS
io500_info_institute_name='CSIRO' # e.g. JCAHPC
io500_info_storage_age_in_months='10' # not install date but age since last refresh
io500_info_storage_install_date='10/18' # MM/YY
io500_info_filesystem='BeeGFS' # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
io500_info_filesystem_version='7.1.3'
io500_info_filesystem_vendor='DELL/ThinkParQ'
# -- client side info --
io500_info_num_client_nodes='26'
io500_info_procs_per_node='10'
# -- server side info --
io500_info_num_metadata_server_nodes='2'
io500_info_num_data_server_nodes='16'
io500_info_num_data_storage_devices='384' # total drives across all data servers
io500_info_num_metadata_storage_devices='24' # total drives across all metadata servers
io500_info_data_storage_type='NVMe' # HDD, SSD, persistent memory, etc; feel free to put specific models
io500_info_metadata_storage_type='NVMe' # HDD, SSD, persistent memory, etc; feel free to put specific models
io500_info_storage_network='IB EDR' # infiniband, omnipath, ethernet, etc
io500_info_storage_interface='NVMe' # SAS, SATA, NVMe, etc
# -- miscellaneous --
io500_info_whatever=''
}
# Log the MPI host list for the record (the run aborts here under set -e
# if host.io500 is missing), then kick off the full benchmark.
echo contents of host.io500
cat host.io500
echo start run
main
- ior_easy_read
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms03', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Fri May 17 21:02:51 2019
Command line : /scratch1/leh015/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 2048k -b 330g -F -B -o /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy/ior_file_easy -O stoneWallingStatusFile=/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy/stonewall
Machine : Linux bss017
TestID : 0
StartTime : Fri May 17 21:02:51 2019
Path : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy
FS : 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 260
clients per node : 10
repetitions : 1
xfersize : 2 MiB
blocksize : 330 GiB
aggregate filesize : 83.79 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
[bss017:93855] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:93855] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
read 205987 346030080 2048.00 0.027989 426.49 0.013749 426.53 0
Max Read: 205986.61 MiB/sec (215992.61 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 205986.61 205986.61 205986.61 0.00 102993.30 102993.30 102993.30 0.00 426.52870 0 260 10 1 1 1 1 0 0 1 354334801920 2097152 87859200.0 POSIX 0
Finished : Fri May 17 21:09:58 2019
- ior_easy_write
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms04', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Fri May 17 20:39:21 2019
Command line : /scratch1/leh015/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 2048k -b 330g -F -B -o /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy/ior_file_easy -O stoneWallingStatusFile=/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy/stonewall -O stoneWallingWearOut=1 -D 0
Machine : Linux bss017
TestID : 0
StartTime : Fri May 17 20:39:21 2019
Path : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy
FS : 1117.6 TiB Used FS: 6.7% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 260
clients per node : 10
repetitions : 1
xfersize : 2 MiB
blocksize : 330 GiB
aggregate filesize : 83.79 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
[bss017:90511] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:90511] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
stonewalling pairs accessed min: 168960 max: 168960 -- min data: 330.0 GiB mean data: 330.0 GiB time: 319.8s
write 274345 346030080 2048.00 0.015440 320.23 0.009578 320.25 0
Max Write: 274345.48 MiB/sec (287672.09 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 274345.48 274345.48 274345.48 0.00 137172.74 137172.74 137172.74 0.00 320.25022 0 260 10 1 1 1 1 0 0 1 354334801920 2097152 87859200.0 POSIX 0
Finished : Fri May 17 20:44:41 2019
- ior_hard_read
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms04', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Fri May 17 21:11:11 2019
Command line : /scratch1/leh015/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 150000 -o /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard/IOR_file -O stoneWallingStatusFile=/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard/stonewall
Machine : Linux bss017
TestID : 0
StartTime : Fri May 17 21:11:11 2019
Path : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard
FS : 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 150000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 260
clients per node : 10
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 1.67 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
[bss017:95018] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:95018] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
read 83187 45.91 45.91 0.230497 20.76 0.021943 21.02 0
Max Read: 83187.47 MiB/sec (87228.39 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 83187.47 83187.47 83187.47 0.00 1855607.30 1855607.30 1855607.30 0.00 21.01738 0 260 10 1 0 1 1 0 0 150000 47008 47008 1748382.6 POSIX 0
Finished : Fri May 17 21:11:32 2019
- ior_hard_write
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms04', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began : Fri May 17 20:50:54 2019
Command line : /scratch1/leh015/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 150000 -o /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard/IOR_file -O stoneWallingStatusFile=/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard/stonewall -O stoneWallingWearOut=1 -D 0
Machine : Linux bss017
TestID : 0
StartTime : Fri May 17 20:50:54 2019
Path : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard
FS : 1117.6 TiB Used FS: 14.2% Inodes: 0.0 Mi Used Inodes: -nan%
Options:
api : POSIX
apiVersion :
test filename : /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 150000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 260
clients per node : 10
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 1.67 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
[bss017:92075] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:92075] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
stonewalling pairs accessed min: 150000 max: 150000 -- min data: 6.6 GiB mean data: 6.6 GiB time: 342.3s
write 4839 45.91 45.91 0.340348 360.86 0.104357 361.31 0
Max Write: 4839.06 MiB/sec (5074.12 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 4839.06 4839.06 4839.06 0.00 107941.58 107941.58 107941.58 0.00 361.30656 0 260 10 1 0 1 1 0 0 150000 47008 47008 1748382.6 POSIX 0
Finished : Fri May 17 20:56:55 2019
- mdtest_easy_delete
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms04', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
-- started at 05/17/2019 21:12:03 --
mdtest-1.9.3 was launched with 260 total task(s) on 26 node(s)
Command line used: /scratch1/leh015/io-500-dev/bin/mdtest "-r" "-F" "-d" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_easy" "-n" "210000" "-z" "1" "-b" "12" "-u" "-L" "-x" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_easy-stonewall"
Path: /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18
FS: 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
260 tasks, 54600000 files
WARNING: could not read stonewall status file
[bss017:95528] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:95528] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 239758.293 239620.859 239752.809 26.390
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 1.748 1.748 1.748 0.000
-- finished at 05/17/2019 21:15:59 --
- mdtest_easy_stat
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms03', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
-- started at 05/17/2019 21:10:03 --
mdtest-1.9.3 was launched with 260 total task(s) on 26 node(s)
Command line used: /scratch1/leh015/io-500-dev/bin/mdtest "-T" "-F" "-d" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_easy" "-n" "210000" "-z" "1" "-b" "12" "-u" "-L" "-x" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_easy-stonewall"
Path: /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18
FS: 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
260 tasks, 54600000 files
WARNING: could not read stonewall status file
[bss017:94714] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:94714] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 869976.467 869969.927 869975.379 1.159
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 05/17/2019 21:11:06 --
- mdtest_easy_write
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms03', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
-- started at 05/17/2019 20:44:46 --
mdtest-1.9.3 was launched with 260 total task(s) on 26 node(s)
Command line used: /scratch1/leh015/io-500-dev/bin/mdtest "-C" "-F" "-d" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_easy" "-n" "210000" "-z" "1" "-b" "12" "-u" "-L" "-x" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_easy-stonewall" "-W" "0"
Path: /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18
FS: 1117.6 TiB Used FS: 14.2% Inodes: 0.0 Mi Used Inodes: -nan%
260 tasks, 54600000 files
[bss017:91262] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:91262] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 150875.577 150785.569 150871.961 17.278
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 167.302 167.302 167.302 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 05/17/2019 20:50:49 --
- mdtest_hard_delete
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms04', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
-- started at 05/17/2019 21:17:42 --
mdtest-1.9.3 was launched with 260 total task(s) on 26 node(s)
Command line used: /scratch1/leh015/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard" "-n" "11500" "-x" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard-stonewall"
Path: /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18
FS: 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
260 tasks, 2990000 files
WARNING: could not read stonewall status file
[bss017:96458] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:96458] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 14344.306 14344.264 14344.298 0.009
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.360 0.360 0.360 0.000
-- finished at 05/17/2019 21:21:13 --
- mdtest_hard_read
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms04', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
-- started at 05/17/2019 21:16:04 --
mdtest-1.9.3 was launched with 260 total task(s) on 26 node(s)
Command line used: /scratch1/leh015/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard" "-n" "11500" "-x" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard-stonewall"
Path: /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18
FS: 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
260 tasks, 2990000 files
WARNING: could not read stonewall status file
[bss017:96139] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:96139] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 32275.521 32275.388 32275.499 0.015
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 05/17/2019 21:17:37 --
- mdtest_hard_stat
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms03', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
-- started at 05/17/2019 21:11:37 --
mdtest-1.9.3 was launched with 260 total task(s) on 26 node(s)
Command line used: /scratch1/leh015/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard" "-n" "11500" "-x" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard-stonewall"
Path: /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18
FS: 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
260 tasks, 2990000 files
WARNING: could not read stonewall status file
[bss017:95247] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:95247] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 139247.896 139245.541 139247.633 0.516
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 05/17/2019 21:11:58 --
- mdtest_hard_write
-
--------------------------------------------------------------------------
WARNING: There are more than one active ports on host 'bms03', but the
default subnet GID prefix was detected on more than one of these
ports. If these ports are connected to different physical IB
networks, this configuration will fail in Open MPI. This version of
Open MPI requires that every physically separate IB subnet that is
used between connected MPI processes must have different subnet ID
values.
Please see this FAQ entry for more details:
http://www.open-mpi.org/faq/?category=openfabrics#ofa-default-subnet-gid
NOTE: You can turn off this warning by setting the MCA parameter
btl_openib_warn_default_gid_prefix to 0.
--------------------------------------------------------------------------
-- started at 05/17/2019 20:57:00 --
mdtest-1.9.3 was launched with 260 total task(s) on 26 node(s)
Command line used: /scratch1/leh015/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard" "-n" "11500" "-x" "/scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18/mdt_hard-stonewall" "-W" "0"
Path: /scratch1/leh015/io-500-dev/datafiles/io500.2019.05.17-20.39.18
FS: 1117.6 TiB Used FS: 14.4% Inodes: 0.0 Mi Used Inodes: -nan%
260 tasks, 2990000 files
[bss017:92838] 179 more processes have sent help message help-mpi-btl-openib.txt / default subnet prefix
[bss017:92838] Set MCA parameter "orte_base_help_aggregate" to 0 to see all help / error messages
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 9641.274 9641.237 9641.266 0.007
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 562.922 562.922 562.922 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 05/17/2019 21:02:11 --
- result_summary
-
[RESULT] BW phase 1 ior_easy_write 267.915 GB/s : time 320.25 seconds
[RESULT] IOPS phase 1 mdtest_easy_write 150.876 kiops : time 367.38 seconds
[RESULT] BW phase 2 ior_hard_write 4.726 GB/s : time 361.31 seconds
[RESULT] IOPS phase 2 mdtest_hard_write 9.641 kiops : time 315.23 seconds
[RESULT] IOPS phase 3 find 1619.040 kiops : time 35.70 seconds
[RESULT] BW phase 3 ior_easy_read 201.159 GB/s : time 426.53 seconds
[RESULT] IOPS phase 4 mdtest_easy_stat 869.976 kiops : time 67.79 seconds
[RESULT] BW phase 4 ior_hard_read 81.237 GB/s : time 21.02 seconds
[RESULT] IOPS phase 5 mdtest_hard_stat 139.248 kiops : time 26.56 seconds
[RESULT] IOPS phase 6 mdtest_easy_delete 239.758 kiops : time 240.49 seconds
[RESULT] IOPS phase 7 mdtest_hard_read 32.275 kiops : time 97.80 seconds
[RESULT] IOPS phase 8 mdtest_hard_delete 14.344 kiops : time 216.34 seconds
[SCORE] Bandwidth 67.443 GB/s : IOPS 115.499 kiops : TOTAL 88.2587