- io500
-
#!/bin/bash
#LUSTRE_MDS=mds[11-14]
LUSTRE_MDS=sv19[0-3]
LUSTRE_OSS=sv19[0-3]
LUSTRE_CLIENT=c0[82-83,85-87],c2[10,12-14],c208
MGS=172.16.254.190@o2ib
FSNAME=/es90
MNT=/es90
DSH=pdsh
#PATH=/usr/mpi/gcc/openmpi-4.0.2a1/bin:$PATH
#PATH=/work/bin/benchmark_bin/openmpi-3.1.3/bin:$PATH
# Lustre server settings
${DSH} -w ${LUSTRE_MDS} "echo 128 > /sys/module/mdt/parameters/max_mod_rpcs_per_client"
${DSH} -w ${LUSTRE_MDS} lctl set_param \
mdt.*.dom_lock=trylock \
ldlm.lock_limit_mb=24000 \
ldlm.lock_reclaim_threshold_mb=20000
#${DSH} -w ${LUSTRE_MDS} "sysctl -w vm.swappiness=10"
#${DSH} -w ${LUSTRE_MDS} "sysctl -w vm.vfs_cache_pressure=30"
${DSH} -w ${LUSTRE_OSS} "sysctl -w vm.min_free_kbytes=524288"
${DSH} -w ${LUSTRE_OSS} lctl set_param \
osd-ldiskfs.*.read_cache_enable=0 \
obdfilter.*.writethrough_cache_enable=0 \
obdfilter.*.brw_size=16 \
obdfilter.*.precreate_batch=1024
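# Optional: read back a few of the values set above to confirm they applied, e.g.
#   ${DSH} -w ${LUSTRE_OSS} lctl get_param obdfilter.*.brw_size obdfilter.*.precreate_batch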
# Remount Lustre clients
${DSH} -w ${LUSTRE_CLIENT} umount -t lustre -a
${DSH} -w ${LUSTRE_CLIENT} mount -t lustre ${MGS}:${FSNAME} ${MNT}
sleep 2
# Lustre client settings
${DSH} -w ${LUSTRE_CLIENT} lctl set_param \
osc.*.max_pages_per_rpc=16M \
osc.*.max_rpcs_in_flight=16 \
osc.*.max_dirty_mb=512 \
osc.*.checksums=0 \
llite.*.max_read_ahead_mb=2048 \
ldlm.namespaces.*.lru_size=4000000 \
ldlm.namespaces.*.dirty_age_limit=10000 \
mdc.*.max_rpcs_in_flight=128 \
mdc.*.max_mod_rpcs_in_flight=127 \
llite.*.max_read_ahead_per_file_mb=256
sleep 2
# Cleanup & TRIM on all OSTs
${DSH} -w ${LUSTRE_CLIENT} lctl set_param ldlm.namespaces.*.lru_size=clear
${DSH} -w ${LUSTRE_OSS} fstrim -av
${DSH} -w ${LUSTRE_MDS},${LUSTRE_OSS} "echo 3 > /proc/sys/vm/drop_caches"
${DSH} -w ${LUSTRE_CLIENT} "cpupower frequency-set -g performance"
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This file is based on the simplified single-node template, adapted here for a multi-node run.
# The template is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit 'extra_description' function.
set -euo pipefail # better error handling
# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True" # does the create phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the create phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False" # this one is optional
io500_cleanup_workdir="False" # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=300 # stonewalling timer: stop (with wear-out) after 300s on the default test; set to 0 if you never want to abort early
# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
setup_directories
setup_paths
setup_ior_easy # required if you want a complete score
setup_ior_hard # required if you want a complete score
setup_mdt_easy # required if you want a complete score
setup_mdt_hard # required if you want a complete score
setup_find # required if you want a complete score
setup_mdreal # optional
run_benchmarks
}
function setup_directories {
# set directories for where the benchmark files are created and where the results will go.
# If you want to set up stripe tuning on your output directories or anything similar, then this is a good place to do it.
timestamp=$(date +%Y.%m.%d-%H.%M.%S) # create a uniquifier
#io500_workdir=$PWD/datafiles/io500.$timestamp # directory where the data will be stored
io500_workdir=${MNT}/io500.out
io500_result_dir=$PWD/results/$timestamp # the directory where the output results will be kept
mkdir -p $io500_workdir $io500_result_dir
mkdir -p $io500_workdir/ior_hard $io500_workdir/ior_easy
lfs setdirstripe -c 4 $io500_workdir/mdt_easy
lfs setdirstripe -c 4 $io500_workdir/mdt_hard
lfs setdirstripe -c 4 -D $io500_workdir/mdt_easy
lfs setdirstripe -c 4 -D $io500_workdir/mdt_hard
lfs setstripe -L mdt -E 1M $io500_workdir/mdt_easy
lfs setstripe -L mdt -E 1M $io500_workdir/mdt_hard
lfs setstripe -C 160 -S 16M $io500_workdir/ior_hard
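# Optional: verify the layouts above before running, e.g.
#   lfs getdirstripe $io500_workdir/mdt_easy   # DNE stripe count across MDTs
#   lfs getstripe -d $io500_workdir/ior_hard   # stripe count/size used for the shared file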
}
function setup_paths {
# Set the paths to the binaries. If you ran ./utilities/prepare.sh successfully, then binaries are in ./bin/
io500_ior_cmd=$PWD/bin/ior
io500_mdtest_cmd=$PWD/bin/mdtest
io500_mdreal_cmd=$PWD/bin/md-real-io
io500_mpirun="/work/home/sihara/mpi/mpich-3.3/bin/mpirun"
io500_mpiargs="-np 160 -hostfile hostfile -ppn 16"
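# 160 ranks at 16 per node => 10 client nodes; the hostfile is assumed to list the
# ten clients from LUSTRE_CLIENT above.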
}
function setup_ior_easy {
# io500_ior_easy_size is the amount of data written per rank in MiB units,
# but it can be any number as long as it is somehow used to scale the IOR
# runtime as part of io500_ior_easy_params
io500_ior_easy_size=72000
# 2 MiB transfers, 72000 MiB (~70 GiB) per proc, file per proc
io500_ior_easy_params="-t 2048k -b ${io500_ior_easy_size}m -F"
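# across 160 ranks this requests 160 x 72000 MiB = 11,520,000 MiB (~11 TiB) in aggregate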
}
function setup_mdt_easy {
io500_mdtest_easy_params="-u -L" # unique dir per thread, files only at leaves
io500_mdtest_easy_files_per_proc=320000
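# 160 ranks x 320000 files = 51,200,000 files requested (the stonewall may stop creates earlier)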
}
function setup_ior_hard {
io500_ior_hard_writes_per_proc=132000
io500_ior_hard_other_options="" #e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
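# each I/O is a fixed 47008-byte record (from io500_fixed.sh), so this requests
# 132000 x 47008 bytes (~5.8 GiB) per rank, ~925 GiB in aggregate across 160 ranks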
}
function setup_mdt_hard {
io500_mdtest_hard_files_per_proc=81500
io500_mdtest_hard_other_options=""
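# 160 ranks x 81500 files = 13,040,000 files requested in a single shared directory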
}
function setup_find {
#
# setup the find command. This is an area where innovation is allowed.
# There are three default options provided: a serial find, a parallel Python version,
# and a parallel C version. The current default is the serial one, but it is very slow.
# We recommend either customizing it or using the parallel C version.
# For GPFS, we recommend using the provided mmfind wrapper described below.
# Instructions below.
# If a custom approach is used, please provide enough info so others can reproduce.
# the serial version that should run (SLOWLY) without modification
#io500_find_mpi="False"
#io500_find_cmd=$PWD/bin/sfind.sh
#io500_find_cmd_args=""
# a parallel version in C, the -s adds a stonewall
# for a real run, turn -s (stonewall) off or set it at 300 or more
# to prepare this (assuming you've run ./utilities/prepare.sh already):
# > cd build/pfind
# > ./prepare.sh
# > ./compile.sh
# > cp pfind ../../bin/
# If you use io500_find_mpi="True", then this will run with the same
# number of MPI nodes and ranks as the other phases.
# If you prefer another number (fewer might be better here),
# then you can set io500_find_mpi to "False" and write a wrapper
# script for this which sets up MPI as you would like. Then change
# io500_find_cmd to point to your wrapper script.
io500_find_mpi="True"
io500_find_cmd="$PWD/bin/pfind"
# uses stonewalling, run pfind
io500_find_cmd_args="-q 200000 -s $io500_stonewall_timer -r $io500_result_dir/pfind_results"
# for GPFS systems, you should probably use the provided mmfind wrapper
# if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
#io500_find_mpi="False"
#io500_find_cmd="$PWD/bin/mmfind.sh"
#io500_find_cmd_args=""
}
function setup_mdreal {
io500_mdreal_params="-P=5000 -I=1000"
}
function run_benchmarks {
# Important: source the io500_fixed.sh script. Do not change it. If you discover
# a need to change it, please email the mailing list to discuss
source ./utilities/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}
# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
# top level info
io500_info_system_name='xxx' # e.g. Oakforest-PACS
io500_info_institute_name='xxx' # e.g. JCAHPC
io500_info_storage_age_in_months='xxx' # not install date but age since last refresh
io500_info_storage_install_date='xxx' # MM/YY
io500_info_filesystem='xxx' # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
io500_info_filesystem_version='xxx'
io500_info_filesystem_vendor='xxx'
# client side info
io500_info_num_client_nodes='xxx'
io500_info_procs_per_node='xxx'
# server side info
io500_info_num_metadata_server_nodes='xxx'
io500_info_num_data_server_nodes='xxx'
io500_info_num_data_storage_devices='xxx' # if you have 5 data servers, and each has 5 drives, then this number is 25
io500_info_num_metadata_storage_devices='xxx' # if you have 2 metadata servers, and each has 5 drives, then this number is 10
io500_info_data_storage_type='xxx' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_metadata_storage_type='xxx' # HDD, SSD, persistent memory, etc, feel free to put specific models
io500_info_storage_network='xxx' # infiniband, omnipath, ethernet, etc
io500_info_storage_interface='xxx' # SAS, SATA, NVMe, etc
# miscellaneous
io500_info_whatever='WhateverElseYouThinkRelevant'
}
main
- ior_easy_read
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 21:21:40 2019
Command line : /work/home/sihara/io-500-dev/bin/ior -r -R -t 2048k -b 72000m -F -i 1 -C -Q 1 -g -G 27 -k -e -o /es90/io500.out/ior_easy/ior_file_easy -O stoneWallingStatusFile=/es90/io500.out/ior_easy/stonewall
Machine : Linux c082
TestID : 0
StartTime : Sun Jun 9 21:21:40 2019
Path : /es90/io500.out/ior_easy
FS : 53.4 TiB Used FS: 22.3% Inodes: 491.9 Mi Used Inodes: 12.2%
Options:
api : POSIX
apiVersion :
test filename : /es90/io500.out/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 160
clients per node : 16
repetitions : 1
xfersize : 2 MiB
blocksize : 70.31 GiB
aggregate filesize : 10.99 TiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
read 42800 73728000 2048.00 0.008845 269.14 0.013772 269.16 0
Max Read: 42800.04 MiB/sec (44879.09 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 42800.04 42800.04 42800.04 0.00 21400.02 21400.02 21400.02 0.00 269.15863 0 160 16 1 1 1 1 0 0 1 75497472000 2097152 11520000.0 POSIX 0
Finished : Sun Jun 9 21:26:09 2019
- ior_easy_write
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 20:58:54 2019
Command line : /work/home/sihara/io-500-dev/bin/ior -w -t 2048k -b 72000m -F -i 1 -C -Q 1 -g -G 27 -k -e -o /es90/io500.out/ior_easy/ior_file_easy -O stoneWallingStatusFile=/es90/io500.out/ior_easy/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux c082
TestID : 0
StartTime : Sun Jun 9 20:58:54 2019
Path : /es90/io500.out/ior_easy
FS : 53.4 TiB Used FS: 0.0% Inodes: 432.0 Mi Used Inodes: 0.0%
Options:
api : POSIX
apiVersion :
test filename : /es90/io500.out/ior_easy/ior_file_easy
access : file-per-process
type : independent
segments : 1
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 160
clients per node : 16
repetitions : 1
xfersize : 2 MiB
blocksize : 70.31 GiB
aggregate filesize : 10.99 TiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 19597 max: 36000 -- min data: 38.3 GiB mean data: 66.5 GiB time: 300.3s
write 33454 73728000 2048.00 0.005500 344.34 0.013911 344.36 0
Max Write: 33453.51 MiB/sec (35078.55 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 33453.51 33453.51 33453.51 0.00 16726.76 16726.76 16726.76 0.00 344.35846 0 160 16 1 1 1 1 0 0 1 75497472000 2097152 11520000.0 POSIX 0
Finished : Sun Jun 9 21:04:39 2019
- ior_hard_read
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 21:26:28 2019
Command line : /work/home/sihara/io-500-dev/bin/ior -r -R -s 132000 -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /es90/io500.out/ior_hard/IOR_file -O stoneWallingStatusFile=/es90/io500.out/ior_hard/stonewall
Machine : Linux c082
TestID : 0
StartTime : Sun Jun 9 21:26:28 2019
Path : /es90/io500.out/ior_hard
FS : 53.4 TiB Used FS: 22.3% Inodes: 491.9 Mi Used Inodes: 12.2%
Options:
api : POSIX
apiVersion :
test filename : /es90/io500.out/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 132000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 160
clients per node : 16
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 924.63 GiB
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
read 5748 45.91 45.91 0.006905 164.71 0.000604 164.72 0
Max Read: 5748.01 MiB/sec (6027.23 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
read 5748.01 5748.01 5748.01 0.00 128217.12 128217.12 128217.12 0.00 164.72060 0 160 16 1 0 1 1 0 0 132000 47008 47008 946816.4 POSIX 0
Finished : Sun Jun 9 21:29:15 2019
- ior_hard_write
-
IOR-3.3alpha1: MPI Coordinated Test of Parallel I/O
Began : Sun Jun 9 21:10:22 2019
Command line : /work/home/sihara/io-500-dev/bin/ior -w -s 132000 -i 1 -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -o /es90/io500.out/ior_hard/IOR_file -O stoneWallingStatusFile=/es90/io500.out/ior_hard/stonewall -O stoneWallingWearOut=1 -D 300
Machine : Linux c082
TestID : 0
StartTime : Sun Jun 9 21:10:22 2019
Path : /es90/io500.out/ior_hard
FS : 53.4 TiB Used FS: 20.6% Inodes: 480.8 Mi Used Inodes: 10.2%
Options:
api : POSIX
apiVersion :
test filename : /es90/io500.out/ior_hard/IOR_file
access : single-shared-file
type : independent
segments : 132000
ordering in a file : sequential
ordering inter file : constant task offset
task offset : 1
tasks : 160
clients per node : 16
repetitions : 1
xfersize : 47008 bytes
blocksize : 47008 bytes
aggregate filesize : 924.63 GiB
stonewallingTime : 300
stoneWallingWearOut : 1
Results:
access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
stonewalling pairs accessed min: 113312 max: 132000 -- min data: 5.0 GiB mean data: 5.7 GiB time: 300.1s
write 2980.18 45.91 45.91 0.112967 317.59 0.000838 317.70 0
Max Write: 2980.18 MiB/sec (3124.95 MB/sec)
Summary of all tests:
Operation Max(MiB) Min(MiB) Mean(MiB) StdDev Max(OPs) Min(OPs) Mean(OPs) StdDev Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggs(MiB) API RefNum
write 2980.18 2980.18 2980.18 0.00 66476.94 66476.94 66476.94 0.00 317.70417 0 160 16 1 0 1 1 0 0 132000 47008 47008 946816.4 POSIX 0
Finished : Sun Jun 9 21:15:41 2019
- mdtest_easy_delete
-
-- started at 06/09/2019 21:29:20 --
mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-r" "-F" "-d" "/es90/io500.out/mdt_easy" "-n" "320000" "-u" "-L" "-x" "/es90/io500.out/mdt_easy-stonewall"
Path: /es90/io500.out
FS: 53.4 TiB Used FS: 22.3% Inodes: 491.9 Mi Used Inodes: 12.2%
160 tasks, 51200000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 85788.802 85787.118 85788.754 0.184
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 1.690 1.690 1.690 0.000
-- finished at 06/09/2019 21:39:18 --
- mdtest_easy_stat
-
-- started at 06/09/2019 21:26:10 --
mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-T" "-F" "-d" "/es90/io500.out/mdt_easy" "-n" "320000" "-u" "-L" "-x" "/es90/io500.out/mdt_easy-stonewall"
Path: /es90/io500.out
FS: 53.4 TiB Used FS: 22.3% Inodes: 491.9 Mi Used Inodes: 12.2%
160 tasks, 51200000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 3334237.760 3330288.697 3330427.909 409.948
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 21:26:26 --
- mdtest_easy_write
-
-- started at 06/09/2019 21:04:40 --
mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-C" "-F" "-d" "/es90/io500.out/mdt_easy" "-n" "320000" "-u" "-L" "-x" "/es90/io500.out/mdt_easy-stonewall" "-W" "300"
Path: /es90/io500.out
FS: 53.4 TiB Used FS: 20.6% Inodes: 432.0 Mi Used Inodes: 0.0%
160 tasks, 51200000 files
Continue stonewall hit min: 278950 max: 320000 avg: 296526.9
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 149969.992 149964.507 149969.620 1.174
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 13.887 13.887 13.887 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 21:10:21 --
- mdtest_hard_delete
-
-- started at 06/09/2019 21:39:23 --
mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall"
Path: /es90/io500.out
FS: 53.4 TiB Used FS: 22.3% Inodes: 443.1 Mi Used Inodes: 2.5%
160 tasks, 13040000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 42085.045 42084.993 42085.021 0.018
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 4.303 4.303 4.303 0.000
-- finished at 06/09/2019 21:43:59 --
- mdtest_hard_read
-
-- started at 06/09/2019 21:39:19 --
mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall"
Path: /es90/io500.out
FS: 53.4 TiB Used FS: 22.3% Inodes: 443.1 Mi Used Inodes: 2.5%
160 tasks, 13040000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 0.000 0.000 0.000 0.000
File read : 4135255.511 4134773.122 4134993.502 178.789
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 21:39:22 --
- mdtest_hard_stat
-
-- started at 06/09/2019 21:29:16 --
mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall"
Path: /es90/io500.out
FS: 53.4 TiB Used FS: 22.3% Inodes: 491.9 Mi Used Inodes: 12.2%
160 tasks, 13040000 files
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 0.000 0.000 0.000 0.000
File stat : 3557743.312 3557325.227 3557537.636 149.709
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 0.000 0.000 0.000 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 21:29:19 --
- mdtest_hard_write
-
-- started at 06/09/2019 21:15:42 --
mdtest-3.3alpha1 was launched with 160 total task(s) on 10 node(s)
Command line used: /work/home/sihara/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/es90/io500.out/mdt_hard" "-n" "81500" "-x" "/es90/io500.out/mdt_hard-stonewall" "-W" "300"
Path: /es90/io500.out
FS: 53.4 TiB Used FS: 22.3% Inodes: 480.8 Mi Used Inodes: 10.2%
160 tasks, 13040000 files
Continue stonewall hit min: 67530 max: 72624 avg: 69740.9
SUMMARY rate: (of 1 iterations)
Operation Max Min Mean Std Dev
--------- --- --- ---- -------
File creation : 36183.456 36182.788 36183.431 0.070
File stat : 0.000 0.000 0.000 0.000
File read : 0.000 0.000 0.000 0.000
File removal : 0.000 0.000 0.000 0.000
Tree creation : 82.079 82.079 82.079 0.000
Tree removal : 0.000 0.000 0.000 0.000
-- finished at 06/09/2019 21:21:03 --
- result_summary
-
[RESULT] BW phase 1 ior_easy_write 32.670 GB/s : time 344.36 seconds
[RESULT] IOPS phase 1 mdtest_easy_write 149.970 kiops : time 342.55 seconds
[RESULT] BW phase 2 ior_hard_write 2.910 GB/s : time 317.70 seconds
[RESULT] IOPS phase 2 mdtest_hard_write 36.183 kiops : time 322.16 seconds
[RESULT] IOPS phase 3 find 1727.400 kiops : time 36.55 seconds
[RESULT] BW phase 3 ior_easy_read 41.797 GB/s : time 269.16 seconds
[RESULT] IOPS phase 4 mdtest_easy_stat 3334.240 kiops : time 16.67 seconds
[RESULT] BW phase 4 ior_hard_read 5.613 GB/s : time 164.72 seconds
[RESULT] IOPS phase 5 mdtest_hard_stat 3557.740 kiops : time 4.25 seconds
[RESULT] IOPS phase 6 mdtest_easy_delete 85.789 kiops : time 598.31 seconds
[RESULT] IOPS phase 7 mdtest_hard_read 4135.260 kiops : time 4.18 seconds
[RESULT] IOPS phase 8 mdtest_hard_delete 42.085 kiops : time 277.17 seconds
[SCORE] Bandwidth 12.2212 GB/s : IOPS 449.28 kiops : TOTAL 74.0995