ofsdev

Institution Clemson University
Client Procs Per Node 4
Client Operating System Oracle Linux
Client Operating System Version 7.5
Client Kernel Version 4.1.12-103.9.4.el7uek.x86_64

DATA SERVER

Storage Type HDD
Volatile Memory 128GB
Storage Interface SAS
Network InfiniBand FDR
Software Version 7.0
OS Version 7.5

INFORMATION

Client Nodes 16
Client Total Procs 64
Metadata Nodes 16
Metadata Storage Devices 2
Data Nodes 16
Data Storage Devices 12

METADATA

Easy Write 181.41 kIOP/s
Easy Stat 693.87 kIOP/s
Easy Delete 262.28 kIOP/s
Hard Write 6.24 kIOP/s
Hard Read 18.81 kIOP/s
Hard Stat 73.77 kIOP/s
Hard Delete 6.18 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This file is based on the simplified single-node example, adapted here to run across multiple nodes via the machinefile set in setup_paths.
# It is a simplified version of site-configs/sandia/startup.sh, which includes SLURM directives.
# Most of the variables set here are needed by io500_fixed.sh, which is sourced at the end of this script.
# Please also edit the 'extra_description' function.
#set -x

if [ "$1" == "" ]
then
	SCALE=1
else
	SCALE=$1
fi


NP=$(( $SCALE * 16 ))

echo "$SCALE processes per node for $NP processes."

set -euo pipefail  # better error handling

export OFS_MOUNT=/mnt/beegfs/jburto2

# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the create phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True" # does the create phase and enables the subsequent read
io500_run_find="True"     
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"  
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="True"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.

# to run this benchmark, find and edit each of these functions.
# please also edit the 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths    
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # set the directories where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, this is a good place to do it.
  timestamp=`date +%Y.%m.%d-%H.%M.%S`           # create a uniquifier
  io500_workdir=$OFS_MOUNT/io500/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=$PWD/results/$timestamp      # the directory where the output results will be kept

  mkdir -p $io500_workdir $io500_result_dir
  mkdir -p ${io500_workdir}/ior_easy ${io500_workdir}/ior_hard 
  mkdir -p ${io500_workdir}/mdt_easy ${io500_workdir}/mdt_hard 
# for ior_easy: 1 target per file, 2048k chunk size
  beegfs-ctl --setpattern --numtargets=1 --chunksize=2048k ${io500_workdir}/ior_easy
# stripe ior_hard across all 16 storage targets, 256k chunk size
  beegfs-ctl --setpattern --numtargets=16 --chunksize=256k ${io500_workdir}/ior_hard 
# turn off striping and use small chunks for mdtest
  beegfs-ctl --setpattern --numtargets=1 --chunksize=256k ${io500_workdir}/mdt_easy 
  beegfs-ctl --setpattern --numtargets=1 --chunksize=256k ${io500_workdir}/mdt_hard 
}
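
# Optional sanity check (not part of the submitted run): beegfs-ctl can report
# the pattern actually applied to a directory, e.g.
#   beegfs-ctl --getentryinfo ${io500_workdir}/ior_easy
#   beegfs-ctl --getentryinfo ${io500_workdir}/ior_hard
# which should show the numtargets and chunksize values set above.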

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, then the binaries are in ./bin/
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  io500_mpi_prefix="/usr/lib64/openmpi"
  io500_mpirun="$io500_mpi_prefix/bin/mpirun"
  
  io500_mpiargs="-np $NP --mca btl_tcp_if_exclude ib0 --mca btl ^openib --map-by node --machinefile /home/jburto2/pvfsnodelistmpi --prefix $io500_mpi_prefix"
}
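
# Optional sanity check (not part of the submitted run): before launching the
# full benchmark it can help to confirm that the MPI prefix and machinefile
# work, e.g.
#   "$io500_mpirun" -np $NP --machinefile /home/jburto2/pvfsnodelistmpi --prefix $io500_mpi_prefix hostname
# which should print one hostname per rank across the 16 client nodes.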

function setup_ior_easy {
# 4 MiB transfers, 320 GiB per node split across the SCALE processes on each node, file per proc.
  io500_ior_easy_size=$((320 * 1024 / $SCALE))
  io500_ior_easy_params="-t 4m -b ${io500_ior_easy_size}m -F -a POSIX"
   
}
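
# Worked example for the submitted run (SCALE=4): 320*1024/4 = 81920 MiB
# (80 GiB) per process, written in 4 MiB transfers; with 64 processes the
# aggregate file size is 64 * 80 GiB = 5120 GiB, matching the ior_easy output below.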

function setup_mdt_easy {
# one level deep, 16 directories per task (-z 1 -b 16), unique dir per task (-u), files only at leaves (-L).
# BeeGFS doesn't distribute individual directories across metadata servers, so more directories = better distribution.
  io500_mdtest_easy_params="-z 1 -b 16 -u -L" 
  io500_mdtest_easy_files_per_proc=1000000 
}
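
# Worked example for the submitted run: 64 tasks * 1,000,000 files per task
# = 64,000,000 files requested, matching the "64 tasks, 64000000 files" line
# in the mdtest_easy output below.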

function setup_ior_hard {
  if [ "$SCALE" == "1" ] 
  then
	# One process per node is significantly faster because of buffering.
  	io500_ior_hard_writes_per_proc=2500000
  else	
  	io500_ior_hard_writes_per_proc=$(( 180000 / $SCALE ))
  fi

  io500_ior_hard_other_options=" -a POSIX"

}
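
# Worked example for the submitted run (SCALE=4): 180000/4 = 45000 writes per
# process at the fixed 47008-byte record size, so 64 processes write
# 64 * 45000 * 47008 bytes = ~126 GiB to the shared file, matching the
# "-s 45000" and "aggregate filesize = 126.09 GiB" lines in the ior_hard output below.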

function setup_mdt_hard {
# Multiple directories might improve mdt_hard slightly, but this test is storage bound, not metadata bound.
  io500_mdtest_hard_files_per_proc="$(( 150000 / $SCALE ))"
  io500_mdtest_files_per_proc=$(( 150000 / $SCALE )) 

}
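
# Worked example for the submitted run (SCALE=4): 150000/4 = 37500 files per
# process, so 64 processes handle 2,400,000 files in total, matching the
# "-n 37500" and "2400000 files" lines in the mdtest_hard output below.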

function setup_find {
  #
  # set up the find command. This is an area where innovation is allowed.
  #    Three default options are provided: a serial find, a parallel Python
  #    version, and a parallel C version. The current default is the serial
  #    version, but it is very slow; we recommend either customizing it or
  #    using the parallel C version. For GPFS, we recommend the provided
  #    mmfind wrapper described below. Instructions follow.
  #    If a custom approach is used, please provide enough info so others can reproduce it.

  # the serial version that should run (SLOWLY) without modification
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it at 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/ 
  #   If you use io500_find_mpi="True", then this will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer another number (fewer might be better here), you can set
  #   io500_find_mpi to "False" and write a wrapper script that sets up MPI as
  #   you would like, then change io500_find_cmd to point to your wrapper script.
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  io500_find_cmd_args="-s 10000 -r $io500_result_dir/pfind_results"
  
  # for GPFS systems, you should probably use the provided mmfind wrapper 
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss
  source ./bin/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system 
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='Palmetto ofstest'      # e.g. Oakforest-PACS
  io500_info_institute_name='Clemson University'   # e.g. JCAHPC
  io500_info_storage_age_in_months='0' # not install date but age since last refresh
  io500_info_storage_install_date='4/12'  # MM/YY
  io500_info_filesystem='BeeGFS'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='7'
  # client side info
  io500_info_num_client_nodes='16'
  io500_info_procs_per_node="${SCALE}"
  # server side info
  io500_info_num_metadata_server_nodes='16'
  io500_info_num_data_server_nodes='16'
  io500_info_num_data_storage_devices='192'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='32'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='infiniband' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='SAS' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='infiniband'
}

main
ior_easy_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

Began: Tue Aug 28 14:46:25 2018
Command line used: /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 4m -b 81920m -F -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_easy/ior_file_easy
Machine: Linux pvfs017.ofsdev.clemson.edu

Test 0 started: Tue Aug 28 14:46:25 2018
Summary:
	api                = POSIX
	test filename      = /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (4 per node)
	repetitions        = 1
	xfersize           = 4 MiB
	blocksize          = 80 GiB
	aggregate filesize = 5120 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      10571      83886080   4096       0.010786   495.93     0.030007   495.95     0   

Max Read:  10571.49 MiB/sec (11085.01 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read        10571.49   10571.49   10571.49       0.00    2642.87    2642.87    2642.87       0.00  495.94520 0 64 4 1 1 1 1 0 0 1 85899345920 4194304 5497558138880 POSIX 0

Finished: Tue Aug 28 14:54:41 2018
ior_easy_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

Began: Tue Aug 28 13:49:09 2018
Command line used: /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 4m -b 81920m -F -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_easy/ior_file_easy
Machine: Linux pvfs017.ofsdev.clemson.edu

Test 0 started: Tue Aug 28 13:49:09 2018
Summary:
	api                = POSIX
	test filename      = /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (4 per node)
	repetitions        = 1
	xfersize           = 4 MiB
	blocksize          = 80 GiB
	aggregate filesize = 5120 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     12603      83886080   4096       0.056866   415.72     0.397385   416.01     0   

Max Write: 12602.81 MiB/sec (13215.01 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write       12602.81   12602.81   12602.81       0.00    3150.70    3150.70    3150.70       0.00  416.00876 0 64 4 1 1 1 1 0 0 1 85899345920 4194304 5497558138880 POSIX 0

Finished: Tue Aug 28 13:56:05 2018
ior_hard_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

Began: Tue Aug 28 14:56:17 2018
Command line used: /home/jburto2/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 45000 -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_hard/IOR_file
Machine: Linux pvfs017.ofsdev.clemson.edu

Test 0 started: Tue Aug 28 14:56:17 2018
Summary:
	api                = POSIX
	test filename      = /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (4 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 126.09 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      2632.20    45.91      45.91      0.029964   48.99      0.038245   49.05      0   

Max Read:  2632.20 MiB/sec (2760.06 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read         2632.20    2632.20    2632.20       0.00   58714.78   58714.78   58714.78       0.00   49.05069 0 64 4 1 0 1 1 0 0 45000 47008 47008 135383040000 POSIX 0

Finished: Tue Aug 28 14:57:06 2018
ior_hard_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

Began: Tue Aug 28 14:02:14 2018
Command line used: /home/jburto2/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 45000 -a POSIX -o /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_hard/IOR_file
Machine: Linux pvfs017.ofsdev.clemson.edu

Test 0 started: Tue Aug 28 14:02:14 2018
Summary:
	api                = POSIX
	test filename      = /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (4 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 126.09 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     60.36      45.91      45.91      0.299972   2138.51    0.078873   2138.88    0   

Max Write: 60.36 MiB/sec (63.30 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write          60.36      60.36      60.36       0.00    1346.50    1346.50    1346.50       0.00 2138.88233 0 64 4 1 0 1 1 0 0 45000 47008 47008 135383040000 POSIX 0

Finished: Tue Aug 28 14:37:53 2018
mdtest_easy_delete
-- started at 08/28/2018 14:57:43 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest -r -F -d /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/mdt_easy -n 1000000 -z 1 -b 16 -u -L
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06
FS: 145.4 TiB   Used FS: 11.7%   Inodes: 0.0 Mi   Used Inodes: -nan%

64 tasks, 64000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :     262277.669     262277.669     262277.669          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          2.089          2.089          2.089          0.000

-- finished at 08/28/2018 15:01:55 --
mdtest_easy_stat
-- started at 08/28/2018 14:54:43 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest -T -F -d /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/mdt_easy -n 1000000 -z 1 -b 16 -u -L
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06
FS: 145.4 TiB   Used FS: 11.7%   Inodes: 0.0 Mi   Used Inodes: -nan%

64 tasks, 64000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :     693867.197     693867.197     693867.197          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/28/2018 14:56:15 --
mdtest_easy_write
-- started at 08/28/2018 13:56:10 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest -C -F -d /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/mdt_easy -n 1000000 -z 1 -b 16 -u -L
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06
FS: 145.4 TiB   Used FS: 11.6%   Inodes: 0.0 Mi   Used Inodes: -nan%

64 tasks, 64000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :     181409.001     181409.001     181409.001          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :         93.562         93.562         93.562          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/28/2018 14:02:03 --
mdtest_hard_delete
-- started at 08/28/2018 15:04:07 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest -r -t -F -w 3901 -e 3901 -d /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/mdt_hard -n 37500
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06
FS: 145.4 TiB   Used FS: 11.7%   Inodes: 0.0 Mi   Used Inodes: -nan%

64 tasks, 2400000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :       6177.783       6177.783       6177.783          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.417          0.417          0.417          0.000

-- finished at 08/28/2018 15:10:38 --
mdtest_hard_read
-- started at 08/28/2018 15:01:57 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest -E -t -F -w 3901 -e 3901 -d /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/mdt_hard -n 37500
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06
FS: 145.4 TiB   Used FS: 11.7%   Inodes: 0.0 Mi   Used Inodes: -nan%

64 tasks, 2400000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      18806.375      18806.375      18806.375          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/28/2018 15:04:05 --
mdtest_hard_stat
-- started at 08/28/2018 14:57:08 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest -T -t -F -w 3901 -e 3901 -d /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/mdt_hard -n 37500
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06
FS: 145.4 TiB   Used FS: 11.7%   Inodes: 0.0 Mi   Used Inodes: -nan%

64 tasks, 2400000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      73766.263      73766.263      73766.263          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/28/2018 14:57:41 --
mdtest_hard_write
-- started at 08/28/2018 14:37:55 --

mdtest-1.9.3 was launched with 64 total task(s) on 16 node(s)
Command line used: /home/jburto2/io-500-dev/bin/mdtest -C -t -F -w 3901 -e 3901 -d /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06/mdt_hard -n 37500
Path: /mnt/beegfs/jburto2/io500/datafiles/io500.2018.08.28-13.49.06
FS: 145.4 TiB   Used FS: 11.7%   Inodes: 0.0 Mi   Used Inodes: -nan%

64 tasks, 2400000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :       6241.263       6241.263       6241.263          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :        377.455        377.455        377.455          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 08/28/2018 14:44:19 --
result_summary
[RESULT] BW   phase 1            ior_easy_write               12.308 GB/s : time 416.01 seconds
[RESULT] IOPS phase 1         mdtest_easy_write              181.409 kiops : time 355.11 seconds
[RESULT] BW   phase 2            ior_hard_write                0.059 GB/s : time 2138.88 seconds
[RESULT] IOPS phase 2         mdtest_hard_write                6.241 kiops : time 389.51 seconds
[RESULT] IOPS phase 3                      find              550.040 kiops : time 120.72 seconds
[RESULT] BW   phase 3             ior_easy_read               10.323 GB/s : time 495.95 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat              693.867 kiops : time  94.14 seconds
[RESULT] BW   phase 4             ior_hard_read                2.571 GB/s : time  49.05 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat               73.766 kiops : time  34.64 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete              262.278 kiops : time 254.23 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               18.806 kiops : time 129.71 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete                6.178 kiops : time 395.99 seconds
[SCORE] Bandwidth 2.09466 GB/s : IOPS 74.7171 kiops : TOTAL 12.5103
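
Note: the TOTAL score appears to be the geometric mean of the two sub-scores
above: sqrt(2.09466 GB/s * 74.7171 kiops) = sqrt(156.51) ~= 12.51, consistent
with the reported 12.5103.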