Bytecollider

Institution CERN
Client Procs Per Node
Client Operating System CentOS
Client Operating System Version 7.5
Client Kernel Version embargo

DATA SERVER

Storage Type SSD
Volatile Memory 128GB
Storage Interface SATA
Network Ethernet 10Gbps
Software Version 12.2.8
OS Version 7.5

INFORMATION

Client Nodes 64
Client Total Procs 64
Metadata Nodes 72
Metadata Storage Devices 72
Data Nodes 72
Data Storage Devices 3

METADATA

Easy Write 4.56 kIOP/s
Easy Stat 28.17 kIOP/s
Easy Delete 4.50 kIOP/s
Hard Write 5.95 kIOP/s
Hard Read 13.92 kIOP/s
Hard Stat 28.52 kIOP/s
Hard Delete 5.15 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of the site-configs/sandia/startup.sh which include SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit 'extra_description' function.

set -euo pipefail  # better error handling

# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the creat phase and enables the subsequent read
io500_run_find="True"     
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"  
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.

# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths    
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # set directories for where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, then this is good place to do it. 
  timestamp=`date +%Y.%m.%d-%H.%M.%S`           # create a uniquifier
  io500_workdir=/bescratch/user/pllopiss/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=/bescratch/user/pllopiss/results/$timestamp      # the directory where the output results will be kept
  mkdir -p $io500_workdir $io500_result_dir
}

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, then binaries are in ./bin/
  io500_ior_cmd=$PWD/bin/ior
  io500_mdtest_cmd=$PWD/bin/mdtest
  io500_mdreal_cmd=$PWD/bin/md-real-io
  io500_mpirun="srun"
  io500_mpiargs="-N 64 -p be-short -t 1-0 --reservation pllopiss_1 -w hpc-be[001-002,006-008,011-014,016-017,019-023,025-026,030-032,035,040-042,045,047,049,051,053-056,059,064,067,072,080,085,087-088,094-096,099,102-104,106-108,112-113,116,120,123-124,126,128-129,134-136,139]"
}

function setup_ior_easy {
  io500_ior_easy_params="-t 16m -b 16m -F -s 1860" # 2M writes, 2 GB per proc, file per proc 
}

function setup_mdt_easy {
  io500_mdtest_easy_params="-u -L" # unique dir per thread, files only at leaves
  io500_mdtest_easy_files_per_proc=33800
}

function setup_ior_hard {
  io500_ior_hard_writes_per_proc=89984
}

function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=24960
#  io500_mdtest_hard_files_per_proc=100
}

function setup_find {
  #
  # setup the find command. This is an area where innovation is allowed.
  #    There are three default options provided. One is a serial find, one is python
  #    parallel version, one is C parallel version.  Current default is to use serial.
  #    But it is very slow. We recommend to either customize or use the C parallel version.
  #    For GPFS, we recommend to use the provided mmfind wrapper described below.
  #    Instructions below.
  #    If a custom approach is used, please provide enough info so others can reproduce.

  # the serial version that should run (SLOWLY) without modification
  #io500_find_mpi="False"
  #io500_find_cmd=$PWD/bin/sfind.sh
  #io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it at 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/ 
  #   If you use io500_find_mpi="True", then this will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer another number, and fewer might be better here,
  #   Then you can set io500_find_mpi to be "False" and write a wrapper
  #   script for this which sets up MPI as you would like.  Then change
  #   io500_find_cmd to point to your wrapper script. 
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  io500_find_cmd_args=" -r $io500_result_dir/pfind_results"
  
  # for GPFS systems, you should probably use the provided mmfind wrapper 
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss
  source ./bin/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system 
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='CERN-CEPH-BE'      # e.g. Oakforest-PACS
  io500_info_institute_name='CERN'   # e.g. JCAHPC
  io500_info_storage_age_in_months='3' # not install date but age since last refresh
  io500_info_storage_install_date='10/17'  # MM/YY
  io500_info_filesysem='CephFS'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='12'
  # client side info
  io500_info_num_client_nodes='142'
  io500_info_procs_per_node='20'
  # server side info
  io500_info_num_metadata_server_nodes='3'
  io500_info_num_data_server_nodes='142'
  io500_info_num_data_storage_devices='426'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='xxx'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='ethernet' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='SATA' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='ByteCollider'
}

main
ior_easy_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX.  Using value of 0.
Began: Fri Nov  9 20:10:15 2018
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 16m -b 16m -F -s 1860 -o /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_easy/ior_file_easy
Machine: Linux hpc-be001.cern.ch

Test 0 started: Fri Nov  9 20:10:15 2018
Summary:
	api                = MPIIO (version=3, subversion=0)
	test filename      = /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (1 per node)
	repetitions        = 1
	xfersize           = 16 MiB
	blocksize          = 16 MiB
	aggregate filesize = 1860 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      7587       16384      16384      0.013876   251.02     0.000379   251.03     0   

Max Read:  7587.30 MiB/sec (7955.86 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read         7587.30    7587.30    7587.30       0.00  251.03007 0 64 1 1 1 1 1 0 0 1860 16777216 16777216 1997159792640 MPIIO 0

Finished: Fri Nov  9 20:14:26 2018
ior_easy_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX.  Using value of 0.
Began: Fri Nov  9 19:44:27 2018
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 16m -b 16m -F -s 1860 -o /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_easy/ior_file_easy
Machine: Linux hpc-be001.cern.ch

Test 0 started: Fri Nov  9 19:44:27 2018
Summary:
	api                = MPIIO (version=3, subversion=0)
	test filename      = /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (1 per node)
	repetitions        = 1
	xfersize           = 16 MiB
	blocksize          = 16 MiB
	aggregate filesize = 1860 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     5054       16384      16384      0.010982   376.87     0.000996   376.88     0   

Max Write: 5053.67 MiB/sec (5299.15 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write        5053.67    5053.67    5053.67       0.00  376.88274 0 64 1 1 1 1 1 0 0 1860 16777216 16777216 1997159792640 MPIIO 0

Finished: Fri Nov  9 19:50:44 2018
ior_hard_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX.  Using value of 0.
Began: Fri Nov  9 20:15:49 2018
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 89984 -o /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_hard/IOR_file
Machine: Linux hpc-be001.cern.ch

Test 0 started: Fri Nov  9 20:15:49 2018
Summary:
	api                = MPIIO (version=3, subversion=0)
	test filename      = /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (1 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 252.13 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      2290.78    45.91      45.91      0.012494   112.69     0.000232   112.70     0   

Max Read:  2290.78 MiB/sec (2402.06 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read         2290.78    2290.78    2290.78       0.00  112.70250 0 64 1 1 0 1 1 0 0 89984 47008 47008 270717943808 MPIIO 0

Finished: Fri Nov  9 20:17:42 2018
ior_hard_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX.  Using value of 0.
Began: Fri Nov  9 19:59:48 2018
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 89984 -o /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_hard/IOR_file
Machine: Linux hpc-be001.cern.ch

Test 0 started: Fri Nov  9 19:59:48 2018
Summary:
	api                = MPIIO (version=3, subversion=0)
	test filename      = /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 64 (1 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 252.13 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     829.16     45.91      45.91      5.25       306.12     0.000346   311.37     0   

Max Write: 829.16 MiB/sec (869.44 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write         829.16     829.16     829.16       0.00  311.37134 0 64 1 1 0 1 1 0 0 89984 47008 47008 270717943808 MPIIO 0

Finished: Fri Nov  9 20:05:00 2018
mdtest_easy_delete
-- started at 11/09/2018 20:18:44 --

mdtest-1.9.3 was launched with 64 total task(s) on 64 node(s)
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/mdtest -r -F -d /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/mdt_easy -n 33800 -u -L
Path: /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23
FS: 84.9 TiB   Used FS: 9.9%   Inodes: 24.4 Mi   Used Inodes: 100.0%

64 tasks, 2163200 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :       4502.729       4502.729       4502.729          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          6.004          6.004          6.004          0.000

-- finished at 11/09/2018 20:26:44 --
mdtest_easy_stat
-- started at 11/09/2018 20:14:30 --

mdtest-1.9.3 was launched with 64 total task(s) on 64 node(s)
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/mdtest -T -F -d /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/mdt_easy -n 33800 -u -L
Path: /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23
FS: 84.9 TiB   Used FS: 9.9%   Inodes: 24.4 Mi   Used Inodes: 100.0%

64 tasks, 2163200 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      28172.482      28172.482      28172.482          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/09/2018 20:15:47 --
mdtest_easy_write
srun: job 33869 queued and waiting for resources
srun: job 33869 has been allocated resources
-- started at 11/09/2018 19:51:49 --

mdtest-1.9.3 was launched with 64 total task(s) on 64 node(s)
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/mdtest -C -F -d /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/mdt_easy -n 33800 -u -L
Path: /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23
FS: 85.0 TiB   Used FS: 9.5%   Inodes: 19.2 Mi   Used Inodes: 100.0%

64 tasks, 2163200 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :       4560.893       4560.893       4560.893          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          4.316          4.316          4.316          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/09/2018 19:59:44 --
mdtest_hard_delete
-- started at 11/09/2018 20:28:45 --

mdtest-1.9.3 was launched with 64 total task(s) on 64 node(s)
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/mdtest -r -t -F -w 3901 -e 3901 -d /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/mdt_hard -n 24960
Path: /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23
FS: 84.9 TiB   Used FS: 9.9%   Inodes: 22.3 Mi   Used Inodes: 100.0%

64 tasks, 1597440 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :       5145.950       5145.950       5145.950          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          9.310          9.310          9.310          0.000

-- finished at 11/09/2018 20:33:56 --
mdtest_hard_read
-- started at 11/09/2018 20:26:48 --

mdtest-1.9.3 was launched with 64 total task(s) on 64 node(s)
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/mdtest -E -t -F -w 3901 -e 3901 -d /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/mdt_hard -n 24960
Path: /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23
FS: 84.9 TiB   Used FS: 9.9%   Inodes: 22.3 Mi   Used Inodes: 100.0%

64 tasks, 1597440 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      13923.825      13923.825      13923.825          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/09/2018 20:28:43 --
mdtest_hard_stat
-- started at 11/09/2018 20:17:45 --

mdtest-1.9.3 was launched with 64 total task(s) on 64 node(s)
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/mdtest -T -t -F -w 3901 -e 3901 -d /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/mdt_hard -n 24960
Path: /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23
FS: 84.9 TiB   Used FS: 9.9%   Inodes: 24.4 Mi   Used Inodes: 100.0%

64 tasks, 1597440 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :      28524.688      28524.688      28524.688          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/09/2018 20:18:41 --
mdtest_hard_write
-- started at 11/09/2018 20:05:02 --

mdtest-1.9.3 was launched with 64 total task(s) on 64 node(s)
Command line used: /hpcscratch/user/pllopiss/src/io-500-dev/bin/mdtest -C -t -F -w 3901 -e 3901 -d /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23/mdt_hard -n 24960
Path: /bescratch/user/pllopiss/datafiles/io500.2018.11.09-19.44.23
FS: 84.9 TiB   Used FS: 9.9%   Inodes: 21.3 Mi   Used Inodes: 100.0%

64 tasks, 1597440 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :       5951.954       5951.954       5951.954          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :       1983.122       1983.122       1983.122          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 11/09/2018 20:09:30 --
result_summary
[RESULT] BW   phase 1            ior_easy_write                4.936 GB/s : time 376.88 seconds
[RESULT] IOPS phase 1         mdtest_easy_write                4.561 kiops : time 536.33 seconds
[RESULT] BW   phase 2            ior_hard_write                0.810 GB/s : time 311.37 seconds
[RESULT] IOPS phase 2         mdtest_hard_write                5.952 kiops : time 270.65 seconds
[RESULT] IOPS phase 3                      find               92.730 kiops : time  40.55 seconds
[RESULT] BW   phase 3             ior_easy_read                7.409 GB/s : time 251.03 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat               28.173 kiops : time  79.09 seconds
[RESULT] BW   phase 4             ior_hard_read                2.237 GB/s : time 112.70 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat               28.525 kiops : time  58.36 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete                4.503 kiops : time 482.90 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               13.924 kiops : time 118.89 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete                5.146 kiops : time 313.18 seconds
[SCORE] Bandwidth 2.85287 GB/s : IOPS 12.6425 kiops : TOTAL 36.06740