Oakforest-PACS

Institution JCAHPC
Client Procs Per Node 32
Client Operating System
Client Operating System Version
Client Kernel Version

DATA SERVER

Storage Type HDD
Volatile Memory
Storage Interface SAS
Network OmniPath
Software Version 2.7.21.3.ddn21
OS Version

INFORMATION

Client Nodes 256
Client Total Procs 8,192
Metadata Nodes 10
Metadata Storage Devices 44
Data Nodes 40
Data Storage Devices 4,100

METADATA

Easy Write 58.46 kIOP/s
Easy Stat 152.55 kIOP/s
Easy Delete 37.23 kIOP/s
Hard Write 44.60 kIOP/s
Hard Read 81.70 kIOP/s
Hard Stat 107.72 kIOP/s
Hard Delete 43.99 kIOP/s

Submitted Files

io500
#!/bin/bash
#PJM -L rscgrp=regular-flat
#PJM -L node=256
#PJM --mpi proc=8192
#PJM -L elapse=2:00:00
#PJM -g xg17i000
#PJM -j
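# PJM directives above: 256 nodes from the "regular-flat" resource group
# (Oakforest-PACS's flat-MCDRAM-mode KNL nodes), 8192 MPI processes
# (32 per node), a 2-hour walltime limit, accounting group xg17i000,
# and -j to merge stderr into stdout.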
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This script started from the simplified single-node template (itself a
# trimmed-down version of site-configs/sandia/startup.sh, which includes
# SLURM directives) and has been adapted here for a 256-node PJM run.
# Most of the variables set here are needed by io500_fixed.sh, which gets
# sourced at the end of this script.
# Please also edit the 'extra_description' function.

set -euo pipefail  # better error handling

DIR=/work/xg17i000/x10007/io-500-dev
export I_MPI_PIN_PROCESSOR_EXCLUDE_LIST=0,68,136,204
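# Oakforest-PACS nodes are 68-core Xeon Phi 7250 (KNL) with 4 hardware
# threads per core (272 logical CPUs); excluding CPUs 0, 68, 136, and 204
# keeps MPI ranks off all four hardware threads of core 0, leaving it
# free for the OS.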

# Set these to "True" one at a time while you debug and tune this benchmark.
# For each one that you set to True, edit the corresponding setup function;
# to find the function name, see the 'main' function below.
# These are listed in the order that they run.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the create phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the create phase and enables the subsequent stat/read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.
io500_stonewall_timer=0 # stonewalling timer: stop phases with wearout after 300 s in the default test; set to 0 if you never want a phase to abort early
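# With the timer at 0 here, each phase runs to completion; the "-D 0" in
# the IOR command lines of the submitted output below reflects this setting.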

# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
function main {
  setup_directories
  setup_paths
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

function setup_directories {
  # set directories for where the benchmark files are created and where the results will go.
  # If you want to set up stripe tuning on your output directories or anything similar, then this is a good place to do it.
  timestamp=$(date +%Y.%m.%d-%H.%M.%S)          # create a uniquifier
  io500_workdir=$DIR/datafiles/io500.$timestamp # directory where the data will be stored
  io500_result_dir=$DIR/results/$timestamp      # the directory where the output results will be kept
  mkdir -p $io500_workdir $io500_result_dir
  mkdir -p $io500_workdir/ior_easy
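  # ior_easy is file-per-process, so a modest stripe count (2) per file suffices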
  lfs setstripe --stripe-count 2 ${io500_workdir}/ior_easy
  mkdir -p $io500_workdir/ior_hard
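  # ior_hard is one shared file, so stripe it widely (100 stripes) across the OSTs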
  lfs setstripe --stripe-count 100 ${io500_workdir}/ior_hard
}

function setup_paths {
  # Set the paths to the binaries.  If you ran ./utilities/prepare.sh successfully, then binaries are in ./bin/
  io500_ior_cmd=$DIR/bin/ior
  io500_mdtest_cmd=$DIR/bin/mdtest
  io500_mdreal_cmd=$DIR/bin/md-real-io
  io500_mpirun="mpiexec.hydra"
  io500_mpiargs="-n ${PJM_MPI_PROC}"
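  # PJM_MPI_PROC is set by the PJM scheduler to the total MPI process count
  # requested via "#PJM --mpi proc=8192" above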
}

function setup_ior_easy {
  # io500_ior_easy_size is the amount of data written per rank in MiB units,
  # but it can be any number as long as it is somehow used to scale the IOR
  # runtime as part of io500_ior_easy_params
  io500_ior_easy_size=4000
  # 2M writes, 4000 MiB (~3.9 GiB) per proc, file per proc
  io500_ior_easy_params="-t 2048k -b ${io500_ior_easy_size}m -F"
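  # 8192 ranks x 4000 MiB each = 31.25 TiB aggregate, matching the IOR output below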
}

function setup_mdt_easy {
  io500_mdtest_easy_params="-u -L" # unique dir per thread, files only at leaves
  io500_mdtest_easy_files_per_proc=2500
}

function setup_ior_hard {
  io500_ior_hard_writes_per_proc=2300
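  # 2300 segments x 47008 bytes x 8192 ranks ~ 824.88 GiB aggregate (see the IOR output below)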
  io500_ior_hard_other_options="" #e.g., -E to keep precreated files using lfs setstripe, or -a MPIIO
}

function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=1600
  io500_mdtest_hard_other_options="-I 800"
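  # -I 800 sets the number of items per directory in mdtest's tree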
}

function setup_find {
  #
  # set up the find command. This is an area where innovation is allowed.
  #    Three default options are provided: a serial find, a parallel Python
  #    version, and a parallel C version. The current default is the serial
  #    one, but it is very slow; we recommend either customizing it or using
  #    the C parallel version. For GPFS, we recommend the provided mmfind
  #    wrapper described below (instructions follow).
  #    If a custom approach is used, please provide enough info so others can reproduce it.

  # the serial version that should run (SLOWLY) without modification
  io500_find_mpi="False"
  io500_find_cmd=$DIR/bin/sfind.sh
  io500_find_cmd_args=""

  # a parallel version in C, the -s adds a stonewall
  #   for a real run, turn -s (stonewall) off or set it to 300 or more
  #   to prepare this (assuming you've run ./utilities/prepare.sh already):
  #   > cd build/pfind
  #   > ./prepare.sh
  #   > ./compile.sh
  #   > cp pfind ../../bin/
  #   If you use io500_find_mpi="True", then pfind will run with the same
  #   number of MPI nodes and ranks as the other phases.
  #   If you prefer a different number (fewer might be better here), set
  #   io500_find_mpi to "False" and write a wrapper script that sets up MPI
  #   as you would like, then change io500_find_cmd to point to your wrapper
  #   script.
  io500_find_mpi="True"
  io500_find_cmd="$DIR/bin/pfind"
  # uses stonewalling, run pfind
  io500_find_cmd_args="-s $io500_stonewall_timer -r $io500_result_dir/pfind_results"

  # for GPFS systems, you should probably use the provided mmfind wrapper
  # if you used ./utilities/prepare.sh, you'll find this wrapper in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

function run_benchmarks {
  # Important: source the io500_fixed.sh script. Do not change it. If you
  # discover a need to change it, please email the mailing list to discuss it.
  source ./bin/io500_fixed.sh 2>&1 | tee $io500_result_dir/io-500-summary.$timestamp.txt
}

# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
function extra_description {
  # top level info
  io500_info_system_name='Oakforest-PACS'      # e.g. Oakforest-PACS
  io500_info_institute_name='JCAHPC'   # e.g. JCAHPC
  io500_info_storage_age_in_months='22' # not install date but age since last refresh
  io500_info_storage_install_date='12/16'  # MM/YY
  io500_info_filesystem='Lustre'     # e.g. BeeGFS, DataWarp, GPFS, IME, Lustre
  io500_info_filesystem_version='2.7.21.3.ddn21'
  io500_info_filesystem_vendor='DDN'
  # client side info
  io500_info_num_client_nodes='256'
  io500_info_procs_per_node='32'
  # server side info
  io500_info_num_metadata_server_nodes='10'
  io500_info_num_data_server_nodes='40'
  io500_info_num_data_storage_devices='4100'  # if you have 5 data servers, and each has 5 drives, then this number is 25
  io500_info_num_metadata_storage_devices='44'  # if you have 2 metadata servers, and each has 5 drives, then this number is 10
  io500_info_data_storage_type='HDD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_metadata_storage_type='SSD' # HDD, SSD, persistent memory, etc, feel free to put specific models
  io500_info_storage_network='OmniPath' # infiniband, omnipath, ethernet, etc
  io500_info_storage_interface='SAS' # SAS, SATA, NVMe, etc
  # miscellaneous
  io500_info_whatever='WhateverElseYouThinkRelevant'
}

main
rm -rf $io500_workdir
ior_easy_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Mon Oct 22 15:15:53 2018
Command line        : /work/xg17i000/x10007/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 2048k -b 4000m -F -o /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy/ior_file_easy -O stoneWallingStatusFile=/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy/stonewall
Machine             : Linux c0065.ofp
TestID              : 0
StartTime           : Mon Oct 22 15:15:53 2018
Path                : /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy
FS                  : 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 10.1%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 8192
clients per node    : 32
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 3.91 GiB
aggregate filesize  : 31.25 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      124693     4096000    2048.00    0.095161   262.65     0.043696   262.79     0   
Max Read:  124693.46 MiB/sec (130750.57 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read       124693.46  124693.46  124693.46       0.00   62346.73   62346.73   62346.73       0.00  262.78845     0   8192  32    1   1     1        1         0    0      1 4194304000  2097152 32768000.0 POSIX      0
Finished            : Mon Oct 22 15:20:16 2018
ior_easy_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Mon Oct 22 14:53:07 2018
Command line        : /work/xg17i000/x10007/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 2048k -b 4000m -F -o /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy/ior_file_easy -O stoneWallingStatusFile=/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy/stonewall -O stoneWallingWearOut=1 -D 0
Machine             : Linux c0065.ofp
TestID              : 0
StartTime           : Mon Oct 22 14:53:07 2018
Path                : /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy
FS                  : 23597.0 TiB   Used FS: 21.0%   Inodes: 12312.0 Mi   Used Inodes: 9.8%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_easy/ior_file_easy
access              : file-per-process
type                : independent
segments            : 1
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 8192
clients per node    : 32
repetitions         : 1
xfersize            : 2 MiB
blocksize           : 3.91 GiB
aggregate filesize  : 31.25 TiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 2000 max: 2000 -- min data: 3.9 GiB mean data: 3.9 GiB time: 329.1s
write     99408      4096000    2048.00    0.152272   329.46     0.019807   329.63     0   
Max Write: 99407.84 MiB/sec (104236.68 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write       99407.84   99407.84   99407.84       0.00   49703.92   49703.92   49703.92       0.00  329.63195     0   8192  32    1   1     1        1         0    0      1 4194304000  2097152 32768000.0 POSIX      0
Finished            : Mon Oct 22 14:58:37 2018
ior_hard_read
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Mon Oct 22 15:22:49 2018
Command line        : /work/xg17i000/x10007/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 2300 -o /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard/IOR_file -O stoneWallingStatusFile=/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard/stonewall
Machine             : Linux c0065.ofp
TestID              : 0
StartTime           : Mon Oct 22 15:22:49 2018
Path                : /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard
FS                  : 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 10.1%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 2300
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 8192
clients per node    : 32
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 824.88 GiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      6949       45.91      45.91      0.021518   121.52     0.004744   121.55     0   
Max Read:  6949.15 MiB/sec (7286.71 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
read         6949.15    6949.15    6949.15       0.00  155009.94  155009.94  155009.94       0.00  121.55091     0   8192  32    1   0     1        1         0    0   2300    47008    47008  844675.0 POSIX      0
Finished            : Mon Oct 22 15:24:50 2018
ior_hard_write
IOR-3.2.0: MPI Coordinated Test of Parallel I/O
Began               : Mon Oct 22 15:04:46 2018
Command line        : /work/xg17i000/x10007/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 2300 -o /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard/IOR_file -O stoneWallingStatusFile=/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard/stonewall -O stoneWallingWearOut=1 -D 0
Machine             : Linux c0065.ofp
TestID              : 0
StartTime           : Mon Oct 22 15:04:46 2018
Path                : /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard
FS                  : 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 10.0%

Options: 
api                 : POSIX
apiVersion          : 
test filename       : /work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/ior_hard/IOR_file
access              : single-shared-file
type                : independent
segments            : 2300
ordering in a file  : sequential
ordering inter file : constant task offset
task offset         : 1
tasks               : 8192
clients per node    : 32
repetitions         : 1
xfersize            : 47008 bytes
blocksize           : 47008 bytes
aggregate filesize  : 824.88 GiB

Results: 

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
stonewalling pairs accessed min: 2300 max: 2300 -- min data: 0.1 GiB mean data: 0.1 GiB time: 313.6s
write     2686.32    45.91      45.91      0.334908   314.10     0.002541   314.44     0   
Max Write: 2686.32 MiB/sec (2816.82 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt   blksiz    xsize aggs(MiB)   API RefNum
write        2686.32    2686.32    2686.32       0.00   59922.05   59922.05   59922.05       0.00  314.43516     0   8192  32    1   0     1        1         0    0   2300    47008    47008  844675.0 POSIX      0
Finished            : Mon Oct 22 15:10:01 2018
mdtest_easy_delete
-- started at 10/22/2018 15:27:11 --

mdtest-1.9.3 was launched with 8192 total task(s) on 256 node(s)
Command line used: /work/xg17i000/x10007/io-500-dev/bin/mdtest "-r" "-F" "-d" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_easy" "-n" "2500" "-u" "-L" "-x" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_easy-stonewall"
Path: /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58
FS: 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 10.1%

8192 tasks, 20480000 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      37231.878      37231.878      37231.878          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.314          0.314          0.314          0.000

-- finished at 10/22/2018 15:36:28 --
mdtest_easy_stat
-- started at 10/22/2018 15:20:25 --

mdtest-1.9.3 was launched with 8192 total task(s) on 256 node(s)
Command line used: /work/xg17i000/x10007/io-500-dev/bin/mdtest "-T" "-F" "-d" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_easy" "-n" "2500" "-u" "-L" "-x" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_easy-stonewall"
Path: /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58
FS: 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 10.1%

8192 tasks, 20480000 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :     152553.498     152553.498     152553.498          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/22/2018 15:22:40 --
mdtest_easy_write
-- started at 10/22/2018 14:58:46 --

mdtest-1.9.3 was launched with 8192 total task(s) on 256 node(s)
Command line used: /work/xg17i000/x10007/io-500-dev/bin/mdtest "-C" "-F" "-d" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_easy" "-n" "2500" "-u" "-L" "-x" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_easy-stonewall" "-W" "0"
Path: /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58
FS: 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 9.8%

8192 tasks, 20480000 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      58458.357      58458.357      58458.357          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          5.018          5.018          5.018          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/22/2018 15:04:37 --
mdtest_hard_delete
-- started at 10/22/2018 15:39:27 --

mdtest-1.9.3 was launched with 8192 total task(s) on 256 node(s)
Command line used: /work/xg17i000/x10007/io-500-dev/bin/mdtest "-r" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard" "-n" "1600" "-x" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard-stonewall" "-I" "800"
Path: /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58
FS: 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 9.9%

8192 tasks, 13107200 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :      43987.741      43987.741      43987.741          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          1.757          1.757          1.757          0.000

-- finished at 10/22/2018 15:44:26 --
mdtest_hard_read
-- started at 10/22/2018 15:36:38 --

mdtest-1.9.3 was launched with 8192 total task(s) on 256 node(s)
Command line used: /work/xg17i000/x10007/io-500-dev/bin/mdtest "-E" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard" "-n" "1600" "-x" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard-stonewall" "-I" "800"
Path: /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58
FS: 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 9.9%

8192 tasks, 13107200 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      81699.002      81699.002      81699.002          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/22/2018 15:39:18 --
mdtest_hard_stat
-- started at 10/22/2018 15:25:00 --

mdtest-1.9.3 was launched with 8192 total task(s) on 256 node(s)
Command line used: /work/xg17i000/x10007/io-500-dev/bin/mdtest "-T" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard" "-n" "1600" "-x" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard-stonewall" "-I" "800"
Path: /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58
FS: 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 10.1%

8192 tasks, 13107200 files
WARNING: could not read stonewall status file

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :     107715.271     107715.271     107715.271          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/22/2018 15:27:01 --
mdtest_hard_write
-- started at 10/22/2018 15:10:10 --

mdtest-1.9.3 was launched with 8192 total task(s) on 256 node(s)
Command line used: /work/xg17i000/x10007/io-500-dev/bin/mdtest "-C" "-t" "-F" "-w" "3901" "-e" "3901" "-d" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard" "-n" "1600" "-x" "/work/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58/mdt_hard-stonewall" "-I" "800" "-W" "0"
Path: /work/1/xg17i000/x10007/io-500-dev/datafiles/io500.2018.10.22-14.52.58
FS: 23597.0 TiB   Used FS: 21.2%   Inodes: 12312.0 Mi   Used Inodes: 10.0%

8192 tasks, 13107200 files

SUMMARY rate: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      44604.193      44604.193      44604.193          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :        281.308        281.308        281.308          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 10/22/2018 15:15:04 --
result_summary
[RESULT] BW   phase 1            ior_easy_write               97.078 GB/s : time 329.63 seconds
[RESULT] IOPS phase 1         mdtest_easy_write               58.458 kiops : time 360.00 seconds
[RESULT] BW   phase 2            ior_hard_write                2.623 GB/s : time 314.44 seconds
[RESULT] IOPS phase 2         mdtest_hard_write               44.604 kiops : time 303.16 seconds
[RESULT] IOPS phase 3                      find              849.180 kiops : time  39.56 seconds
[RESULT] BW   phase 3             ior_easy_read              121.771 GB/s : time 262.79 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat              152.553 kiops : time 143.41 seconds
[RESULT] BW   phase 4             ior_hard_read                6.786 GB/s : time 121.55 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat              107.715 kiops : time 130.98 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete               37.232 kiops : time 566.68 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               81.699 kiops : time 169.91 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete               43.988 kiops : time 307.85 seconds
[SCORE] Bandwidth 21.4184 GB/s : IOPS 91.3951 kiops : TOTAL 44.2441
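
The overall score is the geometric mean of the bandwidth and IOPS scores
(each of which is in turn the geometric mean of its phase results). A quick
check of the arithmetic, as a small awk sketch using the [SCORE] figures
above:

awk 'BEGIN {
  bw   = 21.4184   # [SCORE] Bandwidth, GB/s
  iops = 91.3951   # [SCORE] IOPS, kiops
  printf "TOTAL = sqrt(%.4f * %.4f) = %.4f\n", bw, iops, sqrt(bw * iops)
}'
# prints: TOTAL = sqrt(21.4184 * 91.3951) = 44.2441, matching the [SCORE] line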