Mistral

Institution DKRZ
Client Procs Per Node
Client Operating System
Client Operating System Version
Client Kernel Version

DATA SERVER

Storage Type
Volatile Memory
Storage Interface
Network
Software Version
OS Version

INFORMATION

Client Nodes 100
Client Total Procs 1,200
Metadata Nodes 0
Metadata Storage Devices 0
Data Nodes 0
Data Storage Devices 0

METADATA

Easy Write 19.43 kIOP/s
Easy Stat 165.75 kIOP/s
Easy Delete 7.16 kIOP/s
Hard Write 18.94 kIOP/s
Hard Read 42.91 kIOP/s
Hard Stat 162.29 kIOP/s
Hard Delete 8.10 kIOP/s

Submitted Files

io500
#!/bin/bash
#
# INSTRUCTIONS:
# Edit this file as needed for your machine.
# This simplified version is just for running on a single node.
# It is a simplified version of the site-configs/sandia/startup.sh which includes SLURM directives.
# Most of the variables set in here are needed for io500_fixed.sh which gets sourced at the end of this.
# Please also edit 'extra_description' function.

set -euo pipefail  # better error handling: abort on errors, unset vars, and pipeline failures

# turn these to True successively while you debug and tune this benchmark.
# for each one that you turn to true, go and edit the appropriate function.
# to find the function name, see the 'main' function.
# These are listed in the order that they run.
# NOTE: these are string flags ("True"/"False") consumed by io500_fixed.sh, not shell booleans.
io500_run_ior_easy="True" # does the write phase and enables the subsequent read
io500_run_md_easy="True"  # does the creat phase and enables the subsequent stat
io500_run_ior_hard="True" # does the write phase and enables the subsequent read
io500_run_md_hard="True"  # does the creat phase and enables the subsequent read
io500_run_find="True"
io500_run_ior_easy_read="True"
io500_run_md_easy_stat="True"
io500_run_ior_hard_read="True"
io500_run_md_hard_stat="True"
io500_run_md_hard_read="True"
io500_run_md_easy_delete="True" # turn this off if you want to just run find by itself
io500_run_md_hard_delete="True" # turn this off if you want to just run find by itself
io500_run_mdreal="False"  # this one is optional
io500_cleanup_workdir="False"  # this flag is currently ignored. You'll need to clean up your data files manually if you want to.

# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
# to run this benchmark, find and edit each of these functions.
# please also edit 'extra_description' function to help us collect the required data.
# Runs all setup functions in dependency order, then sources the fixed
# benchmark driver. The order matters: setup_directories defines the
# work/result paths the later setup functions reference.
function main {
  setup_directories
  setup_paths
  setup_ior_easy # required if you want a complete score
  setup_ior_hard # required if you want a complete score
  setup_mdt_easy # required if you want a complete score
  setup_mdt_hard # required if you want a complete score
  setup_find     # required if you want a complete score
  setup_mdreal   # optional
  run_benchmarks
}

# Creates the benchmark working directory (data files) and the per-run
# results directory. This is also the place for stripe tuning or similar
# filesystem-specific preparation.
# Globals written: timestamp, io500_workdir, io500_result_dir
function setup_directories {
  timestamp=$(date +%Y.%m.%d-%H.%M.%S)          # create a uniquifier
  io500_workdir=/mnt/lustre02/ior-test/io500/datafiles/ # directory where the data will be stored
  io500_result_dir=$PWD/results/$timestamp      # the directory where the output results will be kept
  # ${var:?} aborts if the variable is empty/unset, guarding the rm -rf;
  # quoting protects against paths containing spaces.
  rm -rf -- "${io500_workdir:?}"
  mkdir -p "$io500_workdir" "$io500_result_dir"
  mkdir -p "$io500_workdir/ior_hard/"
  # Pre-create the shared IOR hard file with a wide Lustre stripe;
  # '|| true' keeps this best-effort on systems without lfs.
  lfs setstripe --stripe-count 124 "$io500_workdir/ior_hard/IOR_file" || true
  # Lustre 01
}

# Defines where the benchmark executables live (installed into ./bin/ by
# ./utilities/prepare.sh) and how MPI jobs are launched on this system.
function setup_paths {
  io500_ior_cmd="$PWD/bin/ior"
  io500_mdtest_cmd="$PWD/bin/mdtest"
  io500_mdreal_cmd="$PWD/bin/md-real-io"
  # SLURM launcher: 12 ranks per node, block distribution.
  io500_mpirun="srun"
  io500_mpiargs="--ntasks-per-node=12 --distribution=block"
}

# IOR "easy" parameters: MPIIO backend, 2 MiB transfers, 25 GiB block per
# process, one file per process (-F).
function setup_ior_easy {
  io500_ior_easy_params="-t 2048k -b 25g -F -a MPIIO"
}

# mdtest "easy" parameters: -u gives each task a unique directory,
# -L places files only at leaf level of the tree.
function setup_mdt_easy {
  io500_mdtest_easy_params="-u -L"
  io500_mdtest_easy_files_per_proc=10000
}

# IOR "hard" parameters: 12000 segments per process; the extra flags
# (-a MPIIO -E) are appended onto the driver's command line via this variable.
function setup_ior_hard {
  io500_ior_hard_writes_per_proc="12000 -a MPIIO -E"
}

# mdtest "hard" parameters: number of files each process creates in the
# shared directory.
function setup_mdt_hard {
  io500_mdtest_hard_files_per_proc=5000
}

function setup_find {
  # Configure the find phase. Innovation is allowed here: the defaults
  # shipped with io500 are a serial find (very slow), a parallel python
  # version, and a parallel C version (pfind). We use the C parallel pfind.
  # If a custom approach is used, please provide enough info so others can
  # reproduce it.

  # Active configuration: parallel C pfind with a 3-second stonewall,
  # writing its results below the result directory.
  io500_find_mpi="True"
  io500_find_cmd="$PWD/bin/pfind"
  io500_find_cmd_args="-s 3 -r $io500_result_dir/pfind_results"

  # About pfind (the parallel C version): -s adds a stonewall.
  #   For a real run, turn -s (stonewall) off or set it at 300 or more.
  #   To build it (assuming ./utilities/prepare.sh was already run):
  #     > cd build/pfind
  #     > ./prepare.sh
  #     > ./compile.sh
  #     > cp pfind ../../bin/
  #   With io500_find_mpi="True" it runs with the same number of MPI nodes
  #   and ranks as the other phases. If you prefer a different count (fewer
  #   might be better here), set io500_find_mpi="False" and point
  #   io500_find_cmd at a wrapper script that sets up MPI as you like.
  #io500_find_mpi="True"
  #io500_find_cmd="$PWD/bin/pfind"
  #io500_find_cmd_args="-s 3 -r $io500_result_dir/pfind_results"

  # For GPFS systems, you should probably use the provided mmfind wrapper;
  # if you used ./utilities/prepare.sh, it is in ./bin/mmfind.sh
  #io500_find_mpi="False"
  #io500_find_cmd="$PWD/bin/mmfind.sh"
  #io500_find_cmd_args=""
}

# md-real-io parameters for the optional mdreal phase.
function setup_mdreal {
  io500_mdreal_params="-P=5000 -I=1000"
}

# Runs the fixed benchmark driver and tees its output into the result dir.
# Reads globals: io500_result_dir, timestamp (set by setup_directories).
function run_benchmarks {
  # Important: source the io500_fixed.sh script.  Do not change it. If you discover
  # a need to change it, please email the mailing list to discuss.
  # The tee target is quoted so a result path containing spaces cannot be
  # word-split into multiple arguments.
  source ./utilities/io500_fixed.sh 2>&1 | tee "$io500_result_dir/io-500-summary.$timestamp.txt"
}

# Add key/value pairs defining your system
# Feel free to add extra ones if you'd like
# Add key/value pairs defining your system.
# Feel free to add extra ones if you'd like.
function extra_description {
  io500_info_system_name='btlogin1'  # e.g. Oakforest-PACS
  io500_info_institute_name='DKRZ'   # e.g. JCAHPC
  # Environment modules loaded for the run (compiler, fabric, MPI stack).
  io500_info_modules="module add intel mxm/3.4.3082 fca/2.5.2431 bullxmpi_mlx/bullxmpi_mlx-1.2.9.2"
}

main
ior_easy_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Sat May 12 17:15:36 2018
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 2048k -b 25g -F -a MPIIO -o /mnt/lustre02/ior-test/io500/datafiles//ior_easy/ior_file_easy
Machine: Linux m20036

Test 0 started: Sat May 12 17:15:36 2018
Summary:
	api                = MPIIO (version=2, subversion=1)
	test filename      = /mnt/lustre02/ior-test/io500/datafiles//ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 1200 (12 per node)
	repetitions        = 1
	xfersize           = 2 MiB
	blocksize          = 25 GiB
	aggregate filesize = 30000 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      173225     26214400   2048.00    0.150294   177.28     0.099269   177.34     0   

Max Read:  173225.18 MiB/sec (181639.77 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read       173225.18  173225.18  173225.18       0.00   86612.59   86612.59   86612.59       0.00  177.34142 0 1200 12 1 1 1 1 0 0 1 26843545600 2097152 32212254720000 MPIIO 0

Finished: Sat May 12 17:19:05 2018
ior_easy_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Sat May 12 16:47:27 2018
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 2048k -b 25g -F -a MPIIO -o /mnt/lustre02/ior-test/io500/datafiles//ior_easy/ior_file_easy
Machine: Linux m20036

Test 0 started: Sat May 12 16:47:27 2018
Summary:
	api                = MPIIO (version=2, subversion=1)
	test filename      = /mnt/lustre02/ior-test/io500/datafiles//ior_easy/ior_file_easy
	access             = file-per-process
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 1200 (12 per node)
	repetitions        = 1
	xfersize           = 2 MiB
	blocksize          = 25 GiB
	aggregate filesize = 30000 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     90594      26214400   2048.00    0.197124   338.82     0.190098   339.09     0   

Max Write: 90594.31 MiB/sec (94995.02 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write       90594.31   90594.31   90594.31       0.00   45297.16   45297.16   45297.16       0.00  339.09414 0 1200 12 1 1 1 1 0 0 1 26843545600 2097152 32212254720000 MPIIO 0

Finished: Sat May 12 16:53:09 2018
ior_hard_read
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Sat May 12 17:20:38 2018
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/ior -r -R -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 12000 -a MPIIO -E -o /mnt/lustre02/ior-test/io500/datafiles//ior_hard/IOR_file
Machine: Linux m20036

Test 0 started: Sat May 12 17:20:38 2018
Summary:
	api                = MPIIO (version=2, subversion=1)
	test filename      = /mnt/lustre02/ior-test/io500/datafiles//ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 1200 (12 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 630.43 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
read      6441       45.91      45.91      15.36      84.95      0.077516   100.23     0   

Max Read:  6440.83 MiB/sec (6753.70 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
read         6440.83    6440.83    6440.83       0.00  143671.28  143671.28  143671.28       0.00  100.22880 0 1200 12 1 0 1 1 0 0 12000 47008 47008 676915200000 MPIIO 0

Finished: Sat May 12 17:22:18 2018
ior_hard_write
IOR-3.1.0: MPI Coordinated Test of Parallel I/O

ior WARNING: fsync() only available in POSIX/MMAP.  Using value of 0.
Began: Sat May 12 17:03:48 2018
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/ior -w -C -Q 1 -g -G 27 -k -e -t 47008 -b 47008 -s 12000 -a MPIIO -E -o /mnt/lustre02/ior-test/io500/datafiles//ior_hard/IOR_file
Machine: Linux m20036

Test 0 started: Sat May 12 17:03:48 2018
Summary:
	api                = MPIIO (version=2, subversion=1)
	test filename      = /mnt/lustre02/ior-test/io500/datafiles//ior_hard/IOR_file
	access             = single-shared-file
	ordering in a file = sequential offsets
	ordering inter file= constant task offsets = 1
	clients            = 1200 (12 per node)
	repetitions        = 1
	xfersize           = 47008 bytes
	blocksize          = 47008 bytes
	aggregate filesize = 630.43 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
write     1865.22    45.91      45.91      4.73       341.43     0.086271   346.10     0   

Max Write: 1865.22 MiB/sec (1955.83 MB/sec)

Summary of all tests:
Operation   Max(MiB)   Min(MiB)  Mean(MiB)     StdDev   Max(OPs)   Min(OPs)  Mean(OPs)     StdDev    Mean(s) Test# #Tasks tPN reps fPP reord reordoff reordrand seed segcnt blksiz xsize aggsize API RefNum
write        1865.22    1865.22    1865.22       0.00   41606.26   41606.26   41606.26       0.00  346.10178 0 1200 12 1 0 1 1 0 0 12000 47008 47008 676915200000 MPIIO 0

Finished: Sat May 12 17:09:34 2018
mdtest_easy_delete
-- started at 05/12/2018 17:23:15 --

mdtest-1.9.3 was launched with 1200 total task(s) on 100 node(s)
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/mdtest -r -F -d /mnt/lustre02/ior-test/io500/datafiles//mdt_easy -n 10000 -u -L
Path: /mnt/lustre02/ior-test/io500/datafiles
FS: 33418.8 TiB   Used FS: 58.4%   Inodes: 8102.5 Mi   Used Inodes: 3.4%

1200 tasks, 12000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :       7156.673       7156.673       7156.673          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          3.446          3.446          3.446          0.000

-- finished at 05/12/2018 17:51:13 --
mdtest_easy_stat
-- started at 05/12/2018 17:19:16 --

mdtest-1.9.3 was launched with 1200 total task(s) on 100 node(s)
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/mdtest -T -F -d /mnt/lustre02/ior-test/io500/datafiles//mdt_easy -n 10000 -u -L
Path: /mnt/lustre02/ior-test/io500/datafiles
FS: 33418.8 TiB   Used FS: 58.4%   Inodes: 8102.5 Mi   Used Inodes: 3.4%

1200 tasks, 12000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :     165750.733     165750.733     165750.733          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/12/2018 17:20:29 --
mdtest_easy_write
-- started at 05/12/2018 16:53:21 --

mdtest-1.9.3 was launched with 1200 total task(s) on 100 node(s)
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/mdtest -C -F -d /mnt/lustre02/ior-test/io500/datafiles//mdt_easy -n 10000 -u -L
Path: /mnt/lustre02/ior-test/io500/datafiles
FS: 33418.8 TiB   Used FS: 58.4%   Inodes: 8102.5 Mi   Used Inodes: 3.2%

1200 tasks, 12000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      19426.092      19426.092      19426.092          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :         39.303         39.303         39.303          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/12/2018 17:03:39 --
mdtest_hard_delete
-- started at 05/12/2018 17:53:56 --

mdtest-1.9.3 was launched with 1200 total task(s) on 100 node(s)
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/mdtest -r -t -F -w 3901 -e 3901 -d /mnt/lustre02/ior-test/io500/datafiles//mdt_hard -n 5000
Path: /mnt/lustre02/ior-test/io500/datafiles
FS: 33418.8 TiB   Used FS: 58.4%   Inodes: 8102.5 Mi   Used Inodes: 3.3%

1200 tasks, 6000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :       8101.613       8101.613       8101.613          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          5.768          5.768          5.768          0.000

-- finished at 05/12/2018 18:06:18 --
mdtest_hard_read
-- started at 05/12/2018 17:51:24 --

mdtest-1.9.3 was launched with 1200 total task(s) on 100 node(s)
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/mdtest -E -t -F -w 3901 -e 3901 -d /mnt/lustre02/ior-test/io500/datafiles//mdt_hard -n 5000
Path: /mnt/lustre02/ior-test/io500/datafiles
FS: 33418.8 TiB   Used FS: 58.4%   Inodes: 8102.5 Mi   Used Inodes: 3.3%

1200 tasks, 6000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :      42908.426      42908.426      42908.426          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/12/2018 17:53:44 --
mdtest_hard_stat
-- started at 05/12/2018 17:22:29 --

mdtest-1.9.3 was launched with 1200 total task(s) on 100 node(s)
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/mdtest -T -t -F -w 3901 -e 3901 -d /mnt/lustre02/ior-test/io500/datafiles//mdt_hard -n 5000
Path: /mnt/lustre02/ior-test/io500/datafiles
FS: 33418.8 TiB   Used FS: 58.4%   Inodes: 8102.5 Mi   Used Inodes: 3.4%

1200 tasks, 6000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :          0.000          0.000          0.000          0.000
   File stat         :     162290.385     162290.385     162290.385          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :          0.000          0.000          0.000          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/12/2018 17:23:06 --
mdtest_hard_write
-- started at 05/12/2018 17:09:45 --

mdtest-1.9.3 was launched with 1200 total task(s) on 100 node(s)
Command line used: /home/dkrz/k202079/work/io-500/io-500-dev/bin/mdtest -C -t -F -w 3901 -e 3901 -d /mnt/lustre02/ior-test/io500/datafiles//mdt_hard -n 5000
Path: /mnt/lustre02/ior-test/io500/datafiles
FS: 33418.8 TiB   Used FS: 58.4%   Inodes: 8102.5 Mi   Used Inodes: 3.3%

1200 tasks, 6000000 files

SUMMARY: (of 1 iterations)
   Operation                      Max            Min           Mean        Std Dev
   ---------                      ---            ---           ----        -------
   File creation     :      18940.347      18940.347      18940.347          0.000
   File stat         :          0.000          0.000          0.000          0.000
   File read         :          0.000          0.000          0.000          0.000
   File removal      :          0.000          0.000          0.000          0.000
   Tree creation     :       2457.120       2457.120       2457.120          0.000
   Tree removal      :          0.000          0.000          0.000          0.000

-- finished at 05/12/2018 17:15:02 --
result_summary
[RESULT] BW   phase 1            ior_easy_write               88.471 GB/s : time 339.09 seconds
[RESULT] IOPS phase 1         mdtest_easy_write               19.426 kiops : time 630.13 seconds
[RESULT] BW   phase 2            ior_hard_write                1.821 GB/s : time 346.10 seconds
[RESULT] IOPS phase 2         mdtest_hard_write               18.940 kiops : time 327.97 seconds
[RESULT] IOPS phase 3                      find              415.320 kiops : time  25.59 seconds
[RESULT] BW   phase 3             ior_easy_read              169.165 GB/s : time 177.34 seconds
[RESULT] IOPS phase 4          mdtest_easy_stat              165.751 kiops : time  84.43 seconds
[RESULT] BW   phase 4             ior_hard_read                6.290 GB/s : time 100.23 seconds
[RESULT] IOPS phase 5          mdtest_hard_stat              162.290 kiops : time  48.56 seconds
[RESULT] IOPS phase 6        mdtest_easy_delete                7.157 kiops : time 1686.18 seconds
[RESULT] IOPS phase 7          mdtest_hard_read               42.908 kiops : time 151.85 seconds
[RESULT] IOPS phase 8        mdtest_hard_delete                8.102 kiops : time 753.03 seconds
[SCORE] Bandwidth 20.3492 GB/s : IOPS 42.2879 kiops : TOTAL 29.3347