# Invocation command line:
# /home/tta/hpc2021/bin/harness/runhpc --config=/home/tta/hpc2021/config/rb128.cfg --reportable --ranks 6 --threads 44 --tune=base tiny
# output_root was not used for this run
############################################################################
# Invocation command line:
# /home/tta/hpc2021/bin/harness/runhpc --config=rb128.cfg --reportable --tune=base -ranks 6 --threads 44 tiny
# output_root was not used for this run
############################################################################
######################################################################
# Example configuration file for the GNU Compilers
#
# Defines: "model" => "mpi", "acc", "omp", "tgt", "tgtnv"  default "mpi"
#          "label" => ext base label, default "nv"
#
# MPI-only Command:
# runhpc -c Example_gnu --reportable -T base --define model=mpi --ranks=40 small
#
# OpenACC Command:
# runhpc -c Example_gnu --reportable -T base --define model=acc --ranks=4 small
#
# OpenMP Command:
# runhpc -c Example_gnu --reportable -T base --define model=omp --ranks=1 --threads=40 small
#
# OpenMP Target Offload to Host Command:
# runhpc -c Example_gnu --reportable -T base --define model=tgt --ranks=1 --threads=40 small
#
# OpenMP Target Offload to NVIDIA GPU Command:
# runhpc -c Example_gnu --reportable -T base --define model=tgtnv --ranks=4 small
#
#######################################################################

%ifndef %{label}        # If label is not set, use gcc11_skylake
%   define label gcc11_skylake
%endif

%ifndef %{model}        # If model is not set, use omp
%   define model omp
%endif

teeout = yes
makeflags = -j 80

# Tester Information
license_num  = 068A
showtimer    = 0

test_sponsor = Telecommunications Technology Association
tester       = Telecommunications Technology Association
prepared_by  = Chi-Woong Kim (jwkim.81@tta.or.kr)

######################################################
# SUT Section
######################################################
#include: Example_SUT.inc
#  ----- Begin inclusion of 'Example_SUT.inc'
############################################################################

######################################################
# Example configuration information for a
# system under test (SUT) Section
######################################################

# General SUT info
system_vendor = Uniwide Technologies
system_name   = Uniwide Technologies RB128 (Intel Xeon E5-2699 v4)

interconnect_fs_hw_switch_fs_model = E8013
interconnect_fs_syslbl = Gigabit Ethernet
interconnect_fs_purpose = MPI Traffic, NFS
interconnect_fs_order = 1
interconnect_fs_hw_vendor = Broadcom
interconnect_fs_hw_topo = Mesh
interconnect_fs_hw_switch_fs_ports = 140
interconnect_fs_hw_switch_fs_firmware = 2.4.6
interconnect_fs_hw_switch_fs_data_rate = 10Gb/sec
interconnect_fs_hw_switch_fs_count = 1
interconnect_fs_hw_switch_fs = UBIQUOSS INC. E8013
interconnect_fs_hw_model = BCM57840 NetXtreme II 10 Gigabit Ethernet
node_fileserver_hw_adapter_MISSING_ports_used = 0
node_fileserver_hw_adapter_MISSING_count = 0

hw_avail = May-2017
sw_avail = Jul-2021

# Computation node info
# [Node_Description: Hardware]
node_compute_syslbl = Compute Node
node_compute_order = 1
node_compute_count = 3
node_compute_purpose = compute
node_compute_hw_vendor = Uniwide Technologies
node_compute_hw_model = Uniwide Technologies RB128
node_compute_hw_cpu_name = Intel Xeon E5-2699 v4
node_compute_hw_ncpuorder = 1,2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 44
node_compute_hw_ncoresperchip = 22
node_compute_hw_nthreadspercore = 2
node_compute_hw_cpu_char = Intel Turbo Boost Technology up to 3.6 GHz
#node_compute_hw_cpu_char001 = Hyper-threading Off.
node_compute_hw_cpu_mhz = 2200
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 256 KB I+D on chip per core
node_compute_hw_tcache = 55 MB I+D on chip per chip
node_compute_hw_ocache = None
node_compute_hw_other = None
node_compute_hw_memory = 512 GB (16 x 32GB 2Rx4 PC4-2400T-R)
node_compute_hw_disk = 2 x 300GB SEAGATE ST300MM0048 SAS RAID1

#[Node_Description: Accelerator]
node_compute_hw_accel_model = --
node_compute_hw_accel_count = 0
node_compute_hw_accel_vendor = --
node_compute_hw_accel_type = --
node_compute_hw_accel_connect = --
node_compute_hw_accel_ecc = --
node_compute_hw_accel_desc = --

#[Node_Description: Software]
node_compute_hw_adapter_fs_model = BCM57840 NetXtreme II 10 Gigabit Ethernet
node_compute_hw_adapter_fs_count = 1
node_compute_hw_adapter_fs_slot_type = PCIe x4
node_compute_hw_adapter_fs_data_rate = 10Gb/sec
node_compute_hw_adapter_fs_ports_used = 1
node_compute_hw_adapter_fs_interconnect = Ethernet
node_compute_hw_adapter_fs_driver = bnx2x
node_compute_hw_adapter_fs_firmware = bc 7.8.79
node_compute_sw_os000 = CentOS Linux release 7.9.2009 (Core)
node_compute_sw_os001 = 3.10.0-1160.66.1.el7.x86_64
node_compute_sw_localfile = xfs
node_compute_sw_sharedfile = nfs
node_compute_sw_state = Multi-user, run level 3
node_compute_sw_other = None

#[Fileserver]
node_fileserver_syslbl = File server
node_fileserver_sw_state = Multi-User, run level 3
node_fileserver_sw_sharedfile = nfs
node_fileserver_sw_other = None
node_fileserver_sw_os000 = CentOS Linux release 7.9.2009 (Core)
node_fileserver_sw_os001 = 3.10.0-1160.66.1.el7.x86_64
node_fileserver_sw_localfile = xfs
node_fileserver_purpose = Fileserver
node_fileserver_order = 1
node_fileserver_hw_vendor = Uniwide Technologies
node_fileserver_hw_model = Uniwide Technologies RB128
node_fileserver_hw_cpu_name = Intel Xeon E5-2699 v4
node_fileserver_hw_ncpuorder = 1,2 chips
node_fileserver_hw_nchips = 2
node_fileserver_hw_ncores = 44
node_fileserver_hw_ncoresperchip = 22
node_fileserver_hw_nthreadspercore = 2
node_fileserver_hw_cpu_char = Intel Turbo Boost Technology up to 3.6 GHz
#node_fileserver_hw_cpu_char001 = Hyper-threading Off.
node_fileserver_hw_cpu_mhz = 2200
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_scache = 256 KB I+D on chip per core
node_fileserver_hw_tcache = 55 MB I+D on chip per chip
node_fileserver_hw_ocache = None
node_fileserver_hw_other = None
node_fileserver_hw_memory = 512 GB (16 x 32GB 2Rx4 PC4-2400T-R)
node_fileserver_hw_disk = 2 x 300GB SEAGATE ST300MM0048 SAS RAID1
node_fileserver_count = 1

#[Interconnect]
#interconnect_fs_syslbl = Infiniband (EDR)
#interconnect_fs_order = 1
#interconnect_fs_purpose = MPI Traffic, GPFS
#interconnect_fs_hw_vendor = Mellanox Technologies
#interconnect_fs_hw_model = Mellanox SB7790
#interconnect_fs_hw_switch_fs_model000 = 36 x EDR 100 Gb/s
#interconnect_fs_hw_switch_fs_count = 2
#interconnect_fs_hw_switch_fs_ports = 36
#interconnect_fs_hw_topo = Mesh (blocking factor: 8:1)
#interconnect_fs_hw_switch_fs_data_rate = 100 Gb/s
#interconnect_fs_hw_switch_fs_firmware = --

#######################################################################
# End of SUT section
# If this config file were to be applied to several SUTs, edits would
# be needed only ABOVE this point.
######################################################################
# ---- End inclusion of '/data/caar/spec/hpc2021-1.0.2/config/Example_SUT.inc'

#[Software]
sw_compiler000 = C/C++/Fortran: Version 11.2.0 of
sw_compiler001 = GNU Compilers
sw_mpi_library = OpenMPI Version 4.1.3
sw_mpi_other   = None
system_class   = Homogeneous Cluster
sw_other       = None

#[General notes]

#######################################################################
# End of SUT section
######################################################################

######################################################################
# The header section of the config file. Must appear
# before any instances of "section markers" (see below)
#
# ext  = how the binaries you generated will be identified
# tune = specify "base" or "peak" or "all"
label         = %{label}_%{model}
tune          = base
output_format = text
use_submit_for_speed = 1

# Compiler Settings
default:
CC  = mpicc
CXX = mpicxx
FC  = mpif90

# Compiler Version Flags
CC_VERSION_OPTION  = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION  = --version

# MPI options and binding environment, dependent upon the model being run
# Adjust to match your system
MPIRUN_OPTS = --bind-to socket -npersocket 1 --mca topo basic
submit = mpirun ${MPIRUN_OPTS} -np $ranks -hostfile /home/tta/hpc2021/hostfile $command

flagsurl000 = http://www.spec.org/hpc2021/flags/gcc.2021-10-28.xml
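
# Illustration only (not part of the reported configuration): with the ranks
# value used for this run (--ranks 6), the 'submit' option above expands to
# roughly the following MPI startup command; $command is replaced by the
# benchmark binary and its arguments at run time.
#
#   mpirun --bind-to socket -npersocket 1 --mca topo basic -np 6 \
#          -hostfile /home/tta/hpc2021/hostfile <benchmark binary and arguments>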

#######################################################################
# Optimization
# Note that SPEC baseline rules require that all uses of a given compiler
# use the same flags in the same order. See the SPEChpc Run Rules
# for more details:
#   http://www.spec.org/hpc2021/Docs/runrules.html
#
# OPTIMIZE    = flags applicable to all compilers
# FOPTIMIZE   = flags applicable to the Fortran compiler
# COPTIMIZE   = flags applicable to the C compiler
# CXXOPTIMIZE = flags applicable to the C++ compiler
#
# See your compiler manual for information on the flags available
# for your compiler.

# Compiler flags applied to all models
default=base=default:
COPTIMIZE   = -Ofast -march=native -lm    # use -mcpu=native for ARM
CXXOPTIMIZE = -Ofast -march=native -std=c++14
FOPTIMIZE   = -Ofast -march=native -ffree-line-length-none -fno-stack-protector
#FPPPORTABILITY += -DSPEC_USE_MPIFH -I${MPI_ROOT}/include/

# MPI-only flags
%if %{model} eq 'mpi'
pmodel = MPI
%endif

# OpenACC flags
%if %{model} eq 'acc'
pmodel = ACC
OPTIMIZE += -fopenacc -foffload=-lm
%endif

# OpenMP (CPU) flags
%if %{model} eq 'omp'
pmodel = OMP
OPTIMIZE += -fopenmp
%endif

# OpenMP targeting host flags
%if %{model} eq 'tgt'
pmodel = TGT
OPTIMIZE += -fopenmp
%endif

# OpenMP targeting NVIDIA GPU flags
%if %{model} eq 'tgtnv'
pmodel = TGT
OPTIMIZE += -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda
%endif

# No peak flags set, so make peak use the same flags as base
default=peak=default:
basepeak = 1

#528.pot3d_t=default=default:
#PORTABILITY += -DSPEC_NO_RECORDER

#######################################################################
# Portability
#######################################################################

notes_submit_000 = The config file option 'submit' was used.
notes_submit_005 = MPI startup command:
notes_submit_010 = mpirun --bind-to socket -npersocket 1 --mca topo basic -np $ranks -hostfile /home/tta/hpc2021/hostfile $command

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
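
# Illustration only (not part of the reported configuration): with the
# default model=omp defined earlier in this file, the conditional blocks
# above resolve so that base builds effectively use:
#
#   pmodel      = OMP
#   OPTIMIZE    = -fopenmp
#   COPTIMIZE   = -Ofast -march=native -lm
#   CXXOPTIMIZE = -Ofast -march=native -std=c++14
#   FOPTIMIZE   = -Ofast -march=native -ffree-line-length-none -fno-stack-protector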