# Invocation command line: # /lfs/lfs17/mknyazev/spec/hpc2021-1.1.9/bin/harness/runhpc -I --reportable --config=PVC_tiny_2C.cfg --label=build_hedp025 --iterations=3 --size=ref --ranks=4 --define ppn=4 --threads=1 --tune=base,peak tiny --rebuild # output_root was not used for this run ############################################################################ # Invocation command line: # /lfs/lfs17/mknyazev/spec/hpc2021-1.1.9/bin/harness/runhpc -I --reportable --config=PVC_tiny_4C.cfg --label=build_hedp032 --iterations=3 --size=ref --ranks=8 --threads=1 --tune=base,peak tiny --rebuild # output_root was not used for this run ############################################################################ # Invocation command line: # /lfs/lfs17/mknyazev/spec/report/bin/harness/runhpc -I --reportable --config=PVC_tiny_1C.cfg --iterations=3 --size=ref --ranks=2 --threads=1 --tune=peak tiny --rebuild # output_root was not used for this run ############################################################################ Output_format = txt env_vars = 1 strict_rundir_verify = 1 backup_config = 0 allow_label_override = yes ###################################################### # SUT Section ###################################################### #include: Example_SUT.inc # General SUT info system_vendor = Intel hw_vendor_list = Intel hw_total_accel = 2 hw_cpu_name_list = Intel Xeon Platinum 8480+ hw_accel_vendor_list = Intel hw_accel_model_list = Intel Data Center GPU Max 1550 system_name000 = Hatch: Intel Server D50DNP1SB (Xeon Platinum system_name001 = 8480+) system_class = Homogenous Cluster hw_avail = Jan-2023 sw_avail = Mar-2025 prepared_by = Alexander Bobyr # Computation node info # [Node_Description: Hardware] node_compute_syslbl = Intel Server D50DNP1SB (Xeon Platinum 8480+) node_compute_order = 1 node_compute_count = 1 node_compute_purpose = Compute node_compute_hw_vendor = Intel node_compute_hw_model000= Intel Server D50DNP1SB (2 x Intel Xeon node_compute_hw_model001 = 
Platinum 8480+, 2.0GHz) node_compute_hw_cpu_name = Intel Xeon Platinum 8480+ node_compute_hw_ncpuorder = 1, 2 chips node_compute_hw_nchips = 2 node_compute_hw_ncores = 112 node_compute_hw_ncoresperchip = 56 node_compute_hw_nthreadspercore = 2 node_compute_hw_cpu_char = Turbo Boost Technology up to 3.8 GHz node_compute_hw_cpu_mhz = 2000 node_compute_hw_pcache = 32 KB I + 48 KB D on chip per core node_compute_hw_scache = 2 MB I+D on chip per core node_compute_hw_tcache = 105 MB I+D on chip per chip node_compute_hw_ocache = None node_compute_hw_memory = 1 TB (16x64 GB DDR5 2Rx4 PC5-4800B-R) node_compute_hw_disk = 1 x 1 TB NVMe M.2 INTEL SSDPELKX010T8 node_compute_hw_other = None #[Node_Description: Accelerator] node_compute_hw_accel_count = 2 node_compute_hw_accel_vendor = Intel node_compute_hw_accel_type = GPU node_compute_hw_accel_connect = PCIe Gen5 x16 node_compute_hw_accel_model = Intel Data Center GPU Max 1550 node_compute_hw_accel_ecc = yes node_compute_hw_accel_desc = Intel Data Center GPU Max 1550 #[Node_Description: Software] node_compute_hw_adapter_fs_model = Mellanox ConnectX-6 HDR node_compute_hw_adapter_fs_count = 1 node_compute_hw_adapter_fs_slot_type = PCI-Express 4.0 x16 node_compute_hw_adapter_fs_data_rate = 200Gbit/s node_compute_hw_adapter_fs_ports_used = 1 node_compute_hw_adapter_fs_interconnect = Mellanox HDR node_compute_hw_adapter_fs_firmware = 20.38.1900 node_compute_sw_os000 = SUSE Linux Enterprise Server 15 SP6 node_compute_sw_os001 = 6.4.0-150600.23.42-default node_compute_sw_localfile = lustre node_compute_sw_sharedfile = LUSTRE FS node_compute_sw_state = Run level 5 node_compute_sw_other = None node_compute_sw_accel_driver = 25.05.32567 #[Fileserver] #[Interconnect] interconnect_fs_syslbl = Mellanox HDR interconnect_fs_order = 0 interconnect_fs_purpose = MPI Traffic, LustreFS traffic interconnect_fs_hw_vendor = Mellanox interconnect_fs_hw_model = Mellanox HDR interconnect_fs_hw_switch_fs_model000= Mellanox Technologies MT28908 Family 
interconnect_fs_hw_switch_fs_model001 = InfiniBand Switch interconnect_fs_hw_switch_fs_count = 12 interconnect_fs_hw_switch_fs_ports = 40 interconnect_fs_hw_topo = Fat-tree interconnect_fs_hw_switch_fs_data_rate = 200 Gbit/s interconnect_fs_hw_switch_fs_firmware = 20.38.1900 ####################################################################### # End of SUT section ###################################################################### AR = ar ARFLAGS = cr CXX = mpiicpc -cxx=icpx CC = mpiicc -cc=icx FC = mpiifort -fc=ifx sw_other = None sw_compiler = Intel oneAPI Compiler 2025.1.0 sw_mpi_library = Intel MPI Library 2021.15 for Linux OS sw_mpi_other = None CC_VERSION_OPTION = --version CXX_VERSION_OPTION = --version FC_VERSION_OPTION = --version submit = mpiexec -bootstrap ssh -np $ranks -ppn %{ppn} $command ENV_LIBOMPTARGET_LEVEL_ZERO_MEMORY_POOL=device,1,4,256 OPTIMIZE = -O3 -xCORE-AVX512 -flto -mprefer-vector-width=512 -ffast-math -fiopenmp -fopenmp-targets=spir64_gen -ftarget-register-alloc-mode=pvc:auto -Xopenmp-target-backend '-device pvc -revision_id 0x2f' -DSPEC_COLLAPSE -DSPEC_ACCEL_AWARE_MPI FOPTIMIZE = -fopenmp-target-loopopt COPTIMIZE = -fopenmp-optimistic-collapse PORTABILITY = -DSPEC -DNDEBUG -DUSE_MPI pmodel=TGT 513.soma_t: PORTABILITY+=-DSPEC_NO_VAR_ARRAY_REDUCE 528.pot3d_t: PORTABILITY+=-Wno-incompatible-function-pointer-types 505.lbm_t=peak: OPTIMIZE = -O3 -xCORE-AVX512 -flto -mprefer-vector-width=512 -ffast-math -fiopenmp -fopenmp-targets=spir64_gen -ftarget-register-alloc-mode=pvc:large -Xopenmp-target-backend '-device pvc -revision_id 0x2f' -DSPEC_COLLAPSE -DSPEC_ACCEL_AWARE_MPI -fopenmp-optimistic-collapse 513.soma_t=peak: basepeak=1 518.tealeaf_t=peak: srcalt = IntelDataCenterGPUMax1550_tiny_2C 519.clvleaf_t=peak: srcalt = IntelDataCenterGPUMax1550_tiny_2C 521.miniswp_t=peak: srcalt = IntelDataCenterGPUMax1550_tiny_2C 528.pot3d_t=peak: srcalt = IntelDataCenterGPUMax1550_tiny_2C 532.sph_exa_t=peak: 
ENV_LIBOMPTARGET_LEVEL_ZERO_MEMORY_POOL=device,8192,6,49152 srcalt = IntelDataCenterGPUMax1550_tiny_2C 534.hpgmgfv_t=peak: srcalt = IntelDataCenterGPUMax1550_tiny_2C 535.weather_t=peak: srcalt = IntelDataCenterGPUMax1550_tiny_2C # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: test_sponsor = Intel license_num = 13 showtimer = 0 tester = Intel notes_000 =Environment variables set by runhpc before the start of the run: notes_005 =LIBOMPTARGET_LEVEL_ZERO_USE_IMMEDIATE_COMMAND_LIST = "all" notes_010 =I_MPI_FABRICS=shm:ofi notes_015 =I_MPI_OFFLOAD=1 notes_020 =I_MPI_OFFLOAD_CELL=tile notes_025 =I_MPI_OFFLOAD_TOPOLIB=level_zero notes_030 =I_MPI_OFFLOAD_CELL_LIST=0,1,2,3,4,5,6,7 notes_035 =For the following tests src.alt was used in PEAK: notes_040 =518 519 521 528 532 534 535 notes_plat_000 = Device Vendor Intel notes_plat_005 = Device Version OpenCL 3.0 NEO notes_plat_010 = Driver Version 25.05.32567 notes_plat_015 = Base clock 900MHz notes_plat_020 = Max clock frequency 1600MHz notes_plat_025 = Tiles 2 notes_plat_030 = Slices per Tile 1 notes_plat_035 = Max compute units per Tile 512 notes_plat_040 = Sub-slices per slice 64 notes_plat_045 = EUs per sub-slice 8 notes_plat_050 = Threads per EU 8 notes_plat_055 = Max work item dimensions 3 notes_plat_060 = Max work item sizes 1024x1024x1024 notes_plat_065 = Max work group size 1024 notes_plat_070 = Preferred work group size multiple 32 notes_plat_075 = Max sub-groups per work group 64 notes_plat_080 = Sub-group sizes 16, 32 notes_plat_085 = L1 Cache per EU 65536 notes_plat_090 = L2 cache size 427819008 notes_plat_095 = Global memory size 137438953472 notes_plat_100 = Address bits 64, Little-Endian # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. 
default: flagsurl000 = http://www.spec.org/hpc2021/flags/Intel_compiler_flags.2025-05-22.00.xml hw_model_list000 = Intel Server D50DNP1SB (2 x Intel Xeon Platinum hw_model_list001 = 8480+, 2.0GHz) sw_os_list000 = SUSE Linux Enterprise Server 15 sw_os_list001 = SP6 6.4.0-150600.23.42-default