[QAAS]
timestamp = isix06.benchmarkcenter.megware.com
app_name = llama.cpp
git_commit =
dataset_name = Llama-3.1-8B-Q8_0 PP=128 NPL=16
run_cmd = -m meta-llama-3.1-8b-instruct-Q8_0.gguf -t -b 2048 -ub 512 -npp 128 -ntg 0 -npl 16 -c 16384 --seed 0 --output-format jsonl
LANG = C/CXX

[REPORTS]
figure_of_merit_type = RATE
figure_of_merit_unit = tokens/s
compiler_default = aocc
multicompiler_report = qaas_compilers.csv
mpi_scaling = no
openmp_scaling = strong
scalability_report =
scalability_reference_line =

[SYSTEM]
machine = isix06.benchmarkcenter.megware.com
model_name = Intel(R) Xeon(R) 6972P
ISA = x86_64
architecture = GRANITE_RAPIDS
number_of_cpus = 384
number_of_cores = 192
number_of_sockets = 2
number_of_cores_per_socket = 96
number_of_numa_domains = 6
frequency_driver = intel_pstate
frequency_governor = Unknown frequency governor
scaling_max_frequency = Unknown scaling max frequency
scaling_min_frequency = Unknown scaling min frequency
advertized_frequency = unsupported
maximal_frequency = Unknown maximal frequency
huge_pages =
icx_version = 2025.1.0.20250317
gcc_version = 14.2.0
aocc_version = 5.0.0
mpi_provider = IntelMPI
mpi_version = 2021.11

[TIME]
initial_profile = 00H09M42S
build_binaries = 00H23M44S
multicompiler = 00H36M50S