Loop Id: 241 | Module: exec | Source: advec_cell_kernel.f90:83-248 [...] | Coverage: 3.38% |
---|
0x42f980 VCMPPD $0x1,%ZMM16,%ZMM13,%K1 |
0x42f987 VADDPD %ZMM28,%ZMM31,%ZMM31{%K1} |
0x42f98d VMULPD %ZMM29,%ZMM31,%ZMM3 |
0x42f993 VMOVUPD %ZMM3,(%RBX,%R13,8) [7] |
0x42f99a ADD $0x8,%R13 |
0x42f99e CMP %R12,%R13 |
0x42f9a1 JA 42fcc0 |
0x42f9a7 VMOVUPD (%R15,%R13,8),%ZMM29 [2] |
0x42f9ae VXORPD %XMM28,%XMM28,%XMM28 |
0x42f9b4 VCMPPD $0x1,%ZMM29,%ZMM28,%K1 |
0x42f9bb LEA -0x1(%R14),%EAX |
0x42f9bf VPBROADCASTD %ESI,%YMM3 |
0x42f9c5 VPBROADCASTD %EAX,%YMM3{%K1} |
0x42f9cb VPMOVSXDQ %YMM3,%ZMM3 |
0x42f9d1 VPSUBQ %ZMM5,%ZMM3,%ZMM31 |
0x42f9d7 VPXOR %XMM3,%XMM3,%XMM3 |
0x42f9db VPMULLQ %ZMM31,%ZMM0,%ZMM3 |
0x42f9e1 LEA (%RDX,%R13,1),%RCX |
0x42f9e5 VMOVQ %RCX,%XMM6 |
0x42f9ea VPSUBQ %XMM26,%XMM6,%XMM6 |
0x42f9f0 VPSLLQ $0x3,%XMM6,%XMM6 |
0x42f9f5 VPBROADCASTQ %XMM6,%ZMM6 |
0x42f9fb VPADDQ %ZMM8,%ZMM6,%ZMM16 |
0x42fa01 VPADDQ 0x380(%RSP),%ZMM16,%ZMM6 [3] |
0x42fa09 VPADDQ %ZMM3,%ZMM6,%ZMM3 |
0x42fa0f KXNORW %K0,%K0,%K2 |
0x42fa13 VXORPD %XMM30,%XMM30,%XMM30 |
0x42fa19 VGATHERQPD (,%ZMM3,1),%ZMM30{%K2} [6] |
0x42fa24 VMOVDQA64 %YMM24,%YMM3 |
0x42fa2a VPBROADCASTD %EAX,%YMM3{%K1} |
0x42fa30 VPMOVSXDQ %YMM3,%ZMM3 |
0x42fa36 VPSUBQ %ZMM5,%ZMM3,%ZMM3 |
0x42fa3c KXNORW %K0,%K0,%K2 |
0x42fa40 VPXOR %XMM6,%XMM6,%XMM6 |
0x42fa44 VGATHERQPD (%R11,%ZMM3,8),%ZMM6{%K2} [8] |
0x42fa4b VPXOR %XMM3,%XMM3,%XMM3 |
0x42fa4f VPMULLQ %ZMM31,%ZMM1,%ZMM3 |
0x42fa55 VPADDQ 0x340(%RSP),%ZMM16,%ZMM7 [3] |
0x42fa5d VPADDQ %ZMM3,%ZMM7,%ZMM18 |
0x42fa63 KXNORW %K0,%K0,%K2 |
0x42fa67 VPXOR %XMM3,%XMM3,%XMM3 |
0x42fa6b VGATHERQPD (,%ZMM18,1),%ZMM3{%K2} [4] |
0x42fa76 LEA -0x2(%R14),%ECX |
0x42fa7a VMOVDQA64 %YMM24,%YMM18 |
0x42fa80 VPBROADCASTD %ECX,%YMM18{%K1} |
0x42fa86 VPBROADCASTD %EAX,%YMM19 |
0x42fa8c VPBROADCASTD %ESI,%YMM19{%K1} |
0x42fa92 VPMOVSXDQ %YMM18,%ZMM18 |
0x42fa98 VPSUBQ %ZMM5,%ZMM18,%ZMM18 |
0x42fa9e VPMULLQ %ZMM18,%ZMM1,%ZMM21 |
0x42faa4 VPADDQ %ZMM21,%ZMM7,%ZMM21 |
0x42faaa KXNORW %K0,%K0,%K1 |
0x42faae VXORPD %XMM22,%XMM22,%XMM22 |
0x42fab4 VGATHERQPD (,%ZMM21,1),%ZMM22{%K1} [9] |
0x42fabf VANDPD %ZMM4,%ZMM29,%ZMM21 |
0x42fac5 VDIVPD %ZMM30,%ZMM21,%ZMM21 |
0x42facb VMOVAPD %ZMM15,%ZMM27 |
0x42fad1 VFMADD213PD %ZMM15,%ZMM21,%ZMM27 |
0x42fad7 VPMOVSXDQ %YMM19,%ZMM19 |
0x42fadd VPSUBQ %ZMM5,%ZMM19,%ZMM19 |
0x42fae3 VPMULLQ %ZMM19,%ZMM1,%ZMM14 |
0x42fae9 VPADDQ %ZMM14,%ZMM7,%ZMM7 |
0x42faef KXNORW %K0,%K0,%K1 |
0x42faf3 VPXOR %XMM14,%XMM14,%XMM14 |
0x42faf8 VGATHERQPD (,%ZMM7,1),%ZMM14{%K1} [5] |
0x42fb03 VDIVPD %ZMM6,%ZMM27,%ZMM7 |
0x42fb09 VSUBPD %ZMM21,%ZMM10,%ZMM6 |
0x42fb0f VSUBPD %ZMM22,%ZMM3,%ZMM22 |
0x42fb15 VSUBPD %ZMM3,%ZMM14,%ZMM14 |
0x42fb1b VMULPD %ZMM22,%ZMM14,%ZMM27 |
0x42fb21 VCMPPD $0x1,%ZMM27,%ZMM28,%K1 |
0x42fb28 VSUBPD %ZMM21,%ZMM9,%ZMM21 |
0x42fb2e VFPCLASSPD $0x56,%ZMM14,%K2 |
0x42fb35 VXORPD %ZMM11,%ZMM21,%ZMM21{%K2} |
0x42fb3b VANDPD %ZMM4,%ZMM22,%ZMM22 |
0x42fb41 VANDPD %ZMM4,%ZMM14,%ZMM14 |
0x42fb47 VMULPD %ZMM7,%ZMM22,%ZMM27 |
0x42fb4d VFMADD231PD %ZMM6,%ZMM14,%ZMM27 |
0x42fb53 VMULPD %ZMM12,%ZMM27,%ZMM27 |
0x42fb59 VCMPPD $0x2,%ZMM27,%ZMM14,%K2 |
0x42fb60 VMOVAPD %ZMM14,%ZMM27{%K2} |
0x42fb66 VCMPPD $0x2,%ZMM27,%ZMM22,%K2 |
0x42fb6d VMOVAPD %ZMM22,%ZMM27{%K2} |
0x42fb73 VMOVAPD %ZMM3,%ZMM14 |
0x42fb79 VFMADD231PD %ZMM21,%ZMM27,%ZMM14{%K1} |
0x42fb7f VMULPD %ZMM29,%ZMM14,%ZMM29 |
0x42fb85 VMOVUPD %ZMM29,(%R9,%R13,8) [1] |
0x42fb8c VPXOR %XMM14,%XMM14,%XMM14 |
0x42fb91 VPMULLQ %ZMM31,%ZMM2,%ZMM14 |
0x42fb97 VPADDQ 0x300(%RSP),%ZMM16,%ZMM16 [3] |
0x42fb9f VPADDQ %ZMM14,%ZMM16,%ZMM14 |
0x42fba5 KXNORW %K0,%K0,%K1 |
0x42fba9 VPXORD %XMM31,%XMM31,%XMM31 |
0x42fbaf VGATHERQPD (,%ZMM14,1),%ZMM31{%K1} [12] |
0x42fbba VPXOR %XMM14,%XMM14,%XMM14 |
0x42fbbf VPMULLQ %ZMM18,%ZMM2,%ZMM14 |
0x42fbc5 VPADDQ %ZMM14,%ZMM16,%ZMM14 |
0x42fbcb KXNORW %K0,%K0,%K1 |
0x42fbcf VPXORD %XMM18,%XMM18,%XMM18 |
0x42fbd5 VGATHERQPD (,%ZMM14,1),%ZMM18{%K1} [11] |
0x42fbe0 VPXOR %XMM14,%XMM14,%XMM14 |
0x42fbe5 VPMULLQ %ZMM19,%ZMM2,%ZMM14 |
0x42fbeb VPADDQ %ZMM14,%ZMM16,%ZMM14 |
0x42fbf1 KXNORW %K0,%K0,%K1 |
0x42fbf5 VPXORD %XMM16,%XMM16,%XMM16 |
0x42fbfb VGATHERQPD (,%ZMM14,1),%ZMM16{%K1} [10] |
0x42fc06 VSUBPD %ZMM18,%ZMM31,%ZMM19 |
0x42fc0c VSUBPD %ZMM31,%ZMM16,%ZMM18 |
0x42fc12 VMULPD %ZMM19,%ZMM18,%ZMM16 |
0x42fc18 VCMPPD $0x1,%ZMM16,%ZMM28,%K0 |
0x42fc1f KORTESTB %K0,%K0 |
0x42fc23 JE 42f980 |
0x42fc29 VANDPD %ZMM4,%ZMM29,%ZMM14 |
0x42fc2f VMULPD %ZMM30,%ZMM3,%ZMM3 |
0x42fc35 VDIVPD %ZMM3,%ZMM14,%ZMM3 |
0x42fc3b VSUBPD %ZMM3,%ZMM9,%ZMM3 |
0x42fc41 VFPCLASSPD $0x56,%ZMM18,%K1 |
0x42fc48 VXORPD %ZMM11,%ZMM3,%ZMM3{%K1} |
0x42fc4e VANDPD %ZMM4,%ZMM19,%ZMM14 |
0x42fc54 VANDPD %ZMM4,%ZMM18,%ZMM18 |
0x42fc5a VMULPD %ZMM7,%ZMM14,%ZMM7 |
0x42fc60 VFMADD213PD %ZMM7,%ZMM18,%ZMM6 |
0x42fc66 VMULPD %ZMM12,%ZMM6,%ZMM6 |
0x42fc6c VCMPPD $0x2,%ZMM6,%ZMM18,%K1 |
0x42fc73 VMOVAPD %ZMM18,%ZMM6{%K1} |
0x42fc79 VCMPPD $0x2,%ZMM6,%ZMM14,%K1 |
0x42fc80 VMOVAPD %ZMM14,%ZMM6{%K1} |
0x42fc86 VMULPD %ZMM3,%ZMM6,%ZMM28 |
0x42fc8c JMP 42f980 |
/home/eoseret/qaas_runs_CPU_9468/171-137-7698/intel/CloverLeafFC/build/CloverLeafFC/CloverLeaf_ref/kernels/advec_cell_kernel.f90: 83 - 248 |
-------------------------------------------------------------------------------- |
83: IF(dir.EQ.g_xdir) THEN |
[...] |
204: IF(vol_flux_y(j,k).GT.0.0)THEN |
[...] |
216: sigmat=ABS(vol_flux_y(j,k))/pre_vol(j,donor) |
217: sigma3=(1.0_8+sigmat)*(vertexdy(k)/vertexdy(dif)) |
218: sigma4=2.0_8-sigmat |
219: |
220: sigma=sigmat |
221: sigmav=sigmat |
222: |
223: diffuw=density1(j,donor)-density1(j,upwind) |
224: diffdw=density1(j,downwind)-density1(j,donor) |
225: wind=1.0_8 |
226: IF(diffdw.LE.0.0) wind=-1.0_8 |
227: IF(diffuw*diffdw.GT.0.0)THEN |
228: limiter=(1.0_8-sigmav)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
229: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
230: ELSE |
231: limiter=0.0 |
232: ENDIF |
233: mass_flux_y(j,k)=vol_flux_y(j,k)*(density1(j,donor)+limiter) |
234: |
235: sigmam=ABS(mass_flux_y(j,k))/(density1(j,donor)*pre_vol(j,donor)) |
236: diffuw=energy1(j,donor)-energy1(j,upwind) |
237: diffdw=energy1(j,downwind)-energy1(j,donor) |
238: wind=1.0_8 |
239: IF(diffdw.LE.0.0) wind=-1.0_8 |
240: IF(diffuw*diffdw.GT.0.0)THEN |
241: limiter=(1.0_8-sigmam)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
242: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
243: ELSE |
244: limiter=0.0 |
245: ENDIF |
246: ener_flux(j,k)=mass_flux_y(j,k)*(energy1(j,donor)+limiter) |
247: |
248: ENDDO |
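For reference, the flux-limiter logic applied to density (lines 223-233) and again to energy (lines 236-246) can be isolated into a small standalone program. This is a minimal sketch with made-up input values, not part of the kernel; the vertexdy(k)/vertexdy(dif) ratio inside sigma3 is assumed to be 1 here.

program limiter_demo
  implicit none
  real(kind=8), parameter :: one_by_six = 1.0_8/6.0_8
  real(kind=8) :: sigmat, sigmav, sigma3, sigma4
  real(kind=8) :: diffuw, diffdw, wind, limiter

  ! Hypothetical inputs, for illustration only
  sigmat = 0.3_8                      ! stands in for ABS(vol_flux_y(j,k))/pre_vol(j,donor)
  sigma3 = (1.0_8+sigmat)*1.0_8       ! vertexdy(k)/vertexdy(dif) assumed equal to 1
  sigma4 = 2.0_8-sigmat
  sigmav = sigmat
  diffuw = 0.2_8                      ! donor minus upwind difference
  diffdw = 0.5_8                      ! downwind minus donor difference

  wind = 1.0_8
  IF (diffdw.LE.0.0) wind = -1.0_8
  IF (diffuw*diffdw.GT.0.0) THEN
    limiter = (1.0_8-sigmav)*wind*MIN(ABS(diffuw),ABS(diffdw) &
              ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw)))
  ELSE
    limiter = 0.0_8
  ENDIF
  PRINT *, 'limiter =', limiter
end program limiter_demo

The same MIN-of-three-slopes structure appears twice per iteration in the assembly above, once per flux, as the VCMPPD/VMOVAPD{%K} blend sequences.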
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.08 |
CQA speedup if FP arith vectorized | 1.00 |
CQA speedup if fully vectorized | 1.05 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | NA |
Bottlenecks | |
Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
Source | advec_cell_kernel.f90:83-83,advec_cell_kernel.f90:204-204,advec_cell_kernel.f90:216-248 |
Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
Source loop unroll confidence level | max |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 61.00 |
CQA cycles if no scalar integer | 56.50 |
CQA cycles if FP arith vectorized | 61.00 |
CQA cycles if fully vectorized | 58.00 |
Front-end cycles | 30.92 |
DIV/SQRT cycles | 61.00 |
P0 cycles | 9.50 |
P1 cycles | 22.67 |
P2 cycles | 22.67 |
P3 cycles | 1.00 |
P4 cycles | 61.00 |
P5 cycles | 2.40 |
P6 cycles | 1.00 |
P7 cycles | 1.00 |
P8 cycles | 1.00 |
P9 cycles | 2.60 |
P10 cycles | 22.67 |
P11 cycles | 40.00 |
Inter-iter dependencies cycles | 0 |
FE+BE cycles (UFS) | 62.12 - 93.29 |
Stall cycles (UFS) | 35.62 - 66.61 |
Nb insns | 120.50 |
Nb uops | 185.50 |
Nb loads | 12.00 |
Nb stores | 2.00 |
Nb stack references | 3.00 |
FLOP/cycle | 3.28 |
Nb FLOP add-sub | 60.00 |
Nb FLOP mul | 64.00 |
Nb FLOP fma | 28.00 |
Nb FLOP div | 20.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 14.75 |
Bytes prefetched | 0.00 |
Bytes loaded | 768.00 |
Bytes stored | 128.00 |
Stride 0 | 1.00 |
Stride 1 | 3.00 |
Stride n | 0.00 |
Stride unknown | 0.00 |
Stride indirect | 6.00 |
Vectorization ratio all | 92.26 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | 100.00 |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 85.75 |
Vector-efficiency ratio all | 78.36 |
Vector-efficiency ratio load | 100.00 |
Vector-efficiency ratio store | 100.00 |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 96.81 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 61.48 |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.07 |
CQA speedup if FP arith vectorized | 1.00 |
CQA speedup if fully vectorized | 1.05 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | NA |
Bottlenecks | P0, P5 |
Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
Source | advec_cell_kernel.f90:83-83,advec_cell_kernel.f90:204-204,advec_cell_kernel.f90:216-248 |
Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
Source loop unroll confidence level | max |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 65.00 |
CQA cycles if no scalar integer | 60.50 |
CQA cycles if FP arith vectorized | 65.00 |
CQA cycles if fully vectorized | 62.00 |
Front-end cycles | 32.50 |
DIV/SQRT cycles | 65.00 |
P0 cycles | 11.00 |
P1 cycles | 22.67 |
P2 cycles | 22.67 |
P3 cycles | 1.00 |
P4 cycles | 65.00 |
P5 cycles | 2.40 |
P6 cycles | 1.00 |
P7 cycles | 1.00 |
P8 cycles | 1.00 |
P9 cycles | 2.60 |
P10 cycles | 22.67 |
P11 cycles | 48.00 |
Inter-iter dependencies cycles | 0 |
FE+BE cycles (UFS) | 66.15 - 93.87 |
Stall cycles (UFS) | 38.03 - 65.59 |
Nb insns | 129.00 |
Nb uops | 195.00 |
Nb loads | 12.00 |
Nb stores | 2.00 |
Nb stack references | 3.00 |
FLOP/cycle | 3.57 |
Nb FLOP add-sub | 64.00 |
Nb FLOP mul | 80.00 |
Nb FLOP fma | 32.00 |
Nb FLOP div | 24.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 13.78 |
Bytes prefetched | 0.00 |
Bytes loaded | 768.00 |
Bytes stored | 128.00 |
Stride 0 | 1.00 |
Stride 1 | 3.00 |
Stride n | 0.00 |
Stride unknown | 0.00 |
Stride indirect | 6.00 |
Vectorization ratio all | 92.86 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | 100.00 |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 86.89 |
Vector-efficiency ratio all | 80.02 |
Vector-efficiency ratio load | 100.00 |
Vector-efficiency ratio store | 100.00 |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 96.88 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 64.55 |
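As a cross-check (assuming each FMA counts as two FLOPs and the denominator is the CQA cycle estimate), the derived ratios in the table above follow from the raw counts: FLOP/cycle = (64 add-sub + 80 mul + 2 x 32 fma + 24 div) / 65 cycles = 232/65 ≈ 3.57, and Bytes/cycle = (768 bytes loaded + 128 bytes stored) / 65 cycles ≈ 13.78.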
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.09 |
CQA speedup if FP arith vectorized | 1.00 |
CQA speedup if fully vectorized | 1.06 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | NA |
Bottlenecks | P0, P5 |
Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
Source | advec_cell_kernel.f90:83-83,advec_cell_kernel.f90:204-204,advec_cell_kernel.f90:216-248 |
Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
Source loop unroll confidence level | max |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 57.00 |
CQA cycles if no scalar integer | 52.50 |
CQA cycles if FP arith vectorized | 57.00 |
CQA cycles if fully vectorized | 54.00 |
Front-end cycles | 29.33 |
DIV/SQRT cycles | 57.00 |
P0 cycles | 8.00 |
P1 cycles | 22.67 |
P2 cycles | 22.67 |
P3 cycles | 1.00 |
P4 cycles | 57.00 |
P5 cycles | 2.40 |
P6 cycles | 1.00 |
P7 cycles | 1.00 |
P8 cycles | 1.00 |
P9 cycles | 2.60 |
P10 cycles | 22.67 |
P11 cycles | 32.00 |
Inter-iter dependencies cycles | 0 |
FE+BE cycles (UFS) | 58.09 - 92.71 |
Stall cycles (UFS) | 33.20 - 67.63 |
Nb insns | 112.00 |
Nb uops | 176.00 |
Nb loads | 12.00 |
Nb stores | 2.00 |
Nb stack references | 3.00 |
FLOP/cycle | 2.95 |
Nb FLOP add-sub | 56.00 |
Nb FLOP mul | 48.00 |
Nb FLOP fma | 24.00 |
Nb FLOP div | 16.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 15.72 |
Bytes prefetched | 0.00 |
Bytes loaded | 768.00 |
Bytes stored | 128.00 |
Stride 0 | 1.00 |
Stride 1 | 3.00 |
Stride n | 0.00 |
Stride unknown | 0.00 |
Stride indirect | 6.00 |
Vectorization ratio all | 91.67 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | 100.00 |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 84.62 |
Vector-efficiency ratio all | 76.69 |
Vector-efficiency ratio load | 100.00 |
Vector-efficiency ratio store | 100.00 |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 96.74 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 58.41 |
Path / |
Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
Source file and lines | advec_cell_kernel.f90:83-248 |
Module | exec |
nb instructions | 120.50 |
nb uops | 185.50 |
loop length | 733 |
used x86 registers | 12 |
used mmx registers | 0 |
used xmm registers | 10 |
used ymm registers | 4 |
used zmm registers | 26 |
nb stack references | 3 |
ADD-SUB / MUL ratio | 0.98 |
micro-operation queue | 30.92 cycles |
front end | 30.92 cycles |
| P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 61.00 | 5.00 | 22.67 | 22.67 | 1.00 | 61.00 | 2.40 | 1.00 | 1.00 | 1.00 | 2.60 | 22.67 |
cycles | 61.00 | 9.50 | 22.67 | 22.67 | 1.00 | 61.00 | 2.40 | 1.00 | 1.00 | 1.00 | 2.60 | 22.67 |
Cycles executing div or sqrt instructions | 40.00 |
Longest recurrence chain latency (RecMII) | 0.00 |
FE+BE cycles | 62.12-93.29 |
Stall cycles | 35.62-66.61 |
RS full (events) | 60.75-0.69 |
Front-end | 30.92 |
Dispatch | 61.00 |
DIV/SQRT | 40.00 |
Data deps. | 0.00 |
Overall L1 | 61.00 |
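Reading this breakdown, the binding constraint is dispatch: ports P0 and P5 each receive 61 uops per iteration (see the port table above), so the 61.00-cycle CQA estimate is set by those two ports rather than by the front-end (30.92 cycles), the VDIVPD work (40 cycles), or data dependencies (0 cycles).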
Vectorization ratio (INT) |
all | 83% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 100% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 69% |
Vectorization ratio (FP) |
all | 100% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Vectorization ratio (INT+FP) |
all | 92% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 85% |
Vector-efficiency ratio (INT) |
all | 58% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 95% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 25% |
Vector-efficiency ratio (FP) |
all | 95% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 92% |
Vector-efficiency ratio (INT+FP) |
all | 78% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 96% |
fma | 100% |
div/sqrt | 100% |
other | 61% |
Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
Source file and lines | advec_cell_kernel.f90:83-248 |
Module | exec |
nb instructions | 129 |
nb uops | 195 |
loop length | 785 |
used x86 registers | 12 |
used mmx registers | 0 |
used xmm registers | 10 |
used ymm registers | 4 |
used zmm registers | 26 |
nb stack references | 3 |
ADD-SUB / MUL ratio | 0.80 |
micro-operation queue | 32.50 cycles |
front end | 32.50 cycles |
| P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 65.00 | 5.00 | 22.67 | 22.67 | 1.00 | 65.00 | 2.40 | 1.00 | 1.00 | 1.00 | 2.60 | 22.67 |
cycles | 65.00 | 11.00 | 22.67 | 22.67 | 1.00 | 65.00 | 2.40 | 1.00 | 1.00 | 1.00 | 2.60 | 22.67 |
Cycles executing div or sqrt instructions | 48.00 |
Longest recurrence chain latency (RecMII) | 0.00 |
FE+BE cycles | 66.15-93.87 |
Stall cycles | 38.03-65.59 |
RS full (events) | 64.77-0.87 |
Front-end | 32.50 |
Dispatch | 65.00 |
DIV/SQRT | 48.00 |
Data deps. | 0.00 |
Overall L1 | 65.00 |
Vectorization ratio (INT) |
all | 83% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 100% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 69% |
Vectorization ratio (FP) |
all | 100% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Vectorization ratio (INT+FP) |
all | 92% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 86% |
Vector-efficiency ratio (INT) |
all | 58% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 95% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 25% |
Vector-efficiency ratio (FP) |
all | 96% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 93% |
Vector-efficiency ratio (INT+FP) |
all | 80% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 96% |
fma | 100% |
div/sqrt | 100% |
other | 64% |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VCMPPD $0x1,%ZMM16,%ZMM13,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VADDPD %ZMM28,%ZMM31,%ZMM31{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM29,%ZMM31,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD %ZMM3,(%RBX,%R13,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
ADD $0x8,%R13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
CMP %R12,%R13 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
JA 42fcc0 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x2900> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
VMOVUPD (%R15,%R13,8),%ZMM29 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VXORPD %XMM28,%XMM28,%XMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VCMPPD $0x1,%ZMM29,%ZMM28,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
LEA -0x1(%R14),%EAX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1-2 | 0.20 |
VPBROADCASTD %ESI,%YMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPBROADCASTD %EAX,%YMM3{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPMOVSXDQ %YMM3,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM3,%ZMM31 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VPXOR %XMM3,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM31,%ZMM0,%ZMM3 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
LEA (%RDX,%R13,1),%RCX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VMOVQ %RCX,%XMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPSUBQ %XMM26,%XMM6,%XMM6 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VPSLLQ $0x3,%XMM6,%XMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 0.50 |
VPBROADCASTQ %XMM6,%ZMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPADDQ %ZMM8,%ZMM6,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VPADDQ 0x380(%RSP),%ZMM16,%ZMM6 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
VPADDQ %ZMM3,%ZMM6,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VXORPD %XMM30,%XMM30,%XMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM3,1),%ZMM30{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VMOVDQA64 %YMM24,%YMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VPBROADCASTD %EAX,%YMM3{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPMOVSXDQ %YMM3,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM3,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXOR %XMM6,%XMM6,%XMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (%R11,%ZMM3,8),%ZMM6{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VPXOR %XMM3,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM31,%ZMM1,%ZMM3 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ 0x340(%RSP),%ZMM16,%ZMM7 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
VPADDQ %ZMM3,%ZMM7,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXOR %XMM3,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM18,1),%ZMM3{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
LEA -0x2(%R14),%ECX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1-2 | 0.20 |
VMOVDQA64 %YMM24,%YMM18 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VPBROADCASTD %ECX,%YMM18{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPBROADCASTD %EAX,%YMM19 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPBROADCASTD %ESI,%YMM19{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPMOVSXDQ %YMM18,%ZMM18 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM18,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VPMULLQ %ZMM18,%ZMM1,%ZMM21 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM21,%ZMM7,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VXORPD %XMM22,%XMM22,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM21,1),%ZMM22{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VANDPD %ZMM4,%ZMM29,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VDIVPD %ZMM30,%ZMM21,%ZMM21 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VMOVAPD %ZMM15,%ZMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD213PD %ZMM15,%ZMM21,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VPMOVSXDQ %YMM19,%ZMM19 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VPMULLQ %ZMM19,%ZMM1,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM14,%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM7,1),%ZMM14{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VDIVPD %ZMM6,%ZMM27,%ZMM7 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VSUBPD %ZMM21,%ZMM10,%ZMM6 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSUBPD %ZMM22,%ZMM3,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSUBPD %ZMM3,%ZMM14,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM22,%ZMM14,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x1,%ZMM27,%ZMM28,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VSUBPD %ZMM21,%ZMM9,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFPCLASSPD $0x56,%ZMM14,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VXORPD %ZMM11,%ZMM21,%ZMM21{%K2} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VANDPD %ZMM4,%ZMM22,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VANDPD %ZMM4,%ZMM14,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMULPD %ZMM7,%ZMM22,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD231PD %ZMM6,%ZMM14,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %ZMM12,%ZMM27,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x2,%ZMM27,%ZMM14,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM14,%ZMM27{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x2,%ZMM27,%ZMM22,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM22,%ZMM27{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %ZMM3,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD231PD %ZMM21,%ZMM27,%ZMM14{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %ZMM29,%ZMM14,%ZMM29 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD %ZMM29,(%R9,%R13,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM31,%ZMM2,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ 0x300(%RSP),%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
VPADDQ %ZMM14,%ZMM16,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXORD %XMM31,%XMM31,%XMM31 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VGATHERQPD (,%ZMM14,1),%ZMM31{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM18,%ZMM2,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM14,%ZMM16,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXORD %XMM18,%XMM18,%XMM18 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VGATHERQPD (,%ZMM14,1),%ZMM18{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM19,%ZMM2,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM14,%ZMM16,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXORD %XMM16,%XMM16,%XMM16 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VGATHERQPD (,%ZMM14,1),%ZMM16{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VSUBPD %ZMM18,%ZMM31,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSUBPD %ZMM31,%ZMM16,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM19,%ZMM18,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x1,%ZMM16,%ZMM28,%K0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
KORTESTB %K0,%K0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
JE 42f980 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x25c0> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
VANDPD %ZMM4,%ZMM29,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMULPD %ZMM30,%ZMM3,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VDIVPD %ZMM3,%ZMM14,%ZMM3 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VSUBPD %ZMM3,%ZMM9,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFPCLASSPD $0x56,%ZMM18,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VXORPD %ZMM11,%ZMM3,%ZMM3{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VANDPD %ZMM4,%ZMM19,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VANDPD %ZMM4,%ZMM18,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMULPD %ZMM7,%ZMM14,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD213PD %ZMM7,%ZMM18,%ZMM6 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %ZMM12,%ZMM6,%ZMM6 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x2,%ZMM6,%ZMM18,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM18,%ZMM6{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x2,%ZMM6,%ZMM14,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM14,%ZMM6{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMULPD %ZMM3,%ZMM6,%ZMM28 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
JMP 42f980 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x25c0> | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.08 |
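The 48.00 cycles executing div or sqrt instructions reported for this path follow from the instruction table above: it contains three VDIVPD, each with a reciprocal throughput of 16 cycles (3 x 16 = 48). The shorter path below has only two VDIVPD (2 x 16 = 32 cycles), and the loop-level average is the 40.00 cycles shown in the first breakdown.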
Function | advec_cell_kernel_.DIR.OMP.PARALLEL.2 |
Source file and lines | advec_cell_kernel.f90:83-248 |
Module | exec |
nb instructions | 112 |
nb uops | 176 |
loop length | 681 |
used x86 registers | 12 |
used mmx registers | 0 |
used xmm registers | 10 |
used ymm registers | 4 |
used zmm registers | 26 |
nb stack references | 3 |
ADD-SUB / MUL ratio | 1.17 |
micro-operation queue | 29.33 cycles |
front end | 29.33 cycles |
| P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 |
---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 57.00 | 5.00 | 22.67 | 22.67 | 1.00 | 57.00 | 2.40 | 1.00 | 1.00 | 1.00 | 2.60 | 22.67 |
cycles | 57.00 | 8.00 | 22.67 | 22.67 | 1.00 | 57.00 | 2.40 | 1.00 | 1.00 | 1.00 | 2.60 | 22.67 |
Cycles executing div or sqrt instructions | 32.00 |
Longest recurrence chain latency (RecMII) | 0.00 |
FE+BE cycles | 58.09-92.71 |
Stall cycles | 33.20-67.63 |
RS full (events) | 56.74-0.50 |
Front-end | 29.33 |
Dispatch | 57.00 |
DIV/SQRT | 32.00 |
Data deps. | 0.00 |
Overall L1 | 57.00 |
Vectorization ratio (INT) |
all | 83% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 100% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 69% |
Vectorization ratio (FP) |
all | 100% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Vectorization ratio (INT+FP) |
all | 91% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 84% |
Vector-efficiency ratio (INT) |
all | 58% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | 95% |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 25% |
Vector-efficiency ratio (FP) |
all | 95% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 91% |
Vector-efficiency ratio (INT+FP) |
all | 76% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 96% |
fma | 100% |
div/sqrt | 100% |
other | 58% |
Instruction | Nb FU | P0 | P1 | P2 | P3 | P4 | P5 | P6 | P7 | P8 | P9 | P10 | P11 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VCMPPD $0x1,%ZMM16,%ZMM13,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VADDPD %ZMM28,%ZMM31,%ZMM31{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM29,%ZMM31,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD %ZMM3,(%RBX,%R13,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
ADD $0x8,%R13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.17 |
CMP %R12,%R13 | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1 | 0.20 |
JA 42fcc0 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x2900> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |
VMOVUPD (%R15,%R13,8),%ZMM29 | 1 | 0 | 0 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0-1 | 0.50 |
VXORPD %XMM28,%XMM28,%XMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VCMPPD $0x1,%ZMM29,%ZMM28,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
LEA -0x1(%R14),%EAX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1-2 | 0.20 |
VPBROADCASTD %ESI,%YMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPBROADCASTD %EAX,%YMM3{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPMOVSXDQ %YMM3,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM3,%ZMM31 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VPXOR %XMM3,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM31,%ZMM0,%ZMM3 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
LEA (%RDX,%R13,1),%RCX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VMOVQ %RCX,%XMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPSUBQ %XMM26,%XMM6,%XMM6 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VPSLLQ $0x3,%XMM6,%XMM6 | 1 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2-4 | 0.50 |
VPBROADCASTQ %XMM6,%ZMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPADDQ %ZMM8,%ZMM6,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VPADDQ 0x380(%RSP),%ZMM16,%ZMM6 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
VPADDQ %ZMM3,%ZMM6,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VXORPD %XMM30,%XMM30,%XMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM3,1),%ZMM30{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VMOVDQA64 %YMM24,%YMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VPBROADCASTD %EAX,%YMM3{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPMOVSXDQ %YMM3,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM3,%ZMM3 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXOR %XMM6,%XMM6,%XMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (%R11,%ZMM3,8),%ZMM6{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VPXOR %XMM3,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM31,%ZMM1,%ZMM3 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ 0x340(%RSP),%ZMM16,%ZMM7 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
VPADDQ %ZMM3,%ZMM7,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K2 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXOR %XMM3,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM18,1),%ZMM3{%K2} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
LEA -0x2(%R14),%ECX | 1 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0.20 | 0 | 0 | 0 | 0.20 | 0 | 1-2 | 0.20 |
VMOVDQA64 %YMM24,%YMM18 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VPBROADCASTD %ECX,%YMM18{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPBROADCASTD %EAX,%YMM19 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPBROADCASTD %ESI,%YMM19{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPMOVSXDQ %YMM18,%ZMM18 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM18,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VPMULLQ %ZMM18,%ZMM1,%ZMM21 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM21,%ZMM7,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VXORPD %XMM22,%XMM22,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM21,1),%ZMM22{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VANDPD %ZMM4,%ZMM29,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VDIVPD %ZMM30,%ZMM21,%ZMM21 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VMOVAPD %ZMM15,%ZMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD213PD %ZMM15,%ZMM21,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VPMOVSXDQ %YMM19,%ZMM19 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VPSUBQ %ZMM5,%ZMM19,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VPMULLQ %ZMM19,%ZMM1,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM14,%ZMM7,%ZMM7 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VGATHERQPD (,%ZMM7,1),%ZMM14{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VDIVPD %ZMM6,%ZMM27,%ZMM7 | 3 | 2.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 22-24 | 16 |
VSUBPD %ZMM21,%ZMM10,%ZMM6 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSUBPD %ZMM22,%ZMM3,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSUBPD %ZMM3,%ZMM14,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM22,%ZMM14,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x1,%ZMM27,%ZMM28,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VSUBPD %ZMM21,%ZMM9,%ZMM21 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFPCLASSPD $0x56,%ZMM14,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VXORPD %ZMM11,%ZMM21,%ZMM21{%K2} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.50 |
VANDPD %ZMM4,%ZMM22,%ZMM22 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VANDPD %ZMM4,%ZMM14,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMULPD %ZMM7,%ZMM22,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VFMADD231PD %ZMM6,%ZMM14,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %ZMM12,%ZMM27,%ZMM27 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x2,%ZMM27,%ZMM14,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM14,%ZMM27{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VCMPPD $0x2,%ZMM27,%ZMM22,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
VMOVAPD %ZMM22,%ZMM27{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VMOVAPD %ZMM3,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.17 |
VFMADD231PD %ZMM21,%ZMM27,%ZMM14{%K1} | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULPD %ZMM29,%ZMM14,%ZMM29 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMOVUPD %ZMM29,(%R9,%R13,8) | 1 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0 | 0 | 0-1 | 1 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM31,%ZMM2,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ 0x300(%RSP),%ZMM16,%ZMM16 | 1 | 0.50 | 0 | 0.33 | 0.33 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.33 | 1 | 0.67 |
VPADDQ %ZMM14,%ZMM16,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXORD %XMM31,%XMM31,%XMM31 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VGATHERQPD (,%ZMM14,1),%ZMM31{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM18,%ZMM2,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM14,%ZMM16,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXORD %XMM18,%XMM18,%XMM18 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VGATHERQPD (,%ZMM14,1),%ZMM18{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VPXOR %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VPMULLQ %ZMM19,%ZMM2,%ZMM14 | 5 | 1.50 | 0 | 0 | 0 | 0 | 1.50 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 1.50 |
VPADDQ %ZMM14,%ZMM16,%ZMM14 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
KXNORW %K0,%K0,%K1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
VPXORD %XMM16,%XMM16,%XMM16 | 1 | 0.33 | 0.33 | 0 | 0 | 0 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 0-1 | 0.33 |
VGATHERQPD (,%ZMM14,1),%ZMM16{%K1} | 5 | 1 | 0 | 2.67 | 2.67 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 2.67 | 0-29 | 2.67 |
VSUBPD %ZMM18,%ZMM31,%ZMM19 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VSUBPD %ZMM31,%ZMM16,%ZMM18 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMULPD %ZMM19,%ZMM18,%ZMM16 | 1 | 0.50 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VCMPPD $0x1,%ZMM16,%ZMM28,%K0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 |
KORTESTB %K0,%K0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
JE 42f980 <advec_cell_kernel_module_mp_advec_cell_kernel_.DIR.OMP.PARALLEL.2+0x25c0> | 1 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 |