Loop Id: 16790 | Module: libgromacs_mpi.so.9.0.0 | Source: bonded.cpp:1151-1185 [...] | Coverage: 0.26% |
---|
0xbc4460 VMOVDQA64 0x5c0(%RSP),%ZMM20 |
0xbc4468 VPMULLD %ZMM9,%ZMM20,%ZMM11 |
0xbc446e VXORPS %XMM14,%XMM14,%XMM14 |
0xbc4473 KXNORW %K0,%K0,%K1 |
0xbc4477 VGATHERDPS (%R14,%ZMM11,4),%ZMM14{%K1} |
0xbc447e VXORPS %XMM15,%XMM15,%XMM15 |
0xbc4483 KXNORW %K0,%K0,%K1 |
0xbc4487 VGATHERDPS (%RAX,%ZMM11,4),%ZMM15{%K1} |
0xbc448e VXORPS %XMM17,%XMM17,%XMM17 |
0xbc4494 KXNORW %K0,%K0,%K1 |
0xbc4498 VGATHERDPS (%RCX,%ZMM11,4),%ZMM17{%K1} |
0xbc449f VMOVDQA64 0x580(%RSP),%ZMM11 |
0xbc44a7 VPMULLD %ZMM9,%ZMM11,%ZMM21 |
0xbc44ad VXORPS %XMM22,%XMM22,%XMM22 |
0xbc44b3 KXNORW %K0,%K0,%K1 |
0xbc44b7 VGATHERDPS (%R14,%ZMM21,4),%ZMM22{%K1} |
0xbc44be VXORPS %XMM23,%XMM23,%XMM23 |
0xbc44c4 KXNORW %K0,%K0,%K1 |
0xbc44c8 VGATHERDPS (%RAX,%ZMM21,4),%ZMM23{%K1} |
0xbc44cf VXORPS %XMM24,%XMM24,%XMM24 |
0xbc44d5 KXNORW %K0,%K0,%K1 |
0xbc44d9 VGATHERDPS (%RCX,%ZMM21,4),%ZMM24{%K1} |
0xbc44e0 VMOVDQA64 0x540(%RSP),%ZMM31 |
0xbc44e8 VPMULLD %ZMM9,%ZMM31,%ZMM21 |
0xbc44ee VXORPS %XMM26,%XMM26,%XMM26 |
0xbc44f4 KXNORW %K0,%K0,%K1 |
0xbc44f8 VGATHERDPS (%R14,%ZMM21,4),%ZMM26{%K1} |
0xbc44ff VXORPS %XMM27,%XMM27,%XMM27 |
0xbc4505 KXNORW %K0,%K0,%K1 |
0xbc4509 VGATHERDPS (%RAX,%ZMM21,4),%ZMM27{%K1} |
0xbc4510 VXORPS %XMM28,%XMM28,%XMM28 |
0xbc4516 KXNORW %K0,%K0,%K1 |
0xbc451a VGATHERDPS (%RCX,%ZMM21,4),%ZMM28{%K1} |
0xbc4521 VSUBPS %ZMM22,%ZMM14,%ZMM14 |
0xbc4527 VSUBPS %ZMM23,%ZMM15,%ZMM15 |
0xbc452d VSUBPS %ZMM24,%ZMM17,%ZMM17 |
0xbc4533 VSUBPS %ZMM22,%ZMM26,%ZMM21 |
0xbc4539 VSUBPS %ZMM23,%ZMM27,%ZMM22 |
0xbc453f VSUBPS %ZMM24,%ZMM28,%ZMM23 |
0xbc4545 VMOVAPS 0x3c0(%RSP),%ZMM0 |
0xbc454d VMULPS 0x40(%RSP),%ZMM0,%ZMM27 |
0xbc4555 VMOVAPS 0x500(%RSP),%ZMM0 |
0xbc455d VMULPS %ZMM0,%ZMM17,%ZMM24 |
0xbc4563 VRNDSCALEPS $0,%ZMM24,%ZMM24 |
0xbc456a VMOVAPS 0x4c0(%RSP),%ZMM1 |
0xbc4572 VMULPS %ZMM1,%ZMM24,%ZMM26 |
0xbc4578 VSUBPS %ZMM26,%ZMM14,%ZMM26 |
0xbc457e VMOVAPS 0x480(%RSP),%ZMM2 |
0xbc4586 VMULPS %ZMM2,%ZMM24,%ZMM14 |
0xbc458c VSUBPS %ZMM14,%ZMM15,%ZMM15 |
0xbc4592 VMOVAPS 0x440(%RSP),%ZMM3 |
0xbc459a VMULPS %ZMM3,%ZMM24,%ZMM14 |
0xbc45a0 VSUBPS %ZMM14,%ZMM17,%ZMM14 |
0xbc45a6 VMOVAPS 0x400(%RSP),%ZMM4 |
0xbc45ae VMULPS %ZMM4,%ZMM15,%ZMM17 |
0xbc45b4 VRNDSCALEPS $0,%ZMM17,%ZMM17 |
0xbc45bb VMOVAPS 0xc0(%RSP),%ZMM5 |
0xbc45c3 VMULPS %ZMM5,%ZMM17,%ZMM24 |
0xbc45c9 VSUBPS %ZMM24,%ZMM26,%ZMM24 |
0xbc45cf VMOVAPS 0x80(%RSP),%ZMM6 |
0xbc45d7 VMULPS %ZMM6,%ZMM17,%ZMM17 |
0xbc45dd VSUBPS %ZMM17,%ZMM15,%ZMM15 |
0xbc45e3 VMULPS %ZMM7,%ZMM24,%ZMM17 |
0xbc45e9 VRNDSCALEPS $0,%ZMM17,%ZMM17 |
0xbc45f0 VMULPS %ZMM8,%ZMM17,%ZMM17 |
0xbc45f6 VSUBPS %ZMM17,%ZMM24,%ZMM17 |
0xbc45fc VMULPS %ZMM0,%ZMM23,%ZMM24 |
0xbc4602 VRNDSCALEPS $0,%ZMM24,%ZMM24 |
0xbc4609 VMULPS %ZMM24,%ZMM1,%ZMM26 |
0xbc460f VSUBPS %ZMM26,%ZMM21,%ZMM26 |
0xbc4615 VMULPS %ZMM24,%ZMM2,%ZMM21 |
0xbc461b VSUBPS %ZMM21,%ZMM22,%ZMM22 |
0xbc4621 VMULPS %ZMM24,%ZMM3,%ZMM21 |
0xbc4627 VSUBPS %ZMM21,%ZMM23,%ZMM21 |
0xbc462d VMULPS %ZMM22,%ZMM4,%ZMM23 |
0xbc4633 VRNDSCALEPS $0,%ZMM23,%ZMM23 |
0xbc463a VMULPS %ZMM23,%ZMM5,%ZMM24 |
0xbc4640 VSUBPS %ZMM24,%ZMM26,%ZMM24 |
0xbc4646 VMULPS %ZMM23,%ZMM6,%ZMM23 |
0xbc464c VSUBPS %ZMM23,%ZMM22,%ZMM23 |
0xbc4652 VMULPS %ZMM24,%ZMM7,%ZMM22 |
0xbc4658 VRNDSCALEPS $0,%ZMM22,%ZMM22 |
0xbc465f VMULPS %ZMM22,%ZMM8,%ZMM22 |
0xbc4665 VSUBPS %ZMM22,%ZMM24,%ZMM24 |
0xbc466b VMULPS %ZMM24,%ZMM17,%ZMM22 |
0xbc4671 VMULPS %ZMM23,%ZMM15,%ZMM26 |
0xbc4677 VADDPS %ZMM22,%ZMM26,%ZMM22 |
0xbc467d VMULPS %ZMM21,%ZMM14,%ZMM26 |
0xbc4683 VADDPS %ZMM22,%ZMM26,%ZMM28 |
0xbc4689 VMULPS %ZMM17,%ZMM17,%ZMM22 |
0xbc468f VMULPS %ZMM15,%ZMM15,%ZMM26 |
0xbc4695 VADDPS %ZMM22,%ZMM26,%ZMM22 |
0xbc469b VMULPS %ZMM14,%ZMM14,%ZMM26 |
0xbc46a1 VADDPS %ZMM22,%ZMM26,%ZMM29 |
0xbc46a7 VMULPS %ZMM24,%ZMM24,%ZMM22 |
0xbc46ad VMULPS %ZMM23,%ZMM23,%ZMM26 |
0xbc46b3 VADDPS %ZMM22,%ZMM26,%ZMM22 |
0xbc46b9 VRSQRT14PS %ZMM29,%ZMM26 |
0xbc46bf VMULPS %ZMM21,%ZMM21,%ZMM30 |
0xbc46c5 VADDPS %ZMM22,%ZMM30,%ZMM30 |
0xbc46cb VMULPS %ZMM29,%ZMM26,%ZMM22 |
0xbc46d1 VMULPS %ZMM12,%ZMM26,%ZMM0 |
0xbc46d7 VFMADD213PS %ZMM13,%ZMM26,%ZMM22 |
0xbc46dd VMULPS %ZMM22,%ZMM0,%ZMM22 |
0xbc46e3 VRSQRT14PS %ZMM30,%ZMM0 |
0xbc46e9 VMULPS %ZMM30,%ZMM0,%ZMM26 |
0xbc46ef VMULPS %ZMM12,%ZMM0,%ZMM1 |
0xbc46f5 VFMADD213PS %ZMM13,%ZMM0,%ZMM26 |
0xbc46fb VMULPS %ZMM26,%ZMM1,%ZMM26 |
0xbc4701 VMULPS %ZMM22,%ZMM28,%ZMM0 |
0xbc4707 VMULPS %ZMM30,%ZMM29,%ZMM1 |
0xbc470d VRCP14PS %ZMM1,%ZMM29 |
0xbc4713 VMULPS %ZMM26,%ZMM0,%ZMM0 |
0xbc4719 VMULPS %ZMM28,%ZMM28,%ZMM28 |
0xbc471f VFNMADD213PS 0x380(%RSP),%ZMM29,%ZMM1 |
0xbc4727 VMULPS %ZMM1,%ZMM29,%ZMM1 |
0xbc472d VMULPS %ZMM1,%ZMM28,%ZMM1 |
0xbc4733 VMAXPS 0x340(%RSP),%ZMM0,%ZMM0 |
0xbc473b VMINPS %ZMM16,%ZMM0,%ZMM28 |
0xbc4741 VMINPS 0x300(%RSP),%ZMM1,%ZMM0 |
0xbc4749 VANDPS %ZMM18,%ZMM28,%ZMM1 |
0xbc474f VCMPPS $0x1,%ZMM1,%ZMM19,%K1 |
0xbc4756 VCMPPS $0x1,%ZMM28,%ZMM10,%K2 |
0xbc475d VMOVAPS %ZMM19,%ZMM29 |
0xbc4763 VFNMADD213PS %ZMM19,%ZMM1,%ZMM29 |
0xbc4769 VCMPPS $0x1,%ZMM16,%ZMM1,%K3 |
0xbc4770 VRSQRT14PS %ZMM29,%ZMM1{%K3}{z} |
0xbc4776 VMULPS %ZMM29,%ZMM1,%ZMM30 |
0xbc477c VMULPS %ZMM12,%ZMM1,%ZMM2 |
0xbc4782 VFMADD213PS %ZMM13,%ZMM1,%ZMM30 |
0xbc4788 VMULPS %ZMM30,%ZMM2,%ZMM1 |
0xbc478e VMOVAPS %ZMM28,%ZMM2 |
0xbc4794 VMULPS %ZMM1,%ZMM29,%ZMM2{%K1} |
0xbc479a VANDPS %ZMM18,%ZMM2,%ZMM1 |
0xbc47a0 VCMPPS $0x1,%ZMM1,%ZMM19,%K3 |
0xbc47a7 VSUBPS %ZMM1,%ZMM16,%ZMM29 |
0xbc47ad VMULPS %ZMM19,%ZMM29,%ZMM29 |
0xbc47b3 VCMPPS $0x1,%ZMM16,%ZMM1,%K4 |
0xbc47ba VRSQRT14PS %ZMM29,%ZMM30{%K4}{z} |
0xbc47c0 VMULPS %ZMM29,%ZMM30,%ZMM3 |
0xbc47c6 VMULPS %ZMM12,%ZMM30,%ZMM4 |
0xbc47cc VFMADD213PS %ZMM13,%ZMM30,%ZMM3 |
0xbc47d2 VMULPS %ZMM3,%ZMM4,%ZMM3 |
0xbc47d8 VMULPS %ZMM1,%ZMM1,%ZMM4 |
0xbc47de VMOVAPS %ZMM29,%ZMM4{%K3} |
0xbc47e4 VMOVAPS %ZMM1,%ZMM30 |
0xbc47ea VMULPS %ZMM3,%ZMM29,%ZMM30{%K3} |
0xbc47f0 VMULPS %ZMM4,%ZMM4,%ZMM3 |
0xbc47f6 VMOVAPS 0x2c0(%RSP),%ZMM29 |
0xbc47fe VFMADD213PS 0x280(%RSP),%ZMM3,%ZMM29 |
0xbc4806 VFMADD213PS 0x1c0(%RSP),%ZMM3,%ZMM29 |
0xbc480e VMULPS %ZMM29,%ZMM4,%ZMM4 |
0xbc4814 VMOVAPS 0x240(%RSP),%ZMM29 |
0xbc481c VFMADD213PS 0x200(%RSP),%ZMM3,%ZMM29 |
0xbc4824 VFMADD231PS %ZMM29,%ZMM3,%ZMM4 |
0xbc482a VFMADD213PS %ZMM30,%ZMM30,%ZMM4 |
0xbc4830 VSUBPS %ZMM4,%ZMM25,%ZMM3 |
0xbc4836 VSUBPS %ZMM4,%ZMM3,%ZMM4{%K3} |
0xbc483c VCMPPS $0xe,0x180(%RSP),%ZMM1,%K3 |
0xbc4845 VMOVAPS %ZMM4,%ZMM1{%K3} |
0xbc484b VPTERNLOGD $-0x28,0x140(%RSP),%ZMM2,%ZMM1 |
0xbc4854 VADDPS %ZMM1,%ZMM1,%ZMM2 |
0xbc485a VMOVAPS 0x100(%RSP),%ZMM3 |
0xbc4862 VSUBPS %ZMM2,%ZMM3,%ZMM3 |
0xbc4868 VSUBPS %ZMM1,%ZMM25,%ZMM1 |
0xbc486e VMOVAPS %ZMM2,%ZMM3{%K2} |
0xbc4874 VMOVAPS %ZMM3,%ZMM1{%K1} |
0xbc487a VSUBPS %ZMM1,%ZMM27,%ZMM1 |
0xbc4880 VSUBPS %ZMM0,%ZMM16,%ZMM0 |
0xbc4886 VRSQRT14PS %ZMM0,%ZMM2 |
0xbc488c VMULPS %ZMM2,%ZMM0,%ZMM0 |
0xbc4892 VMULPS %ZMM12,%ZMM2,%ZMM3 |
0xbc4898 VFMADD213PS %ZMM13,%ZMM2,%ZMM0 |
0xbc489e VMULPS %ZMM0,%ZMM3,%ZMM0 |
0xbc48a4 VMULPS (%RSP),%ZMM1,%ZMM1 |
0xbc48ab VMULPS %ZMM1,%ZMM0,%ZMM0 |
0xbc48b1 VMULPS %ZMM0,%ZMM28,%ZMM30 |
0xbc48b7 VMULPS %ZMM0,%ZMM22,%ZMM0 |
0xbc48bd VMULPS %ZMM0,%ZMM26,%ZMM27 |
0xbc48c3 VMULPS %ZMM30,%ZMM22,%ZMM0 |
0xbc48c9 VMULPS %ZMM0,%ZMM22,%ZMM0 |
0xbc48cf VMULPS %ZMM0,%ZMM17,%ZMM22 |
0xbc48d5 VFNMADD231PS %ZMM24,%ZMM27,%ZMM22 |
0xbc48db VMULPS %ZMM0,%ZMM15,%ZMM28 |
0xbc48e1 VFNMADD231PS %ZMM23,%ZMM27,%ZMM28 |
0xbc48e7 VMULPS %ZMM0,%ZMM14,%ZMM29 |
0xbc48ed VFNMADD231PS %ZMM21,%ZMM27,%ZMM29 |
0xbc48f3 VPSLLD $0x2,%ZMM20,%ZMM0 |
0xbc48fa VMOVDQA -0x858942(%RIP),%YMM6 |
0xbc4902 VPERMD %ZMM0,%ZMM6,%ZMM1 |
0xbc4908 VUNPCKLPS %ZMM29,%ZMM22,%ZMM2 |
0xbc490e VUNPCKLPS %ZMM10,%ZMM28,%ZMM3 |
0xbc4914 VUNPCKLPS %ZMM3,%ZMM2,%ZMM4 |
0xbc491a VUNPCKHPS %ZMM3,%ZMM2,%ZMM2 |
0xbc4920 VEXTRACTF128 $0x1,%YMM4,%XMM3 |
0xbc4926 VPMOVSXDQ %YMM1,%ZMM1 |
0xbc492c VMOVQ %XMM1,%RDI |
0xbc4931 VADDPS (%RBX,%RDI,4),%XMM4,%XMM20 |
0xbc4938 VMOVAPS %XMM20,(%RBX,%RDI,4) |
0xbc493f VEXTRACTF32X4 $0x2,%ZMM4,%XMM20 |
0xbc4946 VPEXTRQ $0x1,%XMM1,%RDI |
0xbc494c VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 |
0xbc4951 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4956 VEXTRACTI128 $0x1,%YMM1,%XMM3 |
0xbc495c VMOVQ %XMM3,%RDI |
0xbc4961 VADDPS (%RBX,%RDI,4),%XMM20,%XMM20 |
0xbc4968 VMOVAPS %XMM20,(%RBX,%RDI,4) |
0xbc496f VMOVDQA -0x85f077(%RIP),%YMM5 |
0xbc4977 VPERMD %ZMM0,%ZMM5,%ZMM0 |
0xbc497d VEXTRACTF32X4 $0x3,%ZMM4,%XMM4 |
0xbc4984 VPEXTRQ $0x1,%XMM3,%RDI |
0xbc498a VADDPS (%RBX,%RDI,4),%XMM4,%XMM3 |
0xbc498f VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4994 VEXTRACTI32X4 $0x2,%ZMM1,%XMM3 |
0xbc499b VMOVQ %XMM3,%RDI |
0xbc49a0 VADDPS (%RBX,%RDI,4),%XMM2,%XMM4 |
0xbc49a5 VMOVAPS %XMM4,(%RBX,%RDI,4) |
0xbc49aa VUNPCKHPS %ZMM29,%ZMM22,%ZMM4 |
0xbc49b0 VPEXTRQ $0x1,%XMM3,%RDI |
0xbc49b6 VEXTRACTF128 $0x1,%YMM2,%XMM3 |
0xbc49bc VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 |
0xbc49c1 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc49c6 VEXTRACTF32X4 $0x2,%ZMM2,%XMM3 |
0xbc49cd VEXTRACTI32X4 $0x3,%ZMM1,%XMM1 |
0xbc49d4 VMOVQ %XMM1,%RDI |
0xbc49d9 VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 |
0xbc49de VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc49e3 VUNPCKHPS %ZMM10,%ZMM28,%ZMM3 |
0xbc49e9 VPEXTRQ $0x1,%XMM1,%RDI |
0xbc49ef VUNPCKLPS %ZMM3,%ZMM4,%ZMM1 |
0xbc49f5 VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 |
0xbc49fc VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 |
0xbc4a01 VUNPCKHPS %ZMM3,%ZMM4,%ZMM3 |
0xbc4a07 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4a0c VEXTRACTF128 $0x1,%YMM1,%XMM2 |
0xbc4a12 VPMOVSXDQ %YMM0,%ZMM0 |
0xbc4a18 VMOVQ %XMM0,%RDI |
0xbc4a1d VADDPS (%RBX,%RDI,4),%XMM1,%XMM4 |
0xbc4a22 VMOVAPS %XMM4,(%RBX,%RDI,4) |
0xbc4a27 VPEXTRQ $0x1,%XMM0,%RDI |
0xbc4a2d VEXTRACTF32X4 $0x2,%ZMM1,%XMM4 |
0xbc4a34 VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 |
0xbc4a39 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4a3e VEXTRACTI128 $0x1,%YMM0,%XMM2 |
0xbc4a44 VMOVQ %XMM2,%RDI |
0xbc4a49 VADDPS (%RBX,%RDI,4),%XMM4,%XMM4 |
0xbc4a4e VMOVAPS %XMM4,(%RBX,%RDI,4) |
0xbc4a53 VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 |
0xbc4a5a VPEXTRQ $0x1,%XMM2,%RDI |
0xbc4a60 VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 |
0xbc4a65 VMOVAPS %XMM1,(%RBX,%RDI,4) |
0xbc4a6a VEXTRACTI32X4 $0x2,%ZMM0,%XMM1 |
0xbc4a71 VMOVQ %XMM1,%RDI |
0xbc4a76 VADDPS (%RBX,%RDI,4),%XMM3,%XMM2 |
0xbc4a7b VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4a80 VPEXTRQ $0x1,%XMM1,%RDI |
0xbc4a86 VEXTRACTF128 $0x1,%YMM3,%XMM1 |
0xbc4a8c VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 |
0xbc4a91 VMOVAPS %XMM1,(%RBX,%RDI,4) |
0xbc4a96 VEXTRACTF32X4 $0x2,%ZMM3,%XMM1 |
0xbc4a9d VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 |
0xbc4aa4 VMOVQ %XMM0,%RDI |
0xbc4aa9 VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 |
0xbc4aae VMOVAPS %XMM1,(%RBX,%RDI,4) |
0xbc4ab3 VPEXTRQ $0x1,%XMM0,%RDI |
0xbc4ab9 VEXTRACTF32X4 $0x3,%ZMM3,%XMM0 |
0xbc4ac0 VADDPS (%RBX,%RDI,4),%XMM0,%XMM0 |
0xbc4ac5 VMOVAPS %XMM0,(%RBX,%RDI,4) |
0xbc4aca VMULPS %ZMM30,%ZMM26,%ZMM0 |
0xbc4ad0 VMULPS %ZMM0,%ZMM26,%ZMM0 |
0xbc4ad6 VMULPS %ZMM0,%ZMM23,%ZMM20 |
0xbc4adc VFNMADD231PS %ZMM15,%ZMM27,%ZMM20 |
0xbc4ae2 VMULPS %ZMM0,%ZMM24,%ZMM15 |
0xbc4ae8 VFNMADD231PS %ZMM17,%ZMM27,%ZMM15 |
0xbc4aee VMULPS %ZMM0,%ZMM21,%ZMM17 |
0xbc4af4 VFNMADD231PS %ZMM14,%ZMM27,%ZMM17 |
0xbc4afa VADDPS %ZMM15,%ZMM22,%ZMM0 |
0xbc4b00 VADDPS %ZMM20,%ZMM28,%ZMM1 |
0xbc4b06 VADDPS %ZMM17,%ZMM29,%ZMM2 |
0xbc4b0c VPSLLD $0x2,%ZMM11,%ZMM3 |
0xbc4b13 VPERMD %ZMM3,%ZMM6,%ZMM4 |
0xbc4b19 VUNPCKLPS %ZMM2,%ZMM0,%ZMM14 |
0xbc4b1f VUNPCKHPS %ZMM2,%ZMM0,%ZMM11 |
0xbc4b25 VUNPCKLPS %ZMM10,%ZMM1,%ZMM0 |
0xbc4b2b VUNPCKLPS %ZMM0,%ZMM14,%ZMM2 |
0xbc4b31 VUNPCKHPS %ZMM0,%ZMM14,%ZMM0 |
0xbc4b37 VEXTRACTF128 $0x1,%YMM2,%XMM14 |
0xbc4b3d VPMOVSXDQ %YMM4,%ZMM4 |
0xbc4b43 VMOVQ %XMM4,%RDI |
0xbc4b48 VMOVAPS (%RBX,%RDI,4),%XMM21 |
0xbc4b4f VSUBPS %XMM2,%XMM21,%XMM21 |
0xbc4b55 VMOVAPS %XMM21,(%RBX,%RDI,4) |
0xbc4b5c VPEXTRQ $0x1,%XMM4,%RDI |
0xbc4b62 VMOVAPS (%RBX,%RDI,4),%XMM21 |
0xbc4b69 VEXTRACTF32X4 $0x2,%ZMM2,%XMM22 |
0xbc4b70 VSUBPS %XMM14,%XMM21,%XMM14 |
0xbc4b76 VMOVAPS %XMM14,(%RBX,%RDI,4) |
0xbc4b7b VEXTRACTI128 $0x1,%YMM4,%XMM14 |
0xbc4b81 VMOVQ %XMM14,%RDI |
0xbc4b86 VMOVAPS (%RBX,%RDI,4),%XMM21 |
0xbc4b8d VSUBPS %XMM22,%XMM21,%XMM21 |
0xbc4b93 VMOVAPS %XMM21,(%RBX,%RDI,4) |
0xbc4b9a VPERMD %ZMM3,%ZMM5,%ZMM3 |
0xbc4ba0 VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 |
0xbc4ba7 VPEXTRQ $0x1,%XMM14,%RDI |
0xbc4bad VMOVAPS (%RBX,%RDI,4),%XMM14 |
0xbc4bb2 VSUBPS %XMM2,%XMM14,%XMM2 |
0xbc4bb6 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4bbb VEXTRACTI32X4 $0x2,%ZMM4,%XMM2 |
0xbc4bc2 VMOVQ %XMM2,%RDI |
0xbc4bc7 VMOVAPS (%RBX,%RDI,4),%XMM14 |
0xbc4bcc VUNPCKHPS %ZMM10,%ZMM1,%ZMM1 |
0xbc4bd2 VSUBPS %XMM0,%XMM14,%XMM14 |
0xbc4bd6 VMOVAPS %XMM14,(%RBX,%RDI,4) |
0xbc4bdb VPEXTRQ $0x1,%XMM2,%RDI |
0xbc4be1 VEXTRACTF128 $0x1,%YMM0,%XMM2 |
0xbc4be7 VMOVAPS (%RBX,%RDI,4),%XMM14 |
0xbc4bec VSUBPS %XMM2,%XMM14,%XMM2 |
0xbc4bf0 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4bf5 VEXTRACTF32X4 $0x2,%ZMM0,%XMM2 |
0xbc4bfc VEXTRACTI32X4 $0x3,%ZMM4,%XMM4 |
0xbc4c03 VMOVQ %XMM4,%RDI |
0xbc4c08 VMOVAPS (%RBX,%RDI,4),%XMM14 |
0xbc4c0d VSUBPS %XMM2,%XMM14,%XMM2 |
0xbc4c11 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4c16 VPEXTRQ $0x1,%XMM4,%RDI |
0xbc4c1c VMOVAPS (%RBX,%RDI,4),%XMM2 |
0xbc4c21 VUNPCKLPS %ZMM1,%ZMM11,%ZMM4 |
0xbc4c27 VEXTRACTF32X4 $0x3,%ZMM0,%XMM0 |
0xbc4c2e VSUBPS %XMM0,%XMM2,%XMM0 |
0xbc4c32 VMOVAPS %XMM0,(%RBX,%RDI,4) |
0xbc4c37 VPMOVSXDQ %YMM3,%ZMM0 |
0xbc4c3d VMOVQ %XMM0,%RDI |
0xbc4c42 VMOVAPS (%RBX,%RDI,4),%XMM2 |
0xbc4c47 VSUBPS %XMM4,%XMM2,%XMM2 |
0xbc4c4b VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4c50 VEXTRACTF128 $0x1,%YMM4,%XMM2 |
0xbc4c56 VPEXTRQ $0x1,%XMM0,%RDI |
0xbc4c5c VMOVAPS (%RBX,%RDI,4),%XMM3 |
0xbc4c61 VSUBPS %XMM2,%XMM3,%XMM2 |
0xbc4c65 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4c6a VEXTRACTI128 $0x1,%YMM0,%XMM2 |
0xbc4c70 VMOVQ %XMM2,%RDI |
0xbc4c75 VMOVAPS (%RBX,%RDI,4),%XMM3 |
0xbc4c7a VEXTRACTF32X4 $0x2,%ZMM4,%XMM14 |
0xbc4c81 VSUBPS %XMM14,%XMM3,%XMM3 |
0xbc4c86 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4c8b VPEXTRQ $0x1,%XMM2,%RDI |
0xbc4c91 VEXTRACTF32X4 $0x3,%ZMM4,%XMM2 |
0xbc4c98 VMOVAPS (%RBX,%RDI,4),%XMM3 |
0xbc4c9d VSUBPS %XMM2,%XMM3,%XMM2 |
0xbc4ca1 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4ca6 VUNPCKHPS %ZMM1,%ZMM11,%ZMM1 |
0xbc4cac VEXTRACTI32X4 $0x2,%ZMM0,%XMM2 |
0xbc4cb3 VMOVQ %XMM2,%RDI |
0xbc4cb8 VMOVAPS (%RBX,%RDI,4),%XMM3 |
0xbc4cbd VSUBPS %XMM1,%XMM3,%XMM3 |
0xbc4cc1 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4cc6 VPEXTRQ $0x1,%XMM2,%RDI |
0xbc4ccc VMOVAPS (%RBX,%RDI,4),%XMM2 |
0xbc4cd1 VEXTRACTF128 $0x1,%YMM1,%XMM3 |
0xbc4cd7 VSUBPS %XMM3,%XMM2,%XMM2 |
0xbc4cdb VEXTRACTF32X4 $0x2,%ZMM1,%XMM3 |
0xbc4ce2 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4ce7 VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 |
0xbc4cee VMOVQ %XMM0,%RDI |
0xbc4cf3 VMOVAPS (%RBX,%RDI,4),%XMM2 |
0xbc4cf8 VSUBPS %XMM3,%XMM2,%XMM2 |
0xbc4cfc VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4d01 VPEXTRQ $0x1,%XMM0,%RDI |
0xbc4d07 VMOVAPS (%RBX,%RDI,4),%XMM0 |
0xbc4d0c VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 |
0xbc4d13 VSUBPS %XMM1,%XMM0,%XMM0 |
0xbc4d17 VMOVAPS %XMM0,(%RBX,%RDI,4) |
0xbc4d1c VPSLLD $0x2,%ZMM31,%ZMM0 |
0xbc4d23 VPERMD %ZMM0,%ZMM6,%ZMM1 |
0xbc4d29 VUNPCKLPS %ZMM17,%ZMM15,%ZMM2 |
0xbc4d2f VUNPCKHPS %ZMM17,%ZMM15,%ZMM11 |
0xbc4d35 VUNPCKLPS %ZMM10,%ZMM20,%ZMM3 |
0xbc4d3b VUNPCKLPS %ZMM3,%ZMM2,%ZMM4 |
0xbc4d41 VUNPCKHPS %ZMM3,%ZMM2,%ZMM2 |
0xbc4d47 VEXTRACTF128 $0x1,%YMM4,%XMM3 |
0xbc4d4d VPMOVSXDQ %YMM1,%ZMM1 |
0xbc4d53 VMOVQ %XMM1,%RDI |
0xbc4d58 VADDPS (%RBX,%RDI,4),%XMM4,%XMM14 |
0xbc4d5d VMOVAPS %XMM14,(%RBX,%RDI,4) |
0xbc4d62 VPEXTRQ $0x1,%XMM1,%RDI |
0xbc4d68 VEXTRACTF32X4 $0x2,%ZMM4,%XMM14 |
0xbc4d6f VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 |
0xbc4d74 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4d79 VEXTRACTI128 $0x1,%YMM1,%XMM3 |
0xbc4d7f VMOVQ %XMM3,%RDI |
0xbc4d84 VADDPS (%RBX,%RDI,4),%XMM14,%XMM14 |
0xbc4d89 VMOVAPS %XMM14,(%RBX,%RDI,4) |
0xbc4d8e VEXTRACTF32X4 $0x3,%ZMM4,%XMM4 |
0xbc4d95 VPEXTRQ $0x1,%XMM3,%RDI |
0xbc4d9b VADDPS (%RBX,%RDI,4),%XMM4,%XMM3 |
0xbc4da0 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4da5 VEXTRACTI32X4 $0x2,%ZMM1,%XMM3 |
0xbc4dac VMOVQ %XMM3,%RDI |
0xbc4db1 VADDPS (%RBX,%RDI,4),%XMM2,%XMM4 |
0xbc4db6 VMOVAPS %XMM4,(%RBX,%RDI,4) |
0xbc4dbb VPERMD %ZMM0,%ZMM5,%ZMM0 |
0xbc4dc1 VPEXTRQ $0x1,%XMM3,%RDI |
0xbc4dc7 VEXTRACTF128 $0x1,%YMM2,%XMM3 |
0xbc4dcd VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 |
0xbc4dd2 VUNPCKHPS %ZMM10,%ZMM20,%ZMM4 |
0xbc4dd8 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4ddd VEXTRACTF32X4 $0x2,%ZMM2,%XMM3 |
0xbc4de4 VEXTRACTI32X4 $0x3,%ZMM1,%XMM1 |
0xbc4deb VMOVQ %XMM1,%RDI |
0xbc4df0 VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 |
0xbc4df5 VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4dfa VPEXTRQ $0x1,%XMM1,%RDI |
0xbc4e00 VUNPCKLPS %ZMM4,%ZMM11,%ZMM1 |
0xbc4e06 VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 |
0xbc4e0d VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 |
0xbc4e12 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4e17 VPMOVSXDQ %YMM0,%ZMM0 |
0xbc4e1d VMOVQ %XMM0,%RDI |
0xbc4e22 VADDPS (%RBX,%RDI,4),%XMM1,%XMM2 |
0xbc4e27 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4e2c VEXTRACTF128 $0x1,%YMM1,%XMM2 |
0xbc4e32 VEXTRACTF32X4 $0x2,%ZMM1,%XMM3 |
0xbc4e39 VPEXTRQ $0x1,%XMM0,%RDI |
0xbc4e3f VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 |
0xbc4e44 VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4e49 VEXTRACTI128 $0x1,%YMM0,%XMM2 |
0xbc4e4f VMOVQ %XMM2,%RDI |
0xbc4e54 VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 |
0xbc4e59 VUNPCKHPS %ZMM4,%ZMM11,%ZMM4 |
0xbc4e5f VMOVAPS %XMM3,(%RBX,%RDI,4) |
0xbc4e64 VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 |
0xbc4e6b VPEXTRQ $0x1,%XMM2,%RDI |
0xbc4e71 VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 |
0xbc4e76 VMOVAPS %XMM1,(%RBX,%RDI,4) |
0xbc4e7b VEXTRACTI32X4 $0x2,%ZMM0,%XMM1 |
0xbc4e82 VMOVQ %XMM1,%RDI |
0xbc4e87 VADDPS (%RBX,%RDI,4),%XMM4,%XMM2 |
0xbc4e8c VMOVAPS %XMM2,(%RBX,%RDI,4) |
0xbc4e91 VPEXTRQ $0x1,%XMM1,%RDI |
0xbc4e97 VEXTRACTF128 $0x1,%YMM4,%XMM1 |
0xbc4e9d VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 |
0xbc4ea2 VMOVAPS %XMM1,(%RBX,%RDI,4) |
0xbc4ea7 VEXTRACTF32X4 $0x2,%ZMM4,%XMM1 |
0xbc4eae VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 |
0xbc4eb5 VMOVQ %XMM0,%RDI |
0xbc4eba VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 |
0xbc4ebf VMOVAPS %XMM1,(%RBX,%RDI,4) |
0xbc4ec4 VPEXTRQ $0x1,%XMM0,%RDI |
0xbc4eca VEXTRACTF32X4 $0x3,%ZMM4,%XMM0 |
0xbc4ed1 VADDPS (%RBX,%RDI,4),%XMM0,%XMM0 |
0xbc4ed6 VMOVAPS %XMM0,(%RBX,%RDI,4) |
0xbc4edb ADD $0x40,%RSI |
0xbc4edf CMP %RDX,%RSI |
0xbc4ee2 JAE bc4f7e |
0xbc4ee8 XOR %EDI,%EDI |
0xbc4eea MOV %ESI,%R8D |
0xbc4eed JMP bc4f0d |
(16791) 0xbc4ef0 MOVL $0,(%RSP,%RDI,1) |
(16791) 0xbc4ef7 MOVL $0,0x40(%RSP,%RDI,1) |
(16791) 0xbc4eff ADD $0x4,%RDI |
(16791) 0xbc4f03 CMP $0x40,%RDI |
(16791) 0xbc4f07 JE bc4460 |
(16791) 0xbc4f0d MOVSXD %R8D,%R10 |
(16791) 0xbc4f10 MOVSXD (%R12,%R10,4),%R9 |
(16791) 0xbc4f14 MOV 0x4(%R12,%R10,4),%R11D |
(16791) 0xbc4f19 MOV %R11D,0x5c0(%RSP,%RDI,1) |
(16791) 0xbc4f21 MOV 0x8(%R12,%R10,4),%R11D |
(16791) 0xbc4f26 MOV %R11D,0x580(%RSP,%RDI,1) |
(16791) 0xbc4f2e MOV 0xc(%R12,%R10,4),%R10D |
(16791) 0xbc4f33 MOV %R10D,0x540(%RSP,%RDI,1) |
(16791) 0xbc4f3b LEA (%RSI,%RDI,1),%R10 |
(16791) 0xbc4f3f CMP %RDX,%R10 |
(16791) 0xbc4f42 JAE bc4ef0 |
(16791) 0xbc4f44 LEA (%R9,%R9,2),%R9 |
(16791) 0xbc4f48 SAL $0x4,%R9 |
(16791) 0xbc4f4c VMOVSS 0x4(%R15,%R9,1),%XMM11 |
(16791) 0xbc4f53 VMOVSS %XMM11,(%RSP,%RDI,1) |
(16791) 0xbc4f58 VMOVSS (%R15,%R9,1),%XMM11 |
(16791) 0xbc4f5e VMOVSS %XMM11,0x40(%RSP,%RDI,1) |
(16791) 0xbc4f64 LEA 0x4(%R8),%R9D |
(16791) 0xbc4f68 CMP %R13D,%R9D |
(16791) 0xbc4f6b CMOVL %R9D,%R8D |
(16791) 0xbc4f6f ADD $0x4,%RDI |
(16791) 0xbc4f73 CMP $0x40,%RDI |
(16791) 0xbc4f77 JNE bc4f0d |
0xbc4f79 JMP bc4460 |
/home/eoseret/gromacs-2024.2/src/gromacs/listed_forces/bonded.cpp: 1151 - 1185 |
-------------------------------------------------------------------------------- |
1151: for (i = 0; (i < nbonds); i += GMX_SIMD_REAL_WIDTH * nfa1) |
[...] |
1157: for (s = 0; s < GMX_SIMD_REAL_WIDTH; s++) |
1158: { |
1159: type = forceatoms[iu]; |
1160: ai[s] = forceatoms[iu + 1]; |
1161: aj[s] = forceatoms[iu + 2]; |
1162: ak[s] = forceatoms[iu + 3]; |
1163: |
1164: /* At the end fill the arrays with the last atoms and 0 params */ |
1165: if (i + s * nfa1 < nbonds) |
1166: { |
1167: coeff[s] = forceparams[type].harmonic.krA; |
1168: coeff[GMX_SIMD_REAL_WIDTH + s] = forceparams[type].harmonic.rA; |
1169: |
1170: if (iu + nfa1 < nbonds) |
[...] |
1177: coeff[s] = 0; |
1178: coeff[GMX_SIMD_REAL_WIDTH + s] = 0; |
1179: } |
1180: } |
1181: |
1182: /* Store the non PBC corrected distances packed and aligned */ |
1183: gatherLoadUTranspose<3>(reinterpret_cast<const real*>(x), ai, &xi_S, &yi_S, &zi_S); |
1184: gatherLoadUTranspose<3>(reinterpret_cast<const real*>(x), aj, &xj_S, &yj_S, &zj_S); |
1185: gatherLoadUTranspose<3>(reinterpret_cast<const real*>(x), ak, &xk_S, &yk_S, &zk_S); |
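These three gatherLoadUTranspose<3> calls compile to the VPMULLD + triple-VGATHERDPS groups at the top of the loop body (0xbc4460-0xbc451a): each vector of atom indices is scaled by the per-atom stride, and x, y and z are then gathered through three base pointers (R14, RAX, RCX). A minimal sketch of that pattern, assuming a plain float[3]-per-atom coordinate layout (matching the float const (*)[3] parameter in the kernel signature) rather than the templated GROMACS helper; gatherXyz is an illustrative name, not the GROMACS API:

#include <immintrin.h>

// Hypothetical stand-in for gatherLoadUTranspose<3>: gather x/y/z for the 16
// atoms whose indices sit in 'idx', from an xyz-interleaved float array.
// Mirrors the VPMULLD (index * stride) followed by three VGATHERDPS.
static inline void gatherXyz(const float* base, __m512i idx, __m512* x, __m512* y, __m512* z)
{
    __m512i off = _mm512_mullo_epi32(idx, _mm512_set1_epi32(3)); // 3 floats per atom
    *x = _mm512_i32gather_ps(off, base + 0, sizeof(float));      // x components
    *y = _mm512_i32gather_ps(off, base + 1, sizeof(float));      // y components
    *z = _mm512_i32gather_ps(off, base + 2, sizeof(float));      // z components
}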
/home/eoseret/gromacs-2024.2/src/gromacs/simd/include/gromacs/simd/impl_x86_avx_512/impl_x86_avx_512_util_float.h: 68 - 291 |
-------------------------------------------------------------------------------- |
68: return _mm512_slli_epi32(x.simdInternal_, 2); |
[...] |
113: v->simdInternal_ = _mm512_i32gather_ps(offset.simdInternal_, base, sizeof(float) * align_); |
[...] |
194: t5 = _mm512_unpacklo_ps(v0.simdInternal_, v2.simdInternal_); |
195: t6 = _mm512_unpackhi_ps(v0.simdInternal_, v2.simdInternal_); |
196: t7 = _mm512_unpacklo_ps(v1.simdInternal_, _mm512_setzero_ps()); |
197: t8 = _mm512_unpackhi_ps(v1.simdInternal_, _mm512_setzero_ps()); |
198: t[0] = _mm512_unpacklo_ps(t5, t7); // x0 y0 z0 0 | x4 y4 z4 0 |
199: t[1] = _mm512_unpackhi_ps(t5, t7); // x1 y1 z1 0 | x5 y5 z5 0 |
200: t[2] = _mm512_unpacklo_ps(t6, t8); // x2 y2 z2 0 | x6 y6 z6 0 |
201: t[3] = _mm512_unpackhi_ps(t6, t8); // x3 y3 z3 0 | x7 y7 z7 0 |
202: if (align % 4 == 0) |
203: { |
204: for (i = 0; i < 4; i++) |
205: { |
206: _mm_store_ps(base + o[i], |
207: _mm_add_ps(_mm_load_ps(base + o[i]), _mm512_castps512_ps128(t[i]))); |
208: _mm_store_ps(base + o[4 + i], |
209: _mm_add_ps(_mm_load_ps(base + o[4 + i]), _mm512_extractf32x4_ps(t[i], 1))); |
210: _mm_store_ps(base + o[8 + i], |
211: _mm_add_ps(_mm_load_ps(base + o[8 + i]), _mm512_extractf32x4_ps(t[i], 2))); |
212: _mm_store_ps(base + o[12 + i], |
213: _mm_add_ps(_mm_load_ps(base + o[12 + i]), _mm512_extractf32x4_ps(t[i], 3))); |
[...] |
272: t5 = _mm512_unpacklo_ps(v0.simdInternal_, v2.simdInternal_); |
273: t6 = _mm512_unpackhi_ps(v0.simdInternal_, v2.simdInternal_); |
274: t7 = _mm512_unpacklo_ps(v1.simdInternal_, _mm512_setzero_ps()); |
275: t8 = _mm512_unpackhi_ps(v1.simdInternal_, _mm512_setzero_ps()); |
276: t[0] = _mm512_unpacklo_ps(t5, t7); // x0 y0 z0 0 | x4 y4 z4 0 |
277: t[1] = _mm512_unpackhi_ps(t5, t7); // x1 y1 z1 0 | x5 y5 z5 0 |
278: t[2] = _mm512_unpacklo_ps(t6, t8); // x2 y2 z2 0 | x6 y6 z6 0 |
279: t[3] = _mm512_unpackhi_ps(t6, t8); // x3 y3 z3 0 | x7 y7 z7 0 |
280: if (align % 4 == 0) |
281: { |
282: for (i = 0; i < 4; i++) |
283: { |
284: _mm_store_ps(base + o[i], |
285: _mm_sub_ps(_mm_load_ps(base + o[i]), _mm512_castps512_ps128(t[i]))); |
286: _mm_store_ps(base + o[4 + i], |
287: _mm_sub_ps(_mm_load_ps(base + o[4 + i]), _mm512_extractf32x4_ps(t[i], 1))); |
288: _mm_store_ps(base + o[8 + i], |
289: _mm_sub_ps(_mm_load_ps(base + o[8 + i]), _mm512_extractf32x4_ps(t[i], 2))); |
290: _mm_store_ps(base + o[12 + i], |
291: _mm_sub_ps(_mm_load_ps(base + o[12 + i]), _mm512_extractf32x4_ps(t[i], 3))); |
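The unpack/extract sequence above is the source of the long VUNPCK*/VEXTRACTF32X4/VADDPS/VMOVAPS runs that dominate the loop body: the three 16-lane force components are interleaved into sixteen (x, y, z, 0) quadruples, and each quadruple is read-modify-written with a 128-bit load/add/store. A self-contained sketch plus a toy driver, assuming an aligned float[4]-per-atom force layout (matching the float (*)[4] parameter in the kernel signature) and offsets o[k] = 4*index (the VPSLLD $0x2 in the disassembly); scatterIncrXyz and the driver are illustrative, not the GROMACS API:

#include <immintrin.h>
#include <cstdio>

// Transpose three 16-lane component vectors into (x,y,z,0) quadruples and
// accumulate each into base[o[k]]..base[o[k]+3], as in the excerpt above.
static inline void scatterIncrXyz(float* base, const int o[16], __m512 vx, __m512 vy, __m512 vz)
{
    __m512 t5 = _mm512_unpacklo_ps(vx, vz);
    __m512 t6 = _mm512_unpackhi_ps(vx, vz);
    __m512 t7 = _mm512_unpacklo_ps(vy, _mm512_setzero_ps());
    __m512 t8 = _mm512_unpackhi_ps(vy, _mm512_setzero_ps());
    __m512 t[4];
    t[0] = _mm512_unpacklo_ps(t5, t7); // x0 y0 z0 0 | x4 y4 z4 0 | ...
    t[1] = _mm512_unpackhi_ps(t5, t7); // x1 y1 z1 0 | x5 y5 z5 0 | ...
    t[2] = _mm512_unpacklo_ps(t6, t8); // x2 y2 z2 0 | x6 y6 z6 0 | ...
    t[3] = _mm512_unpackhi_ps(t6, t8); // x3 y3 z3 0 | x7 y7 z7 0 | ...
    for (int i = 0; i < 4; i++) // 128-bit lane j of t[i] belongs to atom 4*j + i
    {
        _mm_store_ps(base + o[i],      _mm_add_ps(_mm_load_ps(base + o[i]),      _mm512_castps512_ps128(t[i])));
        _mm_store_ps(base + o[4 + i],  _mm_add_ps(_mm_load_ps(base + o[4 + i]),  _mm512_extractf32x4_ps(t[i], 1)));
        _mm_store_ps(base + o[8 + i],  _mm_add_ps(_mm_load_ps(base + o[8 + i]),  _mm512_extractf32x4_ps(t[i], 2)));
        _mm_store_ps(base + o[12 + i], _mm_add_ps(_mm_load_ps(base + o[12 + i]), _mm512_extractf32x4_ps(t[i], 3)));
    }
}

int main()
{
    alignas(64) float f[16 * 4] = {};           // toy force array: 16 atoms, float[4] each
    int o[16];
    for (int k = 0; k < 16; k++) o[k] = 4 * k;  // offset = 4 * atom index
    scatterIncrXyz(f, o, _mm512_set1_ps(1.0f), _mm512_set1_ps(2.0f), _mm512_set1_ps(3.0f));
    printf("atom 5: %g %g %g %g\n", f[20], f[21], f[22], f[23]); // prints 1 2 3 0
    return 0;
}

The decrement variant at lines 272-291 is identical except that _mm_sub_ps replaces _mm_add_ps; it is used for the central atom of each angle.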
/home/eoseret/gromacs-2024.2/src/gromacs/simd/include/gromacs/simd/impl_x86_avx_512/impl_x86_avx_512_simd_float.h: 181 - 451 |
-------------------------------------------------------------------------------- |
181: return { _mm512_add_ps(a.simdInternal_, b.simdInternal_) }; |
182: } |
183: |
184: static inline SimdFloat gmx_simdcall operator-(SimdFloat a, SimdFloat b) |
185: { |
186: return { _mm512_sub_ps(a.simdInternal_, b.simdInternal_) }; |
[...] |
197: return { _mm512_mul_ps(a.simdInternal_, b.simdInternal_) }; |
198: } |
199: |
200: static inline SimdFloat gmx_simdcall fma(SimdFloat a, SimdFloat b, SimdFloat c) |
201: { |
202: return { _mm512_fmadd_ps(a.simdInternal_, b.simdInternal_, c.simdInternal_) }; |
[...] |
212: return { _mm512_fnmadd_ps(a.simdInternal_, b.simdInternal_, c.simdInternal_) }; |
[...] |
224: return { _mm512_rsqrt14_ps(x.simdInternal_) }; |
225: } |
226: |
227: static inline SimdFloat gmx_simdcall rcp(SimdFloat x) |
228: { |
229: return { _mm512_rcp14_ps(x.simdInternal_) }; |
[...] |
252: return { _mm512_maskz_rsqrt14_ps(m.simdInternal_, x.simdInternal_) }; |
[...] |
263: return { _mm512_castsi512_ps(_mm512_andnot_epi32(_mm512_castps_si512(_mm512_set1_ps(GMX_FLOAT_NEGZERO)), |
[...] |
269: return { _mm512_max_ps(a.simdInternal_, b.simdInternal_) }; |
270: } |
271: |
272: static inline SimdFloat gmx_simdcall min(SimdFloat a, SimdFloat b) |
273: { |
274: return { _mm512_min_ps(a.simdInternal_, b.simdInternal_) }; |
275: } |
276: |
277: static inline SimdFloat gmx_simdcall round(SimdFloat x) |
278: { |
279: return { _mm512_roundscale_ps(x.simdInternal_, 0) }; |
[...] |
367: return { _mm512_cmp_ps_mask(a.simdInternal_, b.simdInternal_, _CMP_LT_OQ) }; |
[...] |
408: return { _mm512_mask_blend_ps(sel.simdInternal_, a.simdInternal_, b.simdInternal_) }; |
409: } |
410: |
411: static inline SimdFloat gmx_simdcall copysign(SimdFloat a, SimdFloat b) |
412: { |
413: return { _mm512_castsi512_ps(_mm512_ternarylogic_epi32(_mm512_castps_si512(a.simdInternal_), |
[...] |
451: return { _mm512_mullo_epi32(a.simdInternal_, b.simdInternal_) }; |
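rsqrt() above returns only the 14-bit-accurate VRSQRT14PS estimate; the VMULPS/VFMADD213PS/VMULPS triple that follows every VRSQRT14PS in the disassembly (with the constants held in ZMM12/ZMM13 across the whole loop) is the single Newton-Raphson step GROMACS applies to reach single-precision accuracy. A minimal sketch of that refinement; the factoring and the constants (-0.5, -3.0) are inferred from the instruction shape, not taken from the GROMACS source:

#include <immintrin.h>

// One Newton-Raphson step on the 14-bit rsqrt estimate:
// y1 = 0.5 * y0 * (3 - x*y0^2), factored as (-0.5*y0) * (x*y0^2 - 3)
// so it maps onto the VMULPS / VFMADD213PS / VMULPS sequence in the loop.
static inline __m512 invsqrtNR(__m512 x)
{
    const __m512 cHalfNeg  = _mm512_set1_ps(-0.5f); // assumed content of zmm12
    const __m512 cThreeNeg = _mm512_set1_ps(-3.0f); // assumed content of zmm13
    __m512 lu = _mm512_rsqrt14_ps(x);          // VRSQRT14PS
    __m512 a  = _mm512_mul_ps(cHalfNeg, lu);   // -0.5 * y0
    __m512 t  = _mm512_mul_ps(x, lu);          // x * y0
    t = _mm512_fmadd_ps(t, lu, cThreeNeg);     // x*y0^2 - 3   (VFMADD213PS)
    return _mm512_mul_ps(t, a);                // 0.5*y0*(3 - x*y0^2)
}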
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►100.00+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►100.00+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►100.00+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►71.51+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 | |
►14.52+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
►13.98+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►80.36+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 | |
►16.83+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
►2.81+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►81.50+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 | |
►17.51+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►95.92+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 | |
►4.08+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►81.18+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 | |
►9.75+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
►8.47+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
Coverage (%) | Name | Source Location | Module |
---|---|---|---|
►100.00+ | calculateSimpleBond(int, int, [...] | bonded.cpp:4143 | libgromacs_mpi.so.9.0.0 |
○ | (anonymous namespace)::calc_on[...] | listed_forces.cpp:356 | libgromacs_mpi.so.9.0.0 |
○ | .omp_outlined.#0xbe5d40 | listed_forces.cpp:428 | libgromacs_mpi.so.9.0.0 |
○ | __kmp_invoke_microtask | libomp.so | |
○ | __kmp_fork_call | libomp.so | |
○ | __kmpc_fork_call | libomp.so | |
○ | ListedForces::calculate(gmx_wa[...] | listed_forces.cpp:387 | libgromacs_mpi.so.9.0.0 |
○ | do_force(_IO_FILE*, t_commrec [...] | sim_util.cpp:2047 | libgromacs_mpi.so.9.0.0 |
○ | gmx::LegacySimulator::do_md() | md.cpp:1248 | libgromacs_mpi.so.9.0.0 |
○ | gmx::Mdrunner::mdrunner() | runner.cpp:2311 | libgromacs_mpi.so.9.0.0 |
○ | gmx::gmx_mdrun(int, gmx_hw_inf[...] | mdrun.cpp:280 | gmx_mpi |
○ | gmx::gmx_mdrun(int, char**) | mdrun.cpp:82 | gmx_mpi |
○ | gmx::CommandLineModuleManager:[...] | cmdlinemodulemanager.cpp:569 | libgromacs_mpi.so.9.0.0 |
○ | main | gmx.cpp:58 | gmx_mpi |
○ | __libc_start_call_main | libc.so.6 |
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.01 |
CQA speedup if FP arith vectorized | 1.06 |
CQA speedup if fully vectorized | 1.29 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.08 |
Bottlenecks | micro-operation queue |
Function | std::enable_if<((BondedKernelFlavor)0)==((BondedKernelFlavor)0), float>::type (anonymous namespace)::angles<(BondedKernelFlavor)0>(int, int const*, t_iparams const*, float const (*) [3], float (*) [4], float (*) [3], t_pbc const*, float, float*, gmx::ArrayRef |
Source | bonded.cpp:1151-1151,bonded.cpp:1157-1157,bonded.cpp:1183-1185,impl_x86_avx_512_util_float.h:68-68,impl_x86_avx_512_util_float.h:113-113,impl_x86_avx_512_util_float.h:194-201,impl_x86_avx_512_util_float.h:206-213,impl_x86_avx_512_util_float.h:272-279,impl_x86_avx_512_util_float.h:284-291,impl_x86_avx_512_simd_float.h:181-181,impl_x86_avx_512_simd_float.h:186-186,impl_x86_avx_512_simd_float.h:197-197,impl_x86_avx_512_simd_float.h:202-202,impl_x86_avx_512_simd_float.h:212-212,impl_x86_avx_512_simd_float.h:224-224,impl_x86_avx_512_simd_float.h:229-229,impl_x86_avx_512_simd_float.h:252-252,impl_x86_avx_512_simd_float.h:263-263,impl_x86_avx_512_simd_float.h:269-269,impl_x86_avx_512_simd_float.h:274-274,impl_x86_avx_512_simd_float.h:279-279,impl_x86_avx_512_simd_float.h:367-367,impl_x86_avx_512_simd_float.h:408-408,impl_x86_avx_512_simd_float.h:413-413,impl_x86_avx_512_simd_float.h:451-451 |
Source loop unroll info | NA |
Source loop unroll confidence level | NA |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 200.75 |
CQA cycles if no scalar integer | 198.50 |
CQA cycles if FP arith vectorized | 190.26 |
CQA cycles if fully vectorized | 155.63 |
Front-end cycles | 200.75 |
P0 cycles | 1.25 |
P1 cycles | 0.75 |
P2 cycles | 0.75 |
P3 cycles | 0.50 |
P4 cycles | 1.25 |
P5 cycles | 48.67 |
P6 cycles | 48.67 |
P7 cycles | 48.67 |
P8 cycles | 186.00 |
P9 cycles | 178.00 |
P10 cycles | 154.42 |
P11 cycles | 156.58 |
P12 cycles | 129.00 |
P13 cycles | 129.00 |
DIV/SQRT cycles | 0.00 |
Inter-iter dependencies cycles | 1 |
FE+BE cycles (UFS) | NA |
Stall cycles (UFS) | NA |
Nb insns | 459.50 |
Nb uops | 1204.50 |
Nb loads | 83.00 |
Nb stores | 48.00 |
Nb stack references | 24.00 |
FLOP/cycle | 12.67 |
Nb FLOP add-sub | 752.00 |
Nb FLOP mul | 1120.00 |
Nb FLOP fma | 288.00 |
Nb FLOP div | 0.00 |
Nb FLOP rcp | 16.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 80.00 |
Bytes/cycle | 18.49 |
Bytes prefetched | 0.00 |
Bytes loaded | 2944.00 |
Bytes stored | 768.00 |
Stride 0 | 2.00 |
Stride 1 | 0.00 |
Stride n | 3.00 |
Stride unknown | 0.50 |
Stride indirect | 6.00 |
Vectorization ratio all | 88.99 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | 100.00 |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 73.51 |
Vector-efficiency ratio all | 57.26 |
Vector-efficiency ratio load | 55.42 |
Vector-efficiency ratio store | 25.00 |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 56.63 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 43.14 |
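The FLOP/cycle and Bytes/cycle figures above follow directly from the operation counts, assuming an FMA counts as two FLOPs and RCP/RSQRT count as one each (which reproduces the reported values exactly):

FLOP = 752 (add-sub) + 1120 (mul) + 2 x 288 (fma) + 16 (rcp) + 80 (rsqrt) = 2544
FLOP/cycle = 2544 / 200.75 CQA cycles = 12.67
Bytes/cycle = (2944 loaded + 768 stored) / 200.75 CQA cycles = 18.49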
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.01 |
CQA speedup if FP arith vectorized | 1.06 |
CQA speedup if fully vectorized | 1.29 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.08 |
Bottlenecks | micro-operation queue |
Function | std::enable_if<((BondedKernelFlavor)0)==((BondedKernelFlavor)0), float>::type (anonymous namespace)::angles<(BondedKernelFlavor)0>(int, int const*, t_iparams const*, float const (*) [3], float (*) [4], float (*) [3], t_pbc const*, float, float*, gmx::ArrayRef |
Source | bonded.cpp:1151-1151,bonded.cpp:1157-1157,bonded.cpp:1183-1185,impl_x86_avx_512_util_float.h:68-68,impl_x86_avx_512_util_float.h:113-113,impl_x86_avx_512_util_float.h:194-201,impl_x86_avx_512_util_float.h:206-213,impl_x86_avx_512_util_float.h:272-279,impl_x86_avx_512_util_float.h:284-291,impl_x86_avx_512_simd_float.h:181-181,impl_x86_avx_512_simd_float.h:186-186,impl_x86_avx_512_simd_float.h:197-197,impl_x86_avx_512_simd_float.h:202-202,impl_x86_avx_512_simd_float.h:212-212,impl_x86_avx_512_simd_float.h:224-224,impl_x86_avx_512_simd_float.h:229-229,impl_x86_avx_512_simd_float.h:252-252,impl_x86_avx_512_simd_float.h:263-263,impl_x86_avx_512_simd_float.h:269-269,impl_x86_avx_512_simd_float.h:274-274,impl_x86_avx_512_simd_float.h:279-279,impl_x86_avx_512_simd_float.h:367-367,impl_x86_avx_512_simd_float.h:408-408,impl_x86_avx_512_simd_float.h:413-413,impl_x86_avx_512_simd_float.h:451-451 |
Source loop unroll info | NA |
Source loop unroll confidence level | NA |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 200.83 |
CQA cycles if no scalar integer | 198.50 |
CQA cycles if FP arith vectorized | 190.36 |
CQA cycles if fully vectorized | 155.63 |
Front-end cycles | 200.83 |
P0 cycles | 1.50 |
P1 cycles | 0.75 |
P2 cycles | 0.75 |
P3 cycles | 0.50 |
P4 cycles | 1.50 |
P5 cycles | 48.67 |
P6 cycles | 48.67 |
P7 cycles | 48.67 |
P8 cycles | 186.00 |
P9 cycles | 178.00 |
P10 cycles | 154.42 |
P11 cycles | 156.58 |
P12 cycles | 129.00 |
P13 cycles | 129.00 |
DIV/SQRT cycles | 0.00 |
Inter-iter dependencies cycles | 1 |
FE+BE cycles (UFS) | NA |
Stall cycles (UFS) | NA |
Nb insns | 460.00 |
Nb uops | 1205.00 |
Nb loads | 83.00 |
Nb stores | 48.00 |
Nb stack references | 24.00 |
FLOP/cycle | 12.67 |
Nb FLOP add-sub | 752.00 |
Nb FLOP mul | 1120.00 |
Nb FLOP fma | 288.00 |
Nb FLOP div | 0.00 |
Nb FLOP rcp | 16.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 80.00 |
Bytes/cycle | 18.48 |
Bytes prefetched | 0.00 |
Bytes loaded | 2944.00 |
Bytes stored | 768.00 |
Stride 0 | 2.00 |
Stride 1 | 0.00 |
Stride n | 3.00 |
Stride unknown | 1.00 |
Stride indirect | 6.00 |
Vectorization ratio all | 88.99 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | 100.00 |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 73.51 |
Vector-efficiency ratio all | 57.26 |
Vector-efficiency ratio load | 55.42 |
Vector-efficiency ratio store | 25.00 |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 56.63 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 43.14 |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.01 |
CQA speedup if FP arith vectorized | 1.06 |
CQA speedup if fully vectorized | 1.29 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.08 |
Bottlenecks | micro-operation queue |
Function | std::enable_if<((BondedKernelFlavor)0)==((BondedKernelFlavor)0), float>::type (anonymous namespace)::angles<(BondedKernelFlavor)0>(int, int const*, t_iparams const*, float const (*) [3], float (*) [4], float (*) [3], t_pbc const*, float, float*, gmx::ArrayRef |
Source | bonded.cpp:1151-1151,bonded.cpp:1157-1157,bonded.cpp:1183-1185,impl_x86_avx_512_util_float.h:68-68,impl_x86_avx_512_util_float.h:113-113,impl_x86_avx_512_util_float.h:194-201,impl_x86_avx_512_util_float.h:206-213,impl_x86_avx_512_util_float.h:272-279,impl_x86_avx_512_util_float.h:284-291,impl_x86_avx_512_simd_float.h:181-181,impl_x86_avx_512_simd_float.h:186-186,impl_x86_avx_512_simd_float.h:197-197,impl_x86_avx_512_simd_float.h:202-202,impl_x86_avx_512_simd_float.h:212-212,impl_x86_avx_512_simd_float.h:224-224,impl_x86_avx_512_simd_float.h:229-229,impl_x86_avx_512_simd_float.h:252-252,impl_x86_avx_512_simd_float.h:263-263,impl_x86_avx_512_simd_float.h:269-269,impl_x86_avx_512_simd_float.h:274-274,impl_x86_avx_512_simd_float.h:279-279,impl_x86_avx_512_simd_float.h:367-367,impl_x86_avx_512_simd_float.h:408-408,impl_x86_avx_512_simd_float.h:413-413,impl_x86_avx_512_simd_float.h:451-451 |
Source loop unroll info | NA |
Source loop unroll confidence level | NA |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 200.67 |
CQA cycles if no scalar integer | 198.50 |
CQA cycles if FP arith vectorized | 190.17 |
CQA cycles if fully vectorized | 155.63 |
Front-end cycles | 200.67 |
P0 cycles | 1.00 |
P1 cycles | 0.75 |
P2 cycles | 0.75 |
P3 cycles | 0.50 |
P4 cycles | 1.00 |
P5 cycles | 48.67 |
P6 cycles | 48.67 |
P7 cycles | 48.67 |
P8 cycles | 186.00 |
P9 cycles | 178.00 |
P10 cycles | 154.42 |
P11 cycles | 156.58 |
P12 cycles | 129.00 |
P13 cycles | 129.00 |
DIV/SQRT cycles | 0.00 |
Inter-iter dependencies cycles | 1 |
FE+BE cycles (UFS) | NA |
Stall cycles (UFS) | NA |
Nb insns | 459.00 |
Nb uops | 1204.00 |
Nb loads | 83.00 |
Nb stores | 48.00 |
Nb stack references | 24.00 |
FLOP/cycle | 12.68 |
Nb FLOP add-sub | 752.00 |
Nb FLOP mul | 1120.00 |
Nb FLOP fma | 288.00 |
Nb FLOP div | 0.00 |
Nb FLOP rcp | 16.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 80.00 |
Bytes/cycle | 18.50 |
Bytes prefetched | 0.00 |
Bytes loaded | 2944.00 |
Bytes stored | 768.00 |
Stride 0 | 2.00 |
Stride 1 | 0.00 |
Stride n | 3.00 |
Stride unknown | 0.00 |
Stride indirect | 6.00 |
Vectorization ratio all | 88.99 |
Vectorization ratio load | 100.00 |
Vectorization ratio store | 100.00 |
Vectorization ratio mul | 100.00 |
Vectorization ratio add_sub | 100.00 |
Vectorization ratio fma | 100.00 |
Vectorization ratio div_sqrt | 100.00 |
Vectorization ratio other | 73.51 |
Vector-efficiency ratio all | 57.26 |
Vector-efficiency ratio load | 55.42 |
Vector-efficiency ratio store | 25.00 |
Vector-efficiency ratio mul | 100.00 |
Vector-efficiency ratio add_sub | 56.63 |
Vector-efficiency ratio fma | 100.00 |
Vector-efficiency ratio div_sqrt | 100.00 |
Vector-efficiency ratio other | 43.14 |
Path / |
Function | std::enable_if<((BondedKernelFlavor)0)==((BondedKernelFlavor)0), float>::type (anonymous namespace)::angles<(BondedKernelFlavor)0>(int, int const*, t_iparams const*, float const (*) [3], float (*) [4], float (*) [3], t_pbc const*, float, float*, gmx::ArrayRef |
Source file and lines | bonded.cpp:1151-1185 |
Module | libgromacs_mpi.so.9.0.0 |
nb instructions | 459.50 |
nb uops | 1204.50 |
loop length | 2705.50 |
used x86 registers | 9 |
used mmx registers | 0 |
used xmm registers | 16 |
used ymm registers | 7 |
used zmm registers | 32 |
nb stack references | 24 |
ADD-SUB / MUL ratio | 1.19 |
micro-operation queue | 200.75 cycles |
front end | 200.75 cycles |
ALU0/BRU0 | ALU1 | ALU2 | ALU3 | BRU1 | AGU0 | AGU1 | AGU2 | FP0 | FP1 | FP2 | FP3 | FP4 | FP5 | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 1.25 | 0.75 | 0.75 | 0.50 | 1.25 | 40.67 | 40.67 | 40.67 | 138.00 | 138.00 | 137.92 | 138.08 | 129.00 | 129.00 |
cycles | 1.25 | 0.75 | 0.75 | 0.50 | 1.25 | 48.67 | 48.67 | 48.67 | 186.00 | 178.00 | 154.42 | 156.58 | 129.00 | 129.00 |
Cycles executing div or sqrt instructions | NA |
Longest recurrence chain latency (RecMII) | 1.00 |
Front-end | 200.75 |
Dispatch | 186.00 |
Data deps. | 1.00 |
Overall L1 | 200.75 |
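The overall L1 bound is consistent with taking the worst of the three limits above: Overall L1 = max(Front-end 200.75, Dispatch 186.00, Data deps. 1.00) = 200.75 cycles, i.e. this loop is front-end bound (micro-operation queue) rather than dispatch- or dependency-bound.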
Vectorization ratio (INT) |
all | 46% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 40% |
Vectorization ratio (FP) |
all | 100% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Vectorization ratio (INT+FP) |
all | 88% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 73% |
Vector-efficiency ratio (INT) |
all | 33% |
load | 83% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 28% |
Vector-efficiency ratio (FP) |
all | 63% |
load | 53% |
store | 25% |
mul | 100% |
add-sub | 56% |
fma | 100% |
div/sqrt | 100% |
other | 55% |
Vector-efficiency ratio (INT+FP) |
all | 57% |
load | 55% |
store | 25% |
mul | 100% |
add-sub | 56% |
fma | 100% |
div/sqrt | 100% |
other | 43% |
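The 25% store figure in the efficiency tables is visible directly in the disassembly: every store in the loop body is a 128-bit VMOVAPS %XMMn write-back of one (x, y, z, 0) force quadruple, so each store uses 128/512 = 25% of the vector width. This partial-width store and extract traffic is presumably a large part of what the "CQA speedup if fully vectorized" estimate (1.29x) reflects.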
Function | std::enable_if<((BondedKernelFlavor)0)==((BondedKernelFlavor)0), float>::type (anonymous namespace)::angles<(BondedKernelFlavor)0>(int, int const*, t_iparams const*, float const (*) [3], float (*) [4], float (*) [3], t_pbc const*, float, float*, gmx::ArrayRef |
Source file and lines | bonded.cpp:1151-1185 |
Module | libgromacs_mpi.so.9.0.0 |
nb instructions | 460 |
nb uops | 1205 |
loop length | 2708 |
used x86 registers | 9 |
used mmx registers | 0 |
used xmm registers | 16 |
used ymm registers | 7 |
used zmm registers | 32 |
nb stack references | 24 |
ADD-SUB / MUL ratio | 1.19 |
micro-operation queue | 200.83 cycles |
front end | 200.83 cycles |
ALU0/BRU0 | ALU1 | ALU2 | ALU3 | BRU1 | AGU0 | AGU1 | AGU2 | FP0 | FP1 | FP2 | FP3 | FP4 | FP5 | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 1.50 | 0.75 | 0.75 | 0.50 | 1.50 | 40.67 | 40.67 | 40.67 | 138.00 | 138.00 | 137.92 | 138.08 | 129.00 | 129.00 |
cycles | 1.50 | 0.75 | 0.75 | 0.50 | 1.50 | 48.67 | 48.67 | 48.67 | 186.00 | 178.00 | 154.42 | 156.58 | 129.00 | 129.00 |
Cycles executing div or sqrt instructions | NA |
Longest recurrence chain latency (RecMII) | 1.00 |
Front-end | 200.83 |
Dispatch | 186.00 |
Data deps. | 1.00 |
Overall L1 | 200.83 |
Vectorization ratio (INT) |
all | 46% |
load | 100% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 40% |
Vectorization ratio (FP) |
all | 100% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 100% |
Vectorization ratio (INT+FP) |
all | 88% |
load | 100% |
store | 100% |
mul | 100% |
add-sub | 100% |
fma | 100% |
div/sqrt | 100% |
other | 73% |
Vector-efficiency ratio (INT) |
all | 33% |
load | 83% |
store | NA (no store vectorizable/vectorized instructions) |
mul | 100% |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 28% |
Vector-efficiency ratio (FP) |
all | 63% |
load | 53% |
store | 25% |
mul | 100% |
add-sub | 56% |
fma | 100% |
div/sqrt | 100% |
other | 55% |
Vector-efficiency ratio (INT+FP) |
all | 57% |
load | 55% |
store | 25% |
mul | 100% |
add-sub | 56% |
fma | 100% |
div/sqrt | 100% |
other | 43% |
Instruction | Nb FU | ALU0/BRU0 | ALU1 | ALU2 | ALU3 | BRU1 | AGU0 | AGU1 | AGU2 | FP0 | FP1 | FP2 | FP3 | FP4 | FP5 | Latency | Recip. throughput | Vectorization |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VMOVDQA64 0x5c0(%RSP),%ZMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPMULLD %ZMM9,%ZMM20,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VXORPS %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%R14,%ZMM11,4),%ZMM14{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM15,%XMM15,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RAX,%ZMM11,4),%ZMM15{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM17,%XMM17,%XMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RCX,%ZMM11,4),%ZMM17{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VMOVDQA64 0x580(%RSP),%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPMULLD %ZMM9,%ZMM11,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VXORPS %XMM22,%XMM22,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%R14,%ZMM21,4),%ZMM22{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM23,%XMM23,%XMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RAX,%ZMM21,4),%ZMM23{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM24,%XMM24,%XMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RCX,%ZMM21,4),%ZMM24{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VMOVDQA64 0x540(%RSP),%ZMM31 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPMULLD %ZMM9,%ZMM31,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VXORPS %XMM26,%XMM26,%XMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%R14,%ZMM21,4),%ZMM26{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM27,%XMM27,%XMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RAX,%ZMM21,4),%ZMM27{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM28,%XMM28,%XMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RCX,%ZMM21,4),%ZMM28{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VSUBPS %ZMM22,%ZMM14,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM23,%ZMM15,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM22,%ZMM26,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM23,%ZMM27,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM28,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x3c0(%RSP),%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS 0x40(%RSP),%ZMM0,%ZMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x500(%RSP),%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM17,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM24,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x4c0(%RSP),%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM24,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM26,%ZMM14,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x480(%RSP),%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM2,%ZMM24,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM14,%ZMM15,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x440(%RSP),%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM3,%ZMM24,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM14,%ZMM17,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x400(%RSP),%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM4,%ZMM15,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0xc0(%RSP),%ZMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM5,%ZMM17,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM26,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x80(%RSP),%ZMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM6,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM17,%ZMM15,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM7,%ZMM24,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM8,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM17,%ZMM24,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM23,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM24,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM1,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM26,%ZMM21,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM2,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM21,%ZMM22,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM3,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM21,%ZMM23,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM4,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM23,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM5,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM26,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM6,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM23,%ZMM22,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM7,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM22,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM8,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM22,%ZMM24,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM17,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM15,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM21,%ZMM14,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM17,%ZMM17,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM15,%ZMM15,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM14,%ZMM14,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM24,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM23,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM29,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM21,%ZMM21,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM30,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM26,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM0,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM30,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM0,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM0,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM0,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM26,%ZMM1,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM28,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM29,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRCP14PS %ZMM1,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM26,%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM28,%ZMM28,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD213PS 0x380(%RSP),%ZMM29,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM29,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM28,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMAXPS 0x340(%RSP),%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 1 | vect (100.0%) |
VMINPS %ZMM16,%ZMM0,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 1 | vect (100.0%) |
VMINPS 0x300(%RSP),%ZMM1,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 1 | vect (100.0%) |
VANDPS %ZMM18,%ZMM28,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.50 | vect (100.0%) |
VCMPPS $0x1,%ZMM1,%ZMM19,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VCMPPS $0x1,%ZMM28,%ZMM10,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMOVAPS %ZMM19,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VFNMADD213PS %ZMM19,%ZMM1,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VCMPPS $0x1,%ZMM16,%ZMM1,%K3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM29,%ZMM1{%K3}{z} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM1,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM1,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM1,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM2,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS %ZMM28,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMULPS %ZMM1,%ZMM29,%ZMM2{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VANDPS %ZMM18,%ZMM2,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.50 | vect (100.0%) |
VCMPPS $0x1,%ZMM1,%ZMM19,%K3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VSUBPS %ZMM1,%ZMM16,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM19,%ZMM29,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VCMPPS $0x1,%ZMM16,%ZMM1,%K4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM29,%ZMM30{%K4}{z} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM30,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM30,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM30,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM3,%ZMM4,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM1,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS %ZMM29,%ZMM4{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMOVAPS %ZMM1,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMULPS %ZMM3,%ZMM29,%ZMM30{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM4,%ZMM4,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x2c0(%RSP),%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS 0x280(%RSP),%ZMM3,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VFMADD213PS 0x1c0(%RSP),%ZMM3,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM4,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x240(%RSP),%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS 0x200(%RSP),%ZMM3,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VFMADD231PS %ZMM29,%ZMM3,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VFMADD213PS %ZMM30,%ZMM30,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VSUBPS %ZMM4,%ZMM25,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM4,%ZMM3,%ZMM4{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VCMPPS $0xe,0x180(%RSP),%ZMM1,%K3 | 2 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMOVAPS %ZMM4,%ZMM1{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VPTERNLOGD $-0x28,0x140(%RSP),%ZMM2,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 1 | vect (100.0%) |
VADDPS %ZMM1,%ZMM1,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x100(%RSP),%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM2,%ZMM3,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM1,%ZMM25,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS %ZMM2,%ZMM3{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMOVAPS %ZMM3,%ZMM1{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VSUBPS %ZMM1,%ZMM27,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM0,%ZMM16,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM0,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM2,%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM2,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM2,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM3,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS (%RSP),%ZMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM28,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM26,%ZMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM17,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM24,%ZMM27,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM15,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM23,%ZMM27,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM14,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM21,%ZMM27,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VPSLLD $0x2,%ZMM20,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 1 | vect (100.0%) |
VMOVDQA -0x858942(%RIP),%YMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (50.0%) |
VPERMD %ZMM0,%ZMM6,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VUNPCKLPS %ZMM29,%ZMM22,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM10,%ZMM28,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM3,%ZMM2,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM3,%ZMM2,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF128 $0x1,%YMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM20,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM20,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM20,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VMOVDQA -0x85f077(%RIP),%YMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (50.0%) |
VPERMD %ZMM0,%ZMM5,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VUNPCKHPS %ZMM29,%ZMM22,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VUNPCKHPS %ZMM10,%ZMM28,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VUNPCKLPS %ZMM3,%ZMM4,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM3,%ZMM4,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x2,%ZMM1,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM0,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM3,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM3,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x3,%ZMM3,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VMULPS %ZMM30,%ZMM26,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM26,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM23,%ZMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM15,%ZMM27,%ZMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM24,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM17,%ZMM27,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM21,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM14,%ZMM27,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VADDPS %ZMM15,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM20,%ZMM28,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM17,%ZMM29,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPSLLD $0x2,%ZMM11,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 1 | vect (100.0%) |
VPERMD %ZMM3,%ZMM6,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VUNPCKLPS %ZMM2,%ZMM0,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM2,%ZMM0,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM10,%ZMM1,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM0,%ZMM14,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM0,%ZMM14,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF128 $0x1,%YMM2,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM4,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM4,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM21,%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM21,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM4,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM2,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM14,%XMM21,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM14,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM22,%XMM21,%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM21,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPERMD %ZMM3,%ZMM5,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM14,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM14,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM10,%ZMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VSUBPS %XMM0,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM14,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM4,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM14,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM4,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKLPS %ZMM1,%ZMM11,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM0,%XMM2,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPMOVSXDQ %YMM3,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM4,%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM3,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM14,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM3,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VUNPCKHPS %ZMM1,%ZMM11,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTI32X4 $0x2,%ZMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM1,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VSUBPS %XMM3,%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM3,%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM1,%XMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPSLLD $0x2,%ZMM31,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 1 | vect (100.0%) |
VPERMD %ZMM0,%ZMM6,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VUNPCKLPS %ZMM17,%ZMM15,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM17,%ZMM15,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM10,%ZMM20,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM3,%ZMM2,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM3,%ZMM2,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF128 $0x1,%YMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPERMD %ZMM0,%ZMM5,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM10,%ZMM20,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VUNPCKLPS %ZMM4,%ZMM11,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPMOVSXDQ %YMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM4,%ZMM11,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM0,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM4,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
ADD $0x40,%RSI | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | N/A |
CMP %RDX,%RSI | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | N/A |
JAE bc4f7e <_ZN12_GLOBAL__N_16anglesIL18BondedKernelFlavor0EEENSt9enable_ifIXeqT_LS1_0EEfE4typeEiPKiPK9t_iparamsPA3_KfPA4_fPA3_fPK5t_pbcfPfN3gmx8ArrayRefISA_EEP8t_fcdataP12t_disresdataP12t_oriresdataPi+0xd1e> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50-1 | N/A |
XOR %EDI,%EDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | N/A |
MOV %ESI,%R8D | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | scal (6.3%) |
JMP bc4f0d <_ZN12_GLOBAL__N_16anglesIL18BondedKernelFlavor0EEENSt9enable_ifIXeqT_LS1_0EEfE4typeEiPKiPK9t_iparamsPA3_KfPA4_fPA3_fPK5t_pbcfPfN3gmx8ArrayRefISA_EEP8t_fcdataP12t_disresdataP12t_oriresdataPi+0xcad> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | N/A |
JMP bc4460 <_ZN12_GLOBAL__N_16anglesIL18BondedKernelFlavor0EEENSt9enable_ifIXeqT_LS1_0EEfE4typeEiPKiPK9t_iparamsPA3_KfPA4_fPA3_fPK5t_pbcfPfN3gmx8ArrayRefISA_EEP8t_fcdataP12t_disresdataP12t_oriresdataPi+0x200> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | N/A |
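
A few notes on the recognizable patterns in the listing above. Between the coordinate gathers and the dot products, each displacement runs through three VMULPS / VRNDSCALEPS $0 / VSUBPS stages that correct first 3, then 2, then 1 components. That is the shape of a triclinic minimum-image correction; the sketch below is a hedged reconstruction, with invDiag and box standing in for the stack-resident broadcast constants (the ZMM0-ZMM8 loads from %RSP).

```cpp
#include <immintrin.h>

// Sketch under the triclinic-PBC reading of the VRNDSCALEPS blocks.
// invDiag[d] ~ 1/box[d][d]; box rows are the (lower-triangular) lattice vectors.
static inline void pbcMinimumImage(__m512* dx, __m512* dy, __m512* dz,
                                   const __m512 invDiag[3], const __m512 box[3][3])
{
    // z axis: shift = round(dz / box_zz); 3 components corrected
    __m512 sh = _mm512_roundscale_ps(_mm512_mul_ps(*dz, invDiag[2]), 0); // VRNDSCALEPS $0
    *dx = _mm512_sub_ps(*dx, _mm512_mul_ps(sh, box[2][0]));
    *dy = _mm512_sub_ps(*dy, _mm512_mul_ps(sh, box[2][1]));
    *dz = _mm512_sub_ps(*dz, _mm512_mul_ps(sh, box[2][2]));
    // y axis: 2 components corrected
    sh  = _mm512_roundscale_ps(_mm512_mul_ps(*dy, invDiag[1]), 0);
    *dx = _mm512_sub_ps(*dx, _mm512_mul_ps(sh, box[1][0]));
    *dy = _mm512_sub_ps(*dy, _mm512_mul_ps(sh, box[1][1]));
    // x axis: 1 component corrected
    sh  = _mm512_roundscale_ps(_mm512_mul_ps(*dx, invDiag[0]), 0);
    *dx = _mm512_sub_ps(*dx, _mm512_mul_ps(sh, box[0][0]));
}
```
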
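The 1/sqrt(r²) values behind the angle cosine come from VRSQRT14PS followed by one Newton-Raphson step (the VMULPS / VFMADD213PS / VMULPS triplet after each estimate). A sketch, assuming the broadcast registers ZMM12 and ZMM13 hold -0.5f and -3.0f; those contents are inferred from the dataflow, not read out of the binary.

```cpp
#include <immintrin.h>

// One Newton-Raphson refinement of the 14-bit rsqrt estimate:
// 1/sqrt(x) ~ (-0.5*r) * (x*r*r - 3) = 0.5*r*(3 - x*r*r)
static inline __m512 invsqrtApprox(__m512 x)
{
    const __m512 mHalf  = _mm512_set1_ps(-0.5f); // assumed contents of ZMM12
    const __m512 mThree = _mm512_set1_ps(-3.0f); // assumed contents of ZMM13
    __m512 r = _mm512_rsqrt14_ps(x);             // VRSQRT14PS
    __m512 t = _mm512_mul_ps(x, r);              // VMULPS: x*r
    __m512 h = _mm512_mul_ps(mHalf, r);          // VMULPS: -0.5*r
    t = _mm512_fmadd_ps(t, r, mThree);           // VFMADD213PS: x*r*r - 3
    return _mm512_mul_ps(h, t);                  // VMULPS: refined estimate
}
```
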
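Finally, the long tail of the listing is the force update: VUNPCKL/HPS plus VEXTRACT* transpose the fx/fy/fz vectors into one 4-float packet per atom slot, and each packet is then applied with a scalar-indexed read-modify-write (VMOVQ/VPEXTRQ to get the offset, VADDPS for the two outer atoms, VSUBPS for the central one). An illustrative helper, with names not taken from bonded.cpp:

```cpp
#include <immintrin.h>

// Hypothetical mirror of the tail: 'f' is the float (*)[4] force array
// (base register RBX), 'packet' one transposed fx/fy/fz/0 quadruple per
// slot, 'atomIdx' the per-slot atom numbers.
static inline void applyForcePackets(float (*f)[4], const __m128* packet,
                                     const int* atomIdx, int nSlots, bool subtract)
{
    for (int s = 0; s < nSlots; ++s)
    {
        float* dst = f[atomIdx[s]];
        __m128 fa  = _mm_load_ps(dst);             // VMOVAPS (%RBX,%RDI,4),%XMMn
        fa = subtract ? _mm_sub_ps(fa, packet[s])  // VSUBPS (central atom)
                      : _mm_add_ps(fa, packet[s]); // VADDPS (outer atoms)
        _mm_store_ps(dst, fa);                     // VMOVAPS %XMMn,(%RBX,%RDI,4)
    }
}
```

Serializing the update lane by lane presumably avoids the write conflicts a full-width scatter would hit when the same atom appears in several lanes, but it is also what produces the many scalar VMOVQ/VPEXTRQ rows in the port table.
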
Function | std::enable_if<((BondedKernelFlavor)0)==((BondedKernelFlavor)0), float>::type (anonymous namespace)::angles<(BondedKernelFlavor)0>(int, int const*, t_iparams const*, float const (*) [3], float (*) [4], float (*) [3], t_pbc const*, float, float*, gmx::ArrayRef |
Source file and lines | bonded.cpp:1151-1185 |
Module | libgromacs_mpi.so.9.0.0 |
nb instructions | 459 |
nb uops | 1204 |
loop length | 2703 |
used x86 registers | 9 |
used mmx registers | 0 |
used xmm registers | 16 |
used ymm registers | 7 |
used zmm registers | 32 |
nb stack references | 24 |
ADD-SUB / MUL ratio | 1.19 |
micro-operation queue | 200.67 cycles |
front end | 200.67 cycles |
| ALU0/BRU0 | ALU1 | ALU2 | ALU3 | BRU1 | AGU0 | AGU1 | AGU2 | FP0 | FP1 | FP2 | FP3 | FP4 | FP5 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 1.00 | 0.75 | 0.75 | 0.50 | 1.00 | 40.67 | 40.67 | 40.67 | 138.00 | 138.00 | 137.92 | 138.08 | 129.00 | 129.00 |
cycles | 1.00 | 0.75 | 0.75 | 0.50 | 1.00 | 48.67 | 48.67 | 48.67 | 186.00 | 178.00 | 154.42 | 156.58 | 129.00 | 129.00 |
Cycles executing div or sqrt instructions | NA |
Longest recurrence chain latency (RecMII) | 1.00 |
Front-end | 200.67 |
Dispatch | 186.00 |
Data deps. | 1.00 |
Overall L1 | 200.67 |
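The bound summary above is consistent with simple uop arithmetic. As a hedged cross-check (the 6-uops-per-cycle front-end width is inferred from the report's own numbers, not stated by it):

```latex
\text{front-end bound} = \frac{\text{nb uops}}{6\ \text{uops/cycle}} = \frac{1204}{6} \approx 200.67\ \text{cycles}
```

Because 200.67 exceeds both the dispatch bound (186.00 cycles, set by port FP0 in the cycles row above) and the recurrence chain (RecMII = 1.00), the front end is the binding constraint, which matches Overall L1 = 200.67.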
all | load | store | mul | add-sub | fma | div/sqrt | other |
---|---|---|---|---|---|---|---|
46% | 100% | NA* | 100% | NA* | NA* | - | 40% |
100% | 100% | 100% | 100% | 100% | 100% | 100% | 100% |
88% | 100% | 100% | 100% | 100% | 100% | 100% | 73% |
33% | 83% | NA* | 100% | NA* | NA* | - | 28% |
63% | 53% | 25% | 100% | 56% | 100% | 100% | 55% |
57% | 55% | 25% | 100% | 56% | 100% | 100% | 43% |

* NA: no vectorizable/vectorized instructions of that kind in the loop.
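Both the ratio rows above and the per-instruction "Vectorization" column in the listing below are consistent with a width-based measure: the fraction of the 512-bit ZMM width an operation actually fills. This formula is inferred from the listing, not stated by the report:

```latex
\text{ratio} = \frac{\text{bits used}}{512}:\quad
\text{ZMM} = 100\%,\quad
\text{YMM} = \tfrac{256}{512} = 50\%,\quad
\text{XMM} = \tfrac{128}{512} = 25\%,\quad
\text{64-bit scalar} = \tfrac{64}{512} = 12.5\%,\quad
\text{32-bit scalar} = \tfrac{32}{512} \approx 6.3\%
```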
Instruction | Nb FU | ALU0/BRU0 | ALU1 | ALU2 | ALU3 | BRU1 | AGU0 | AGU1 | AGU2 | FP0 | FP1 | FP2 | FP3 | FP4 | FP5 | Latency | Recip. throughput | Vectorization |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
VMOVDQA64 0x5c0(%RSP),%ZMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPMULLD %ZMM9,%ZMM20,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VXORPS %XMM14,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%R14,%ZMM11,4),%ZMM14{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM15,%XMM15,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RAX,%ZMM11,4),%ZMM15{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM17,%XMM17,%XMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RCX,%ZMM11,4),%ZMM17{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VMOVDQA64 0x580(%RSP),%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPMULLD %ZMM9,%ZMM11,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VXORPS %XMM22,%XMM22,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%R14,%ZMM21,4),%ZMM22{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM23,%XMM23,%XMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RAX,%ZMM21,4),%ZMM23{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM24,%XMM24,%XMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RCX,%ZMM21,4),%ZMM24{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VMOVDQA64 0x540(%RSP),%ZMM31 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPMULLD %ZMM9,%ZMM31,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VXORPS %XMM26,%XMM26,%XMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%R14,%ZMM21,4),%ZMM26{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM27,%XMM27,%XMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RAX,%ZMM21,4),%ZMM27{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VXORPS %XMM28,%XMM28,%XMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (25.0%) |
KXNORW %K0,%K0,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 0.50 | N/A |
VGATHERDPS (%RCX,%ZMM21,4),%ZMM28{%K1} | 81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2.50 | 6.67 | 7.17 | 5.67 | 9 | 9 | 0-21 | 16.56 | vect (100.0%) |
VSUBPS %ZMM22,%ZMM14,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM23,%ZMM15,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM22,%ZMM26,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM23,%ZMM27,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM28,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x3c0(%RSP),%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS 0x40(%RSP),%ZMM0,%ZMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x500(%RSP),%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM17,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM24,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x4c0(%RSP),%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM24,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM26,%ZMM14,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x480(%RSP),%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM2,%ZMM24,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM14,%ZMM15,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x440(%RSP),%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM3,%ZMM24,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM14,%ZMM17,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x400(%RSP),%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM4,%ZMM15,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0xc0(%RSP),%ZMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM5,%ZMM17,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM26,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x80(%RSP),%ZMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM6,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM17,%ZMM15,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM7,%ZMM24,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM8,%ZMM17,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM17,%ZMM24,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM23,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM24,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM1,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM26,%ZMM21,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM2,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM21,%ZMM22,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM3,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM21,%ZMM23,%ZMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM4,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM23,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM5,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM24,%ZMM26,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM6,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM23,%ZMM22,%ZMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM7,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRNDSCALEPS $0,%ZMM22,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM8,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM22,%ZMM24,%ZMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM17,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM15,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM21,%ZMM14,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM17,%ZMM17,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM15,%ZMM15,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM14,%ZMM14,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM24,%ZMM24,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM23,%ZMM23,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM29,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM21,%ZMM21,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM22,%ZMM30,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM26,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM26,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM0,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM30,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM0,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM0,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM0,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM26,%ZMM1,%ZMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM22,%ZMM28,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM29,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRCP14PS %ZMM1,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM26,%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM28,%ZMM28,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD213PS 0x380(%RSP),%ZMM29,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM29,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM28,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMAXPS 0x340(%RSP),%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 1 | vect (100.0%) |
VMINPS %ZMM16,%ZMM0,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 1 | vect (100.0%) |
VMINPS 0x300(%RSP),%ZMM1,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 1 | vect (100.0%) |
VANDPS %ZMM18,%ZMM28,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.50 | vect (100.0%) |
VCMPPS $0x1,%ZMM1,%ZMM19,%K1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VCMPPS $0x1,%ZMM28,%ZMM10,%K2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMOVAPS %ZMM19,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VFNMADD213PS %ZMM19,%ZMM1,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VCMPPS $0x1,%ZMM16,%ZMM1,%K3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM29,%ZMM1{%K3}{z} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM1,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM1,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM1,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM2,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS %ZMM28,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMULPS %ZMM1,%ZMM29,%ZMM2{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VANDPS %ZMM18,%ZMM2,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.50 | vect (100.0%) |
VCMPPS $0x1,%ZMM1,%ZMM19,%K3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VSUBPS %ZMM1,%ZMM16,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM19,%ZMM29,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VCMPPS $0x1,%ZMM16,%ZMM1,%K4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM29,%ZMM30{%K4}{z} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM30,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM30,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM30,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM3,%ZMM4,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM1,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS %ZMM29,%ZMM4{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMOVAPS %ZMM1,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMULPS %ZMM3,%ZMM29,%ZMM30{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM4,%ZMM4,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x2c0(%RSP),%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS 0x280(%RSP),%ZMM3,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VFMADD213PS 0x1c0(%RSP),%ZMM3,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM29,%ZMM4,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x240(%RSP),%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS 0x200(%RSP),%ZMM3,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VFMADD231PS %ZMM29,%ZMM3,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VFMADD213PS %ZMM30,%ZMM30,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VSUBPS %ZMM4,%ZMM25,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM4,%ZMM3,%ZMM4{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VCMPPS $0xe,0x180(%RSP),%ZMM1,%K3 | 2 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMOVAPS %ZMM4,%ZMM1{%K3} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VPTERNLOGD $-0x28,0x140(%RSP),%ZMM2,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 1 | vect (100.0%) |
VADDPS %ZMM1,%ZMM1,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS 0x100(%RSP),%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM2,%ZMM3,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM1,%ZMM25,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMOVAPS %ZMM2,%ZMM3{%K2} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VMOVAPS %ZMM3,%ZMM1{%K1} | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | vect (100.0%) |
VSUBPS %ZMM1,%ZMM27,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VSUBPS %ZMM0,%ZMM16,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VRSQRT14PS %ZMM0,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VMULPS %ZMM2,%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM12,%ZMM2,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFMADD213PS %ZMM13,%ZMM2,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM3,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS (%RSP),%ZMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM1,%ZMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM28,%ZMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM26,%ZMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM30,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM17,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM24,%ZMM27,%ZMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM15,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM23,%ZMM27,%ZMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM14,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM21,%ZMM27,%ZMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VPSLLD $0x2,%ZMM20,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 1 | vect (100.0%) |
VMOVDQA -0x858942(%RIP),%YMM6 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (50.0%) |
VPERMD %ZMM0,%ZMM6,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VUNPCKLPS %ZMM29,%ZMM22,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM10,%ZMM28,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM3,%ZMM2,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM3,%ZMM2,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF128 $0x1,%YMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM20,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM20,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM20,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VMOVDQA -0x85f077(%RIP),%YMM5 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (50.0%) |
VPERMD %ZMM0,%ZMM5,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VUNPCKHPS %ZMM29,%ZMM22,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VUNPCKHPS %ZMM10,%ZMM28,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VUNPCKLPS %ZMM3,%ZMM4,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM3,%ZMM4,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x2,%ZMM1,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM0,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM3,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM3,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x3,%ZMM3,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VMULPS %ZMM30,%ZMM26,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM26,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM23,%ZMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM15,%ZMM27,%ZMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM24,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM17,%ZMM27,%ZMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VMULPS %ZMM0,%ZMM21,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 1 | vect (100.0%) |
VFNMADD231PS %ZMM14,%ZMM27,%ZMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 1 | vect (100.0%) |
VADDPS %ZMM15,%ZMM22,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM20,%ZMM28,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VADDPS %ZMM17,%ZMM29,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 1 | vect (100.0%) |
VPSLLD $0x2,%ZMM11,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 1 | vect (100.0%) |
VPERMD %ZMM3,%ZMM6,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VUNPCKLPS %ZMM2,%ZMM0,%ZMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM2,%ZMM0,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM10,%ZMM1,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM0,%ZMM14,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM0,%ZMM14,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF128 $0x1,%YMM2,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM4,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM4,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM21,%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM21,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM4,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM2,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM14,%XMM21,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM14,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM22,%XMM21,%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM21,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPERMD %ZMM3,%ZMM5,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM14,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM14,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM10,%ZMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VSUBPS %XMM0,%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM14,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM4,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM14,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM4,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKLPS %ZMM1,%ZMM11,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM0,%XMM2,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPMOVSXDQ %YMM3,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM4,%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM3,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM14,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM2,%XMM3,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VUNPCKHPS %ZMM1,%ZMM11,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTI32X4 $0x2,%ZMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM1,%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VSUBPS %XMM3,%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VSUBPS %XMM3,%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VMOVAPS (%RBX,%RDI,4),%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VSUBPS %XMM1,%XMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPSLLD $0x2,%ZMM31,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 1 | 1 | vect (100.0%) |
VPERMD %ZMM0,%ZMM6,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VUNPCKLPS %ZMM17,%ZMM15,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM17,%ZMM15,%ZMM11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM10,%ZMM20,%ZMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKLPS %ZMM3,%ZMM2,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VUNPCKHPS %ZMM3,%ZMM2,%ZMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF128 $0x1,%YMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VPMOVSXDQ %YMM1,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM14,%XMM14 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM14,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM3,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM4,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPERMD %ZMM0,%ZMM5,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 5 | 1 | vect (100.0%) |
VPEXTRQ $0x1,%XMM3,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM10,%ZMM20,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM2,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VUNPCKLPS %ZMM4,%ZMM11,%ZMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VEXTRACTF32X4 $0x3,%ZMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPMOVSXDQ %YMM0,%ZMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (50.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF128 $0x1,%YMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM1,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI128 $0x1,%YMM0,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VMOVQ %XMM2,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM3,%XMM3 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VUNPCKHPS %ZMM4,%ZMM11,%ZMM4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.67 | 0.67 | 0.67 | 0 | 0 | 1 | 0.67 | vect (50.0%) |
VMOVAPS %XMM3,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x3,%ZMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM2,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x2,%ZMM0,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM1,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM2,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM1,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF128 $0x1,%YMM4,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VEXTRACTF32X4 $0x2,%ZMM4,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VEXTRACTI32X4 $0x3,%ZMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VMOVQ %XMM0,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 | scal (12.5%) |
VADDPS (%RBX,%RDI,4),%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM1,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
VPEXTRQ $0x1,%XMM0,%RDI | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 6 | 1 | scal (12.5%) |
VEXTRACTF32X4 $0x3,%ZMM4,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 4 | 1 | vect (25.0%) |
VADDPS (%RBX,%RDI,4),%XMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 | vect (25.0%) |
VMOVAPS %XMM0,(%RBX,%RDI,4) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 4 | 1 | vect (25.0%) |
ADD $0x40,%RSI | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | N/A |
CMP %RDX,%RSI | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | N/A |
JAE bc4f7e <_ZN12_GLOBAL__N_16anglesIL18BondedKernelFlavor0EEENSt9enable_ifIXeqT_LS1_0EEfE4typeEiPKiPK9t_iparamsPA3_KfPA4_fPA3_fPK5t_pbcfPfN3gmx8ArrayRefISA_EEP8t_fcdataP12t_disresdataP12t_oriresdataPi+0xd1e> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50-1 | N/A |
XOR %EDI,%EDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | N/A |
MOV %ESI,%R8D | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 | scal (6.3%) |
JMP bc4f0d <_ZN12_GLOBAL__N_16anglesIL18BondedKernelFlavor0EEENSt9enable_ifIXeqT_LS1_0EEfE4typeEiPKiPK9t_iparamsPA3_KfPA4_fPA3_fPK5t_pbcfPfN3gmx8ArrayRefISA_EEP8t_fcdataP12t_disresdataP12t_oriresdataPi+0xcad> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | N/A |
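To make the arithmetic in the listing easier to follow: the VGATHERDPS / VPMULLD blocks load x, y and z for the atoms of each angle, and each VMULPS, VRNDSCALEPS $0, VSUBPS chain then applies one periodic-boundary shift. A minimal scalar sketch, assuming this is the triclinic minimum-image correction used by GROMACS pbc_dx (all names here are illustrative, not the kernel's own; the box constants live broadcast on the stack at 0x3c0..0x500(%RSP)):

```cpp
#include <cmath>

// Hypothetical scalar version of the round-and-subtract chains that the
// kernel vectorizes 16-wide. box[d][e] and recipBox[d] stand in for the
// ZMM constants; VRNDSCALEPS $0 is round-to-nearest, hence nearbyintf.
struct Vec3 { float x, y, z; };

static Vec3 pbcDx(Vec3 xi, Vec3 xj,
                  const float box[3][3], const float recipBox[3])
{
    Vec3 d = { xi.x - xj.x, xi.y - xj.y, xi.z - xj.z };  // the VSUBPS after the gathers

    float t = std::nearbyintf(d.z * recipBox[2]);        // VMULPS + VRNDSCALEPS $0
    d.x -= t * box[2][0];                                // z shift corrects x, y and z
    d.y -= t * box[2][1];
    d.z -= t * box[2][2];

    t = std::nearbyintf(d.y * recipBox[1]);              // y shift corrects x and y
    d.x -= t * box[1][0];
    d.y -= t * box[1][1];

    t = std::nearbyintf(d.x * recipBox[0]);              // x shift corrects x only
    d.x -= t * box[0][0];
    return d;
}
```

The z correction comes first and touches all three components, the y correction touches x and y, and the x correction touches x alone, which is exactly the order of the three round-and-subtract chains in the listing.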
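The listing also repeats a four-instruction inverse-square-root idiom: VRSQRT14PS, VMULPS, VFMADD213PS with %ZMM13, VMULPS with %ZMM12. A hedged intrinsics sketch, assuming the broadcast constants are %ZMM12 = -0.5f and %ZMM13 = -3.0f, the values that make the sequence the textbook one-step Newton-Raphson refinement y' = 0.5*y*(3 - x*y*y):

```cpp
#include <immintrin.h>

// Sketch of the VRSQRT14PS + single Newton-Raphson step pattern.
// The constant values are assumptions; only the instruction shape
// is taken from the listing.
static __m512 invsqrt_nr(__m512 x)
{
    const __m512 minus_half  = _mm512_set1_ps(-0.5f); // assumed value of %ZMM12
    const __m512 minus_three = _mm512_set1_ps(-3.0f); // assumed value of %ZMM13

    __m512 y  = _mm512_rsqrt14_ps(x);                 // ~2^-14 relative-error estimate
    __m512 xy = _mm512_mul_ps(x, y);                  // x*y           (VMULPS)
    __m512 hy = _mm512_mul_ps(minus_half, y);         // -0.5*y        (VMULPS with %ZMM12)
    __m512 t  = _mm512_fmadd_ps(xy, y, minus_three);  // x*y*y - 3     (VFMADD213PS with %ZMM13)
    return _mm512_mul_ps(t, hy);                      // 0.5*y*(3 - x*y*y)
}
```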
Run 1x1 | Number processes: 1; Number processes per node: 1; OMP_NUM_THREADS: 1 |
---|---|
Run 2x1 | Number processes: 2; Number processes per node: 2; OMP_NUM_THREADS: 1 |
Run 4x1 | Number processes: 4; Number processes per node: 4; OMP_NUM_THREADS: 1 |
Run 8x1 | Number processes: 8; Number processes per node: 8; OMP_NUM_THREADS: 1 |
Run 16x1 | Number processes: 16; Number processes per node: 16; OMP_NUM_THREADS: 1 |
Run 32x1 | Number processes: 32; Number processes per node: 32; OMP_NUM_THREADS: 1 |
Run 64x1 | Number processes: 64; Number processes per node: 64; OMP_NUM_THREADS: 1 |
Run 128x1 | Number processes: 128; Number processes per node: 128; OMP_NUM_THREADS: 1 |
Run 192x1 | Number processes: 192; Number nodes: 1; Number processes per node: 192; Run Command: <executable> mdrun -s ion_channel.tpr -nsteps 10000 -pin on -deffnm aocc; MPI Command: mpirun -genv I_MPI_FABRICS=shm -n <number_processes>; Dataset: ; Run Directory: .; OMP_NUM_THREADS: 1 |
Run | Efficiency | Potential Speed-Up (%) |
---|---|---|
1x1 | 1 | 0 |
2x1 | 1.14 | 0 |
4x1 | 1.17 | 0 |
8x1 | 1.06 | 0 |
16x1 | 1.23 | 0 |
32x1 | 1.35 | 0 |
64x1 | 1.43 | 0 |
128x1 | 1.3 | 0 |
192x1 | 1.32 | 0 |
Run | Number of threads | Efficiency (ideal is 1) | Speedup | Ideal Speedup | Time (s) | Coverage (%) |
---|---|---|---|---|---|---|
1x1 | 1 | 1 | 1 | 1 | 8.825 | 1.06 |
2x1 | 2 | 1.14 | 2.28 | 2 | 4.370 | 0.85 |
4x1 | 4 | 1.17 | 4.67 | 4 | 2.270 | 0.76 |
8x1 | 8 | 1.06 | 8.5 | 8 | 1.280 | 0.67 |
16x1 | 16 | 1.23 | 19.72 | 16 | 0.845 | 0.54 |
32x1 | 19 | 1.35 | 43.13 | 32 | 0.640 | 0.40 |
64x1 | 36 | 1.43 | 91.82 | 64 | 0.325 | 0.24 |
128x1 | 71 | 1.3 | 166.96 | 128 | 0.260 | 0.31 |
192x1 | 102 | 1.32 | 254.14 | 192 | 0.175 | 0.26 |
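As a cross-check on the two scaling tables above, the efficiency column is speedup divided by the ideal speedup; a worked example for the largest run:

```latex
E_N = \frac{S_N}{N}, \qquad E_{192} = \frac{254.14}{192} \approx 1.32
```

Efficiencies above 1 mean this loop scales super-linearly; a per-rank working set that begins to fit in cache is a plausible explanation, though the report itself does not attribute a cause.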