I wrote a program that multiplies arr1 by arr2 element-wise and saves the result to arr3.
Pseudocode:
arr3[i]=arr1[i]*arr2[i]
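In plain C the whole computation is just this loop (a minimal sketch; I assume the arrays do not overlap):

/* Scalar reference: element-wise product of two float arrays. */
void mul_scalar(const float* arr1, const float* arr2, float* arr3, int arraySize)
{
    for (int i = 0; i < arraySize; i++)
        arr3[i] = arr1[i] * arr2[i];
}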
And I want to use AVX instructions. I have assembler code for both the m128 (xmm) and m256 (ymm) instruction forms, unrolled four times. The results show that the ymm version is 4 times slower than the xmm one. But why, if the latency is the same?
Mul_ASM_AVX proc ; (float* RCX=arr1, float* RDX=arr2, float* R8=arr3, int R9 = arraySize)
push rbx
vpxor xmm0, xmm0, xmm0 ; zero the four work registers (they are reloaded below)
vpxor xmm1, xmm1, xmm1
vpxor xmm2, xmm2, xmm2
vpxor xmm3, xmm3, xmm3
mov rbx, r9
sar r9, 4 ; divide the count by 16 (4 xmm registers * 4 floats each)
jz MulResiduals ; if that's 0, only the scalar residual loop is left to perform
LoopHead:
; multiply 16 floats per iteration
vmovaps xmm0 , xmmword ptr[rcx]
vmovaps xmm1 , xmmword ptr[rcx+16]
vmovaps xmm2 , xmmword ptr[rcx+32]
vmovaps xmm3 , xmmword ptr[rcx+48]
vmulps xmm0, xmm0, xmmword ptr[rdx]
vmulps xmm1, xmm1, xmmword ptr[rdx+16]
vmulps xmm2, xmm2, xmmword ptr[rdx+32]
vmulps xmm3, xmm3, xmmword ptr[rdx+48]
vmovaps xmmword ptr[R8], xmm0
vmovaps xmmword ptr[R8+16], xmm1
vmovaps xmmword ptr[R8+32], xmm2
vmovaps xmmword ptr[R8+48], xmm3
add rcx, 64 ; move on to the next 16 floats (4*16=64)
add rdx, 64
add r8, 64
dec r9
jnz LoopHead
MulResiduals:
and ebx, 15 ; do we have residuals?
jz Finished ; If not, we're done
ResidualsLoopHead:
vmovss xmm0, real4 ptr[rcx]
vmulss xmm0, xmm0, real4 ptr[rdx]
vmovss real4 ptr[r8], xmm0
add rcx, 4
add rdx, 4
add r8, 4 ; advance the destination pointer as well
dec rbx
jnz ResidualsLoopHead
Finished:
pop rbx ; restore caller's rbx
ret
Mul_ASM_AVX endp
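For reference, here is what the xmm loop computes, written with intrinsics (a minimal sketch, not the code I benchmarked; it assumes 16-byte-aligned pointers, matching the vmovaps loads above, and an arraySize that is a multiple of 16):

#include <immintrin.h>

/* Intrinsics sketch of the unrolled xmm kernel: 16 floats per iteration. */
void mul_intrin_xmm(const float* arr1, const float* arr2, float* arr3, int arraySize)
{
    for (int i = 0; i < arraySize; i += 16) {
        __m128 a0 = _mm_load_ps(arr1 + i);
        __m128 a1 = _mm_load_ps(arr1 + i + 4);
        __m128 a2 = _mm_load_ps(arr1 + i + 8);
        __m128 a3 = _mm_load_ps(arr1 + i + 12);
        _mm_store_ps(arr3 + i,      _mm_mul_ps(a0, _mm_load_ps(arr2 + i)));
        _mm_store_ps(arr3 + i + 4,  _mm_mul_ps(a1, _mm_load_ps(arr2 + i + 4)));
        _mm_store_ps(arr3 + i + 8,  _mm_mul_ps(a2, _mm_load_ps(arr2 + i + 8)));
        _mm_store_ps(arr3 + i + 12, _mm_mul_ps(a3, _mm_load_ps(arr2 + i + 12)));
    }
}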
And for m256, ymm instructions:
Mul_ASM_AVX_YMM proc ; UNROLLED AVX
push rbx
vzeroupper ; zero the upper halves of all ymm registers
mov rbx, r9
sar r9, 5 ; divide the count by 32 (4 ymm registers * 8 floats each)
jz MulResiduals ; if that's 0, only the scalar residual loop is left to perform
LoopHead:
; multiply 32 floats per iteration
vmovaps ymm0, ymmword ptr[rcx] ; 8 floats per register, 4*8 = 32 per iteration
vmovaps ymm1, ymmword ptr[rcx+32]
vmovaps ymm2, ymmword ptr[rcx+64]
vmovaps ymm3, ymmword ptr[rcx+96]
vmulps ymm0, ymm0, ymmword ptr[rdx]
vmulps ymm1, ymm1, ymmword ptr[rdx+32]
vmulps ymm2, ymm2, ymmword ptr[rdx+64]
vmulps ymm3, ymm3, ymmword ptr[rdx+96]
vmovups ymmword ptr[r8], ymm0 ; ps form, to stay in the float domain
vmovups ymmword ptr[r8+32], ymm1
vmovups ymmword ptr[r8+64], ymm2
vmovups ymmword ptr[r8+96], ymm3
add rcx, 128 ; move on to the next 32 floats (4*32=128)
add rdx, 128
add r8, 128
dec r9
jnz LoopHead
MulResiduals:
and ebx, 31 ; do we have residuals?
jz Finished ; If not, we're done
ResidualsLoopHead:
vmovss xmm0, real4 ptr[rcx]
vmulss xmm0, xmm0, real4 ptr[rdx]
vmovss real4 ptr[r8], xmm0
add rcx, 4
add rdx, 4
add r8, 4 ; advance the destination pointer as well
dec rbx
jnz ResidualsLoopHead
Finished:
pop rbx ; restore caller's rbx
ret
Mul_ASM_AVX_YMM endp
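This is roughly how I call and time the two routines (a minimal harness sketch, not my exact benchmark; it assumes the procs are linked in from the MASM module, the Windows x64 calling convention as in the listings, and _aligned_malloc from MSVC; the array size and repetition count are illustrative):

#include <stdio.h>
#include <malloc.h> /* _aligned_malloc / _aligned_free (MSVC) */
#include <time.h>

void Mul_ASM_AVX(float* arr1, float* arr2, float* arr3, int arraySize);
void Mul_ASM_AVX_YMM(float* arr1, float* arr2, float* arr3, int arraySize);

int main(void)
{
    const int n = 1 << 20; /* illustrative array size */
    const int reps = 1000;
    float* a = (float*)_aligned_malloc((size_t)n * sizeof(float), 32);
    float* b = (float*)_aligned_malloc((size_t)n * sizeof(float), 32);
    float* c = (float*)_aligned_malloc((size_t)n * sizeof(float), 32);
    for (int i = 0; i < n; i++) { a[i] = (float)i; b[i] = 2.0f; }

    clock_t t0 = clock();
    for (int r = 0; r < reps; r++) Mul_ASM_AVX(a, b, c, n);
    clock_t t1 = clock();
    for (int r = 0; r < reps; r++) Mul_ASM_AVX_YMM(a, b, c, n);
    clock_t t2 = clock();

    printf("xmm: %ld ticks  ymm: %ld ticks\n", (long)(t1 - t0), (long)(t2 - t1));

    _aligned_free(a); _aligned_free(b); _aligned_free(c);
    return 0;
}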
CPU-Z report:
- Manufacturer: AuthenticAMD
- Name: AMD FX-6300 Codename: Vishera
- Specification: AMD FX(tm)-6300 Six-Core Processor
- CPUID: F.2.0
- Extended CPUID: 15.2
- Technology: 32 nm
- Instruction sets: MMX(+), SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2,
SSE4A, x86-64, AMD-V, AES, AVX, XOP, FMA3, FMA4
A comment on the question notes: "The ResidualsLoopHead section will be executed many more times in the YMM version. This will become significant if the average size of your arrays is small." – Barmecide
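To see how much that scalar tail matters: the xmm version leaves arraySize & 15 scalar iterations and the ymm version arraySize & 31, so a 16-element array, for example, goes entirely through the scalar loop in the ymm version. A tiny illustration (the sizes are hypothetical):

#include <stdio.h>

/* Print how many scalar tail iterations each version runs for a few sizes. */
int main(void)
{
    const int sizes[] = { 16, 24, 31, 120 };
    for (int i = 0; i < 4; i++) {
        int n = sizes[i];
        printf("n=%3d  xmm tail: %2d  ymm tail: %2d\n", n, n & 15, n & 31);
    }
    return 0;
}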