At first glance, I thought the compiler could generate equivalent assembly for both versions with optimization flags enabled. When I checked, I was surprised by the result:
Source unoptimized.cpp
note: this code is not meant to be executed.
// Example source (not meant to be executed). 'bitmap' is a global with
// external linkage, so the store through argv below may legally alias
// bitmap.width — the compiler must re-read the bound every iteration
// (see the 'cmpl bitmap(%rip), %eax' inside the loop in unoptimized.s).
struct bitmap_t
{
long long width;
} bitmap;
int main(int argc, char** argv)
{
// The loop condition re-evaluates bitmap.width on each iteration; it
// cannot be hoisted because argv[x][0] = '\0' might modify it.
for (unsigned x = 0 ; x < static_cast<unsigned>(bitmap.width) ; ++x)
{
argv[x][0] = '\0';
}
return 0;
}
Source optimized.cpp
note: this code is not meant to be executed.
// Example source (not meant to be executed). Caching bitmap.width in a
// local const takes a snapshot of the bound: a local cannot be aliased
// by the argv store, so the comparison value is computed once before
// the loop (see the pre-loop 'leaq' in optimized.s).
struct bitmap_t
{
long long width;
} bitmap;
int main(int argc, char** argv)
{
// One read of the global; the loop compares against this local copy.
const unsigned width = static_cast<unsigned>(bitmap.width);
for (unsigned x = 0 ; x < width ; ++x)
{
argv[x][0] = '\0';
}
return 0;
}
Compilation (note the capital -S, which emits assembly to a .s file; lowercase -s would strip symbols from a linked executable instead)
$ g++ -S -O3 unoptimized.cpp
$ g++ -S -O3 optimized.cpp
Assembly (unoptimized.s)
.file "unoptimized.cpp"
.text
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB0:
.cfi_startproc
.cfi_personality 0x3,__gxx_personality_v0
movl bitmap(%rip), %eax
testl %eax, %eax
je .L2
xorl %eax, %eax
.p2align 4,,10
.p2align 3
.L3:
mov %eax, %edx
addl $1, %eax
movq (%rsi,%rdx,8), %rdx
movb $0, (%rdx)
cmpl bitmap(%rip), %eax
jb .L3
.L2:
xorl %eax, %eax
ret
.cfi_endproc
.LFE0:
.size main, .-main
.globl bitmap
.bss
.align 8
.type bitmap, @object
.size bitmap, 8
bitmap:
.zero 8
.ident "GCC: (GNU) 4.4.7 20120313 (Red Hat 4.4.7-16)"
.section .note.GNU-stack,"",@progbits
Assembly (optimized.s)
.file "optimized.cpp"
.text
.p2align 4,,15
.globl main
.type main, @function
main:
.LFB0:
.cfi_startproc
.cfi_personality 0x3,__gxx_personality_v0
movl bitmap(%rip), %eax
testl %eax, %eax
je .L2
subl $1, %eax
leaq 8(,%rax,8), %rcx
xorl %eax, %eax
.p2align 4,,10
.p2align 3
.L3:
movq (%rsi,%rax), %rdx
addq $8, %rax
cmpq %rcx, %rax
movb $0, (%rdx)
jne .L3
.L2:
xorl %eax, %eax
ret
.cfi_endproc
.LFE0:
.size main, .-main
.globl bitmap
.bss
.align 8
.type bitmap, @object
.size bitmap, 8
bitmap:
.zero 8
.ident "GCC: (GNU) 4.4.7 20120313 (Red Hat 4.4.7-16)"
.section .note.GNU-stack,"",@progbits
diff
$ diff -uN unoptimized.s optimized.s
--- unoptimized.s 2015-11-24 16:11:55.837922223 +0000
+++ optimized.s 2015-11-24 16:12:02.628922941 +0000
@@ -1,4 +1,4 @@
- .file "unoptimized.cpp"
+ .file "optimized.cpp"
.text
.p2align 4,,15
.globl main
@@ -10,16 +10,17 @@
movl bitmap(%rip), %eax
testl %eax, %eax
je .L2
+ subl $1, %eax
+ leaq 8(,%rax,8), %rcx
xorl %eax, %eax
.p2align 4,,10
.p2align 3
.L3:
- mov %eax, %edx
- addl $1, %eax
- movq (%rsi,%rdx,8), %rdx
+ movq (%rsi,%rax), %rdx
+ addq $8, %rax
+ cmpq %rcx, %rax
movb $0, (%rdx)
- cmpl bitmap(%rip), %eax
- jb .L3
+ jne .L3
.L2:
xorl %eax, %eax
ret
The generated assembly for the optimized version computes the loop bound once, before the loop (the `subl $1, %eax` / `leaq 8(,%rax,8), %rcx` pair), and then compares against that register. The unoptimized version instead re-reads `bitmap.width` from memory on every iteration (`cmpl bitmap(%rip), %eax` inside the loop body), because the store through `argv` could legally have modified it.
When I get time, I'll eventually post some benchmarks on that. Good question.
If *p is of the same type as width, then it's not trivial to optimize, since p could point to width and modify it inside the loop. – Bink
p points to the same memory as bitmap->width. Therefore I can't legally optimize the first example to the second one. – Geomorphology
I tried __restrict__, and the results were the same. See my edited answer. – Concuss
If it's strlen(foo) or another call to a pure function, it almost certainly will be worth caching the result, since the compiler will not be likely to realize that those could be optimized, and they take a significant amount of time per call. – Colb
Caching strlen calls and stuff like that will tend to help in more ordinary kinds of code where it might not be so loopy, and is more about avoiding tiny inefficiencies. – Unstring
(one branch does memcpy and one branch does memmove). This is what GCC does e.g. with auto-vectorization, so using restrict only reduces the amount of code and the check, and in practice does not help much. However, for the OP's question the range of the loop can change, so this does not apply (which is why this is a comment and not an answer). – Hintze