I don't think a transpose/2-pass approach is a good fit for optimizing Sobel operator code. The Sobel operator is not compute-bound, so spending extra memory accesses on a transpose/2-pass is wasted effort here. I wrote some Sobel operator test code to see how fast SSE can get. This code does not handle the first and last edge pixels, and it uses the FPU to compute the sqrt() value.
The Sobel operator needs 14 multiplies, 1 square root, 11 additions, 2 min/max operations, 12 reads and 1 write per component. That means you can process one component in roughly 20~30 cycles if the code is well optimized.
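For reference, here is where those numbers come from. The two 3x3 kernels (the same weights used in the code below) are:

Gx = [ +1  0  -1 ]      Gy = [ +1  +2  +1 ]
     [ +2  0  -2 ]           [  0   0   0 ]
     [ +1  0  -1 ]           [ -1  -2  -1 ]

Each gradient takes 6 multiplies, 5 additions and 6 reads (the zero weights cost nothing), then gx*gx + gy*gy adds 2 multiplies and 1 addition, followed by 1 sqrt, 1 min and 1 max for the clamp, and 1 write of the result.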
The FPUSobel() function below took 2113044 CPU cycles to process a 256*256 image, which is 32.76 cycles/component. I'll convert this sample code to SSE.
void FPUSobel()
{
    BYTE* image_0 = g_image + g_image_width * 0;
    BYTE* image_1 = g_image + g_image_width * 1;
    BYTE* image_2 = g_image + g_image_width * 2;
    DWORD* screen = g_screen + g_screen_width * 1;

    for(int y=1; y<g_image_height-1; ++y)
    {
        for(int x=1; x<g_image_width-1; ++x)
        {
            float gx = image_0[x-1] * (+1.0f) +
                       image_0[x+1] * (-1.0f) +
                       image_1[x-1] * (+2.0f) +
                       image_1[x+1] * (-2.0f) +
                       image_2[x-1] * (+1.0f) +
                       image_2[x+1] * (-1.0f);

            float gy = image_0[x-1] * (+1.0f) +
                       image_0[x+0] * (+2.0f) +
                       image_0[x+1] * (+1.0f) +
                       image_2[x-1] * (-1.0f) +
                       image_2[x+0] * (-2.0f) +
                       image_2[x+1] * (-1.0f);

            int result = (int)min(255.0f, max(0.0f, sqrtf(gx * gx + gy * gy)));
            screen[x] = 0x01010101 * result;
        }
        image_0 += g_image_width;
        image_1 += g_image_width;
        image_2 += g_image_width;
        screen += g_screen_width;
    }
}
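For reference, a minimal sketch of how such whole-function cycle counts can be measured with __rdtsc, assuming MSVC's <intrin.h> (MeasureCycles here is just an illustrative helper, not part of the test code above):

#include <intrin.h>   // __rdtsc on MSVC (GCC/Clang: <x86intrin.h>)
#include <stdio.h>

unsigned long long MeasureCycles(void (*fn)())
{
    unsigned long long start = __rdtsc();  // read the time-stamp counter before the call
    fn();
    return __rdtsc() - start;              // elapsed TSC ticks for the whole function
}

// usage, once both Sobel functions are defined:
//   double components = (double)(g_image_width - 2) * (g_image_height - 2); // interior pixels only
//   printf("FPUSobel: %.2f cycles/component\n", MeasureCycles(FPUSobel) / components);
//   printf("SseSobel: %.2f cycles/component\n", MeasureCycles(SseSobel) / components);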
The SseSobel() function took 613220 CPU cycles to process the same 256*256 image. That is 9.51 cycles/component, 3.4 times faster than FPUSobel(). There is still some room to optimize, but it will not get more than 4 times faster, because it uses 4-way SIMD.
This function uses an SoA approach to process 4 pixels at once. SoA is better than AoS for most array or image data, because with AoS you have to transpose/shuffle the data first. SoA also makes it far easier to convert plain C code to SSE code.
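To make the AoS/SoA difference concrete, here is an illustrative sketch (not code used by the functions in this post): with AoS the same field of 4 neighbouring elements is interleaved in memory and has to be shuffled into a register, while with SoA it is contiguous and loads directly.

#include <xmmintrin.h>  // SSE

// AoS: all fields of one element live together, so gx of 4 consecutive elements
// is interleaved with gy/mag and needs shuffles before it fits in one __m128.
struct GradientAoS { float gx, gy, mag, pad; };

// SoA: one array per field, so gx of 4 consecutive elements is contiguous.
struct GradientSoA { float *gx, *gy, *mag; };

static __m128 four_gx(const struct GradientSoA* g, int i)
{
    return _mm_loadu_ps(g->gx + i);  // one unaligned load, no shuffling needed
}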
void SseSobel()
{
    BYTE* image_0 = g_image + g_image_width * 0;
    BYTE* image_1 = g_image + g_image_width * 1;
    BYTE* image_2 = g_image + g_image_width * 2;
    DWORD* screen = g_screen + g_screen_width * 1;

    __m128 const_p_one = _mm_set1_ps(+1.0f);
    __m128 const_p_two = _mm_set1_ps(+2.0f);
    __m128 const_n_one = _mm_set1_ps(-1.0f);
    __m128 const_n_two = _mm_set1_ps(-2.0f);

    for(int y=1; y<g_image_height-1; ++y)
    {
        for(int x=1; x<g_image_width-1; x+=4)
        {
            // load 16 components. (0~6 will be used)
            __m128i current_0 = _mm_unpacklo_epi8(_mm_loadu_si128((__m128i*)(image_0+x-1)), _mm_setzero_si128());
            __m128i current_1 = _mm_unpacklo_epi8(_mm_loadu_si128((__m128i*)(image_1+x-1)), _mm_setzero_si128());
            __m128i current_2 = _mm_unpacklo_epi8(_mm_loadu_si128((__m128i*)(image_2+x-1)), _mm_setzero_si128());

            // image_00 = { image_0[x-1], image_0[x+0], image_0[x+1], image_0[x+2] }
            __m128 image_00 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(current_0, _mm_setzero_si128()));
            // image_01 = { image_0[x+0], image_0[x+1], image_0[x+2], image_0[x+3] }
            __m128 image_01 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_srli_si128(current_0, 2), _mm_setzero_si128()));
            // image_02 = { image_0[x+1], image_0[x+2], image_0[x+3], image_0[x+4] }
            __m128 image_02 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_srli_si128(current_0, 4), _mm_setzero_si128()));

            __m128 image_10 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(current_1, _mm_setzero_si128()));
            __m128 image_12 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_srli_si128(current_1, 4), _mm_setzero_si128()));
            __m128 image_20 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(current_2, _mm_setzero_si128()));
            __m128 image_21 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_srli_si128(current_2, 2), _mm_setzero_si128()));
            __m128 image_22 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_srli_si128(current_2, 4), _mm_setzero_si128()));

            __m128 gx = _mm_add_ps( _mm_mul_ps(image_00, const_p_one),
                        _mm_add_ps( _mm_mul_ps(image_02, const_n_one),
                        _mm_add_ps( _mm_mul_ps(image_10, const_p_two),
                        _mm_add_ps( _mm_mul_ps(image_12, const_n_two),
                        _mm_add_ps( _mm_mul_ps(image_20, const_p_one),
                                    _mm_mul_ps(image_22, const_n_one))))));

            __m128 gy = _mm_add_ps( _mm_mul_ps(image_00, const_p_one),
                        _mm_add_ps( _mm_mul_ps(image_01, const_p_two),
                        _mm_add_ps( _mm_mul_ps(image_02, const_p_one),
                        _mm_add_ps( _mm_mul_ps(image_20, const_n_one),
                        _mm_add_ps( _mm_mul_ps(image_21, const_n_two),
                                    _mm_mul_ps(image_22, const_n_one))))));

            __m128 result = _mm_min_ps( _mm_set1_ps(255.0f),
                            _mm_max_ps( _mm_set1_ps(0.0f),
                                        _mm_sqrt_ps(_mm_add_ps(_mm_mul_ps(gx, gx), _mm_mul_ps(gy, gy))) ));

            __m128i pack_32  = _mm_cvtps_epi32(result);               //R32,G32,B32,A32
            __m128i pack_16  = _mm_packs_epi32(pack_32, pack_32);     //R16,G16,B16,A16,R16,G16,B16,A16
            __m128i pack_8   = _mm_packus_epi16(pack_16, pack_16);    //RGBA,RGBA,RGBA,RGBA
            __m128i unpack_2 = _mm_unpacklo_epi8(pack_8, pack_8);     //RRGG,BBAA,RRGG,BBAA
            __m128i unpack_4 = _mm_unpacklo_epi8(unpack_2, unpack_2); //RRRR,GGGG,BBBB,AAAA

            _mm_storeu_si128((__m128i*)(screen+x), unpack_4);
        }
        image_0 += g_image_width;
        image_1 += g_image_width;
        image_2 += g_image_width;
        screen += g_screen_width;
    }
}
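One example of the room that is left: the multiplies by ±1 and ±2 are not really needed, because gx and gy can be built from subtractions and additions alone. A sketch of just that part (everything else in SseSobel() stays the same, and the four const_* registers become unnecessary):

// gx: (left column - right column) per row, middle row counted twice
__m128 dx0 = _mm_sub_ps(image_00, image_02);
__m128 dx1 = _mm_sub_ps(image_10, image_12);
__m128 dx2 = _mm_sub_ps(image_20, image_22);
__m128 gx  = _mm_add_ps(_mm_add_ps(dx0, dx2), _mm_add_ps(dx1, dx1));

// gy: (top row - bottom row) per column, middle column counted twice
__m128 dy0 = _mm_sub_ps(image_00, image_20);
__m128 dy1 = _mm_sub_ps(image_01, image_21);
__m128 dy2 = _mm_sub_ps(image_02, image_22);
__m128 gy  = _mm_add_ps(_mm_add_ps(dy0, dy2), _mm_add_ps(dy1, dy1));

This removes all 12 gradient multiplies; only the gx*gx and gy*gy multiplies remain.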