author     nick <nick@b3059339-0415-0410-9bf9-f77b7e298cf2>  2001-11-06 17:14:22 +0000
committer  nick <nick@b3059339-0415-0410-9bf9-f77b7e298cf2>  2001-11-06 17:14:22 +0000
commit     d573fa0df80ce56736b8d48b8a82b7e4bf45fd28 (patch)
tree       17662e18ee7439ef4c551a11b77fb9d084363920
parent     2d0a67f48db77075ede86ea8607178e040703600 (diff)
download   mpv-d573fa0df80ce56736b8d48b8a82b7e4bf45fd28.tar.bz2
           mpv-d573fa0df80ce56736b8d48b8a82b7e4bf45fd28.tar.xz
Minor speedup of rgb32to24. (The expected performance gain was not achieved.)
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@2747 b3059339-0415-0410-9bf9-f77b7e298cf2
-rw-r--r--  postproc/rgb2rgb.c           54
-rw-r--r--  postproc/rgb2rgb_template.c  54
2 files changed, 82 insertions(+), 26 deletions(-)
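
The patch widens the MMX loop in rgb32to24: instead of converting 16 source bytes (four RGB32 pixels) into 12 destination bytes per iteration, with the second store landing at the unaligned offset 6, it converts 32 source bytes (eight pixels) into 24 destination bytes using three aligned MOVNTQ stores. For reference, a minimal scalar sketch of the conversion the loop performs, i.e. dropping the fourth (alpha/padding) byte of every RGB32 pixel; the function name is illustrative and the byte layout matches the one the MMX path assumes:

#include <stdint.h>

/* Scalar reference: copy three colour bytes per pixel and skip the fourth
   byte of each RGB32 pixel.  Illustrative only; the real function also has
   the MMX path shown in the diff below. */
static void rgb32to24_scalar(const uint8_t *src, uint8_t *dst, unsigned src_size)
{
    const uint8_t *end = src + src_size;
    while (src < end)
    {
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        src += 4;   /* advance one RGB32 pixel */
        dst += 3;   /* advance one RGB24 pixel */
    }
}
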
diff --git a/postproc/rgb2rgb.c b/postproc/rgb2rgb.c
index 9109fcc303..2536aa71b5 100644
--- a/postproc/rgb2rgb.c
+++ b/postproc/rgb2rgb.c
@@ -16,6 +16,9 @@
static const uint64_t mask32 __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
static const uint64_t mask24l __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
static const uint64_t mask24h __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
+static const uint64_t mask24hh __attribute__((aligned(8))) = 0xffff000000000000ULL;
+static const uint64_t mask24hhh __attribute__((aligned(8))) = 0xffffffff00000000ULL;
+static const uint64_t mask24hhhh __attribute__((aligned(8))) = 0xffffffffffff0000ULL;
static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
static const uint64_t mask15s __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL;
@@ -90,34 +93,59 @@ void rgb32to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
end = s + src_size;
#ifdef HAVE_MMX
__asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6"
- ::"m"(mask24l),"m"(mask24h):"memory");
+ mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*4))*(MMREG_SIZE*4));
while(s < mm_end)
{
__asm __volatile(
PREFETCH" 32%1\n\t"
"movq %1, %%mm0\n\t"
"movq 8%1, %%mm1\n\t"
+ "movq 16%1, %%mm4\n\t"
+ "movq 24%1, %%mm5\n\t"
"movq %%mm0, %%mm2\n\t"
"movq %%mm1, %%mm3\n\t"
+ "movq %%mm4, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
"psrlq $8, %%mm2\n\t"
"psrlq $8, %%mm3\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm1\n\t"
- "pand %%mm6, %%mm2\n\t"
- "pand %%mm6, %%mm3\n\t"
+ "psrlq $8, %%mm6\n\t"
+ "psrlq $8, %%mm7\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm1\n\t"
+ "pand %2, %%mm4\n\t"
+ "pand %2, %%mm5\n\t"
+ "pand %3, %%mm2\n\t"
+ "pand %3, %%mm3\n\t"
+ "pand %3, %%mm6\n\t"
+ "pand %3, %%mm7\n\t"
"por %%mm2, %%mm0\n\t"
"por %%mm3, %%mm1\n\t"
+ "por %%mm6, %%mm4\n\t"
+ "por %%mm7, %%mm5\n\t"
+
+ "movq %%mm1, %%mm2\n\t"
+ "movq %%mm4, %%mm3\n\t"
+ "psllq $48, %%mm2\n\t"
+ "psllq $32, %%mm3\n\t"
+ "pand %4, %%mm2\n\t"
+ "pand %5, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psrlq $16, %%mm1\n\t"
+ "psrlq $32, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "pand %6, %%mm5\n\t"
+ "por %%mm5, %%mm4\n\t"
+
MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 6%0"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm4, 16%0"
:"=m"(*dest)
- :"m"(*s)
+ :"m"(*s),"m"(mask24l),
+ "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
:"memory");
- dest += 12;
- s += 16;
+ dest += 24;
+ s += 32;
}
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
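
Each loaded quadword holds two RGB32 pixels; the mask24l/mask24h step above strips the padding byte of each pixel and leaves six colour bytes packed into the low 48 bits of the register. A small C sketch of that per-quadword step, assuming little-endian byte order (illustrative, not part of the patch):

#include <stdint.h>

/* q holds two RGB32 pixels (8 bytes); the result has their six colour
   bytes packed into bytes 0-5, with bytes 6-7 zero. */
static uint64_t pack_two_pixels(uint64_t q)
{
    return  (q        & 0x0000000000FFFFFFULL)   /* mask24l: pixel 0, bytes 0-2 */
          | ((q >> 8) & 0x0000FFFFFF000000ULL);  /* mask24h: pixel 1, bytes 3-5 */
}
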
diff --git a/postproc/rgb2rgb_template.c b/postproc/rgb2rgb_template.c
index 9109fcc303..2536aa71b5 100644
--- a/postproc/rgb2rgb_template.c
+++ b/postproc/rgb2rgb_template.c
@@ -16,6 +16,9 @@
static const uint64_t mask32 __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
static const uint64_t mask24l __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
static const uint64_t mask24h __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
+static const uint64_t mask24hh __attribute__((aligned(8))) = 0xffff000000000000ULL;
+static const uint64_t mask24hhh __attribute__((aligned(8))) = 0xffffffff00000000ULL;
+static const uint64_t mask24hhhh __attribute__((aligned(8))) = 0xffffffffffff0000ULL;
static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
static const uint64_t mask15s __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL;
@@ -90,34 +93,59 @@ void rgb32to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
end = s + src_size;
#ifdef HAVE_MMX
__asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6"
- ::"m"(mask24l),"m"(mask24h):"memory");
+ mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*4))*(MMREG_SIZE*4));
while(s < mm_end)
{
__asm __volatile(
PREFETCH" 32%1\n\t"
"movq %1, %%mm0\n\t"
"movq 8%1, %%mm1\n\t"
+ "movq 16%1, %%mm4\n\t"
+ "movq 24%1, %%mm5\n\t"
"movq %%mm0, %%mm2\n\t"
"movq %%mm1, %%mm3\n\t"
+ "movq %%mm4, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
"psrlq $8, %%mm2\n\t"
"psrlq $8, %%mm3\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm1\n\t"
- "pand %%mm6, %%mm2\n\t"
- "pand %%mm6, %%mm3\n\t"
+ "psrlq $8, %%mm6\n\t"
+ "psrlq $8, %%mm7\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm1\n\t"
+ "pand %2, %%mm4\n\t"
+ "pand %2, %%mm5\n\t"
+ "pand %3, %%mm2\n\t"
+ "pand %3, %%mm3\n\t"
+ "pand %3, %%mm6\n\t"
+ "pand %3, %%mm7\n\t"
"por %%mm2, %%mm0\n\t"
"por %%mm3, %%mm1\n\t"
+ "por %%mm6, %%mm4\n\t"
+ "por %%mm7, %%mm5\n\t"
+
+ "movq %%mm1, %%mm2\n\t"
+ "movq %%mm4, %%mm3\n\t"
+ "psllq $48, %%mm2\n\t"
+ "psllq $32, %%mm3\n\t"
+ "pand %4, %%mm2\n\t"
+ "pand %5, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psrlq $16, %%mm1\n\t"
+ "psrlq $32, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "pand %6, %%mm5\n\t"
+ "por %%mm5, %%mm4\n\t"
+
MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 6%0"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm4, 16%0"
:"=m"(*dest)
- :"m"(*s)
+ :"m"(*s),"m"(mask24l),
+ "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
:"memory");
- dest += 12;
- s += 16;
+ dest += 24;
+ s += 32;
}
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
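
The newly added block of the loop merges four such 48-bit packed quadwords (mm0, mm1, mm4, mm5) into three full 64-bit values so they can be written with aligned MOVNTQ stores at offsets 0, 8, and 16. A hedged C sketch of that merge, again assuming little-endian byte order and mirroring the asm including its mask24hh/mask24hhh/mask24hhhh masks (the names q0..q3 and pack_quads are illustrative):

#include <stdint.h>

/* q0..q3 each carry six packed RGB bytes in their low 48 bits; out[0..2]
   are the three 8-byte quadwords the loop stores with MOVNTQ. */
static void pack_quads(uint64_t q0, uint64_t q1, uint64_t q2, uint64_t q3,
                       uint64_t out[3])
{
    out[0] =  q0        | ((q1 << 48) & 0xffff000000000000ULL); /* mask24hh   */
    out[1] = (q1 >> 16) | ((q2 << 32) & 0xffffffff00000000ULL); /* mask24hhh  */
    out[2] = (q2 >> 32) | ((q3 << 16) & 0xffffffffffff0000ULL); /* mask24hhhh */
}

Because each iteration now consumes 32 source bytes and produces 24 destination bytes, mm_end is rounded down to a multiple of MMREG_SIZE*4 and the pointers advance by 32 and 24 bytes respectively.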