-rw-r--r--  libswscale/colorspace-test.c     |   22
-rw-r--r--  libswscale/ppc/yuv2rgb_altivec.c |   36
-rw-r--r--  libswscale/rgb2rgb.c             |   16
-rw-r--r--  libswscale/rgb2rgb.h             |   16
-rw-r--r--  libswscale/rgb2rgb_template.c    | 2736
-rw-r--r--  libswscale/sparc/yuv2rgb_vis.c   |  188
-rw-r--r--  libswscale/swscale.c             |  270
-rw-r--r--  libswscale/swscale_template.c    |  228
8 files changed, 1756 insertions, 1756 deletions
diff --git a/libswscale/colorspace-test.c b/libswscale/colorspace-test.c
index 2223ee3a31..a1ec1be1b4 100644
--- a/libswscale/colorspace-test.c
+++ b/libswscale/colorspace-test.c
@@ -41,17 +41,17 @@ static char *args_parse(int argc, char *argv[])
     while ((o = getopt(argc, argv, "m23")) != -1) {
         switch (o) {
-        case 'm':
-            cpu_caps |= SWS_CPU_CAPS_MMX;
-            break;
-        case '2':
-            cpu_caps |= SWS_CPU_CAPS_MMX2;
-            break;
-        case '3':
-            cpu_caps |= SWS_CPU_CAPS_3DNOW;
-            break;
-        default:
-            av_log(NULL, AV_LOG_ERROR, "Unknown option %c\n", o);
+            case 'm':
+                cpu_caps |= SWS_CPU_CAPS_MMX;
+                break;
+            case '2':
+                cpu_caps |= SWS_CPU_CAPS_MMX2;
+                break;
+            case '3':
+                cpu_caps |= SWS_CPU_CAPS_3DNOW;
+                break;
+            default:
+                av_log(NULL, AV_LOG_ERROR, "Unknown option %c\n", o);
         }
     }
diff --git a/libswscale/ppc/yuv2rgb_altivec.c b/libswscale/ppc/yuv2rgb_altivec.c
index 37fbcb5dd5..57acc6bc3e 100644
--- a/libswscale/ppc/yuv2rgb_altivec.c
+++ b/libswscale/ppc/yuv2rgb_altivec.c
@@ -855,13 +855,13 @@ ff_yuv2packedX_altivec(SwsContext *c,
         B = vec_packclp (B0,B1);
         switch(c->dstFormat) {
-        case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
-        case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
-        case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
-        case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
-        case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
-        case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
-        default:
+            case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
+            case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
+            case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
+            case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
+            case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
+            case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
+            default:
         {
             /* If this is reached, the caller should have
                called yuv2packedXinC instead. */
@@ -934,17 +934,17 @@ ff_yuv2packedX_altivec(SwsContext *c,
             nout = (vector unsigned char *)scratch;
             switch(c->dstFormat) {
-            case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
-            case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
-            case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
-            case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
-            case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
-            case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
-            default:
-                /* Unreachable, I think. */
-                av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
-                       sws_format_name(c->dstFormat));
-                return;
+                case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
+                case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
+                case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
+                case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
+                case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
+                case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
+                default:
+                    /* Unreachable, I think. */
+                    av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
+                           sws_format_name(c->dstFormat));
+                    return;
             }
             memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
diff --git a/libswscale/rgb2rgb.c b/libswscale/rgb2rgb.c
index 7aeb2a3ac2..1b1d4db77c 100644
--- a/libswscale/rgb2rgb.c
+++ b/libswscale/rgb2rgb.c
@@ -88,17 +88,17 @@ void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *sr
                     long srcStride1, long srcStride2,
                     long srcStride3, long dstStride);
 void (*uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
 void (*uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
 void (*yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
 void (*yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
 
 #if ARCH_X86 && CONFIG_GPL
diff --git a/libswscale/rgb2rgb.h b/libswscale/rgb2rgb.h
index 3850ef291d..9d6e6df415 100644
--- a/libswscale/rgb2rgb.h
+++ b/libswscale/rgb2rgb.h
@@ -144,17 +144,17 @@ extern void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint
 
 extern void (*uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
 extern void (*uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
 extern void (*yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
 extern void (*yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
-        long width, long height,
-        long lumStride, long chromStride, long srcStride);
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
 
 void sws_rgb2rgb_init(int flags);
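The rgb2rgb.c and rgb2rgb.h hunks above reformat libswscale's runtime dispatch table: each conversion such as uyvytoyuv420 is a global function pointer, and sws_rgb2rgb_init(flags) points each one at a plain C or SIMD implementation depending on the reported CPU capabilities. A minimal sketch of that pattern, with illustrative names (FLAG_MMX, conv_c, conv_mmx, and init_dispatch are placeholders, not the real libswscale symbols):

    #include <stdint.h>

    #define FLAG_MMX 1  /* hypothetical capability flag */

    /* One global pointer per conversion, as in rgb2rgb.c. */
    static void (*convert)(uint8_t *dst, const uint8_t *src, long n);

    static void conv_c(uint8_t *dst, const uint8_t *src, long n)
    {
        while (n--)
            *dst++ = *src++;     /* plain C fallback */
    }

    static void conv_mmx(uint8_t *dst, const uint8_t *src, long n)
    {
        conv_c(dst, src, n);     /* stand-in for the SIMD body */
    }

    /* Plays the role of sws_rgb2rgb_init(): the CPU-feature branch is
     * taken once here, so callers invoke the pointer unconditionally. */
    void init_dispatch(int flags)
    {
        convert = (flags & FLAG_MMX) ? conv_mmx : conv_c;
    }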
diff --git a/libswscale/rgb2rgb_template.c b/libswscale/rgb2rgb_template.c
index 3d1fb8aea6..475a810f0b 100644
--- a/libswscale/rgb2rgb_template.c
+++ b/libswscale/rgb2rgb_template.c
@@ -77,16 +77,16 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s
     const uint8_t *s = src;
     const uint8_t *end;
 #if HAVE_MMX
-        const uint8_t *mm_end;
+    const uint8_t *mm_end;
 #endif
     end = s + src_size;
 #if HAVE_MMX
-        __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
-        mm_end = end - 23;
-        __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
-        while (s < mm_end)
-        {
-            __asm__ volatile(
+    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
+    mm_end = end - 23;
+    __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
+    while (s < mm_end)
+    {
+        __asm__ volatile(
             PREFETCH" 32%1 \n\t"
             "movd %1, %%mm0 \n\t"
             "punpckldq 3%1, %%mm0 \n\t"
@@ -107,11 +107,11 @@ static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long s
             :"=m"(*dest)
             :"m"(*s)
             :"memory");
-            dest += 32;
-            s += 24;
-        }
-        __asm__ volatile(SFENCE:::"memory");
-        __asm__ volatile(EMMS:::"memory");
+        dest += 32;
+        s += 24;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
 #endif
     while (s < end)
     {
@@ -146,53 +146,53 @@ static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long s
     while (s < mm_end)
     {
         __asm__ volatile(
-                PREFETCH" 32%1 \n\t"
-                "movq %1, %%mm0 \n\t"
-                "movq 8%1, %%mm1 \n\t"
-                "movq 16%1, %%mm4 \n\t"
-                "movq 24%1, %%mm5 \n\t"
-                "movq %%mm0, %%mm2 \n\t"
-                "movq %%mm1, %%mm3 \n\t"
-                "movq %%mm4, %%mm6 \n\t"
-                "movq %%mm5, %%mm7 \n\t"
-                "psrlq $8, %%mm2 \n\t"
-                "psrlq $8, %%mm3 \n\t"
-                "psrlq $8, %%mm6 \n\t"
-                "psrlq $8, %%mm7 \n\t"
-                "pand %2, %%mm0 \n\t"
-                "pand %2, %%mm1 \n\t"
-                "pand %2, %%mm4 \n\t"
-                "pand %2, %%mm5 \n\t"
-                "pand %3, %%mm2 \n\t"
-                "pand %3, %%mm3 \n\t"
-                "pand %3, %%mm6 \n\t"
-                "pand %3, %%mm7 \n\t"
-                "por %%mm2, %%mm0 \n\t"
-                "por %%mm3, %%mm1 \n\t"
-                "por %%mm6, %%mm4 \n\t"
-                "por %%mm7, %%mm5 \n\t"
-
-                "movq %%mm1, %%mm2 \n\t"
-                "movq %%mm4, %%mm3 \n\t"
-                "psllq $48, %%mm2 \n\t"
-                "psllq $32, %%mm3 \n\t"
-                "pand %4, %%mm2 \n\t"
-                "pand %5, %%mm3 \n\t"
-                "por %%mm2, %%mm0 \n\t"
-                "psrlq $16, %%mm1 \n\t"
-                "psrlq $32, %%mm4 \n\t"
-                "psllq $16, %%mm5 \n\t"
-                "por %%mm3, %%mm1 \n\t"
-                "pand %6, %%mm5 \n\t"
-                "por %%mm5, %%mm4 \n\t"
-
-                MOVNTQ" %%mm0, %0 \n\t"
-                MOVNTQ" %%mm1, 8%0 \n\t"
-                MOVNTQ" %%mm4, 16%0"
-                :"=m"(*dest)
-                :"m"(*s),"m"(mask24l),
-                "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
-                :"memory");
+            PREFETCH" 32%1 \n\t"
+            "movq %1, %%mm0 \n\t"
+            "movq 8%1, %%mm1 \n\t"
+            "movq 16%1, %%mm4 \n\t"
+            "movq 24%1, %%mm5 \n\t"
+            "movq %%mm0, %%mm2 \n\t"
+            "movq %%mm1, %%mm3 \n\t"
+            "movq %%mm4, %%mm6 \n\t"
+            "movq %%mm5, %%mm7 \n\t"
+            "psrlq $8, %%mm2 \n\t"
+            "psrlq $8, %%mm3 \n\t"
+            "psrlq $8, %%mm6 \n\t"
+            "psrlq $8, %%mm7 \n\t"
+            "pand %2, %%mm0 \n\t"
+            "pand %2, %%mm1 \n\t"
+            "pand %2, %%mm4 \n\t"
+            "pand %2, %%mm5 \n\t"
+            "pand %3, %%mm2 \n\t"
+            "pand %3, %%mm3 \n\t"
+            "pand %3, %%mm6 \n\t"
+            "pand %3, %%mm7 \n\t"
+            "por %%mm2, %%mm0 \n\t"
+            "por %%mm3, %%mm1 \n\t"
+            "por %%mm6, %%mm4 \n\t"
+            "por %%mm7, %%mm5 \n\t"
+
+            "movq %%mm1, %%mm2 \n\t"
+            "movq %%mm4, %%mm3 \n\t"
+            "psllq $48, %%mm2 \n\t"
+            "psllq $32, %%mm3 \n\t"
+            "pand %4, %%mm2 \n\t"
+            "pand %5, %%mm3 \n\t"
+            "por %%mm2, %%mm0 \n\t"
+            "psrlq $16, %%mm1 \n\t"
+            "psrlq $32, %%mm4 \n\t"
+            "psllq $16, %%mm5 \n\t"
+            "por %%mm3, %%mm1 \n\t"
+            "pand %6, %%mm5 \n\t"
+            "por %%mm5, %%mm4 \n\t"
+
+            MOVNTQ" %%mm0, %0 \n\t"
+            MOVNTQ" %%mm1, 8%0 \n\t"
+            MOVNTQ" %%mm4, 16%0"
+            :"=m"(*dest)
+            :"m"(*s),"m"(mask24l),
+            "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+            :"memory");
         dest += 24;
         s += 32;
     }
@@ -237,19 +237,19 @@ static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_
     while (s<mm_end)
     {
         __asm__ volatile(
-                PREFETCH" 32%1 \n\t"
-                "movq %1, %%mm0 \n\t"
-                "movq 8%1, %%mm2 \n\t"
-                "movq %%mm0, %%mm1 \n\t"
-                "movq %%mm2, %%mm3 \n\t"
-                "pand %%mm4, %%mm0 \n\t"
-                "pand %%mm4, %%mm2 \n\t"
-                "paddw %%mm1, %%mm0 \n\t"
-                "paddw %%mm3, %%mm2 \n\t"
-                MOVNTQ" %%mm0, %0 \n\t"
-                MOVNTQ" %%mm2, 8%0"
-                :"=m"(*d)
-                :"m"(*s)
+            PREFETCH" 32%1 \n\t"
+            "movq %1, %%mm0 \n\t"
+            "movq 8%1, %%mm2 \n\t"
+            "movq %%mm0, %%mm1 \n\t"
+            "movq %%mm2, %%mm3 \n\t"
+            "pand %%mm4, %%mm0 \n\t"
+            "pand %%mm4, %%mm2 \n\t"
+            "paddw %%mm1, %%mm0 \n\t"
+            "paddw %%mm3, %%mm2 \n\t"
+            MOVNTQ" %%mm0, %0 \n\t"
+            MOVNTQ" %%mm2, 8%0"
+            :"=m"(*d)
+            :"m"(*s)
        );
        d+=16;
        s+=16;
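The rgb15to16 hunk just above is the MMX form of a small arithmetic trick: with %%mm4 holding a mask of the red and green fields, x + (x & mask) doubles bits 5..14, which moves red and green one bit to the left while the 5-bit blue field is untouched, turning RGB555 into RGB565; the rgb16to15 hunk that follows undoes it with a right shift and two masks. A per-pixel scalar sketch of both directions (a simplification for clarity, not libswscale's actual C fallback, which processes 32 bits at a time):

    #include <stdint.h>

    /* RGB555 -> RGB565: adding the masked red/green bits to the pixel
     * shifts them one position left; blue (bits 0..4) gains nothing. */
    static uint16_t rgb15to16_pixel(uint16_t x)
    {
        return x + (x & 0xFFE0);
    }

    /* RGB565 -> RGB555: shift red/green back down, keep the blue bits. */
    static uint16_t rgb16to15_pixel(uint16_t x)
    {
        return ((x >> 1) & 0x7FE0) | (x & 0x001F);
    }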
%%mm0 \n\t" + "pand %%mm4, %%mm2 \n\t" + "paddw %%mm1, %%mm0 \n\t" + "paddw %%mm3, %%mm2 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm2, 8%0" + :"=m"(*d) + :"m"(*s) ); d+=16; s+=16; @@ -287,23 +287,23 @@ static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_ while (s<mm_end) { __asm__ volatile( - PREFETCH" 32%1 \n\t" - "movq %1, %%mm0 \n\t" - "movq 8%1, %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "psrlq $1, %%mm0 \n\t" - "psrlq $1, %%mm2 \n\t" - "pand %%mm7, %%mm0 \n\t" - "pand %%mm7, %%mm2 \n\t" - "pand %%mm6, %%mm1 \n\t" - "pand %%mm6, %%mm3 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm3, %%mm2 \n\t" - MOVNTQ" %%mm0, %0 \n\t" - MOVNTQ" %%mm2, 8%0" - :"=m"(*d) - :"m"(*s) + PREFETCH" 32%1 \n\t" + "movq %1, %%mm0 \n\t" + "movq 8%1, %%mm2 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm2, %%mm3 \n\t" + "psrlq $1, %%mm0 \n\t" + "psrlq $1, %%mm2 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm3 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm3, %%mm2 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + MOVNTQ" %%mm2, 8%0" + :"=m"(*d) + :"m"(*s) ); d+=16; s+=16; @@ -339,38 +339,38 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_ mm_end = end - 15; #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) __asm__ volatile( - "movq %3, %%mm5 \n\t" - "movq %4, %%mm6 \n\t" - "movq %5, %%mm7 \n\t" - "jmp 2f \n\t" - ASMALIGN(4) - "1: \n\t" - PREFETCH" 32(%1) \n\t" - "movd (%1), %%mm0 \n\t" - "movd 4(%1), %%mm3 \n\t" - "punpckldq 8(%1), %%mm0 \n\t" - "punpckldq 12(%1), %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm3, %%mm4 \n\t" - "pand %%mm6, %%mm0 \n\t" - "pand %%mm6, %%mm3 \n\t" - "pmaddwd %%mm7, %%mm0 \n\t" - "pmaddwd %%mm7, %%mm3 \n\t" - "pand %%mm5, %%mm1 \n\t" - "pand %%mm5, %%mm4 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "psrld $5, %%mm0 \n\t" - "pslld $11, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, (%0) \n\t" - "add $16, %1 \n\t" - "add $8, %0 \n\t" - "2: \n\t" - "cmp %2, %1 \n\t" - " jb 1b \n\t" - : "+r" (d), "+r"(s) - : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) + "movq %3, %%mm5 \n\t" + "movq %4, %%mm6 \n\t" + "movq %5, %%mm7 \n\t" + "jmp 2f \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1) \n\t" + "movd (%1), %%mm0 \n\t" + "movd 4(%1), %%mm3 \n\t" + "punpckldq 8(%1), %%mm0 \n\t" + "punpckldq 12(%1), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pand %%mm6, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pmaddwd %%mm7, %%mm0 \n\t" + "pmaddwd %%mm7, %%mm3 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm5, %%mm4 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "psrld $5, %%mm0 \n\t" + "pslld $11, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, (%0) \n\t" + "add $16, %1 \n\t" + "add $8, %0 \n\t" + "2: \n\t" + "cmp %2, %1 \n\t" + " jb 1b \n\t" + : "+r" (d), "+r"(s) + : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) ); #else __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); @@ -381,35 +381,35 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_ while (s < mm_end) { __asm__ volatile( - PREFETCH" 32%1 \n\t" - "movd %1, %%mm0 \n\t" - "movd 4%1, %%mm3 \n\t" - "punpckldq 8%1, %%mm0 \n\t" - "punpckldq 12%1, %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm3, %%mm4 \n\t" - "movq %%mm3, %%mm5 \n\t" - "psrlq $3, %%mm0 \n\t" - "psrlq $3, %%mm3 \n\t" 
- "pand %2, %%mm0 \n\t" - "pand %2, %%mm3 \n\t" - "psrlq $5, %%mm1 \n\t" - "psrlq $5, %%mm4 \n\t" - "pand %%mm6, %%mm1 \n\t" - "pand %%mm6, %%mm4 \n\t" - "psrlq $8, %%mm2 \n\t" - "psrlq $8, %%mm5 \n\t" - "pand %%mm7, %%mm2 \n\t" - "pand %%mm7, %%mm5 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "por %%mm2, %%mm0 \n\t" - "por %%mm5, %%mm3 \n\t" - "psllq $16, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, %0 \n\t" - :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); d += 4; s += 16; } @@ -443,35 +443,35 @@ static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long s while (s < mm_end) { __asm__ volatile( - PREFETCH" 32%1 \n\t" - "movd %1, %%mm0 \n\t" - "movd 4%1, %%mm3 \n\t" - "punpckldq 8%1, %%mm0 \n\t" - "punpckldq 12%1, %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm3, %%mm4 \n\t" - "movq %%mm3, %%mm5 \n\t" - "psllq $8, %%mm0 \n\t" - "psllq $8, %%mm3 \n\t" - "pand %%mm7, %%mm0 \n\t" - "pand %%mm7, %%mm3 \n\t" - "psrlq $5, %%mm1 \n\t" - "psrlq $5, %%mm4 \n\t" - "pand %%mm6, %%mm1 \n\t" - "pand %%mm6, %%mm4 \n\t" - "psrlq $19, %%mm2 \n\t" - "psrlq $19, %%mm5 \n\t" - "pand %2, %%mm2 \n\t" - "pand %2, %%mm5 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "por %%mm2, %%mm0 \n\t" - "por %%mm5, %%mm3 \n\t" - "psllq $16, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, %0 \n\t" - :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $8, %%mm0 \n\t" + "psllq $8, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); d += 4; s += 16; } @@ -498,38 +498,38 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_ mm_end = end - 15; #if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster) __asm__ volatile( - "movq %3, %%mm5 \n\t" - "movq %4, %%mm6 \n\t" - "movq %5, %%mm7 \n\t" - "jmp 2f \n\t" - ASMALIGN(4) - "1: \n\t" - PREFETCH" 32(%1) \n\t" - "movd (%1), %%mm0 \n\t" - "movd 4(%1), %%mm3 \n\t" - "punpckldq 8(%1), %%mm0 \n\t" - "punpckldq 12(%1), %%mm3 \n\t" - "movq %%mm0, 
%%mm1 \n\t" - "movq %%mm3, %%mm4 \n\t" - "pand %%mm6, %%mm0 \n\t" - "pand %%mm6, %%mm3 \n\t" - "pmaddwd %%mm7, %%mm0 \n\t" - "pmaddwd %%mm7, %%mm3 \n\t" - "pand %%mm5, %%mm1 \n\t" - "pand %%mm5, %%mm4 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "psrld $6, %%mm0 \n\t" - "pslld $10, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, (%0) \n\t" - "add $16, %1 \n\t" - "add $8, %0 \n\t" - "2: \n\t" - "cmp %2, %1 \n\t" - " jb 1b \n\t" - : "+r" (d), "+r"(s) - : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) + "movq %3, %%mm5 \n\t" + "movq %4, %%mm6 \n\t" + "movq %5, %%mm7 \n\t" + "jmp 2f \n\t" + ASMALIGN(4) + "1: \n\t" + PREFETCH" 32(%1) \n\t" + "movd (%1), %%mm0 \n\t" + "movd 4(%1), %%mm3 \n\t" + "punpckldq 8(%1), %%mm0 \n\t" + "punpckldq 12(%1), %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm3, %%mm4 \n\t" + "pand %%mm6, %%mm0 \n\t" + "pand %%mm6, %%mm3 \n\t" + "pmaddwd %%mm7, %%mm0 \n\t" + "pmaddwd %%mm7, %%mm3 \n\t" + "pand %%mm5, %%mm1 \n\t" + "pand %%mm5, %%mm4 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "psrld $6, %%mm0 \n\t" + "pslld $10, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, (%0) \n\t" + "add $16, %1 \n\t" + "add $8, %0 \n\t" + "2: \n\t" + "cmp %2, %1 \n\t" + " jb 1b \n\t" + : "+r" (d), "+r"(s) + : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) ); #else __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory"); @@ -540,35 +540,35 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_ while (s < mm_end) { __asm__ volatile( - PREFETCH" 32%1 \n\t" - "movd %1, %%mm0 \n\t" - "movd 4%1, %%mm3 \n\t" - "punpckldq 8%1, %%mm0 \n\t" - "punpckldq 12%1, %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm3, %%mm4 \n\t" - "movq %%mm3, %%mm5 \n\t" - "psrlq $3, %%mm0 \n\t" - "psrlq $3, %%mm3 \n\t" - "pand %2, %%mm0 \n\t" - "pand %2, %%mm3 \n\t" - "psrlq $6, %%mm1 \n\t" - "psrlq $6, %%mm4 \n\t" - "pand %%mm6, %%mm1 \n\t" - "pand %%mm6, %%mm4 \n\t" - "psrlq $9, %%mm2 \n\t" - "psrlq $9, %%mm5 \n\t" - "pand %%mm7, %%mm2 \n\t" - "pand %%mm7, %%mm5 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "por %%mm2, %%mm0 \n\t" - "por %%mm5, %%mm3 \n\t" - "psllq $16, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, %0 \n\t" - :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $9, %%mm2 \n\t" + "psrlq $9, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); d += 4; s += 16; } @@ -602,35 +602,35 @@ static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long s while (s < mm_end) { __asm__ volatile( - PREFETCH" 32%1 \n\t" - "movd %1, %%mm0 \n\t" - "movd 4%1, %%mm3 \n\t" - "punpckldq 8%1, %%mm0 \n\t" - "punpckldq 12%1, %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm3, %%mm4 \n\t" - "movq %%mm3, %%mm5 \n\t" - "psllq $7, %%mm0 \n\t" - "psllq $7, 
%%mm3 \n\t" - "pand %%mm7, %%mm0 \n\t" - "pand %%mm7, %%mm3 \n\t" - "psrlq $6, %%mm1 \n\t" - "psrlq $6, %%mm4 \n\t" - "pand %%mm6, %%mm1 \n\t" - "pand %%mm6, %%mm4 \n\t" - "psrlq $19, %%mm2 \n\t" - "psrlq $19, %%mm5 \n\t" - "pand %2, %%mm2 \n\t" - "pand %2, %%mm5 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "por %%mm2, %%mm0 \n\t" - "por %%mm5, %%mm3 \n\t" - "psllq $16, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, %0 \n\t" - :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 4%1, %%mm3 \n\t" + "punpckldq 8%1, %%mm0 \n\t" + "punpckldq 12%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psllq $7, %%mm0 \n\t" + "psllq $7, %%mm3 \n\t" + "pand %%mm7, %%mm0 \n\t" + "pand %%mm7, %%mm3 \n\t" + "psrlq $6, %%mm1 \n\t" + "psrlq $6, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $19, %%mm2 \n\t" + "psrlq $19, %%mm5 \n\t" + "pand %2, %%mm2 \n\t" + "pand %2, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); d += 4; s += 16; } @@ -663,35 +663,35 @@ static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long s while (s < mm_end) { __asm__ volatile( - PREFETCH" 32%1 \n\t" - "movd %1, %%mm0 \n\t" - "movd 3%1, %%mm3 \n\t" - "punpckldq 6%1, %%mm0 \n\t" - "punpckldq 9%1, %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm3, %%mm4 \n\t" - "movq %%mm3, %%mm5 \n\t" - "psrlq $3, %%mm0 \n\t" - "psrlq $3, %%mm3 \n\t" - "pand %2, %%mm0 \n\t" - "pand %2, %%mm3 \n\t" - "psrlq $5, %%mm1 \n\t" - "psrlq $5, %%mm4 \n\t" - "pand %%mm6, %%mm1 \n\t" - "pand %%mm6, %%mm4 \n\t" - "psrlq $8, %%mm2 \n\t" - "psrlq $8, %%mm5 \n\t" - "pand %%mm7, %%mm2 \n\t" - "pand %%mm7, %%mm5 \n\t" - "por %%mm1, %%mm0 \n\t" - "por %%mm4, %%mm3 \n\t" - "por %%mm2, %%mm0 \n\t" - "por %%mm5, %%mm3 \n\t" - "psllq $16, %%mm3 \n\t" - "por %%mm3, %%mm0 \n\t" - MOVNTQ" %%mm0, %0 \n\t" - :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); + PREFETCH" 32%1 \n\t" + "movd %1, %%mm0 \n\t" + "movd 3%1, %%mm3 \n\t" + "punpckldq 6%1, %%mm0 \n\t" + "punpckldq 9%1, %%mm3 \n\t" + "movq %%mm0, %%mm1 \n\t" + "movq %%mm0, %%mm2 \n\t" + "movq %%mm3, %%mm4 \n\t" + "movq %%mm3, %%mm5 \n\t" + "psrlq $3, %%mm0 \n\t" + "psrlq $3, %%mm3 \n\t" + "pand %2, %%mm0 \n\t" + "pand %2, %%mm3 \n\t" + "psrlq $5, %%mm1 \n\t" + "psrlq $5, %%mm4 \n\t" + "pand %%mm6, %%mm1 \n\t" + "pand %%mm6, %%mm4 \n\t" + "psrlq $8, %%mm2 \n\t" + "psrlq $8, %%mm5 \n\t" + "pand %%mm7, %%mm2 \n\t" + "pand %%mm7, %%mm5 \n\t" + "por %%mm1, %%mm0 \n\t" + "por %%mm4, %%mm3 \n\t" + "por %%mm2, %%mm0 \n\t" + "por %%mm5, %%mm3 \n\t" + "psllq $16, %%mm3 \n\t" + "por %%mm3, %%mm0 \n\t" + MOVNTQ" %%mm0, %0 \n\t" + :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); d += 4; s += 12; } @@ -726,35 +726,35 @@ st |