author      atmosfear <atmosfear@b3059339-0415-0410-9bf9-f77b7e298cf2>  2001-06-11 17:43:15 +0000
committer   atmosfear <atmosfear@b3059339-0415-0410-9bf9-f77b7e298cf2>  2001-06-11 17:43:15 +0000
commit      f4e97b214b24ca90598ef787caf5af35a602e7bb (patch)
tree        082df83eaf49fc7cae7651498d736f43ccb6b4f7 /libvo
parent      430300621f2db74575e8a62c470a10cd62e558d7 (diff)
download    mpv-f4e97b214b24ca90598ef787caf5af35a602e7bb.tar.bz2
            mpv-f4e97b214b24ca90598ef787caf5af35a602e7bb.tar.xz
Code cleanup: add the missing config.h include and use femms on K6-2/2+/III.
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@1102 b3059339-0415-0410-9bf9-f77b7e298cf2
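The femms change rides on the two dispatch macros introduced at the top of the diff below: MOVNTQ selects the non-temporal store on MMX2-capable CPUs (K7 and P3) and falls back to a plain movq otherwise, while EMMS selects femms, the faster 3DNow! variant of emms, on MMX-only 3DNow! chips such as the K6-2/2+/III. A minimal sketch of the pattern, assuming HAVE_MMX2 and HAVE_3DNOW are set (or not) by the config.h this commit starts including; store8 and dst are hypothetical names for illustration:

#include "../config.h"

#ifdef HAVE_MMX2
#define MOVNTQ "movntq"  /* non-temporal store: K7 and P3 only */
#else
#define MOVNTQ "movq"    /* plain MMX processors */
#endif

#if !defined(HAVE_MMX2) && defined(HAVE_3DNOW)
#define EMMS "femms;"    /* fast MMX-state clear on K6-2/2+/III */
#else
#define EMMS "emms;"
#endif

static void store8(unsigned char *dst)
{
    /* adjacent string literals concatenate, so the macros splice
     * straight into the asm template */
    __asm__ __volatile__ (
        MOVNTQ " %%mm0, (%0);"  /* write 8 bytes from mm0 */
        EMMS                    /* restore FPU state afterwards */
        : : "r" (dst) : "memory");
}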
Diffstat (limited to 'libvo')
-rw-r--r--  libvo/yuv2rgb_mmx.c  319
1 file changed, 165 insertions, 154 deletions
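Beyond the macros, the recurring cleanup in the diff is qualifying every asm statement as __asm__ __volatile__ and normalizing the comment style. These statements declare no output operands, so without the qualifier GCC is allowed to treat them as dead code and delete or reorder them. A minimal illustration, not taken from the patch:

/* With no outputs, only __volatile__ keeps the optimizer from
 * discarding the statement; the patch follows the same style and,
 * like the original code, declares no clobber list. */
static void zero_mm4(void)
{
    __asm__ __volatile__ ("pxor %%mm4, %%mm4;");
}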
diff --git a/libvo/yuv2rgb_mmx.c b/libvo/yuv2rgb_mmx.c
index 6a651c62d4..658134f0e3 100644
--- a/libvo/yuv2rgb_mmx.c
+++ b/libvo/yuv2rgb_mmx.c
@@ -28,6 +28,8 @@
#include <stdio.h>
#include <stdlib.h>
+#include "../config.h"
+
#include "mmx.h"
//#include "libmpeg2/mpeg2.h"
//#include "libmpeg2/mpeg2_internal.h"
@@ -54,9 +56,18 @@ uint64_t mmx_grnshift = 0x03;
uint64_t mmx_blueshift = 0x03;
#ifdef HAVE_MMX2
-#define movntq "movntq" // use this for K7 and p3 only
+/* use this for K7 and p3 only */
+#define MOVNTQ "movntq"
+#else
+/* for MMX-only processors */
+#define MOVNTQ "movq"
+#endif
+
+#if !defined( HAVE_MMX2) && defined( HAVE_3DNOW)
+/* for K6 2/2+/3 */
+#define EMMS "femms;"
#else
-#define movntq "movq" // for MMX-only processors
+#define EMMS "emms;"
#endif
static void yuv420_rgb16_mmx (uint8_t * image, uint8_t * py,
@@ -68,126 +79,126 @@ static void yuv420_rgb16_mmx (uint8_t * image, uint8_t * py,
int x = 0, y = 0;
/* load data for first scan line */
- __asm__ (
- "movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0\n\t"
- "movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0\n\t"
+ __asm__ __volatile__ (
+ "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "pxor %%mm4, %%mm4 # zero mm4\n\t"
- "movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+ "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
- //"movl $0, (%3) # cache preload for image\n\t"
+ //"movl $0, (%3);" /* cache preload for image */
: : "r" (py), "r" (pu), "r" (pv), "r" (image));
do {
do {
/* this mmx assembly code deals with a SINGLE scan line at a time; it converts 8
pixels in each iteration */
- __asm__ (".align 8 \n\t"
+ __asm__ __volatile__ (".align 8;"
/* Do the multiply part of the conversion for even and odd pixels,
register usage:
mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
mm6 -> Y even, mm7 -> Y odd */
/* convert the chroma part */
- "punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0\n\t"
- "punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0\n\t"
+ "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */
+ "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */
- "psubsw mmx_80w, %%mm0 # Cb -= 128\n\t"
- "psubsw mmx_80w, %%mm1 # Cr -= 128\n\t"
+ "psubsw mmx_80w, %%mm0;" /* Cb -= 128 */
+ "psubsw mmx_80w, %%mm1;" /* Cr -= 128 */
- "psllw $3, %%mm0 # Promote precision\n\t"
- "psllw $3, %%mm1 # Promote precision\n\t"
+ "psllw $3, %%mm0;" /* Promote precision */
+ "psllw $3, %%mm1;" /* Promote precision */
- "movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0\n\t"
- "movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0\n\t"
+ "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */
+ "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */
- "pmulhw mmx_U_green, %%mm2# Mul Cb with green coeff -> Cb green\n\t"
- "pmulhw mmx_V_green, %%mm3# Mul Cr with green coeff -> Cr green\n\t"
+ "pmulhw mmx_U_green, %%mm2;" /* Mul Cb with green coeff -> Cb green */
+ "pmulhw mmx_V_green, %%mm3;" /* Mul Cr with green coeff -> Cr green */
- "pmulhw mmx_U_blue, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0\n\t"
- "pmulhw mmx_V_red, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0\n\t"
+ "pmulhw mmx_U_blue, %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */
+ "pmulhw mmx_V_red, %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */
- "paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen\n\t"
+ "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */
/* convert the luma part */
- "psubusb mmx_10w, %%mm6 # Y -= 16\n\t"
+ "psubusb mmx_10w, %%mm6;" /* Y -= 16 */
- "movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
- "pand mmx_00ffw, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0\n\t"
+ "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ "pand mmx_00ffw, %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */
- "psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1\n\t"
+ "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */
- "psllw $3, %%mm6 # Promote precision\n\t"
- "psllw $3, %%mm7 # Promote precision\n\t"
+ "psllw $3, %%mm6;" /* Promote precision */
+ "psllw $3, %%mm7;" /* Promote precision */
- "pmulhw mmx_Y_coeff, %%mm6# Mul 4 Y even 00 y6 00 y4 00 y2 00 y0\n\t"
- "pmulhw mmx_Y_coeff, %%mm7# Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1\n\t"
+ "pmulhw mmx_Y_coeff, %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */
+ "pmulhw mmx_Y_coeff, %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */
/* Do the addition part of the conversion for even and odd pixels,
register usage:
mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
mm6 -> Y even, mm7 -> Y odd */
- "movq %%mm0, %%mm3 # Copy Cblue\n\t"
- "movq %%mm1, %%mm4 # Copy Cred\n\t"
- "movq %%mm2, %%mm5 # Copy Cgreen\n\t"
+ "movq %%mm0, %%mm3;" /* Copy Cblue */
+ "movq %%mm1, %%mm4;" /* Copy Cred */
+ "movq %%mm2, %%mm5;" /* Copy Cgreen */
- "paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0\n\t"
- "paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1\n\t"
+ "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */
+ "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */
- "paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0\n\t"
- "paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1\n\t"
+ "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */
+ "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */
- "paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0\n\t"
- "paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1\n\t"
+ "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */
+ "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */
/* Limit RGB even to 0..255 */
- "packuswb %%mm0, %%mm0 # B6 B4 B2 B0 B6 B4 B2 B0\n\t"
- "packuswb %%mm1, %%mm1 # R6 R4 R2 R0 R6 R4 R2 R0\n\t"
- "packuswb %%mm2, %%mm2 # G6 G4 G2 G0 G6 G4 G2 G0\n\t"
+ "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */
+ "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */
+ "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */
/* Limit RGB odd to 0..255 */
- "packuswb %%mm3, %%mm3 # B7 B5 B3 B1 B7 B5 B3 B1\n\t"
- "packuswb %%mm4, %%mm4 # R7 R5 R3 R1 R7 R5 R3 R1\n\t"
- "packuswb %%mm5, %%mm5 # G7 G5 G3 G1 G7 G5 G3 G1\n\t"
+ "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */
+ "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */
+ "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */
/* Interleave RGB even and odd */
- "punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0\n\t"
- "punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0\n\t"
- "punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0\n\t"
+ "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+ "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */
/* mask unneeded bits off */
- "pand mmx_redmask, %%mm0# b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0\n\t"
- "pand mmx_grnmask, %%mm2# g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0\n\t"
- "pand mmx_redmask, %%mm1# r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0\n\t"
+ "pand mmx_redmask, %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand mmx_grnmask, %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
+ "pand mmx_redmask, %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
- "psrlw mmx_blueshift,%%mm0#0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3\n\t"
- "pxor %%mm4, %%mm4 # zero mm4\n\t"
+ "psrlw mmx_blueshift,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
- "movq %%mm0, %%mm5 # Copy B7-B0\n\t"
- "movq %%mm2, %%mm7 # Copy G7-G0\n\t"
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
/* convert rgb24 plane to rgb16 pack for pixel 0-3 */
- "punpcklbw %%mm4, %%mm2 # 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0\n\t"
- "punpcklbw %%mm1, %%mm0 # r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3\n\t"
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
- "psllw mmx_blueshift,%%mm2# 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0\n\t"
- "por %%mm2, %%mm0 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3\n\t"
+ "psllw mmx_blueshift,%%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
- "movq 8 (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
- movntq " %%mm0, (%3) # store pixel 0-3\n\t"
+ "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%3);" /* store pixel 0-3 */
/* convert rgb24 plane to rgb16 pack for pixel 4-7 */
- "punpckhbw %%mm4, %%mm7 # 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0\n\t"
- "punpckhbw %%mm1, %%mm5 # r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3\n\t"
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
- "psllw mmx_blueshift,%%mm7# 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0\n\t"
- "movd 4 (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0\n\t"
+ "psllw mmx_blueshift,%%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
- "por %%mm7, %%mm5 # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3\n\t"
- "movd 4 (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0\n\t"
+ "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+ "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- movntq " %%mm5, 8 (%3) # store pixel 4-7\n\t"
+ MOVNTQ " %%mm5, 8 (%3);" /* store pixel 4-7 */
: : "r" (py), "r" (pu), "r" (pv), "r" (image));
py += 8;
@@ -209,12 +220,12 @@ static void yuv420_rgb16_mmx (uint8_t * image, uint8_t * py,
image += (rgb_stride - 2*h_size);
/* load data for start of next scan line */
- __asm__ (
- "movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 00 u3 u2 u1 u0\n\t"
- "movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 00 v2 v1 v0\n\t"
+ __asm__ __volatile__ (
+ "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 00 v2 v1 v0 */
- //"movl $0, (%3) # cache preload for image\n\t"
- "movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
+ //"movl $0, (%3);" /* cache preload for image */
+ "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
: : "r" (py), "r" (pu), "r" (pv), "r" (image));
@@ -223,7 +234,7 @@ static void yuv420_rgb16_mmx (uint8_t * image, uint8_t * py,
even = (!even);
} while (y < v_size) ;
- __asm__ ("emms\n\t");
+ __asm__ __volatile__ (EMMS);
}
static void yuv420_argb32_mmx (uint8_t * image, uint8_t * py,
@@ -234,23 +245,23 @@ static void yuv420_argb32_mmx (uint8_t * image, uint8_t * py,
int even = 1;
int x = 0, y = 0;
- __asm__ (
- ".align 8 \n\t"
- "movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0\n\t"
- //"movl $0, (%3) # cache preload for image\n\t"
+ __asm__ __volatile__ (
+ ".align 8;"
+ "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ //"movl $0, (%3);" /* cache preload for image */
- "movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0\n\t"
- "pxor %%mm4, %%mm4 # zero mm4\n\t"
+ "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
- "movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
+ "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
: : "r" (py), "r" (pu), "r" (pv), "r" (image));
do {
do {
/* this mmx assembly code deals with a SINGLE scan line at a time; it converts 8
pixels in each iteration */
- __asm__ (
- ".align 8 \n\t"
+ __asm__ __volatile__ (
+ ".align 8;"
/* Do the multiply part of the conversion for even and odd pixels,
register usage:
mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
@@ -258,39 +269,39 @@ static void yuv420_argb32_mmx (uint8_t * image, uint8_t * py,
mm6 -> Y even, mm7 -> Y odd */
/* convert the chroma part */
- "punpcklbw %%mm4, %%mm0 # scatter 4 Cb 00 u3 00 u2 00 u1 00 u0\n\t"
- "punpcklbw %%mm4, %%mm1 # scatter 4 Cr 00 v3 00 v2 00 v1 00 v0\n\t"
+ "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */
+ "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */
- "psubsw mmx_80w, %%mm0 # Cb -= 128\n\t"
- "psubsw mmx_80w, %%mm1 # Cr -= 128\n\t"
+ "psubsw mmx_80w, %%mm0;" /* Cb -= 128 */
+ "psubsw mmx_80w, %%mm1;" /* Cr -= 128 */
- "psllw $3, %%mm0 # Promote precision\n\t"
- "psllw $3, %%mm1 # Promote precision\n\t"
+ "psllw $3, %%mm0;" /* Promote precision */
+ "psllw $3, %%mm1;" /* Promote precision */
- "movq %%mm0, %%mm2 # Copy 4 Cb 00 u3 00 u2 00 u1 00 u0\n\t"
- "movq %%mm1, %%mm3 # Copy 4 Cr 00 v3 00 v2 00 v1 00 v0\n\t"
+ "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */
+ "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */
- "pmulhw mmx_U_green, %%mm2# Mul Cb with green coeff -> Cb green\n\t"
- "pmulhw mmx_V_green, %%mm3# Mul Cr with green coeff -> Cr green\n\t"
+ "pmulhw mmx_U_green, %%mm2;" /* Mul Cb with green coeff -> Cb green */
+ "pmulhw mmx_V_green, %%mm3;" /* Mul Cr with green coeff -> Cr green */
- "pmulhw mmx_U_blue, %%mm0 # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0\n\t"
- "pmulhw mmx_V_red, %%mm1 # Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0\n\t"
+ "pmulhw mmx_U_blue, %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */
+ "pmulhw mmx_V_red, %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */
- "paddsw %%mm3, %%mm2 # Cb green + Cr green -> Cgreen\n\t"
+ "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */
/* convert the luma part */
- "psubusb mmx_10w, %%mm6 # Y -= 16\n\t"
+ "psubusb mmx_10w, %%mm6;" /* Y -= 16 */
- "movq %%mm6, %%mm7 # Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
- "pand mmx_00ffw, %%mm6 # get Y even 00 Y6 00 Y4 00 Y2 00 Y0\n\t"
+ "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ "pand mmx_00ffw, %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */
- "psrlw $8, %%mm7 # get Y odd 00 Y7 00 Y5 00 Y3 00 Y1\n\t"
+ "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */
- "psllw $3, %%mm6 # Promote precision\n\t"
- "psllw $3, %%mm7 # Promote precision\n\t"
+ "psllw $3, %%mm6;" /* Promote precision */
+ "psllw $3, %%mm7;" /* Promote precision */
- "pmulhw mmx_Y_coeff, %%mm6# Mul 4 Y even 00 y6 00 y4 00 y2 00 y0\n\t"
- "pmulhw mmx_Y_coeff, %%mm7# Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1\n\t"
+ "pmulhw mmx_Y_coeff, %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */
+ "pmulhw mmx_Y_coeff, %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */
/* Do the addition part of the conversion for even and odd pixels,
register usage:
@@ -298,75 +309,75 @@ static void yuv420_argb32_mmx (uint8_t * image, uint8_t * py,
mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
mm6 -> Y even, mm7 -> Y odd */
- "movq %%mm0, %%mm3 # Copy Cblue\n\t"
- "movq %%mm1, %%mm4 # Copy Cred\n\t"
- "movq %%mm2, %%mm5 # Copy Cgreen\n\t"
+ "movq %%mm0, %%mm3;" /* Copy Cblue */
+ "movq %%mm1, %%mm4;" /* Copy Cred */
+ "movq %%mm2, %%mm5;" /* Copy Cgreen */
- "paddsw %%mm6, %%mm0 # Y even + Cblue 00 B6 00 B4 00 B2 00 B0\n\t"
- "paddsw %%mm7, %%mm3 # Y odd + Cblue 00 B7 00 B5 00 B3 00 B1\n\t"
+ "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */
+ "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */
- "paddsw %%mm6, %%mm1 # Y even + Cred 00 R6 00 R4 00 R2 00 R0\n\t"
- "paddsw %%mm7, %%mm4 # Y odd + Cred 00 R7 00 R5 00 R3 00 R1\n\t"
+ "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */
+ "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */
- "paddsw %%mm6, %%mm2 # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0\n\t"
- "paddsw %%mm7, %%mm5 # Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1\n\t"
+ "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */
+ "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */
/* Limit RGB even to 0..255 */
- "packuswb %%mm0, %%mm0 # B6 B4 B2 B0 B6 B4 B2 B0\n\t"
- "packuswb %%mm1, %%mm1 # R6 R4 R2 R0 R6 R4 R2 R0\n\t"
- "packuswb %%mm2, %%mm2 # G6 G4 G2 G0 G6 G4 G2 G0\n\t"
+ "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */
+ "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */
+ "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */
/* Limit RGB odd to 0..255 */
- "packuswb %%mm3, %%mm3 # B7 B5 B3 B1 B7 B5 B3 B1\n\t"
- "packuswb %%mm4, %%mm4 # R7 R5 R3 R1 R7 R5 R3 R1\n\t"
- "packuswb %%mm5, %%mm5 # G7 G5 G3 G1 G7 G5 G3 G1\n\t"
+ "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */
+ "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */
+ "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */
/* Interleave RGB even and odd */
- "punpcklbw %%mm3, %%mm0 # B7 B6 B5 B4 B3 B2 B1 B0\n\t"
- "punpcklbw %%mm4, %%mm1 # R7 R6 R5 R4 R3 R2 R1 R0\n\t"
- "punpcklbw %%mm5, %%mm2 # G7 G6 G5 G4 G3 G2 G1 G0\n\t"
+ "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+ "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */
/* convert RGB plane to RGB packed format,
mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
mm4 -> GB, mm5 -> AR pixel 4-7,
mm6 -> GB, mm7 -> AR pixel 0-3 */
- "pxor %%mm3, %%mm3 # zero mm3\n\t"
+ "pxor %%mm3, %%mm3;" /* zero mm3 */
- "movq %%mm0, %%mm6 # B7 B6 B5 B4 B3 B2 B1 B0\n\t"
- "movq %%mm1, %%mm7 # R7 R6 R5 R4 R3 R2 R1 R0\n\t"
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
- "movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0\n\t"
- "movq %%mm1, %%mm5 # R7 R6 R5 R4 R3 R2 R1 R0\n\t"
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
- "punpcklbw %%mm2, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0\n\t"
- "punpcklbw %%mm3, %%mm7 # 00 R3 00 R2 00 R1 00 R0\n\t"
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
+ "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */
- "punpcklwd %%mm7, %%mm6 # 00 R1 B1 G1 00 R0 B0 G0\n\t"
- movntq " %%mm6, (%3) # Store ARGB1 ARGB0\n\t"
+ "punpcklwd %%mm7, %%mm6;" /* 00 R1 B1 G1 00 R0 B0 G0 */
+ MOVNTQ " %%mm6, (%3);" /* Store ARGB1 ARGB0 */
- "movq %%mm0, %%mm6 # B7 B6 B5 B4 B3 B2 B1 B0\n\t"
- "punpcklbw %%mm2, %%mm6 # G3 B3 G2 B2 G1 B1 G0 B0\n\t"
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
- "punpckhwd %%mm7, %%mm6 # 00 R3 G3 B3 00 R2 B3 G2\n\t"
- movntq " %%mm6, 8 (%3) # Store ARGB3 ARGB2\n\t"
+ "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 B3 G2 */
+ MOVNTQ " %%mm6, 8 (%3);" /* Store ARGB3 ARGB2 */
- "punpckhbw %%mm2, %%mm4 # G7 B7 G6 B6 G5 B5 G4 B4\n\t"
- "punpckhbw %%mm3, %%mm5 # 00 R7 00 R6 00 R5 00 R4\n\t"
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
+ "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */
- "punpcklwd %%mm5, %%mm4 # 00 R5 B5 G5 00 R4 B4 G4\n\t"
- movntq " %%mm4, 16 (%3) # Store ARGB5 ARGB4\n\t"
+ "punpcklwd %%mm5, %%mm4;" /* 00 R5 B5 G5 00 R4 B4 G4 */
+ MOVNTQ " %%mm4, 16 (%3);" /* Store ARGB5 ARGB4 */
- "movq %%mm0, %%mm4 # B7 B6 B5 B4 B3 B2 B1 B0\n\t"
- "punpckhbw %%mm2, %%mm4 # G7 B7 G6 B6 G5 B5 G4 B4\n\t"
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
- "punpckhwd %%mm5, %%mm4 # 00 R7 G7 B7 00 R6 B6 G6\n\t"
- movntq " %%mm4, 24 (%3) # Store ARGB7 ARGB6\n\t"
+ "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 B6 G6 */
+ MOVNTQ " %%mm4, 24 (%3);" /* Store ARGB7 ARGB6 */
- "movd 4 (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0\n\t"
- "movd 4 (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0\n\t"
+ "movd 4 (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd 4 (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "pxor %%mm4, %%mm4 # zero mm4\n\t"
- "movq 8 (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+ "movq 8 (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
: : "r" (py), "r" (pu), "r" (pv), "r" (image));
@@ -389,14 +400,14 @@ static void yuv420_argb32_mmx (uint8_t * image, uint8_t * py,
image += (rgb_stride - 4*h_size);
/* load data for start of next scan line */
- __asm__
+ __asm__ __volatile__
(
- ".align 8 \n\t"
- "movd (%1), %%mm0 # Load 4 Cb 00 00 00 00 u3 u2 u1 u0\n\t"
- "movd (%2), %%mm1 # Load 4 Cr 00 00 00 00 v3 v2 v1 v0\n\t"
+ ".align 8;"
+ "movd (%1), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%2), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- //"movl $0, (%3) # cache preload for image\n\t"
- "movq (%0), %%mm6 # Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0\n\t"
+ //"movl $0, (%3);" /* cache preload for image */
+ "movq (%0), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
: : "r" (py), "r" (pu), "r" (pv), "r" (image)
);
@@ -406,7 +417,7 @@ static void yuv420_argb32_mmx (uint8_t * image, uint8_t * py,
even = (!even);
} while ( y < v_size) ;
- __asm__ ("emms\n\t");
+ __asm__ __volatile__ (EMMS);
}
yuv2rgb_fun yuv2rgb_init_mmx (int bpp, int mode)
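The diff ends at the opening line of yuv2rgb_init_mmx, the selector that hands back one of the converters above as a yuv2rgb_fun pointer. Its body lies outside this hunk; a hypothetical sketch of such a dispatcher, with the bpp values inferred from the 2*h_size (16 bpp) and 4*h_size (32 bpp) strides used in the code above:

yuv2rgb_fun yuv2rgb_init_mmx (int bpp, int mode)
{
    /* hypothetical sketch -- the real body is not part of this diff */
    if (bpp == 16)
        return yuv420_rgb16_mmx;   /* RGB565, 2 bytes per pixel */
    if (bpp == 32)
        return yuv420_argb32_mmx;  /* ARGB, 4 bytes per pixel */
    return NULL;                   /* unsupported depth or mode */
}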