author    lucabe <lucabe@b3059339-0415-0410-9bf9-f77b7e298cf2>  2006-06-30 12:00:31 +0000
committer lucabe <lucabe@b3059339-0415-0410-9bf9-f77b7e298cf2>  2006-06-30 12:00:31 +0000
commit    cc30aae17bf521fc5453c7ee173cceefde255faf (patch)
tree      e88e8921f811ae85227b451ce739a0b9d49db9f8 /libswscale/swscale_template.c
parent    6ff1b359593595622d6f231fb3d5ff53e63cfa3d (diff)
Move postproc ---> libswscale
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@18866 b3059339-0415-0410-9bf9-f77b7e298cf2
Diffstat (limited to 'libswscale/swscale_template.c')
-rw-r--r--  libswscale/swscale_template.c  2928
1 file changed, 2928 insertions, 0 deletions
diff --git a/libswscale/swscale_template.c b/libswscale/swscale_template.c
new file mode 100644
index 0000000000..9830e3d39f
--- /dev/null
+++ b/libswscale/swscale_template.c
@@ -0,0 +1,2928 @@
+/*
+ Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include "asmalign.h"
+
+#undef REAL_MOVNTQ
+#undef MOVNTQ
+#undef PAVGB
+#undef PREFETCH
+#undef PREFETCHW
+#undef EMMS
+#undef SFENCE
+
+#ifdef HAVE_3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#ifdef HAVE_3DNOW
+#define PREFETCH "prefetch"
+#define PREFETCHW "prefetchw"
+#elif defined ( HAVE_MMX2 )
+#define PREFETCH "prefetchnta"
+#define PREFETCHW "prefetcht0"
+#else
+#define PREFETCH "/nop"
+#define PREFETCHW "/nop"
+#endif
+
+#ifdef HAVE_MMX2
+#define SFENCE "sfence"
+#else
+#define SFENCE "/nop"
+#endif
+
+#ifdef HAVE_MMX2
+#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
+#elif defined (HAVE_3DNOW)
+#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
+#endif
+
+#ifdef HAVE_MMX2
+#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
+#else
+#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
+#endif
+#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
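+
+/* Editor's note (not part of the original commit): the macros above let a
+   single asm template serve several CPU generations.  Under HAVE_MMX2, for
+   example, MOVNTQ(%%mm3, (%1, %%REGa)) stringifies to a non-temporal store,
+   e.g. "movntq %%mm3, (%1, %%eax) \n\t" assuming REGa expands to eax on
+   32-bit x86, while plain-MMX builds fall back to an ordinary movq.  The
+   REAL_xxx/wrapper pairs force one extra macro-expansion pass, so arguments
+   are fully expanded before '#' stringification. */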
+
+#ifdef HAVE_ALTIVEC
+#include "swscale_altivec_template.c"
+#endif
+
+#define YSCALEYUV2YV12X(x, offset) \
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ ASMALIGN16 /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ " jnz 1b \n\t"\
+ "psraw $3, %%mm3 \n\t"\
+ "psraw $3, %%mm4 \n\t"\
+ "packuswb %%mm4, %%mm3 \n\t"\
+ MOVNTQ(%%mm3, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "jb 1b \n\t"
+
+#define YSCALEYUV2YV121 \
+ "mov %2, %%"REG_a" \n\t"\
+ ASMALIGN16 /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
+ "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
+ "psraw $7, %%mm0 \n\t"\
+ "psraw $7, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ MOVNTQ(%%mm0, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "jnc 1b \n\t"
+
+/*
+ :: "m" (-lumFilterSize), "m" (-chrFilterSize),
+ "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
+ "r" (dest), "m" (dstW),
+ "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
+ : "%eax", "%ebx", "%ecx", "%edx", "%esi"
+*/
+#define YSCALEYUV2PACKEDX \
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN16\
+ "nop \n\t"\
+ "1: \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ ASMALIGN16\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
+\
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ ASMALIGN16\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t"\
+ "paddw %%mm5, %%mm7 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
+
+
+#define YSCALEYUV2RGBX \
+ YSCALEYUV2PACKEDX\
+ "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
+ "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
+ "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
+ "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#if 0
+#define FULL_YSCALEYUV2RGB \
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd %6, %%mm6 \n\t" /*yalpha1*/\
+ "punpcklwd %%mm6, %%mm6 \n\t"\
+ "punpcklwd %%mm6, %%mm6 \n\t"\
+ "movd %7, %%mm5 \n\t" /*uvalpha1*/\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
+ "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
+\
+\
+ "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
+ "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
+ "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
+\
+\
+ "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
+ "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
+ "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
+ "paddw %%mm1, %%mm3 \n\t" /* B*/\
+ "paddw %%mm1, %%mm0 \n\t" /* R*/\
+ "packuswb %%mm3, %%mm3 \n\t"\
+\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ "paddw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t" /* G*/\
+\
+ "packuswb %%mm1, %%mm1 \n\t"
+#endif
+
+#define REAL_YSCALEYUV2PACKED(index, c) \
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
+ "psraw $3, %%mm0 \n\t"\
+ "psraw $3, %%mm1 \n\t"\
+ "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
+ "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+
+#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
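+
+/* Editor's sketch (illustrative, not part of the original commit): the
+   vertical two-line blend above computes, per sample and ignoring the
+   normalization shifts,
+
+       out = buf1[i] + (((buf0[i] - buf1[i]) * alpha) >> 16)
+
+   which equals buf0*alpha + buf1*(1-alpha) in Q16 but needs only one pmulhw;
+   the same pattern is used for luma (yalpha) and chroma (uvalpha). */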
+
+#define REAL_YSCALEYUV2RGB(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
+
+#define REAL_YSCALEYUV2PACKED1(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $7, %%mm3 \n\t" \
+ "psraw $7, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t" \
+
+#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
+
+#define REAL_YSCALEYUV2RGB1(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
+
+#define REAL_YSCALEYUV2PACKED1b(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $8, %%mm3 \n\t" \
+ "psrlw $8, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t"
+#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
+
+// do vertical chrominance interpolation
+#define REAL_YSCALEYUV2RGB1b(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN16\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
+ "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
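+
+/* Editor's note (not part of the original commit): the *1 variants above
+   read a single source line (no vertical blend), while the *1b variants
+   approximate the blend of two chroma lines by a plain average, roughly
+       uv = (uvbuf0[i] + uvbuf1[i]) >> 1
+   folded into the normalization shift (hence psrlw $5 rather than the
+   single-line psraw $4, and the FIXME about the 16-bit sum overflowing). */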
+
+#define REAL_WRITEBGR32(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ MOVNTQ(%%mm0, (dst, index, 4))\
+ MOVNTQ(%%mm2, 8(dst, index, 4))\
+ MOVNTQ(%%mm1, 16(dst, index, 4))\
+ MOVNTQ(%%mm3, 24(dst, index, 4))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
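+
+/* Editor's sketch (illustrative, not part of the original commit): the
+   punpck ladder above interleaves the byte planes into BGR0 dwords; in
+   scalar form (b/g/r here stand for the hypothetical unpacked byte planes): */
+#if 0
+    for(i=0; i<dstw; i++){
+        dst[4*i+0]= b[i];
+        dst[4*i+1]= g[i];
+        dst[4*i+2]= r[i];
+        dst[4*i+3]= 0;      /* the zeroed %%mm7 */
+    }
+#endif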
+
+#define REAL_WRITEBGR16(dst, dstw, index) \
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
+\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
+\
+ "psllq $3, %%mm3 \n\t"\
+ "psllq $3, %%mm4 \n\t"\
+\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
+\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
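+
+/* Editor's note (illustrative, not part of the original commit): the RGB565
+   packing above computes, per pixel,
+
+       out = (b>>3) | ((g>>2)<<5) | ((r>>3)<<11)
+
+   using pand masks (bF8/bFC) instead of per-byte shifts; WRITEBGR15 below is
+   the same scheme with 5 bits per component and bit 15 left clear. */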
+
+#define REAL_WRITEBGR15(dst, dstw, index) \
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
+ "psrlq $1, %%mm5 \n\t"\
+\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
+\
+ "psllq $2, %%mm3 \n\t"\
+ "psllq $2, %%mm4 \n\t"\
+\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
+\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
+
+#define WRITEBGR24OLD(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
+ "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
+ "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
+ "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
+ "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+\
+ "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
+ "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
+ "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
+ "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
+ "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
+ "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
+ "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
+ "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
+ "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
+ "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
+\
+ "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
+ "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
+ "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
+ "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
+ "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
+ "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
+ "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
+\
+ MOVNTQ(%%mm0, (dst))\
+ MOVNTQ(%%mm2, 8(dst))\
+ MOVNTQ(%%mm3, 16(dst))\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#define WRITEBGR24MMX(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
+ "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
+\
+ "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
+ "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
+ "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
+\
+ "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
+ "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
+ "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
+ "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
+\
+ "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
+ "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+ MOVNTQ(%%mm0, (dst))\
+\
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
+ "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
+ MOVNTQ(%%mm6, 8(dst))\
+\
+ "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
+ MOVNTQ(%%mm5, 16(dst))\
+\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#define WRITEBGR24MMX2(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq "MANGLE(M24A)", %%mm0 \n\t"\
+ "movq "MANGLE(M24C)", %%mm7 \n\t"\
+ "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
+ "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
+ "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
+\
+ "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
+ "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
+\
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
+ "por %%mm1, %%mm6 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, (dst))\
+\
+ "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
+ "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
+ "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
+ "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
+\
+ "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
+ "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
+\
+ "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 8(dst))\
+\
+ "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
+ "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
+ "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
+\
+ "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
+ "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
+ "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
+\
+ "por %%mm1, %%mm3 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 16(dst))\
+\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#ifdef HAVE_MMX2
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
+#else
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
+#endif
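+
+/* Editor's sketch (illustrative, not part of the original commit): both
+   WRITEBGR24 flavours solve the same awkward store problem -- 8 pixels of
+   3 bytes each do not fit 8-byte MMX stores evenly -- by squeezing the BGR0
+   dwords into three contiguous qwords, via shift/or on plain MMX and via
+   pshufw on MMX2.  The scalar equivalent: */
+#if 0
+    for(i=0; i<dstw; i++){
+        dst[3*i+0]= b[i];
+        dst[3*i+1]= g[i];
+        dst[3*i+2]= r[i];
+    }
+#endif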
+
+#define REAL_WRITEYUY2(dst, dstw, index) \
+ "packuswb %%mm3, %%mm3 \n\t"\
+ "packuswb %%mm4, %%mm4 \n\t"\
+ "packuswb %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm4, %%mm3 \n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ "punpcklbw %%mm3, %%mm1 \n\t"\
+ "punpckhbw %%mm3, %%mm7 \n\t"\
+\
+ MOVNTQ(%%mm1, (dst, index, 2))\
+ MOVNTQ(%%mm7, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
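+
+/* Editor's sketch (illustrative, not part of the original commit): WRITEYUY2
+   emits packed 4:2:2 in the byte order Y0 U Y1 V: */
+#if 0
+    for(i=0; i<dstw/2; i++){
+        dst[4*i+0]= y[2*i];
+        dst[4*i+1]= u[i];
+        dst[4*i+2]= y[2*i+1];
+        dst[4*i+3]= v[i];
+    }
+#endif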
+
+
+static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+{
+#ifdef HAVE_MMX
+ if(uDest != NULL)
+ {
+ asm volatile(
+ YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
+ :: "r" (&c->redDither),
+ "r" (uDest), "p" (chrDstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
+ );
+
+ asm volatile(
+ YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
+ :: "r" (&c->redDither),
+ "r" (vDest), "p" (chrDstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
+ );
+ }
+
+ asm volatile(
+ YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
+ :: "r" (&c->redDither),
+ "r" (dest), "p" (dstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
+ );
+#else
+#ifdef HAVE_ALTIVEC
+yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+#else //HAVE_ALTIVEC
+yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+#endif //!HAVE_ALTIVEC
+#endif
+}
+
+static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+{
+yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
+}
+
+static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+{
+#ifdef HAVE_MMX
+ if(uDest != NULL)
+ {
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+ }
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (lumSrc + dstW), "r" (dest + dstW),
+ "g" (-dstW)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int val= lumSrc[i]>>7;
+
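+        /* editor's note: for the value range produced here, bit 8 of val is
+           set exactly when val lies outside 0..255, so the clip branch is
+           taken only for out-of-range samples */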
+ if(val&256){
+ if(val<0) val=0;
+ else val=255;
+ }
+
+ dest[i]= val;
+ }
+
+ if(uDest != NULL)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=chrSrc[i]>>7;
+ int v=chrSrc[i + 2048]>>7;
+
+ if((u|v)&256){
+ if(u<0) u=0;
+ else if (u>255) u=255;
+ if(v<0) v=0;
+ else if (v>255) v=255;
+ }
+
+ uDest[i]= u;
+ vDest[i]= v;
+ }
+#endif
+}
+
+
+/**
+ * vertical scale YV12 to RGB
+ */
+static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, long dstW, long dstY)
+{
+ long dummy=0;
+ switch(c->dstFormat)
+ {
+#ifdef HAVE_MMX
+ case IMGFMT_BGR32:
+ {
+ asm volatile(
+ YSCALEYUV2RGBX
+ WRITEBGR32(%4, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
+ );
+ }
+ break;
+ case IMGFMT_BGR24:
+ {
+ asm volatile(
+ YSCALEYUV2RGBX
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
+ "add %4, %%"REG_b" \n\t"
+ WRITEBGR24(%%REGb, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
+ );
+ }
+ break;
+ case IMGFMT_BGR15:
+ {
+ asm volatile(
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%4, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
+ );
+ }
+ break;
+ case IMGFMT_BGR16:
+ {
+ asm volatile(
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%4, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
+ );
+ }
+ break;
+ case IMGFMT_YUY2:
+ {
+ asm volatile(
+ YSCALEYUV2PACKEDX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ WRITEYUY2(%4, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
+ );
+ }
+ break;
+#endif
+ default:
+#ifdef HAVE_ALTIVEC
+ /* The following list of supported dstFormat values should
+ match what's found in the body of altivec_yuv2packedX() */
+ if(c->dstFormat==IMGFMT_A