summaryrefslogtreecommitdiffstats
path: root/postproc
diff options
context:
space:
mode:
authorarpi <arpi@b3059339-0415-0410-9bf9-f77b7e298cf2>2001-10-10 22:13:27 +0000
committerarpi <arpi@b3059339-0415-0410-9bf9-f77b7e298cf2>2001-10-10 22:13:27 +0000
commitccb3b35a3d99463ab222246afae42b939e911dec (patch)
treef8b94fa8b05cf157958123d1bbfd5037b0ed08e4 /postproc
parentcbb1e84445f4f4161fa9f0231167c8aac768af47 (diff)
downloadmpv-ccb3b35a3d99463ab222246afae42b939e911dec.tar.bz2
mpv-ccb3b35a3d99463ab222246afae42b939e911dec.tar.xz
new postprocess code by Michael Niedermayer (michaelni@gmx.at)
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@2159 b3059339-0415-0410-9bf9-f77b7e298cf2
Diffstat (limited to 'postproc')
-rw-r--r--postproc/postprocess.c1551
-rw-r--r--postproc/postprocess.h57
-rw-r--r--postproc/postprocess_template.c1551
3 files changed, 3159 insertions, 0 deletions
diff --git a/postproc/postprocess.c b/postproc/postprocess.c
new file mode 100644
index 0000000000..e674d6c5ea
--- /dev/null
+++ b/postproc/postprocess.c
@@ -0,0 +1,1551 @@
+/*
+ Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+/*
+ C MMX MMX2
+isVertDC Ec Ec
+isVertMinMaxOk Ec Ec
+doVertLowPass E e
+doVertDefFilter Ec Ec Ec
+isHorizDC Ec Ec
+isHorizMinMaxOk a
+doHorizLowPass E a
+doHorizDefFilter E a
+deRing
+
+E = Exact implementation
+e = almost exact implementation
+a = alternative / approximate impl
+c = checked against the other implementations (-vo md5)
+*/
+
+/*
+TODO:
+verify that everything works as it should
+reduce the time wasted on the mem transfer
+implement dering
+implement everything in C at least
+figure range of QP out (assuming <256 for now)
+unroll stuff if instructions depend too much on the prior one
+we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
+move YScale thing to the end instead of fixing QP
+...
+
+Notes:
+
+*/
+
+
+#include <inttypes.h>
+#include <stdio.h>
+#include "../config.h"
+#include "postprocess.h"
+//#undef HAVE_MMX2
+//#undef HAVE_MMX
+
+
+
+// Constants and scratch storage referenced BY NAME from the inline-asm blocks
+// below ("movq pQPb, ...", etc.); they must stay at file scope with exactly
+// these symbol names.
+static uint64_t packedYOffset=	0x0000000000000000LL;	// per-byte luma offset (packed)
+static uint64_t packedYScale=	0x0100010001000100LL;	// per-word luma scale, 8.8 fixed point (1.0)
+static uint64_t w05=		0x0005000500050005LL;	// four words of 5
+static uint64_t w20=		0x0020002000200020LL;	// four words of 32
+static uint64_t w1400=		0x1400140014001400LL;	// four words of 0x1400 (5/64 * 2^16, see doVertDefFilter)
+// Byte masks: in the name, digit k counted from the RIGHT marks byte k as 0xFF.
+static uint64_t bm00000001=	0x00000000000000FFLL;
+static uint64_t bm00010000=	0x000000FF00000000LL;
+static uint64_t bm00001000=	0x00000000FF000000LL;
+static uint64_t bm10000000=	0xFF00000000000000LL;
+static uint64_t bm10000001=	0xFF000000000000FFLL;
+static uint64_t bm11000011=	0xFFFF00000000FFFFLL;
+static uint64_t bm00011000=	0x000000FFFF000000LL;
+static uint64_t bm00110011=	0x0000FFFF0000FFFFLL;
+static uint64_t bm11001100=	0xFFFF0000FFFF0000LL;
+// Replicated byte constants (bXY = byte 0xXY in all 8 lanes).
+static uint64_t b00= 0x0000000000000000LL;
+static uint64_t b02= 0x0202020202020202LL;
+static uint64_t b0F= 0x0F0F0F0F0F0F0F0FLL;
+static uint64_t bFF= 0xFFFFFFFFFFFFFFFFLL;
+static uint64_t b7E= 0x7E7E7E7E7E7E7E7ELL;
+static uint64_t b7C= 0x7C7C7C7C7C7C7C7CLL;
+static uint64_t b3F= 0x3F3F3F3F3F3F3F3FLL;
+// Spill slots for the register-starved MMX code in doVertDefFilter.
+static uint64_t temp0=0;
+static uint64_t temp1=0;
+static uint64_t temp2=0;
+static uint64_t temp3=0;
+static uint64_t temp4=0;
+static uint64_t temp5=0;
+// QP replicated into all 8 byte lanes; presumably filled in by the caller
+// before the filters run (set outside this chunk — TODO confirm).
+static uint64_t pQPb=0;
+// 8x8 working copy used by the horizontal filters (rows at TEMP_STRIDE apart).
+static uint8_t tempBlock[16*16];
+
+// Minimum number of "equal" neighbour pairs (out of the 8x7 = 56 compared in
+// isHorizDC / isVertDC) for a block to count as flat/DC.
+int hFlatnessThreshold= 56 - 16;
+int vFlatnessThreshold= 56 - 16;
+
+// Amount of "black" you are willing to lose to get a brightness-corrected picture.
+double maxClippedThreshold= 0.01;
+
+int maxAllowedY=255;
+// FIXME: can never make a movie's black brighter (does anyone need that?)
+int minAllowedY=0;
+
+
+/**
+ * Read the x86 time-stamp counter (RDTSC) for quick-and-dirty profiling.
+ * The "=A" constraint returns the edx:eax pair as one 64-bit value.
+ */
+static inline long long rdtsc()
+{
+	long long l;
+	asm volatile(	"rdtsc\n\t"
+		: "=A" (l)
+	);
+//	printf("%d\n", int(l/1000));
+	return l;
+}
+
+// Prefetch the cache line at p with the non-temporal hint (SSE prefetchnta).
+static inline void prefetchnta(void *p)
+{
+	asm volatile(	"prefetchnta (%0)\n\t"
+		: : "r" (p)
+	);
+}
+
+// Prefetch the cache line at p into all cache levels (SSE prefetcht0 hint).
+static inline void prefetcht0(void *p)
+{
+	asm volatile(	"prefetcht0 (%0)\n\t"
+		: : "r" (p)
+	);
+}
+
+// Prefetch the cache line at p into L2 and higher (SSE prefetcht1 hint).
+static inline void prefetcht1(void *p)
+{
+	asm volatile(	"prefetcht1 (%0)\n\t"
+		: : "r" (p)
+	);
+}
+
+// Prefetch the cache line at p into the outer cache levels (SSE prefetcht2 hint).
+static inline void prefetcht2(void *p)
+{
+	asm volatile(	"prefetcht2 (%0)\n\t"
+		: : "r" (p)
+	);
+}
+
+//FIXME? |255-0| = 1 (shouldnt be a problem ...)
+/**
+ * Check if the middle 8x8 Block in the given 8x10 block is flat
+ */
+static inline bool isVertDC(uint8_t src[], int stride){
+//	return true;
+	// Counts pairs of vertically adjacent pixels whose difference is in
+	// {-1, 0, +1}; the block is "flat" if the count exceeds vFlatnessThreshold.
+	int numEq= 0;
+	src+= stride; // src points to begin of the 8x8 Block
+#ifdef HAVE_MMX
+	// For each pair of lines: (a - b + 0x7E) compared (signed) against 0x7C is
+	// true exactly for diffs -1/0/+1; pcmpgtb yields 0xFF (= -1) per matching
+	// byte, accumulated in mm0 and then byte-summed horizontally.
+	asm volatile(
+//		"int $3 \n\t"
+		"pushl %1\n\t"
+		"movq b7E, %%mm7 \n\t" // mm7 = 0x7E
+		"movq b7C, %%mm6 \n\t" // mm6 = 0x7C
+		"movq (%1), %%mm0 \n\t"
+		"addl %2, %1 \n\t"
+		"movq (%1), %%mm1 \n\t"
+		"psubb %%mm1, %%mm0 \n\t" // mm0 = difference
+		"paddb %%mm7, %%mm0 \n\t"
+		"pcmpgtb %%mm6, %%mm0 \n\t"
+
+		"addl %2, %1 \n\t"
+		"movq (%1), %%mm2 \n\t"
+		"psubb %%mm2, %%mm1 \n\t"
+		"paddb %%mm7, %%mm1 \n\t"
+		"pcmpgtb %%mm6, %%mm1 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+
+		"addl %2, %1 \n\t"
+		"movq (%1), %%mm1 \n\t"
+		"psubb %%mm1, %%mm2 \n\t"
+		"paddb %%mm7, %%mm2 \n\t"
+		"pcmpgtb %%mm6, %%mm2 \n\t"
+		"paddb %%mm2, %%mm0 \n\t"
+
+		"addl %2, %1 \n\t"
+		"movq (%1), %%mm2 \n\t"
+		"psubb %%mm2, %%mm1 \n\t"
+		"paddb %%mm7, %%mm1 \n\t"
+		"pcmpgtb %%mm6, %%mm1 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+
+		"addl %2, %1 \n\t"
+		"movq (%1), %%mm1 \n\t"
+		"psubb %%mm1, %%mm2 \n\t"
+		"paddb %%mm7, %%mm2 \n\t"
+		"pcmpgtb %%mm6, %%mm2 \n\t"
+		"paddb %%mm2, %%mm0 \n\t"
+
+		"addl %2, %1 \n\t"
+		"movq (%1), %%mm2 \n\t"
+		"psubb %%mm2, %%mm1 \n\t"
+		"paddb %%mm7, %%mm1 \n\t"
+		"pcmpgtb %%mm6, %%mm1 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+
+		"addl %2, %1 \n\t"
+		"movq (%1), %%mm1 \n\t"
+		"psubb %%mm1, %%mm2 \n\t"
+		"paddb %%mm7, %%mm2 \n\t"
+		"pcmpgtb %%mm6, %%mm2 \n\t"
+		"paddb %%mm2, %%mm0 \n\t"
+
+		// horizontal byte-sum of the 8 per-column counters into the low byte
+		"		\n\t"
+		"movq %%mm0, %%mm1 \n\t"
+		"psrlw $8, %%mm0 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+		"movq %%mm0, %%mm1 \n\t"
+		"psrlq $16, %%mm0 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+		"movq %%mm0, %%mm1 \n\t"
+		"psrlq $32, %%mm0 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+		"popl %1\n\t"
+		"movd %%mm0, %0 \n\t"
+		: "=r" (numEq)
+		: "r" (src), "r" (stride)
+		);
+//	printf("%d\n", numEq);
+	// Each match contributed -1 per byte, so the low byte is (-numEq) mod 256;
+	// undo that to recover the positive count.
+	numEq= (256 - (numEq & 0xFF)) &0xFF;
+
+//	int asmEq= numEq;
+//	numEq=0;
+//	uint8_t *temp= src;
+
+#else
+	for(int y=0; y<BLOCK_SIZE-1; y++)
+	{
+		// (diff + 1) & 0xFFFF < 3  <=>  diff is -1, 0 or +1
+		// (negative values wrap to large numbers under the mask)
+		if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
+		if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
+		if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
+		if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
+		if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
+		if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
+		if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
+		if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
+		src+= stride;
+	}
+#endif
+/*	if(abs(numEq - asmEq) > 0)
+	{
+		printf("\nasm:%d  c:%d\n", asmEq, numEq);
+		for(int y=0; y<8; y++)
+		{
+			for(int x=0; x<8; x++)
+			{
+				printf("%d ", temp[x + y*stride]);
+			}
+			printf("\n");
+		}
+	}
+*/
+	return numEq > vFlatnessThreshold;
+}
+
+/**
+ * Check that the vertical low-pass filter may run: every column's difference
+ * between the first filtered line (src+stride) and the last (src+8*stride)
+ * must be <= 2*QP (C path; the MMX path reads QP from the pQPb global).
+ */
+static inline bool isVertMinMaxOk(uint8_t src[], int stride, int QP)
+{
+#ifdef HAVE_MMX
+	int isOk;
+	asm volatile(
+//		"int $3 \n\t"
+		"movq (%1, %2), %%mm0 \n\t"
+		"movq (%1, %2, 8), %%mm1 \n\t"
+		"movq %%mm0, %%mm2 \n\t"
+		"psubusb %%mm1, %%mm0 \n\t"
+		"psubusb %%mm2, %%mm1 \n\t"
+		"por %%mm1, %%mm0 \n\t" // ABS Diff
+
+		"movq pQPb, %%mm7 \n\t" // QP,..., QP
+		"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
+		"psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
+		// Reduce: each dword becomes all-FF iff its 4 bytes were all 0; the
+		// shift merges half of each dword into the low dword, so the final
+		// pcmpeqd leaves the low dword all-FF iff BOTH dwords passed.
+		"pcmpeqd b00, %%mm0 \n\t"
+		"psrlq $16, %%mm0 \n\t"
+		"pcmpeqd bFF, %%mm0 \n\t"
+//		"movd %%mm0, (%1, %2, 4)\n\t"
+		"movd %%mm0, %0 \n\t"
+		: "=r" (isOk)
+		: "r" (src), "r" (stride)
+		);
+	return isOk;
+#else
+
+	int isOk2= true;
+	for(int x=0; x<BLOCK_SIZE; x++)
+	{
+		if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=false;
+	}
+/*	if(isOk && !isOk2 || !isOk && isOk2)
+	{
+		printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
+		for(int y=0; y<9; y++)
+		{
+			for(int x=0; x<8; x++)
+			{
+				printf("%d ", src[x + y*stride]);
+			}
+			printf("\n");
+		}
+	} */
+
+	return isOk2;
+#endif
+
+}
+
+/**
+ * Do a vertical low pass filter on the 8x10 block (only write to the 8x8 block in the middle)
+ * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
+ */
+/**
+ * Vertical low-pass filter over an 8x10 strip, writing only the middle 8x8.
+ * The first/last lines are conditionally replaced (clamped to QP) before
+ * filtering. The MMX2 path builds the /16 weighted sums from a tree of pavgb
+ * (average-with-rounding) operations, so it is only approximately equal to
+ * the exact C fallback (marked 'e' in the table at the top of the file).
+ */
+static inline void doVertLowPass(uint8_t *src, int stride, int QP)
+{
+//	QP= 64;
+
+#ifdef HAVE_MMX2
+	asm volatile(	//"movv %0 %1 %2\n\t"
+		"pushl %0 \n\t"
+		"movq pQPb, %%mm0 \n\t"  // QP,..., QP
+//		"movq bFF  , %%mm0 \n\t"  // QP,..., QP
+
+		// mm6 = first line to filter: line0 if |line0-line1| <= QP, else line1
+		"movq (%0), %%mm6 \n\t"
+		"movq (%0, %1), %%mm5 \n\t"
+		"movq %%mm5, %%mm1 \n\t"
+		"movq %%mm6, %%mm2 \n\t"
+		"psubusb %%mm6, %%mm5 \n\t"
+		"psubusb %%mm1, %%mm2 \n\t"
+		"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+		"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
+		"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
+
+		"pand %%mm2, %%mm6 \n\t"
+		"pandn %%mm1, %%mm2 \n\t"
+		"por %%mm2, %%mm6 \n\t"// First Line to Filter
+
+		// mm7 = last line to filter, selected the same way from lines 8/9
+		"movq (%0, %1, 8), %%mm5 \n\t"
+		"leal (%0, %1, 4), %%eax \n\t"
+		"leal (%0, %1, 8), %%ebx \n\t"
+		"subl %1, %%ebx \n\t"
+		"addl %1, %0 \n\t" // %0 points to line 1 not 0
+		"movq (%0, %1, 8), %%mm7 \n\t"
+		"movq %%mm5, %%mm1 \n\t"
+		"movq %%mm7, %%mm2 \n\t"
+		"psubusb %%mm7, %%mm5 \n\t"
+		"psubusb %%mm1, %%mm2 \n\t"
+		"por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+		"psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
+		"pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
+
+		"pand %%mm2, %%mm7 \n\t"
+		"pandn %%mm1, %%mm2 \n\t"
+		"por %%mm2, %%mm7 \n\t" // First Line to Filter
+
+
+		//	1	2	3	4	5	6	7	8
+		//	%0	%0+%1	%0+2%1	eax	%0+4%1	eax+2%1	ebx	eax+4%1
+		// 6 4 2 2 1 1
+		// 6 4 4 2
+		// 6 8 2
+/*
+		"movq %%mm6, %%mm2 \n\t" //1
+		"movq %%mm6, %%mm3 \n\t" //1
+		"paddusb b02, %%mm3 \n\t"
+		"psrlw $2, %%mm3 \n\t" //1	/4
+		"pand b3F, %%mm3 \n\t"
+		"psubb %%mm3, %%mm2 \n\t"
+		"movq (%0, %1), %%mm0 \n\t" //  1
+		"movq %%mm0, %%mm1 \n\t" //  1
+		"paddusb b02, %%mm0 \n\t"
+		"psrlw $2, %%mm0 \n\t" //  1	/4
+		"pand b3F, %%mm0 \n\t"
+		"paddusb %%mm2, %%mm0 \n\t" //3 1	/4
+*/
+		// The trailing "/N" comments track the accumulated tap weights and the
+		// implicit divisor of each partial pavgb result.
+		"movq (%0, %1), %%mm0 \n\t" //  1
+		"movq %%mm0, %%mm1 \n\t" //  1
+		"pavgb %%mm6, %%mm0 \n\t" //1 1	/2
+		"pavgb %%mm6, %%mm0 \n\t" //3 1	/4
+
+		"movq (%0, %1, 4), %%mm2 \n\t" //     1
+		"movq %%mm2, %%mm5 \n\t" //     1
+		"pavgb (%%eax), %%mm2 \n\t" //    11	/2
+		"pavgb (%0, %1, 2), %%mm2 \n\t" //   211	/4
+		"movq %%mm2, %%mm3 \n\t" //   211	/4
+		"movq (%0), %%mm4 \n\t" // 1
+		"pavgb %%mm4, %%mm3 \n\t" // 4 211	/8
+		"pavgb %%mm0, %%mm3 \n\t" //642211	/16
+		"movq %%mm3, (%0) \n\t" // X
+		// mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
+		"movq %%mm1, %%mm0 \n\t" //  1
+		"pavgb %%mm6, %%mm0 \n\t" //1 1	/2
+		"movq %%mm4, %%mm3 \n\t" // 1
+		"pavgb (%0,%1,2), %%mm3 \n\t" // 1 1	/2
+		"pavgb (%%eax,%1,2), %%mm5 \n\t" //     11	/2
+		"pavgb (%%eax), %%mm5 \n\t" //    211 /4
+		"pavgb %%mm5, %%mm3 \n\t" // 2 2211 /8
+		"pavgb %%mm0, %%mm3 \n\t" //4242211 /16
+		"movq %%mm3, (%0,%1) \n\t" //  X
+		// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
+		"pavgb %%mm4, %%mm6 \n\t" //11	/2
+		"movq (%%ebx), %%mm0 \n\t" //       1
+		"pavgb (%%eax, %1, 2), %%mm0 \n\t" //      11/2
+		"movq %%mm0, %%mm3 \n\t" //      11/2
+		"pavgb %%mm1, %%mm0 \n\t" //  2   11/4
+		"pavgb %%mm6, %%mm0 \n\t" //222   11/8
+		"pavgb %%mm2, %%mm0 \n\t" //22242211/16
+		"movq (%0, %1, 2), %%mm2 \n\t" //   1
+		"movq %%mm0, (%0, %1, 2) \n\t" //   X
+		// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
+		"movq (%%eax, %1, 4), %%mm0 \n\t" //        1
+		"pavgb (%%ebx), %%mm0 \n\t" //       11	/2
+		"pavgb %%mm0, %%mm6 \n\t" //11     11	/4
+		"pavgb %%mm1, %%mm4 \n\t" // 11		/2
+		"pavgb %%mm2, %%mm1 \n\t" //  11		/2
+		"pavgb %%mm1, %%mm6 \n\t" //1122   11	/8
+		"pavgb %%mm5, %%mm6 \n\t" //112242211	/16
+		"movq (%%eax), %%mm5 \n\t" //    1
+		"movq %%mm6, (%%eax) \n\t" //    X
+		// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
+		"movq (%%eax, %1, 4), %%mm6 \n\t" //        1
+		"pavgb %%mm7, %%mm6 \n\t" //        11	/2
+		"pavgb %%mm4, %%mm6 \n\t" // 11     11	/4
+		"pavgb %%mm3, %%mm6 \n\t" // 11   2211	/8
+		"pavgb %%mm5, %%mm2 \n\t" //   11	/2
+		"movq (%0, %1, 4), %%mm4 \n\t" //     1
+		"pavgb %%mm4, %%mm2 \n\t" //   112	/4
+		"pavgb %%mm2, %%mm6 \n\t" // 112242211	/16
+		"movq %%mm6, (%0, %1, 4) \n\t" //     X
+		// mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
+		"pavgb %%mm7, %%mm1 \n\t" //  11     2	/4
+		"pavgb %%mm4, %%mm5 \n\t" //    11	/2
+		"pavgb %%mm5, %%mm0 \n\t" //    11 11	/4
+		"movq (%%eax, %1, 2), %%mm6 \n\t" //      1
+		"pavgb %%mm6, %%mm1 \n\t" //  11  4  2	/8
+		"pavgb %%mm0, %%mm1 \n\t" //  11224222	/16
+//		"pxor %%mm1, %%mm1 \n\t"
+		"movq %%mm1, (%%eax, %1, 2) \n\t" //      X
+		// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
+		"pavgb (%%ebx), %%mm2 \n\t" //   112 4	/8
+		"movq (%%eax, %1, 4), %%mm0 \n\t" //        1
+		"pavgb %%mm0, %%mm6 \n\t" //      1 1	/2
+		"pavgb %%mm7, %%mm6 \n\t" //      1 12	/4
+		"pavgb %%mm2, %%mm6 \n\t" //   1122424	/4
+//		"pxor %%mm6, %%mm6 \n\t"
+		"movq %%mm6, (%%ebx) \n\t" //       X
+		// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
+		"pavgb %%mm7, %%mm5 \n\t" //    11   2	/4
+		"pavgb %%mm7, %%mm5 \n\t" //    11   6	/8
+
+		"pavgb %%mm3, %%mm0 \n\t" //      112	/4
+		"pavgb %%mm0, %%mm5 \n\t" //    112246	/16
+//		"pxor %%mm5, %%mm5 \n\t"
+//		"movq pQPb, %%mm5 \n\t"
+		"movq %%mm5, (%%eax, %1, 4) \n\t" //        X
+		"popl %0\n\t"
+
+		:
+		: "r" (src), "r" (stride)
+		: "%eax", "%ebx"
+	);
+
+#else
+	// Byte offsets of lines 1..9 of the 8x10 strip.
+	const int l1= stride;
+	const int l2= stride + l1;
+	const int l3= stride + l2;
+	const int l4= stride + l3;
+	const int l5= stride + l4;
+	const int l6= stride + l5;
+	const int l7= stride + l6;
+	const int l8= stride + l7;
+	const int l9= stride + l8;
+
+	for(int x=0; x<BLOCK_SIZE; x++)
+	{
+		// Virtual border values: keep the outer line only if it is within QP
+		// of its neighbour, otherwise mirror the neighbour.
+		const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
+		const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
+
+		// sums[i] = pairwise sums of adjacent lines; each output is the 9-tap
+		// (1,1,2,2,4,2,2,1,1)/16 filter built from these pairs.
+		int sums[9];
+		sums[0] = first + src[l1];
+		sums[1] = src[l1] + src[l2];
+		sums[2] = src[l2] + src[l3];
+		sums[3] = src[l3] + src[l4];
+		sums[4] = src[l4] + src[l5];
+		sums[5] = src[l5] + src[l6];
+		sums[6] = src[l6] + src[l7];
+		sums[7] = src[l7] + src[l8];
+		sums[8] = src[l8] + last;
+
+		// NOTE: '+' binds tighter than '<<' in C, so e.g.
+		// (first + sums[0] + sums[3]<<1) is (first + sums[0] + sums[3]) << 1 —
+		// the un-parenthesized shifts below apply to the whole preceding sum.
+		src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
+		src[l2]= ((src[l2]<<2) + (first + sums[0] + sums[3]<<1) + sums[5] + 8)>>4;
+		src[l3]= ((src[l3]<<2) + (first + sums[1] + sums[4]<<1) + sums[6] + 8)>>4;
+		src[l4]= ((src[l4]<<2) + (sums[2] + sums[5]<<1) + sums[0] + sums[7] + 8)>>4;
+		src[l5]= ((src[l5]<<2) + (sums[3] + sums[6]<<1) + sums[1] + sums[8] + 8)>>4;
+		src[l6]= ((src[l6]<<2) + (last + sums[7] + sums[4]<<1) + sums[2] + 8)>>4;
+		src[l7]= ((last + src[l7]<<2) + (src[l8] + sums[5]<<1) + sums[3] + 8)>>4;
+		src[l8]= ((sums[8]<<2) + (last + sums[6]<<1) + sums[4] + 8)>>4;
+
+		src++;
+	}
+
+#endif
+}
+
+/**
+ * Default vertical deblocking filter (per column, 8 columns at once in MMX):
+ * compute the "energy" across the block edge between lines 4 and 5; if
+ * |middleEnergy| < 8*QP, move the two edge lines toward each other by a
+ * correction d derived from the middle energy minus the smaller of the
+ * left/right energies, clipped to half the edge step (see the C fallback for
+ * the readable version of the same algorithm).
+ */
+static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
+{
+#ifdef HAVE_MMX
+	src+= stride;
+	//FIXME try pmul for *5 stuff
+//	src[0]=0;
+	asm volatile(
+		"pxor %%mm7, %%mm7 \n\t"
+		"leal (%0, %1), %%eax \n\t"
+		"leal (%%eax, %1, 4), %%ebx \n\t"
+//	0	1	2	3	4	5	6	7
+//	%0	%0+%1	%0+2%1	eax+2%1	%0+4%1	eax+4%1	ebx+%1	ebx+2%1
+//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1
+
+		// LX/HX = zero-extended low/high word halves of line X.
+		"movq (%0), %%mm0 \n\t"
+		"movq %%mm0, %%mm1 \n\t"
+		"punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
+		"punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
+
+		"movq (%%eax), %%mm2 \n\t"
+		"movq %%mm2, %%mm3 \n\t"
+		"punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
+		"punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
+
+		"movq (%%eax, %1), %%mm4 \n\t"
+		"movq %%mm4, %%mm5 \n\t"
+		"punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
+		"punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
+
+		"paddw %%mm0, %%mm0 \n\t" // 2L0
+		"paddw %%mm1, %%mm1 \n\t" // 2H0
+		"psubw %%mm4, %%mm2 \n\t" // L1 - L2
+		"psubw %%mm5, %%mm3 \n\t" // H1 - H2
+		"psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
+		"psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
+
+		"psllw $2, %%mm2 \n\t" // 4L1 - 4L2
+		"psllw $2, %%mm3 \n\t" // 4H1 - 4H2
+		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
+		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
+
+		"movq (%%eax, %1, 2), %%mm2 \n\t"
+		"movq %%mm2, %%mm3 \n\t"
+		"punpcklbw %%mm7, %%mm2 \n\t" // L3
+		"punpckhbw %%mm7, %%mm3 \n\t" // H3
+
+		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
+		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
+		"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+		"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+		// spill the "left energy" to memory — not enough MMX registers
+		"movq %%mm0, temp0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+		"movq %%mm1, temp1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+
+		"movq (%0, %1, 4), %%mm0 \n\t"
+		"movq %%mm0, %%mm1 \n\t"
+		"punpcklbw %%mm7, %%mm0 \n\t" // L4
+		"punpckhbw %%mm7, %%mm1 \n\t" // H4
+
+		"psubw %%mm0, %%mm2 \n\t" // L3 - L4
+		"psubw %%mm1, %%mm3 \n\t" // H3 - H4
+		"movq %%mm2, temp2 \n\t" // L3 - L4
+		"movq %%mm3, temp3 \n\t" // H3 - H4
+		"paddw %%mm4, %%mm4 \n\t" // 2L2
+		"paddw %%mm5, %%mm5 \n\t" // 2H2
+		"psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
+		"psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
+
+		"psllw $2, %%mm2 \n\t" // 4L3 - 4L4
+		"psllw $2, %%mm3 \n\t" // 4H3 - 4H4
+		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
+		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
+//50 opcodes so far
+		"movq (%%ebx), %%mm2 \n\t"
+		"movq %%mm2, %%mm3 \n\t"
+		"punpcklbw %%mm7, %%mm2 \n\t" // L5
+		"punpckhbw %%mm7, %%mm3 \n\t" // H5
+		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
+		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
+		"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
+		"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
+
+		// line 6 is loaded twice: once for the low half, once for the high
+		"movq (%%ebx, %1), %%mm6 \n\t"
+		"punpcklbw %%mm7, %%mm6 \n\t" // L6
+		"psubw %%mm6, %%mm2 \n\t" // L5 - L6
+		"movq (%%ebx, %1), %%mm6 \n\t"
+		"punpckhbw %%mm7, %%mm6 \n\t" // H6
+		"psubw %%mm6, %%mm3 \n\t" // H5 - H6
+
+		"paddw %%mm0, %%mm0 \n\t" // 2L4
+		"paddw %%mm1, %%mm1 \n\t" // 2H4
+		"psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
+		"psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
+
+		"psllw $2, %%mm2 \n\t" // 4L5 - 4L6
+		"psllw $2, %%mm3 \n\t" // 4H5 - 4H6
+		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
+		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
+
+		"movq (%%ebx, %1, 2), %%mm2 \n\t"
+		"movq %%mm2, %%mm3 \n\t"
+		"punpcklbw %%mm7, %%mm2 \n\t" // L7
+		"punpckhbw %%mm7, %%mm3 \n\t" // H7
+
+		"paddw %%mm2, %%mm2 \n\t" // 2L7
+		"paddw %%mm3, %%mm3 \n\t" // 2H7
+		"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
+		"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
+
+		"movq temp0, %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+		"movq temp1, %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+//FIXME pxor, psubw, pmax for abs
+		// abs() via sign mask: x = (x ^ sign) - sign
+		"movq %%mm7, %%mm6 \n\t" // 0
+		"pcmpgtw %%mm0, %%mm6 \n\t"
+		"pxor %%mm6, %%mm0 \n\t"
+		"psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
+		"movq %%mm7, %%mm6 \n\t" // 0
+		"pcmpgtw %%mm1, %%mm6 \n\t"
+		"pxor %%mm6, %%mm1 \n\t"
+		"psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
+
+		"movq %%mm7, %%mm6 \n\t" // 0
+		"pcmpgtw %%mm2, %%mm6 \n\t"
+		"pxor %%mm6, %%mm2 \n\t"
+		"psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
+		"movq %%mm7, %%mm6 \n\t" // 0
+		"pcmpgtw %%mm3, %%mm6 \n\t"
+		"pxor %%mm6, %%mm3 \n\t"
+		"psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
+
+		// min(|left|, |right|): native pminsw on MMX2, emulated otherwise
+#ifdef HAVE_MMX2
+		"pminsw %%mm2, %%mm0 \n\t"
+		"pminsw %%mm3, %%mm1 \n\t"
+#else
+		"movq %%mm0, %%mm6 \n\t"
+		"psubusw %%mm2, %%mm6 \n\t"
+		"psubw %%mm6, %%mm0 \n\t"
+		"movq %%mm1, %%mm6 \n\t"
+		"psubusw %%mm3, %%mm6 \n\t"
+		"psubw %%mm6, %%mm1 \n\t"
+#endif
+
+		"movq %%mm7, %%mm6 \n\t" // 0
+		"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
+		"pxor %%mm6, %%mm4 \n\t"
+		"psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
+		// mm7's zero is no longer needed; reuse it for the high-half sign
+		"pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
+		"pxor %%mm7, %%mm5 \n\t"
+		"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
+// 100 opcodes
+		"movd %2, %%mm2 \n\t" // QP
+//"pcmpeqb %%mm2, %%mm2\n\t"
+		"punpcklwd %%mm2, %%mm2 \n\t"
+		"punpcklwd %%mm2, %%mm2 \n\t"
+		"psllw $3, %%mm2 \n\t" // 8QP
+		"movq %%mm2, %%mm3 \n\t" // 8QP
+		"pcmpgtw %%mm4, %%mm2 \n\t"
+		"pcmpgtw %%mm5, %%mm3 \n\t"
+		"pand %%mm2, %%mm4 \n\t"
+		"pand %%mm3, %%mm5 \n\t"
+
+
+		"psubusw %%mm0, %%mm4 \n\t" // hd
+		"psubusw %%mm1, %%mm5 \n\t" // ld
+
+
+		"movq w05, %%mm2 \n\t" // 5
+		"pmullw %%mm2, %%mm4 \n\t"
+		"pmullw %%mm2, %%mm5 \n\t"
+		"movq w20, %%mm2 \n\t" // 32
+		"paddw %%mm2, %%mm4 \n\t"
+		"paddw %%mm2, %%mm5 \n\t"
+		"psrlw $6, %%mm4 \n\t"
+		"psrlw $6, %%mm5 \n\t"
+
+/*
+		"movq w06, %%mm2 \n\t" // 6
+		"paddw %%mm2, %%mm4 \n\t"
+		"paddw %%mm2, %%mm5 \n\t"
+		"movq w1400, %%mm2 \n\t" // 1400h = 5120 = 5/64*2^16
+//FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
+		"pmulhw %%mm2, %%mm4 \n\t" // hd/13
+		"pmulhw %%mm2, %%mm5 \n\t" // ld/13
+*/
+
+		"movq temp2, %%mm0 \n\t" // L3 - L4
+		"movq temp3, %%mm1 \n\t" // H3 - H4
+
+		"pxor %%mm2, %%mm2 \n\t"
+		"pxor %%mm3, %%mm3 \n\t"
+
+		// FIXME rounding error
+		"psraw $1, %%mm0 \n\t" // (L3 - L4)/2
+		"psraw $1, %%mm1 \n\t" // (H3 - H4)/2
+		"pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
+		"pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
+		"pxor %%mm2, %%mm0 \n\t"
+		"pxor %%mm3, %%mm1 \n\t"
+		"psubw %%mm2, %%mm0 \n\t" // |L3-L4|
+		"psubw %%mm3, %%mm1 \n\t" // |H3-H4|
+//		"psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
+//		"psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
+
+		"pxor %%mm6, %%mm2 \n\t"
+		"pxor %%mm7, %%mm3 \n\t"
+		"pand %%mm2, %%mm4 \n\t"
+		"pand %%mm3, %%mm5 \n\t"
+
+		// clip the correction to the halved edge step
+#ifdef HAVE_MMX2
+		"pminsw %%mm0, %%mm4 \n\t"
+		"pminsw %%mm1, %%mm5 \n\t"
+#else
+		"movq %%mm4, %%mm2 \n\t"
+		"psubusw %%mm0, %%mm2 \n\t"
+		"psubw %%mm2, %%mm4 \n\t"
+		"movq %%mm5, %%mm2 \n\t"
+		"psubusw %%mm1, %%mm2 \n\t"
+		"psubw %%mm2, %%mm5 \n\t"
+#endif
+		// re-apply the sign, pack back to bytes, and nudge the two edge lines
+		"pxor %%mm6, %%mm4 \n\t"
+		"pxor %%mm7, %%mm5 \n\t"
+		"psubw %%mm6, %%mm4 \n\t"
+		"psubw %%mm7, %%mm5 \n\t"
+		"packsswb %%mm5, %%mm4 \n\t"
+		"movq (%%eax, %1, 2), %%mm0 \n\t"
+		"paddb %%mm4, %%mm0 \n\t"
+		"movq %%mm0, (%%eax, %1, 2) \n\t"
+		"movq (%0, %1, 4), %%mm0 \n\t"
+		"psubb %%mm4, %%mm0 \n\t"
+//		"pxor %%mm0, %%mm0 \n\t"
+		"movq %%mm0, (%0, %1, 4) \n\t"
+
+		:
+		: "r" (src), "r" (stride), "r" (QP)
+		: "%eax", "%ebx"
+	);
+#else
+	// Byte offsets of lines 1..8.
+	const int l1= stride;
+	const int l2= stride + l1;
+	const int l3= stride + l2;
+	const int l4= stride + l3;
+	const int l5= stride + l4;
+	const int l6= stride + l5;
+	const int l7= stride + l6;
+	const int l8= stride + l7;
+//	const int l9= stride + l8;
+
+	for(int x=0; x<BLOCK_SIZE; x++)
+	{
+		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
+		if(ABS(middleEnergy) < 8*QP)
+		{
+			// q = half the step across the edge; d may not exceed it
+			const int q=(src[l4] - src[l5])/2;
+			const int leftEnergy=  5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
+			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
+
+			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
+			d= MAX(d, 0);
+
+			d= (5*d + 32) >> 6;
+			d*= SIGN(-middleEnergy);
+
+			// clamp d to [0, q] (or [q, 0] for negative q)
+			if(q>0)
+			{
+				d= d<0 ? 0 : d;
+				d= d>q ? q : d;
+			}
+			else
+			{
+				d= d>0 ? 0 : d;
+				d= d<q ? q : d;
+			}
+
+			src[l4]-= d;
+			src[l5]+= d;
+		}
+		src++;
+	}
+#endif
+}
+
+//FIXME? |255-0| = 1
+/**
+ * Check if the given 8x8 Block is mostly "flat" and copy the unaligned data into tempBlock.
+ */
+static inline bool isHorizDCAndCopy2Temp(uint8_t src[], int stride)
+{
+//	src++;
+	// Counts pairs of horizontally adjacent pixels whose difference is in
+	// {-1, 0, +1} while copying the 8x8 block into tempBlock; flat if the
+	// count exceeds hFlatnessThreshold.
+	int numEq= 0;
+#ifdef HAVE_MMX
+asm volatile (
+//		"int $3 \n\t"
+		"pushl %1\n\t"
+		"movq b7E, %%mm7 \n\t" // mm7 = 0x7E
+		"movq b7C, %%mm6 \n\t" // mm6 = 0x7C
+		"leal tempBlock, %%eax \n\t"
+		"pxor %%mm0, %%mm0 \n\t"
+
+// Load one (possibly unaligned) row via two aligned-ish reads, copy it to
+// tempBlock[i], and accumulate "equal neighbour" flags in mm0. The compare
+// trick is the same as in isVertDC; the bogus 8th comparison per row is
+// discarded later with psllq $8.
+#define HDC_CHECK_AND_CPY(i) \
+		"movq -4(%1), %%mm2 \n\t"\
+		"psrlq $32, %%mm2 \n\t"\
+		"punpckldq 4(%1), %%mm2 \n\t" /* (%1) */\
+		"movq %%mm2, %%mm1 \n\t"\
+		"psrlq $8, %%mm2 \n\t"\
+		"psubb %%mm1, %%mm2 \n\t"\
+		"paddb %%mm7, %%mm2 \n\t"\
+		"pcmpgtb %%mm6, %%mm2 \n\t"\
+		"paddb %%mm2, %%mm0 \n\t"\
+		"movq %%mm1," #i "(%%eax) \n\t"
+
+	HDC_CHECK_AND_CPY(0)
+		"addl %2, %1 \n\t"
+	HDC_CHECK_AND_CPY(8)
+		"addl %2, %1 \n\t"
+	HDC_CHECK_AND_CPY(16)
+		"addl %2, %1 \n\t"
+	HDC_CHECK_AND_CPY(24)
+		"addl %2, %1 \n\t"
+	HDC_CHECK_AND_CPY(32)
+		"addl %2, %1 \n\t"
+	HDC_CHECK_AND_CPY(40)
+		"addl %2, %1 \n\t"
+	HDC_CHECK_AND_CPY(48)
+		"addl %2, %1 \n\t"
+	HDC_CHECK_AND_CPY(56)
+
+		"psllq $8, %%mm0 \n\t" // remove dummy value
+		"movq %%mm0, %%mm1 \n\t"
+		"psrlw $8, %%mm0 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+		"movq %%mm0, %%mm1 \n\t"
+		"psrlq $16, %%mm0 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+		"movq %%mm0, %%mm1 \n\t"
+		"psrlq $32, %%mm0 \n\t"
+		"paddb %%mm1, %%mm0 \n\t"
+		"popl %1\n\t"
+		"movd %%mm0, %0 \n\t"
+		: "=r" (numEq)
+		: "r" (src), "r" (stride)
+		: "%eax"
+		);
+//	printf("%d\n", numEq);
+	// Matches contributed -1 per byte; recover the positive count.
+	numEq= (256 - (numEq & 0xFF)) &0xFF;
+#else
+	for(int y=0; y<BLOCK_SIZE; y++)
+	{
+		// (diff + 1) & 0xFFFF < 3  <=>  diff is -1, 0 or +1
+		if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
+		if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
+		if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
+		if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
+		if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
+		if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
+		if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
+		tempBlock[0 + y*TEMP_STRIDE] = src[0];
+		tempBlock[1 + y*TEMP_STRIDE] = src[1];
+		tempBlock[2 + y*TEMP_STRIDE] = src[2];
+		tempBlock[3 + y*TEMP_STRIDE] = src[3];
+		tempBlock[4 + y*TEMP_STRIDE] = src[4];
+		tempBlock[5 + y*TEMP_STRIDE] = src[5];
+		tempBlock[6 + y*TEMP_STRIDE] = src[6];
+		tempBlock[7 + y*TEMP_STRIDE] = src[7];
+		src+= stride;
+	}
+#endif
+/*	if(abs(numEq - asmEq) > 0)
+	{
+//		printf("\nasm:%d  c:%d\n", asmEq, numEq);
+		for(int y=0; y<8; y++)
+		{
+			for(int x=0; x<8; x++)
+			{
+				printf("%d ", src[x + y*stride]);
+			}
+			printf("\n");
+		}
+	}
+*/
+//	printf("%d\n", numEq);
+	return numEq > hFlatnessThreshold;
+}
+
+/**
+ * Check that the horizontal low-pass filter may run on this row-block.
+ * Alternative/approximate implementation ('a' in the table at the top of the
+ * file): the C path only checks the two endpoints |p0 - p7| <= 2*QP instead
+ * of every pair, and the MMX path is disabled — the bare FIXME token makes it
+ * a deliberate compile error if MMX_FIXME is ever defined.
+ */
+static inline bool isHorizMinMaxOk(uint8_t src[], int stride, int QP)
+{
+#ifdef MMX_FIXME
+FIXME
+	int isOk;
+	asm volatile(
+//		"int $3 \n\t"
+		"movq (%1, %2), %%mm0 \n\t"
+		"movq (%1, %2, 8), %%mm1 \n\t"
+		"movq %%mm0, %%mm2 \n\t"
+		"psubusb %%mm1, %%mm0 \n\t"
+		"psubusb %%mm2, %%mm1 \n\t"
+		"por %%mm1, %%mm0 \n\t" // ABS Diff
+
+		"movq pQPb, %%mm7 \n\t" // QP,..., QP
+		"paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
+		"psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
+		"pcmpeqd b00, %%mm0 \n\t"
+		"psrlq $16, %%mm0 \n\t"
+		"pcmpeqd bFF, %%mm0 \n\t"
+//		"movd %%mm0, (%1, %2, 4)\n\t"
+		"movd %%mm0, %0 \n\t"
+		: "=r" (isOk)
+		: "r" (src), "r" (stride)
+		);
+	return isOk;
+#else
+	if(abs(src[0] - src[7]) > 2*QP) return false;
+
+	return true;
+#endif
+}
+
+/**
+ * Apply the default horizontal deblocking filter to the 8x8 block previously
+ * copied into tempBlock (see isHorizDCAndCopy2Temp) and write the result back
+ * to dst, one row per stride. The MMX2 path uses an alternative/approximate
+ * per-row filter ('a' in the table at the top of the file); the C path is the
+ * exact transpose of doVertDefFilter's algorithm.
+ */
+static inline void doHorizDefFilterAndCopyBack(uint8_t dst[], int stride, int QP)
+{
+#ifdef HAVE_MMX2
+	asm volatile(
+		"pushl %0 \n\t"
+		"pxor %%mm7, %%mm7 \n\t"
+		"movq bm00001000, %%mm6 \n\t"
+		"movd %2, %%mm5 \n\t" // QP
+		"movq %%mm5, %%mm4 \n\t"
+		"paddusb %%mm5, %%mm5 \n\t" // 2QP
+		"paddusb %%mm5, %%mm4 \n\t" // 3QP
+		"psllq $24, %%mm4 \n\t"
+		"pxor %%mm5, %%mm5 \n\t" // 0
+		"psubb %%mm4, %%mm5 \n\t" // -QP
+		"leal tempBlock, %%eax \n\t"
+
+//FIXME? "unroll by 2" and mix
+// Filter one row held in tempBlock[i..i+7] and store the 8 result bytes to (%0).
+#define HDF(i)	"movq " #i "(%%eax), %%mm0 \n\t"\
+		"movq %%mm0, %%mm1 \n\t"\
+		"movq %%mm0, %%mm2 \n\t"\
+		"psrlq $8, %%mm1 \n\t"\
+		"psubusb %%mm1, %%mm2 \n\t"\
+		"psubusb %%mm0, %%mm1 \n\t"\
+		"por %%mm2, %%mm1 \n\t" /* |px - p(x+1)| */\
+		"pcmpeqb %%mm7, %%mm2 \n\t" /* sgn[px - p(x+1)] */\
+		"pshufw $0xAA, %%mm1, %%mm3 \n\t"\
+		"pminub %%mm1, %%mm3 \n\t"\
+		"psrlq $16, %%mm3 \n\t"\
+		"psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
+		"paddb %%mm5, %%mm1 \n\t"\
+		"psubusb %%mm5, %%mm1 \n\t"\
+		"psrlw $2, %%mm1 \n\t"\
+		"pxor %%mm2, %%mm1 \n\t"\
+		"psubb %%mm2, %%mm1 \n\t"\
+		"pand %%mm6, %%mm1 \n\t"\
+		"psubb %%mm1, %%mm0 \n\t"\
+		"psllq $8, %%mm1 \n\t"\
+		"paddb %%mm1, %%mm0 \n\t"\
+		"movd %%mm0, (%0) \n\t"\
+		"psrlq $32, %%mm0 \n\t"\
+		"movd %%mm0, 4(%0) \n\t"
+
+	HDF(0)
+		"addl %1, %0 \n\t"
+	HDF(8)
+		"addl %1, %0 \n\t"
+	HDF(16)
+		"addl %1, %0 \n\t"
+	HDF(24)
+		"addl %1, %0 \n\t"
+	HDF(32)
+		"addl %1, %0 \n\t"
+	HDF(40)
+		"addl %1, %0 \n\t"
+	HDF(48)
+		"addl %1, %0 \n\t"
+	HDF(56)
+		"popl %0 \n\t"
+		:
+		: "r" (dst), "r" (stride), "r" (QP)
+		: "%eax"
+	);
+#else
+	uint8_t *src= tempBlock;
+
+	for(int y=0; y<BLOCK_SIZE; y++)
+	{
+		// copy the row back first, then filter in dst
+		dst[0] = src[0];
+		dst[1] = src[1];
+		dst[2] = src[2];
+		dst[3] = src[3];
+		dst[4] = src[4];
+		dst[5] = src[5];
+		dst[6] = src[6];
+		dst[7] = src[7];
+
+		// Energy across the block edge between pixels 3 and 4.
+		// Fixed from 5*(src[4] - src[5]): the vertical filter's matching term
+		// is 5*(src[l5] - src[l4]), which transposes to 5*(src[4] - src[3]).
+		const int middleEnergy= 5*(src[4] - src[3]) + 2*(src[2] - src[5]);
+		if(ABS(middleEnergy) < 8*QP)
+		{
+			// q = half the step across the edge; d may not exceed it
+			const int q=(src[3] - src[4])/2;
+			const int leftEnergy=  5*(src[2] - src[1]) + 2*(src[0] - src[3]);
+			const int rightEnergy= 5*(src[6] - src[5]) + 2*(src[4] - src[7]);
+
+			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
+			d= MAX(d, 0);
+
+			d= (5*d + 32) >> 6;
+			d*= SIGN(-middleEnergy);
+
+			// clamp d to [0, q] (or [q, 0] for negative q)
+			if(q>0)
+			{
+				d= d<0 ? 0 : d;
+				d= d>q ? q : d;
+			}
+			else
+			{
+				d= d>0 ? 0 : d;
+				d= d<q ? q : d;
+			}
+
+			dst[3]-= d;
+			dst[4]+= d;
+		}
+		dst+= stride;
+		src+= TEMP_STRIDE;
+	}
+#endif
+}
+
+/**
+ * Do a horizontal low pass filter on the 8x8 block
+ * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
+ * using approximately the 7-Tap Filter (1,2,3,4,3,2,1)/16 (MMX2 version)
+ */
+static inline void doHorizLowPassAndCopyBack(uint8_t dst[], int stride, int QP)
+{
+//return;
+#ifdef HAVE_MMX2
+ asm volatile( //"movv %0 %1 %2\n\t"
+ "pushl %0\n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "leal tempBlock, %%eax \n\t"
+
+#define HLP1 "movq (%0), %%mm0 \n\t"\
+ "movq %%mm0, %%mm1 \n\t"\
+ "psllq $8, %%mm0 \n\t"\
+ "pavgb %%mm1, %%mm0 \n\t"\
+ "psrlw $8, %%mm0 \n\t"\
+ "pxor %%mm1, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ "movq %%mm0, %%mm1 \n\t"\
+ "movq %%mm0, %%mm2 \n\t"\
+ "psllq $32, %%mm0 \n\t"\
+ "paddb %%mm0, %%mm1 \n\t"\
+ "psllq $16, %%mm2 \n\t"\
+ "pavgb %%mm2, %%mm0 \n\t"\
+ "movq %%mm0, %%mm3 \n\t"\
+ "pand bm11001100, %%mm0 \n\t"\
+ "paddusb %%mm0, %%mm3 \n\t"\
+ "psrlq $8, %%mm3 \n\t"\
+ "pavgb %%mm1, %%mm4 \n\t"\
+ "pavgb %%mm3, %%mm2 \n\t"\
+ "psrlq $16, %%mm2 \n\t"\
+ "punpcklbw %%mm2, %%mm2 \n\t"\
+ "movq %%mm2, (%0) \n\t"\
+
+#define HLP2 "movq (%0), %%mm0 \n\t"\
+ "movq %%mm0, %%mm1 \n\t"\
+ "psllq $8, %%mm0 \n\t"\
+ "pavgb %%mm1, %%mm0 \n\t"\
+ "psrlw $8, %%mm0 \n\t"\
+ "pxor %%mm1, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ "movq %%mm0, %%mm2 \n\t"\
+ "psllq $32, %%mm0 \n\t"\
+ "psllq $16, %%mm2 \n\t"\
+ "pavgb %%mm2, %%mm0 \n\t"\
+ "movq %%mm0, %%mm3 \n\t"\
+ "pand bm11001100, %%mm0 \n\t"\
+ "paddusb %%mm0, %%