summaryrefslogtreecommitdiffstats
path: root/mp3lib/dct64_k7.s
diff options
context:
space:
mode:
authornick <nick@b3059339-0415-0410-9bf9-f77b7e298cf2>2001-06-29 17:55:35 +0000
committernick <nick@b3059339-0415-0410-9bf9-f77b7e298cf2>2001-06-29 17:55:35 +0000
commit2ec6762923fea7f28331849b1d394f30dfce1aff (patch)
tree58ff3fcc1ac955a2b07e81d74fe489076e1fe631 /mp3lib/dct64_k7.s
parentbf8a76c06387345aa448b66ce2dff37ba0fcd69e (diff)
downloadmpv-2ec6762923fea7f28331849b1d394f30dfce1aff.tar.bz2
mpv-2ec6762923fea7f28331849b1d394f30dfce1aff.tar.xz
Added newest MMX-optimized decore, which speeds up decoding by at least 13% on any CPU.
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@1246 b3059339-0415-0410-9bf9-f77b7e298cf2
Diffstat (limited to 'mp3lib/dct64_k7.s')
-rw-r--r--mp3lib/dct64_k7.s1469
1 files changed, 798 insertions, 671 deletions
diff --git a/mp3lib/dct64_k7.s b/mp3lib/dct64_k7.s
index 6a82d618c4..e2dcf07195 100644
--- a/mp3lib/dct64_k7.s
+++ b/mp3lib/dct64_k7.s
@@ -1,677 +1,804 @@
-///
-/// Replacement of dct64() with AMD's 3DNowEx(DSP)! SIMD operations support
-///
-/// This code is based on 'dct64_3dnow.s' by Syuuhei Kashiyama
-/// <squash@mb.kcom.ne.jp>; only some types of changes have been made:
-///
-/// - added new opcodes PSWAPD, PFPNACC
-/// - decreased number of opcodes (as it was suggested by k7 manual)
-/// (using memory reference as operand of instructions)
-/// - Phase 6 is rewritten with mixing of cpu and mmx opcodes
-/// - changed the function name to support automatic 3DNowEx! detection
-/// - negation of a 3DNow! reg was replaced with PXOR 0x80000000, MMi instead
-/// of PFMUL, as suggested by the Athlon manual. (Two adjacent PFMULs
-/// cannot be paired, but a PXOR can be.)
-///
-/// note: because K7 processors are aggressive out-of-order three-way
-/// superscalar ones, instruction order is not significant for them.
-///
-/// Modified by Nick Kurshev <nickols_k@mail.ru>
-///
-/// The author of this program disclaims all expressed or implied
-/// warranties with regard to this program, and in no event shall the
-/// author of this program be liable for whatever results from the use of
-/// this program. Use it at your own risk.
-///
+# This code was taken from http://www.mpg123.org
+# See the ChangeLog of mpg123-0.59s-pre.1 for details
+# Applied to mplayer by Nick Kurshev <nickols_k@mail.ru>
+# Partial 3dnowex-DSP! optimization by Nick Kurshev
+#
+# TODO: finish 3dnow! optimization at least in scalar mode
+#
.data
- .align 8
+ .align 8
plus_minus_3dnow: .long 0x00000000, 0x80000000
+costab:
+ .long 1056974725
+ .long 1057056395
+ .long 1057223771
+ .long 1057485416
+ .long 1057855544
+ .long 1058356026
+ .long 1059019886
+ .long 1059897405
+ .long 1061067246
+ .long 1062657950
+ .long 1064892987
+ .long 1066774581
+ .long 1069414683
+ .long 1073984175
+ .long 1079645762
+ .long 1092815430
+ .long 1057005197
+ .long 1057342072
+ .long 1058087743
+ .long 1059427869
+ .long 1061799040
+ .long 1065862217
+ .long 1071413542
+ .long 1084439708
+ .long 1057128951
+ .long 1058664893
+ .long 1063675095
+ .long 1076102863
+ .long 1057655764
+ .long 1067924853
+ .long 1060439283
.text
- .globl dct64_3dnowex
- .type dct64_3dnowex,@function
-
-/* Discrete Cosine Transform (DCT) for subband synthesis */
-/* void dct64(real *a,real *b,real *c) */
-dct64_3dnowex:
- subl $256,%esp
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %ebx
- leal 16(%esp),%ebx /* ebx -> real tmp1[32] */
- movl 284(%esp),%edi /* edi -> c */
- movl 276(%esp),%ebp /* ebp -> a */
- movl 280(%esp),%edx /* edx -> b */
- leal 128(%ebx),%esi /* esi -> real tmp2[32] */
-
- / femms
-
- // 1
- movl pnts,%eax
-
- movq 0(%edi),%mm0 /* mm0 = c[0x00] | c[0x01]*/
- movq %mm0,%mm1 /* mm1 = mm0 */
- movd 124(%edi),%mm2 /* mm2 = c[0x1f] */
- punpckldq 120(%edi),%mm2 /* mm2 = c[0x1f] | c[0x1E] */
- pfadd %mm2,%mm0 /* mm0 = c[0x00]+c[0x1F] | c[0x1E]+c[0x01] */
- movq %mm0,0(%ebx) /* tmp[0, 1] = mm0 */
- pfsub %mm2,%mm1 /* c[0x00]-c[0x1f] | c[0x01]-c[0x1e] */
- pfmul 0(%eax),%mm1 /* (c[0x00]-c[0x1f])*pnts[0]|(c[0x01]-c[0x1e])*pnts[1]*/
- pswapd %mm1, %mm1 /* (c[0x01]-c[0x1e])*pnts[1]|(c[0x00]-c[0x1f])*pnts[0]*/
- movq %mm1, 120(%ebx) /* tmp1[30, 31]=mm1 */
-
- movq 8(%edi),%mm4
- movq %mm4,%mm5
- movd 116(%edi),%mm6
- punpckldq 112(%edi),%mm6
- pfadd %mm6,%mm4
- movq %mm4,8(%ebx)
- pfsub %mm6,%mm5
- pfmul 8(%eax),%mm5
- pswapd %mm5, %mm5
- movq %mm5, 112(%ebx)
-
- movq 16(%edi),%mm0
- movq %mm0,%mm1
- movd 108(%edi),%mm2
- punpckldq 104(%edi),%mm2
- pfadd %mm2,%mm0
- movq %mm0,16(%ebx)
- pfsub %mm2,%mm1
- pfmul 16(%eax),%mm1
- pswapd %mm1, %mm1
- movq %mm1, 104(%ebx)
-
- movq 24(%edi),%mm4
- movq %mm4,%mm5
- movd 100(%edi),%mm6
- punpckldq 96(%edi),%mm6
- pfadd %mm6,%mm4
- movq %mm4,24(%ebx)
- pfsub %mm6,%mm5
- pfmul 24(%eax),%mm5
- pswapd %mm5, %mm5
- movq %mm5, 96(%ebx)
-
- movq 32(%edi),%mm0
- movq %mm0,%mm1
- movd 92(%edi),%mm2
- punpckldq 88(%edi),%mm2
- pfadd %mm2,%mm0
- movq %mm0,32(%ebx)
- pfsub %mm2,%mm1
- pfmul 32(%eax),%mm1
- pswapd %mm1, %mm1
- movq %mm1, 88(%ebx)
-
- movq 40(%edi),%mm4
- movq %mm4,%mm5
- movd 84(%edi),%mm6
- punpckldq 80(%edi),%mm6
- pfadd %mm6,%mm4
- movq %mm4,40(%ebx)
- pfsub %mm6,%mm5
- pfmul 40(%eax),%mm5
- pswapd %mm5, %mm5
- movq %mm5, 80(%ebx)
-
- movq 48(%edi),%mm0
- movq %mm0,%mm1
- movd 76(%edi),%mm2
- punpckldq 72(%edi),%mm2
- pfadd %mm2,%mm0
- movq %mm0,48(%ebx)
- pfsub %mm2,%mm1
- pfmul 48(%eax),%mm1
- pswapd %mm1, %mm1
- movq %mm1, 72(%ebx)
-
- movq 56(%edi),%mm4
- movq %mm4,%mm5
- movd 68(%edi),%mm6
- punpckldq 64(%edi),%mm6
- pfadd %mm6,%mm4
- movq %mm4,56(%ebx)
- pfsub %mm6,%mm5
- pfmul 56(%eax),%mm5
- pswapd %mm5, %mm5
- movq %mm5, 64(%ebx)
-
- // 2
- movl pnts+4,%eax
- / 0, 14
- movq 0(%ebx),%mm0 /* mm0 = tmp1[0] | tmp1[1] */
- movq %mm0,%mm1
- movd 60(%ebx),%mm2 /* mm2 = tmp1[0x0F] */
- punpckldq 56(%ebx),%mm2 /* mm2 = tmp1[0x0E] | tmp1[0x0F] */
- movq 0(%eax),%mm3 /* mm3 = pnts[0] | pnts[1] */
- pfadd %mm2,%mm0 /* mm0 = tmp1[0]+tmp1[0x0F]|tmp1[1]+tmp1[0x0E]*/
- movq %mm0,0(%esi) /* tmp2[0, 1] = mm0 */
- pfsub %mm2,%mm1 /* mm1 = tmp1[0]-tmp1[0x0F]|tmp1[1]-tmp1[0x0E]*/
- pfmul %mm3,%mm1 /* mm1 = (tmp1[0]-tmp1[0x0F])*pnts[0]|(tmp1[1]-tmp1[0x0E])*pnts[1]*/
- pswapd %mm1, %mm1 /* mm1 = (tmp1[1]-tmp1[0x0E])*pnts[1]|(tmp1[0]-tmp1[0x0F])*pnts[0]*/
- movq %mm1, 56(%esi) /* tmp2[0x0E, 0x0F] = mm1 */
- / 16, 30
- movq 64(%ebx),%mm0
- movq %mm0,%mm1
- movd 124(%ebx),%mm2
- punpckldq 120(%ebx),%mm2
- pfadd %mm2,%mm0
- movq %mm0,64(%esi)
- pfsubr %mm2,%mm1
- pfmul %mm3,%mm1
- pswapd %mm1, %mm1
- movq %mm1, 120(%esi)
- movq 8(%ebx),%mm4
- / 2, 12
- movq %mm4,%mm5
- movd 52(%ebx),%mm6
- punpckldq 48(%ebx),%mm6
- movq 8(%eax),%mm7
- pfadd %mm6,%mm4
- movq %mm4,8(%esi)
- pfsub %mm6,%mm5
- pfmul %mm7,%mm5
- pswapd %mm5, %mm5
- movq %mm5, 48(%esi)
- movq 72(%ebx),%mm4
- / 18, 28
- movq %mm4,%mm5
- movd 116(%ebx),%mm6
- punpckldq 112(%ebx),%mm6
- pfadd %mm6,%mm4
- movq %mm4,72(%esi)
- pfsubr %mm6,%mm5
- pfmul %mm7,%mm5
- pswapd %mm5, %mm5
- movq %mm5, 112(%esi)
- movq 16(%ebx),%mm0
- / 4, 10
- movq %mm0,%mm1
- movd 44(%ebx),%mm2
- punpckldq 40(%ebx),%mm2
- movq 16(%eax),%mm3
- pfadd %mm2,%mm0
- movq %mm0,16(%esi)
- pfsub %mm2,%mm1
- pfmul %mm3,%mm1
- pswapd %mm1, %mm1
- movq %mm1, 40(%esi)
- movq 80(%ebx),%mm0
- / 20, 26
- movq %mm0,%mm1
- movd 108(%ebx),%mm2
- punpckldq 104(%ebx),%mm2
- pfadd %mm2,%mm0
- movq %mm0,80(%esi)
- pfsubr %mm2,%mm1
- pfmul %mm3,%mm1
- pswapd %mm1, %mm1
- movq %mm1, 104(%esi)
- movq 24(%ebx),%mm4
- / 6, 8
- movq %mm4,%mm5
- movd 36(%ebx),%mm6
- punpckldq 32(%ebx),%mm6
- movq 24(%eax),%mm7
- pfadd %mm6,%mm4
- movq %mm4,24(%esi)
- pfsub %mm6,%mm5
- pfmul %mm7,%mm5
- pswapd %mm5, %mm5
- movq %mm5, 32(%esi)
- movq 88(%ebx),%mm4
- / 22, 24
- movq %mm4,%mm5
- movd 100(%ebx),%mm6
- punpckldq 96(%ebx),%mm6
- pfadd %mm6,%mm4
- movq %mm4,88(%esi)
- pfsubr %mm6,%mm5
- pfmul %mm7,%mm5
- pswapd %mm5, %mm5
- movq %mm5, 96(%esi)
-
- // 3
- movl pnts+8,%eax
- movq 0(%eax),%mm0
- movq 8(%eax),%mm1
- movq 0(%esi),%mm2
- / 0, 6
- movq %mm2,%mm3
- movd 28(%esi),%mm4
- punpckldq 24(%esi),%mm4
- pfadd %mm4,%mm2
- pfsub %mm4,%mm3
- pfmul %mm0,%mm3
- movq %mm2,0(%ebx)
- pswapd %mm3, %mm3
- movq %mm3, 24(%ebx)
- movq 8(%esi),%mm5
- / 2, 4
- movq %mm5,%mm6
- movd 20(%esi),%mm7
- punpckldq 16(%esi),%mm7
- pfadd %mm7,%mm5
- pfsub %mm7,%mm6
- pfmul %mm1,%mm6
- movq %mm5,8(%ebx)
- pswapd %mm6, %mm6
- movq %mm6, 16(%ebx)
- movq 32(%esi),%mm2
- / 8, 14
- movq %mm2,%mm3
- movd 60(%esi),%mm4
- punpckldq 56(%esi),%mm4
- pfadd %mm4,%mm2
- pfsubr %mm4,%mm3
- pfmul %mm0,%mm3
- movq %mm2,32(%ebx)
- pswapd %mm3, %mm3
- movq %mm3, 56(%ebx)
- movq 40(%esi),%mm5
- / 10, 12
- movq %mm5,%mm6
- movd 52(%esi),%mm7
- punpckldq 48(%esi),%mm7
- pfadd %mm7,%mm5
- pfsubr %mm7,%mm6
- pfmul %mm1,%mm6
- movq %mm5,40(%ebx)
- pswapd %mm6, %mm6
- movq %mm6, 48(%ebx)
- movq 64(%esi),%mm2
- / 16, 22
- movq %mm2,%mm3
- movd 92(%esi),%mm4
- punpckldq 88(%esi),%mm4
- pfadd %mm4,%mm2
- pfsub %mm4,%mm3
- pfmul %mm0,%mm3
- movq %mm2,64(%ebx)
- pswapd %mm3, %mm3
- movq %mm3, 88(%ebx)
- movq 72(%esi),%mm5
- / 18, 20
- movq %mm5,%mm6
- movd 84(%esi),%mm7
- punpckldq 80(%esi),%mm7
- pfadd %mm7,%mm5
- pfsub %mm7,%mm6
- pfmul %mm1,%mm6
- movq %mm5,72(%ebx)
- pswapd %mm6, %mm6
- movq %mm6, 80(%ebx)
- movq 96(%esi),%mm2
- / 24, 30
- movq %mm2,%mm3
- movd 124(%esi),%mm4
- punpckldq 120(%esi),%mm4
- pfadd %mm4,%mm2
- pfsubr %mm4,%mm3
- pfmul %mm0,%mm3
- movq %mm2,96(%ebx)
- pswapd %mm3, %mm3
- movq %mm3, 120(%ebx)
- movq 104(%esi),%mm5
- / 26, 28
- movq %mm5,%mm6
- movd 116(%esi),%mm7
- punpckldq 112(%esi),%mm7
- pfadd %mm7,%mm5
- pfsubr %mm7,%mm6
- pfmul %mm1,%mm6
- movq %mm5,104(%ebx)
- pswapd %mm6, %mm6
- movq %mm6, 112(%ebx)
-
- // 4
- movl pnts+12,%eax
- movq 0(%eax),%mm0 /* mm0 = pnts[3] | pnts[4] */
- movq 0(%ebx),%mm1 /* mm1 = tmp1[0] | tmp1[1] */
- / 0
- movq %mm1,%mm2
- movd 12(%ebx),%mm3 /* mm3 = tmp1[3] */
- punpckldq 8(%ebx),%mm3 /* mm3 = tmp1[3] | tmp1[2] */
- pfadd %mm3,%mm1 /* mm1 = tmp1[0]+tmp1[3] | tmp1[1]+tmp1[2]*/
- pfsub %mm3,%mm2 /* mm2 = tmp1[0]-tmp1[3] | tmp1[0]-tmp1[2]*/
- pfmul %mm0,%mm2 /* mm2 = tmp1[0]-tmp1[3]*pnts[3]|tmp1[0]-tmp1[2]*pnts[4]*/
- movq %mm1,0(%esi) /* tmp2[0, 1] = mm1 */
- pswapd %mm2, %mm2 /* mm2 = tmp1[0]-tmp1[2]*pnts[4]|tmp1[0]-tmp1[3]*pnts[3] */
- movq %mm2, 8(%esi) /* tmp2[2, 3] = mm2 */
- movq 16(%ebx),%mm4
- / 4
- movq %mm4,%mm5
- movd 28(%ebx),%mm6
- punpckldq 24(%ebx),%mm6
- pfadd %mm6,%mm4
- pfsubr %mm6,%mm5
- pfmul %mm0,%mm5
- movq %mm4,16(%esi)
- pswapd %mm5, %mm5
- movq %mm5, 24(%esi)
- movq 32(%ebx),%mm1
- / 8
- movq %mm1,%mm2
- movd 44(%ebx),%mm3
- punpckldq 40(%ebx),%mm3
- pfadd %mm3,%mm1
- pfsub %mm3,%mm2
- pfmul %mm0,%mm2
- movq %mm1,32(%esi)
- pswapd %mm2, %mm2
- movq %mm2, 40(%esi)
- movq 48(%ebx),%mm4
- / 12
- movq %mm4,%mm5
- movd 60(%ebx),%mm6
- punpckldq 56(%ebx),%mm6
- pfadd %mm6,%mm4
- pfsubr %mm6,%mm5
- pfmul %mm0,%mm5
- movq %mm4,48(%esi)
- pswapd %mm5, %mm5
- movq %mm5, 56(%esi)
- movq 64(%ebx),%mm1
- / 16
- movq %mm1,%mm2
- movd 76(%ebx),%mm3
- punpckldq 72(%ebx),%mm3
- pfadd %mm3,%mm1
- pfsub %mm3,%mm2
- pfmul %mm0,%mm2
- movq %mm1,64(%esi)
- pswapd %mm2, %mm2
- movq %mm2, 72(%esi)
- movq 80(%ebx),%mm4
- / 20
- movq %mm4,%mm5
- movd 92(%ebx),%mm6
- punpckldq 88(%ebx),%mm6
- pfadd %mm6,%mm4
- pfsubr %mm6,%mm5
- pfmul %mm0,%mm5
- movq %mm4,80(%esi)
- pswapd %mm5, %mm5
- movq %mm5, 88(%esi)
- movq 96(%ebx),%mm1
- / 24
- movq %mm1,%mm2
- movd 108(%ebx),%mm3
- punpckldq 104(%ebx),%mm3
- pfadd %mm3,%mm1
- pfsub %mm3,%mm2
- pfmul %mm0,%mm2
- movq %mm1,96(%esi)
- pswapd %mm2, %mm2
- movq %mm2, 104(%esi)
- movq 112(%ebx),%mm4
- / 28
- movq %mm4,%mm5
- movd 124(%ebx),%mm6
- punpckldq 120(%ebx),%mm6
- pfadd %mm6,%mm4
- pfsubr %mm6,%mm5
- pfmul %mm0,%mm5
- movq %mm4,112(%esi)
- pswapd %mm5, %mm5
- movq %mm5, 120(%esi)
-
- // 5
- movq plus_minus_3dnow, %mm0 /* mm0 = 1.0 | -1.0 */
- movl $1,%eax
- movd %eax,%mm1
- pi2fd %mm1,%mm1
- movl pnts+16,%eax
- movd 0(%eax),%mm2
- punpckldq %mm2,%mm1 /* mm1 = 1.0 | cos0 */
- movq 0(%esi),%mm2 /* mm2 = tmp2[0] | tmp2[1] */
- / 0
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2 /* mm2 = tmp2[0]+tmp2[1]|tmp2[0]-tmp2[1]*/
- pfmul %mm1,%mm2 /* mm2 = tmp2[0]+tmp2[1]|(tmp2[0]-tmp2[1])*cos0*/
- movq %mm2,0(%ebx) /* tmp1[0, 1] = mm2 */
- movq 8(%esi),%mm4 /* mm4 = tmp2[2] | tmp2[3]*/
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4 /* mm4 = tmp2[2]+tmp2[3]|tmp2[2]-tmp2[3]*/
- pxor %mm0,%mm4 /* mm4 = tmp2[2]+tmp2[3]|tmp2[3]-tmp2[2]*/
- pfmul %mm1,%mm4 /* mm4 = tmp2[2]+tmp2[3]|(tmp2[3]-tmp2[2])*cos0*/
- movq %mm4,%mm5
- psrlq $32,%mm5 /* mm5 = (tmp2[3]-tmp2[2])*cos0 */
- pfacc %mm5,%mm4 /* mm4 = tmp2[2]+tmp2[3]+(tmp2[3]-tmp2[2])*cos0|(tmp2[3]-tmp2[2])*cos0*/
- movq %mm4,8(%ebx) /* tmp1[2, 3] = mm4 */
- movq 16(%esi),%mm2
- / 4
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2
-
- pfmul %mm1,%mm2
- movq 24(%esi),%mm4
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4
-
- pxor %mm0,%mm4
- pfmul %mm1,%mm4
- movq %mm4,%mm5
- psrlq $32,%mm5
- pfacc %mm5,%mm4
- movq %mm2,%mm3
- psrlq $32,%mm3
- pfadd %mm4,%mm2
- pfadd %mm3,%mm4
- movq %mm2,16(%ebx)
- movq %mm4,24(%ebx)
- movq 32(%esi),%mm2
- / 8
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2
-
- pfmul %mm1,%mm2
- movq %mm2,32(%ebx)
- movq 40(%esi),%mm4
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4
- pxor %mm0,%mm4
- pfmul %mm1,%mm4
- movq %mm4,%mm5
- psrlq $32,%mm5
- pfacc %mm5,%mm4
- movq %mm4,40(%ebx)
- movq 48(%esi),%mm2
- / 12
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2
- pfmul %mm1,%mm2
- movq 56(%esi),%mm4
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4
- pxor %mm0,%mm4
- pfmul %mm1,%mm4
- movq %mm4,%mm5
- psrlq $32,%mm5
- pfacc %mm5,%mm4
- movq %mm2,%mm3
- psrlq $32,%mm3
- pfadd %mm4,%mm2
- pfadd %mm3,%mm4
- movq %mm2,48(%ebx)
- movq %mm4,56(%ebx)
- movq 64(%esi),%mm2
- / 16
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2
- pfmul %mm1,%mm2
- movq %mm2,64(%ebx)
- movq 72(%esi),%mm4
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4
- pxor %mm0,%mm4
- pfmul %mm1,%mm4
- movq %mm4,%mm5
- psrlq $32,%mm5
- pfacc %mm5,%mm4
- movq %mm4,72(%ebx)
- movq 80(%esi),%mm2
- / 20
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2
- pfmul %mm1,%mm2
- movq 88(%esi),%mm4
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4
- pxor %mm0,%mm4
- pfmul %mm1,%mm4
- movq %mm4,%mm5
- psrlq $32,%mm5
- pfacc %mm5,%mm4
- movq %mm2,%mm3
- psrlq $32,%mm3
- pfadd %mm4,%mm2
- pfadd %mm3,%mm4
- movq %mm2,80(%ebx)
- movq %mm4,88(%ebx)
- movq 96(%esi),%mm2
- / 24
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2
- pfmul %mm1,%mm2
- movq %mm2,96(%ebx)
- movq 104(%esi),%mm4
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4
- pxor %mm0,%mm4
- pfmul %mm1,%mm4
- movq %mm4,%mm5
- psrlq $32,%mm5
- pfacc %mm5,%mm4
- movq %mm4,104(%ebx)
- movq 112(%esi),%mm2
- / 28
- pfpnacc %mm2, %mm2
- pswapd %mm2, %mm2
- pfmul %mm1,%mm2
- movq 120(%esi),%mm4
- pfpnacc %mm4, %mm4
- pswapd %mm4, %mm4
- pxor %mm0,%mm4
- pfmul %mm1,%mm4
- movq %mm4,%mm5
- psrlq $32,%mm5
- pfacc %mm5,%mm4
- movq %mm2,%mm3
- psrlq $32,%mm3
- pfadd %mm4,%mm2
- pfadd %mm3,%mm4
- movq %mm2,112(%ebx)
- movq %mm4,120(%ebx)
-
- // Phase6
- movd 0(%ebx),%mm0
- movd %mm0,1024(%ebp)
- movl 4(%ebx),%eax
- movl %eax,0(%ebp)
- movl %eax,0(%edx)
- movd 8(%ebx),%mm2
- movd %mm2,512(%ebp)
- movd 12(%ebx),%mm3
- movd %mm3,512(%edx)
-
- movl 16(%ebx),%eax
- movl %eax,768(%ebp)
- movd 20(%ebx),%mm5
- movd %mm5,256(%edx)
-
- movd 24(%ebx),%mm6
- movd %mm6,256(%ebp)
- movd 28(%ebx),%mm7
- movd %mm7,768(%edx)
-
- movq 32(%ebx),%mm0 /* mm0 = tmp1[8] | tmp1[9] */
- movq 48(%ebx),%mm1 /* mm1 = tmp1[12] | tmp1[13] */
- pfadd %mm1,%mm0 /* mm0 = tmp1[8]+tmp1[12]| tmp1[9]+tmp1[13]*/
- movd %mm0,896(%ebp) /* a[0xE0] = tmp1[8]+tmp1[12] */
- psrlq $32,%mm0
- movd %mm0,128(%edx) /* a[0x20] = tmp1[9]+tmp1[13] */
- movq 40(%ebx),%mm2
- pfadd %mm2,%mm1
- movd %mm1,640(%ebp)
- psrlq $32,%mm1
- movd %mm1,384(%edx)
-
- movq 56(%ebx),%mm3
- pfadd %mm3,%mm2
- movd %mm2,384(%ebp)
- psrlq $32,%mm2
- movd %mm2,640(%edx)
-
- movd 36(%ebx),%mm4
- pfadd %mm4,%mm3
- movd %mm3,128(%ebp)
- psrlq $32,%mm3
- movd %mm3,896(%edx)
- movq 96(%ebx),%mm0
- movq 64(%ebx),%mm1
-
- movq 112(%ebx),%mm2
- pfadd %mm2,%mm0
- movq %mm0,%mm3
- pfadd %mm1,%mm3
- movd %mm3,960(%ebp)
- psrlq $32,%mm3
- movd %mm3,64(%edx)
- movq 80(%ebx),%mm1
- pfadd %mm1,%mm0
- movd %mm0,832(%ebp)
- psrlq $32,%mm0
- movd %mm0,192(%edx)
- movq 104(%ebx),%mm3
- pfadd %mm3,%mm2
- movq %mm2,%mm4
- pfadd %mm1,%mm4
- movd %mm4,704(%ebp)
- psrlq $32,%mm4
- movd %mm4,320(%edx)
- movq 72(%ebx),%mm1
- pfadd %mm1,%mm2
- movd %mm2,576(%ebp)
- psrlq $32,%mm2
- movd %mm2,448(%edx)
-
- movq 120(%ebx),%mm4
- pfadd %mm4,%mm3
- movq %mm3,%mm5
- pfadd %mm1,%mm5
- movd %mm5,448(%ebp)
- psrlq $32,%mm5
- movd %mm5,576(%edx)
- movq 88(%ebx),%mm1
- pfadd %mm1,%mm3
- movd %mm3,320(%ebp)
- psrlq $32,%mm3
- movd %mm3,704(%edx)
-
- movd 100(%ebx),%mm5
- pfadd %mm5,%mm4
- movq %mm4,%mm6
- pfadd %mm1,%mm6
- movd %mm6,192(%ebp)
- psrlq $32,%mm6
- movd %mm6,832(%edx)
- movd 68(%ebx),%mm1
- pfadd %mm1,%mm4
- movd %mm4,64(%ebp)
- psrlq $32,%mm4
- movd %mm4,960(%edx)
-
- / femms
-
- popl %ebx
- popl %esi
- popl %edi
- popl %ebp
- addl $256,%esp
-
- ret $12
+
+ .align 16
+
+.globl dct64_MMX_3dnowex
+dct64_MMX_3dnowex:
+ pushl %ebx
+ pushl %esi
+ pushl %edi
+ subl $256,%esp
+ movl 280(%esp),%eax
+
+ leal 128(%esp),%edx
+ movl 272(%esp),%esi
+ movl 276(%esp),%edi
+ movl $costab,%ebx
+ orl %ecx,%ecx
+ movl %esp,%ecx
+ femms
+/* Phase 1*/
+ movq (%eax), %mm0
+ movq 8(%eax), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 120(%eax), %mm1
+ movq 112(%eax), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, (%edx)
+ movq %mm4, 8(%edx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul (%ebx), %mm3
+ pfmul 8(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 120(%edx)
+ movq %mm7, 112(%edx)
+
+ movq 16(%eax), %mm0
+ movq 24(%eax), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 104(%eax), %mm1
+ movq 96(%eax), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 16(%edx)
+ movq %mm4, 24(%edx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul 16(%ebx), %mm3
+ pfmul 24(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 104(%edx)
+ movq %mm7, 96(%edx)
+
+ movq 32(%eax), %mm0
+ movq 40(%eax), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 88(%eax), %mm1
+ movq 80(%eax), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 32(%edx)
+ movq %mm4, 40(%edx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul 32(%ebx), %mm3
+ pfmul 40(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 88(%edx)
+ movq %mm7, 80(%edx)
+
+ movq 48(%eax), %mm0
+ movq 56(%eax), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 72(%eax), %mm1
+ movq 64(%eax), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 48(%edx)
+ movq %mm4, 56(%edx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul 48(%ebx), %mm3
+ pfmul 56(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 72(%edx)
+ movq %mm7, 64(%edx)
+
+/* Phase 2*/
+
+ movq (%edx), %mm0
+ movq 8(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 56(%edx), %mm1
+ movq 48(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, (%ecx)
+ movq %mm4, 8(%ecx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul 64(%ebx), %mm3
+ pfmul 72(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 56(%ecx)
+ movq %mm7, 48(%ecx)
+
+ movq 16(%edx), %mm0
+ movq 24(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 40(%edx), %mm1
+ movq 32(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 16(%ecx)
+ movq %mm4, 24(%ecx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul 80(%ebx), %mm3
+ pfmul 88(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 40(%ecx)
+ movq %mm7, 32(%ecx)
+
+/* Phase 3*/
+
+ movq 64(%edx), %mm0
+ movq 72(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 120(%edx), %mm1
+ movq 112(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 64(%ecx)
+ movq %mm4, 72(%ecx)
+ pfsubr %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 64(%ebx), %mm3
+ pfmul 72(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 120(%ecx)
+ movq %mm7, 112(%ecx)
+
+ movq 80(%edx), %mm0
+ movq 88(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 104(%edx), %mm1
+ movq 96(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 80(%ecx)
+ movq %mm4, 88(%ecx)
+ pfsubr %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 80(%ebx), %mm3
+ pfmul 88(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 104(%ecx)
+ movq %mm7, 96(%ecx)
+
+/* Phase 4*/
+
+ movq (%ecx), %mm0
+ movq 8(%ecx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 24(%ecx), %mm1
+ movq 16(%ecx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, (%edx)
+ movq %mm4, 8(%edx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul 96(%ebx), %mm3
+ pfmul 104(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 24(%edx)
+ movq %mm7, 16(%edx)
+
+ movq 32(%ecx), %mm0
+ movq 40(%ecx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 56(%ecx), %mm1
+ movq 48(%ecx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 32(%edx)
+ movq %mm4, 40(%edx)
+ pfsubr %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 96(%ebx), %mm3
+ pfmul 104(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 56(%edx)
+ movq %mm7, 48(%edx)
+
+ movq 64(%ecx), %mm0
+ movq 72(%ecx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 88(%ecx), %mm1
+ movq 80(%ecx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 64(%edx)
+ movq %mm4, 72(%edx)
+ pfsub %mm1, %mm3
+ pfsub %mm5, %mm7
+ pfmul 96(%ebx), %mm3
+ pfmul 104(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 88(%edx)
+ movq %mm7, 80(%edx)
+
+ movq 96(%ecx), %mm0
+ movq 104(%ecx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 120(%ecx), %mm1
+ movq 112(%ecx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 96(%edx)
+ movq %mm4, 104(%edx)
+ pfsubr %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 96(%ebx), %mm3
+ pfmul 104(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 120(%edx)
+ movq %mm7, 112(%edx)
+
+/* Phase 5 */
+
+ movq (%edx), %mm0
+ movq 16(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 8(%edx), %mm1
+ movq 24(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, (%ecx)
+ movq %mm4, 16(%ecx)
+ pfsub %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 112(%ebx), %mm3
+ pfmul 112(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 8(%ecx)
+ movq %mm7, 24(%ecx)
+
+ movq 32(%edx), %mm0
+ movq 48(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 40(%edx), %mm1
+ movq 56(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 32(%ecx)
+ movq %mm4, 48(%ecx)
+ pfsub %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 112(%ebx), %mm3
+ pfmul 112(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 40(%ecx)
+ movq %mm7, 56(%ecx)
+
+ movq 64(%edx), %mm0
+ movq 80(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 72(%edx), %mm1
+ movq 88(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 64(%ecx)
+ movq %mm4, 80(%ecx)
+ pfsub %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 112(%ebx), %mm3
+ pfmul 112(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 72(%ecx)
+ movq %mm7, 88(%ecx)
+
+ movq 96(%edx), %mm0
+ movq 112(%edx), %mm4
+ movq %mm0, %mm3
+ movq %mm4, %mm7
+ movq 104(%edx), %mm1
+ movq 120(%edx), %mm5
+ pswapd %mm1, %mm1
+ pswapd %mm5, %mm5
+ pfadd %mm1, %mm0
+ pfadd %mm5, %mm4
+ movq %mm0, 96(%ecx)
+ movq %mm4, 112(%ecx)
+ pfsub %mm1, %mm3
+ pfsubr %mm5, %mm7
+ pfmul 112(%ebx), %mm3
+ pfmul 112(%ebx), %mm7
+ pswapd %mm3, %mm3
+ pswapd %mm7, %mm7
+ movq %mm3, 104(%ecx)
+ movq %mm7, 120(%ecx)
+
+/* Phase 6. This is the end of easy road. */
+ movl $1, %eax
+ movd %eax, %mm7
+ pi2fd %mm7, %mm7
+ movq 32(%ecx), %mm0
+ punpckldq 120(%ebx), %mm7 /* 1.0 | 120(%ebx) */
+ movq %mm0, %mm1
+ movq plus_minus_3dnow, %mm6
+ /* n.b.: pfpnacc */
+ pxor %mm6, %mm1
+ pfacc %mm1, %mm0
+ /**/
+ pfmul %mm7, %mm0
+ movq %mm0, 32(%edx)
+ femms
+
+ flds 44(%ecx)
+ fsubs 40(%ecx)
+ fmuls 120(%ebx)
+
+ fsts 44(%edx)
+ fadds 40(%ecx) /* pfacc 40(ecx), 56(%ecx) */
+ fadds 44(%ecx)
+ fstps 40(%edx)
+
+ flds 48(%ecx)
+ fsubs 52(%ecx)
+ fmuls 120(%ebx)
+
+ flds 60(%ecx)
+ fsubs 56(%ecx)
+ fmuls 120(%ebx)
+
+ fld %st(0)
+ fadds 56(%ecx)
+ fadds 60(%ecx)
+
+ fld %st(0)
+ fadds 48(%ecx)
+ fadds 52(%ecx)
+ fstps 48(%edx)
+ fadd %st(2)
+ fstps 56(%edx)
+ fsts 60(%edx)
+ faddp %st(1)
+ fstps 52(%edx)
+/*---*/
+ flds 64(%ecx)
+ fadds 68(%ecx)
+ fstps 64(%edx)
+
+ flds 64(%ecx)
+ fsubs 68(%ecx)
+ fmuls 120(%ebx)
+ fstps 68(%edx)
+
+ flds 76(%ecx)
+ fsubs 72(%ecx)
+ fmuls 120(%ebx)
+ fsts 76(%edx)
+ fadds 72(%ecx)
+ fadds 76(%ecx)
+ fstps 72(%edx)
+
+ flds 92(%ecx)
+ fsubs 88(%ecx)
+ fmuls 120(%ebx)
+ fsts 92(%edx)
+ fadds 92(%ecx)
+ fadds 88(%ecx)
+
+ fld %st(0)
+ fadds 80(%ecx)
+ fadds 84(%ecx)
+ fstps 80(%edx)
+
+ flds 80(%ecx)
+ fsubs 84(%ecx)
+ fmuls 120(%ebx)
+ fadd %st(0), %st(1)
+ fadds 92(%edx)
+ fstps 84(%edx)
+ fstps 88(%edx)
+
+ flds 96(%ecx)
+ fadds 100(%ecx)
+ fstps 96(%edx)
+
+ flds 96(%ecx)
+ fsubs 100(%ecx)
+ fmuls 120(%ebx)
+ fstps 100(%edx)
+
+ flds 108(%ecx)
+ fsubs 104(%ecx)
+ fmuls 120(%ebx)
+ fsts 108(%edx)
+ fadds 104(%ecx)
+ fadds 108(%ecx)
+ fstps 104(%edx)
+
+ flds 124(%ecx)
+ fsubs 120(%ecx)
+ fmuls 120(%ebx)
+ fsts 124(%edx)
+ fadds 120(%ecx)
+ fadds 124(%ecx)
+
+ fld %st(0)
+ fadds 112(%ecx)
+ fadds 116(%ecx)
+ fstps 112(%edx)
+
+ flds 112(%ecx)
+ fsubs 116(%ecx)
+ fmuls 120(%ebx)
+ fadd %st(0),%st(1)
+ fadds 124(%edx)
+ fstps 116(%edx)
+ fstps 120(%edx)
+ jnz .L01
+
+/* Phase 7*/
+
+ flds (%ecx)
+ fadds 4(%ecx)
+ fstps 1024(%esi)
+
+ flds (%ecx)
+ fsubs 4(%ecx)
+ fmuls 120(%ebx)
+ fsts (%esi)
+ fstps (%edi)
+
+ flds 12(%ecx)
+ fsubs 8(%ecx)
+ fmuls 120(%ebx)
+ fsts 512(%edi)
+ fadds 12(%ecx)
+ fadds 8(%e