/
/ mpg123_synth_1to1 works the same way as the C version of this
/ file.  only two types of changes have been made:
/ - reordered floating point instructions to
/   prevent pipeline stalls
/ - made WRITE_SAMPLE use integer instead of
/   (slower) floating point (see the C sketch below)
/ all kinds of x86 processors should benefit from these
/ modifications.
/
/ useful sources of information on optimizing x86 code include:
/
/     Intel Architecture Optimization Manual
/     http://www.intel.com/design/pentium/manuals/242816.htm
/
/     Cyrix 6x86 Instruction Set Summary
/     ftp://ftp.cyrix.com/6x86/6x-dbch6.pdf
/
/     AMD-K5 Processor Software Development
/     http://www.amd.com/products/cpg/techdocs/appnotes/20007e.pdf
/
/ Stefan Bieschewski <stb@acm.org>
/
/ $Id$
/
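/ a minimal C sketch (not part of the original sources) of what the
/ integer WRITE_SAMPLE below boils down to; the helper name
/ write_sample_int is made up, and the real code accumulates the clip
/ count in %edi rather than returning it:
/
/   #include <math.h>
/
/   static int write_sample_int(double sum, short *out)
/   {
/       long v = lrint(sum);      /* like fistpl: round in the current */
/                                 /* FPU rounding mode                 */
/       if (v >  32767) { *out =  32767; return 1; }   /* clipped high */
/       if (v < -32768) { *out = -32768; return 1; }   /* clipped low  */
/       *out = (short)v;
/       return 0;
/   }
/
/ keeping the rounded value in an integer register turns the range
/ checks into plain cmpl instructions instead of floating point
/ compares.
/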
.bss
        .comm   buffs,4352,4
.data
        .align 4
bo:
        .long 1
.section .rodata
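/ .LC0/.LC1 encode 32767.0 and -32768.0 as little-endian doubles;
/ apparently unused by the integer WRITE_SAMPLE code below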
        .align 8
.LC0:
        .long 0x0,0x40dfffc0
        .align 8
.LC1:
        .long 0x0,0xc0e00000
        .align 8
.text
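/ apparent C-level interface (names inferred, not stated in this file):
/   int synth_1to1_pent(real *bandPtr, int channel, short *out);
/ after the prologue the three arguments sit at 32/36/40(%esp); %esi
/ walks the output buffer, %edi counts clipped samples and is returned
/ in %eax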
.globl synth_1to1_pent
synth_1to1_pent:
        subl $12,%esp
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %ebx
        movl 32(%esp),%eax
        movl 40(%esp),%esi
        xorl %edi,%edi
        movl bo,%ebp
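/ channel 0: step the ring offset bo (mod 16) and use the first half of
/ buffs; otherwise write to the other channel's slot of the interleaved
/ output (out+1) and use the second half of buffs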
        cmpl %edi,36(%esp)
        jne .L48
        decl %ebp
        andl $15,%ebp
        movl %ebp,bo
        movl $buffs,%ecx
        jmp .L49
.L48:
        addl $2,%esi
        movl $buffs+2176,%ecx
.L49:
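/ bo's parity selects the argument order for dct64, i.e. which of the
/ two 1088-byte half buffers receives which half of the new subband
/ samples, and where the windowing below starts reading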
        testl $1,%ebp
        je .L50
        movl %ecx,%ebx
        movl %ebp,16(%esp)
        pushl %eax
        movl 20(%esp),%edx
        leal (%ebx,%edx,4),%eax
        pushl %eax
        movl 24(%esp),%eax
        incl %eax
        andl $15,%eax
        leal 1088(,%eax,4),%eax
        addl %ebx,%eax
        jmp .L74
.L50:
        leal 1088(%ecx),%ebx
        leal 1(%ebp),%edx
        movl %edx,16(%esp)
        pushl %eax
        leal 1092(%ecx,%ebp,4),%eax
        pushl %eax
        leal (%ecx,%ebp,4),%eax
.L74:
        pushl %eax
        call dct64
        addl $12,%esp
        movl 16(%esp),%edx
        leal 0(,%edx,4),%edx
        movl $decwin+64,%eax
        movl %eax,%ecx
        subl %edx,%ecx
        movl $16,%ebp
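/ first windowing loop: 16 output samples, each a 16-term dot product of
/ window coefficients (%ecx) and synthesis buffer values (%ebx) with
/ alternating add/subtract; loads and multiplies are interleaved to hide
/ FPU latency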
.L55:
        flds (%ecx)
        fmuls (%ebx)
        flds 4(%ecx)
        fmuls 4(%ebx)
        fxch %st(1)
        flds 8(%ecx)
        fmuls 8(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds 12(%ecx)
        fmuls 12(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 16(%ecx)
        fmuls 16(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds 20(%ecx)
        fmuls 20(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 24(%ecx)
        fmuls 24(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds 28(%ecx)
        fmuls 28(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 32(%ecx)
        fmuls 32(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds 36(%ecx)
        fmuls 36(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 40(%ecx)
        fmuls 40(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds 44(%ecx)
        fmuls 44(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 48(%ecx)
        fmuls 48(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds 52(%ecx)
        fmuls 52(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 56(%ecx)
        fmuls 56(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds 60(%ecx)
        fmuls 60(%ebx)
        fxch %st(2)
        subl $4,%esp
        faddp %st,%st(1)
        fxch %st(1)
        fsubrp %st,%st(1)
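/ WRITE_SAMPLE: round to a 32-bit integer, clip to the 16-bit range,
/ store the sample and count clips in %edi (same pattern twice more
/ below)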
        fistpl (%esp)
        popl %eax
        cmpl $32767,%eax
        jg 1f
        cmpl $-32768,%eax
        jl 2f
        movw %ax,(%esi)
        jmp 4f
1:      movw $32767,(%esi)
        jmp 3f
2:      movw $-32768,(%esi)
3:      incl %edi
4:
.L54:
        addl $64,%ebx
        subl $-128,%ecx
        addl $4,%esi
        decl %ebp
        jnz .L55
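/ centre sample: a straight sum of eight products (every other
/ coefficient), no sign alternation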
        flds (%ecx)
        fmuls (%ebx)
        flds 8(%ecx)
        fmuls 8(%ebx)
        flds 16(%ecx)
        fmuls 16(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 24(%ecx)
        fmuls 24(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 32(%ecx)
        fmuls 32(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 40(%ecx)
        fmuls 40(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 48(%ecx)
        fmuls 48(%ebx)
        fxch %st(2)
        faddp %st,%st(1)
        flds 56(%ecx)
        fmuls 56(%ebx)
        fxch %st(2)
        subl $4,%esp
        faddp %st,%st(1)
        fxch %st(1)
        faddp %st,%st(1)
        fistpl (%esp)
        popl %eax
        cmpl $32767,%eax
        jg 1f
        cmpl $-32768,%eax
        jl 2f
        movw %ax,(%esi)
        jmp 4f
1:      movw $32767,(%esi)
        jmp 3f
2:      movw $-32768,(%esi)
3:      incl %edi
4:
.L62:
        addl $-64,%ebx
        addl $4,%esi
        movl 16(%esp),%edx
        leal -128(%ecx,%edx,8),%ecx
        movl $15,%ebp
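/ second windowing loop: 15 more samples from 16 products each, with the
/ window pointer (%ecx) now stepping backwards and the signs flipped
/ relative to the first loop (note the fchs)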
.L68:
        flds -4(%ecx)
        fchs
        fmuls (%ebx)
        flds -8(%ecx)
        fmuls 4(%ebx)
        fxch %st(1)
        flds -12(%ecx)
        fmuls 8(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -16(%ecx)
        fmuls 12(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -20(%ecx)
        fmuls 16(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -24(%ecx)
        fmuls 20(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -28(%ecx)
        fmuls 24(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -32(%ecx)
        fmuls 28(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -36(%ecx)
        fmuls 32(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -40(%ecx)
        fmuls 36(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -44(%ecx)
        fmuls 40(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -48(%ecx)
        fmuls 44(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -52(%ecx)
        fmuls 48(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -56(%ecx)
        fmuls 52(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds -60(%ecx)
        fmuls 56(%ebx)
        fxch %st(2)
        fsubrp %st,%st(1)
        flds (%ecx)
        fmuls 60(%ebx)
        fxch %st(2)
        subl $4,%esp
        fsubrp %st,%st(1)
        fxch %st(1)
        fsubrp %st,%st(1)
        fistpl (%esp)
        popl %eax
        cmpl $32767,%eax
        jg 1f
        cmpl $-32768,%eax
        jl 2f
        movw %ax,(%esi)
        jmp 4f
1:      movw $32767,(%esi)
        jmp 3f
2:      movw $-32768,(%esi)
3:      incl %edi
4:
.L67:
        addl $-64,%ebx
        addl $-128,%ecx
        addl $4,%esi
        decl %ebp
        jnz .L68
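/ return the number of clipped samples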
        movl %edi,%eax
        popl %ebx
        popl %esi
        popl %edi
        popl %ebp
        addl $12,%esp
        ret