/*
 * Copyright (C) 2022 libass contributors
 *
 * This file is part of libass.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "asm.S"

const words_zero, align=16
    .dc.w 0, 0, 0, 0, 0, 0, 0, 0
endconst

/*
 * void stripe_unpack(int16_t *dst, const uint8_t *src, ptrdiff_t src_stride,
 *                    size_t width, size_t height);
 */
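
/*
 * Hedged reference: a scalar C sketch of what the NEON code below does,
 * inferred from the instruction sequence. Not part of the build; the
 * STRIPE_WIDTH name and the _ref suffix are illustrative only.
 *
 *     #include <stddef.h>
 *     #include <stdint.h>
 *
 *     #define STRIPE_WIDTH 8  // one stripe is 8 int16_t pixels wide
 *
 *     static void stripe_unpack16_ref(int16_t *dst, const uint8_t *src,
 *                                     ptrdiff_t src_stride,
 *                                     size_t width, size_t height)
 *     {
 *         for (size_t y = 0; y < height; y++) {
 *             int16_t *ptr = dst;
 *             for (size_t x = 0; x < width; x += STRIPE_WIDTH) {
 *                 for (size_t k = 0; k < STRIPE_WIDTH; k++) {
 *                     // scale 0..255 up to 0..0x4000: (v * 0x101 + 2) >> 2
 *                     ptr[k] = (int16_t) ((src[x + k] * 0x101u + 2) >> 2);
 *                 }
 *                 ptr += STRIPE_WIDTH * height;  // same row, next stripe
 *             }
 *             dst += STRIPE_WIDTH;
 *             src += src_stride;
 *         }
 *     }
 *
 * The asm rounds each row up to a multiple of 8 pixels, so it may read
 * past width into the caller's padding.
 */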

function stripe_unpack16_neon, export=1
    add x3, x3, 7
    lsl x5, x4, 4
    bic x7, x3, 15
    sub x2, x2, x7
0:
    mov x6, x0
    subs x7, x3, 16
    b.lo 2f
1:
    ld1 {v0.16b}, [x1], 16
    zip1 v1.16b, v0.16b, v0.16b
    zip2 v0.16b, v0.16b, v0.16b
    urshr v1.8h, v1.8h, 2
    urshr v0.8h, v0.8h, 2
    st1 {v1.8h}, [x6]
    add x6, x6, x5
    st1 {v0.8h}, [x6]
    add x6, x6, x5
    subs x7, x7, 16
    b.hs 1b
2:
    tst x7, 8
    b.eq 3f
    ld1 {v0.16b}, [x1]
    zip1 v0.16b, v0.16b, v0.16b
    urshr v0.8h, v0.8h, 2
    st1 {v0.8h}, [x6]
3:
    subs x4, x4, 1
    add x0, x0, 16
    add x1, x1, x2
    b.ne 0b
    ret
endfunc

/*
 * void stripe_pack(uint8_t *dst, ptrdiff_t dst_stride, const int16_t *src,
 *                  size_t width, size_t height);
 */
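
/*
 * Hedged reference: scalar C sketch of the packing step, inferred from
 * the NEON code below (STRIPE_WIDTH as in the stripe_unpack sketch
 * above). The dither table is the 2x2 ordered-dither matrix the asm
 * builds from the constant 8 | 40 << 16 and the per-row eor with 48:
 *
 *     static void stripe_pack16_ref(uint8_t *dst, ptrdiff_t dst_stride,
 *                                   const int16_t *src,
 *                                   size_t width, size_t height)
 *     {
 *         static const uint8_t dither[2][2] = { { 8, 40 }, { 56, 24 } };
 *         for (size_t x = 0; x < width; x += STRIPE_WIDTH) {
 *             uint8_t *ptr = dst + x;
 *             for (size_t y = 0; y < height; y++) {
 *                 for (size_t k = 0; k < STRIPE_WIDTH; k++) {
 *                     uint16_t v = (uint16_t) *src++;
 *                     // map 0..0x4000 back to 0..255 with ordered dithering
 *                     ptr[k] = (uint8_t)
 *                         ((v - (v >> 8) + dither[y & 1][k & 1]) >> 6);
 *                 }
 *                 ptr += dst_stride;
 *             }
 *         }
 *     }
 */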

function stripe_pack16_neon, export=1
    lsl x4, x4, 4
    mov w5, 8
    movk w5, 40, lsl 16
    movi v1.8h, 48
    subs x3, x3, 9
    b.lo 2f
0:
    mov x6, x0
    mov x7, x4
    dup v0.4s, w5
1:
    add x8, x2, x4
    ld1 {v2.8h}, [x2], 16
    ld1 {v3.8h}, [x8]
    ushr v4.8h, v2.8h, 8
    ushr v5.8h, v3.8h, 8
    sub v2.8h, v2.8h, v4.8h
    sub v3.8h, v3.8h, v5.8h
    add v2.8h, v2.8h, v0.8h
    add v3.8h, v3.8h, v0.8h
    shrn v2.8b, v2.8h, 6
    shrn2 v2.16b, v3.8h, 6
    st1 {v2.16b}, [x6]
    subs x7, x7, 16
    eor v0.16b, v0.16b, v1.16b
    add x6, x6, x1
    b.ne 1b
    subs x3, x3, 16
    add x0, x0, 16
    add x2, x2, x4
    b.hs 0b
2:
    tst x3, 8
    b.eq 4f
    dup v0.4s, w5
3:
    ld1 {v2.8h}, [x2], 16
    ushr v4.8h, v2.8h, 8
    sub v2.8h, v2.8h, v4.8h
    add v2.8h, v2.8h, v0.8h
    shrn v2.8b, v2.8h, 6
    st1 {v2.16b}, [x0]
    subs x4, x4, 16
    eor v0.16b, v0.16b, v1.16b
    add x0, x0, x1
    b.ne 3b
4:
    ret
endfunc

/*
 * load_line
 * Load a vector register with 8 halfwords of source bitmap data,
 * falling back to the shared zero block when the offset lies outside
 * the valid range
 */
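
/*
 * Hedged reference: the macro is roughly the C below, with the twist
 * that the single unsigned compare also catches "negative" offsets
 * (they wrap to huge values), so rows off either end of the buffer
 * read from words_zero:
 *
 *     #include <stdint.h>
 *     #include <string.h>
 *
 *     static inline void load_line_ref(int16_t dst[8], const char *base,
 *                                      uint64_t offs, uint64_t max,
 *                                      const int16_t *words_zero)
 *     {
 *         const void *src = offs < max ? (const void *) (base + offs)
 *                                      : (const void *) words_zero;
 *         memcpy(dst, src, 8 * sizeof(int16_t));
 *     }
 */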

.macro load_line dst, base, offs, max, zero_offs, tmp
    cmp \offs, \max
    csel \tmp, \offs, \zero_offs, lo
    add \tmp, \tmp, \base
    ld1 {\dst\().8h}, [\tmp]
.endm

/*
 * void shrink_horz(int16_t *dst, const int16_t *src,
 *                  size_t src_width, size_t src_height);
 */
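
/*
 * Hedged reference: a 2:1 downscale with the symmetric kernel
 * [1, 5, 10, 10, 5, 1] / 32. The NEON code evaluates it as a chain of
 * halving adds (uhadd truncates, only the final urshr rounds), so the
 * low bits may differ slightly from this exact-arithmetic sketch:
 *
 *     static void shrink_row_ref(int16_t *dst, const int16_t *z, size_t n)
 *     {
 *         for (ptrdiff_t i = 0; i < (ptrdiff_t) n; i++) {
 *             // taps reaching outside the row must read zero padding
 *             const int16_t *p = z + 2 * i - 4;
 *             int32_t acc = p[0] + 5 * p[1] + 10 * p[2] + 10 * p[3]
 *                         + 5 * p[4] + p[5];
 *             dst[i] = (int16_t) ((acc + 16) >> 5);
 *         }
 *     }
 *
 * The output phase relative to the source grid follows from the asm's
 * zero-padding scheme; the sketch shows the kernel centered between
 * p[2] and p[3].
 */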

function shrink_horz16_neon, export=1
    lsl x4, x2, 1
    add x4, x4, 15
    bic x4, x4, 15
    mul x4, x4, x3
    add x2, x2, 5 - 2
    movrel x5, words_zero
    sub x5, x5, x1
    mov x6, 0
0:
    mov x7, x3
1:
    sub x8, x6, x3, lsl 4
    load_line v1, x1, x8, x4, x5, x9
    load_line v2, x1, x6, x4, x5, x9
    add x8, x6, x3, lsl 4
    load_line v3, x1, x8, x4, x5, x9
    uzp1 v0.8h, v1.8h, v1.8h
    uzp2 v1.8h, v1.8h, v1.8h
    uzp1 v4.8h, v2.8h, v3.8h
    uzp2 v5.8h, v2.8h, v3.8h
    ext v2.16b, v0.16b, v4.16b, 14
    ext v3.16b, v1.16b, v5.16b, 14
    ext v0.16b, v0.16b, v4.16b, 12
    ext v1.16b, v1.16b, v5.16b, 12

    add v0.8h, v0.8h, v5.8h
    add v1.8h, v1.8h, v4.8h
    add v2.8h, v2.8h, v3.8h
    uhadd v0.8h, v0.8h, v1.8h
    uhadd v0.8h, v0.8h, v2.8h
    uhadd v0.8h, v0.8h, v1.8h
    uhadd v0.8h, v0.8h, v2.8h
    urshr v0.8h, v0.8h, 1
    st1 {v0.8h}, [x0], 16

    subs x7, x7, 1
    add x6, x6, 16
    b.ne 1b
    subs x2, x2, 16
    add x6, x6, x3, lsl 4
    b.hs 0b
    ret
endfunc

/*
 * void shrink_vert(int16_t *dst, const int16_t *src,
 *                  size_t src_width, size_t src_height);
 */
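
/*
 * Hedged reference: the same [1, 5, 10, 10, 5, 1] / 32 kernel as
 * shrink_horz, applied along columns; per column, one output row is
 * derived from six consecutive input rows (again modulo the truncating
 * uhadd evaluation):
 *
 *     static inline int16_t shrink6(int16_t z0, int16_t z1, int16_t z2,
 *                                   int16_t z3, int16_t z4, int16_t z5)
 *     {
 *         return (int16_t)
 *             ((z0 + 5 * (z1 + z4) + 10 * (z2 + z3) + z5 + 16) >> 5);
 *     }
 */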

function shrink_vert16_neon, export=1
    lsl x3, x3, 4
    movrel x4, words_zero
    sub x4, x4, x1
0:
    add x5, x3, (5 - 2) * 16
    movi v0.8h, 0
    movi v1.8h, 0
    movi v2.8h, 0
    movi v3.8h, 0
    mov x6, 0
1:
    load_line v4, x1, x6, x3, x4, x7
    add x6, x6, 16
    load_line v5, x1, x6, x3, x4, x7
    add x6, x6, 16

    add v0.8h, v0.8h, v5.8h
    add v1.8h, v1.8h, v4.8h
    add v6.8h, v2.8h, v3.8h
    uhadd v0.8h, v0.8h, v1.8h
    uhadd v0.8h, v0.8h, v6.8h
    uhadd v0.8h, v0.8h, v1.8h
    uhadd v0.8h, v0.8h, v6.8h
    urshr v0.8h, v0.8h, 1
    st1 {v0.8h}, [x0], 16

    subs x5, x5, 32
    mov v0.16b, v2.16b
    mov v1.16b, v3.16b
    mov v2.16b, v4.16b
    mov v3.16b, v5.16b
    b.hs 1b
    subs x2, x2, 8
    add x1, x1, x3
    sub x4, x4, x3
    b.hi 0b
    ret
endfunc

/*
 * void expand_horz(int16_t *dst, const int16_t *src,
 *                  size_t src_width, size_t src_height);
 */
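
/*
 * Hedged reference: a 1:2 upscale; every source pixel yields two
 * outputs, interpolated from three consecutive source pixels with the
 * two kernel phases [5, 10, 1] / 16 and [1, 10, 5] / 16 (the
 * uhadd/urhadd chain below approximates this in the low bits):
 *
 *     static inline void expand2(int16_t *lo, int16_t *hi,
 *                                int16_t z0, int16_t z1, int16_t z2)
 *     {
 *         *lo = (int16_t) ((5 * z0 + 10 * z1 + z2 + 8) >> 4);
 *         *hi = (int16_t) ((z0 + 10 * z1 + 5 * z2 + 8) >> 4);
 *     }
 */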

function expand_horz16_neon, export=1
    lsl x4, x2, 1
    add x4, x4, 15
    bic x4, x4, 15
    mul x4, x4, x3
    movrel x5, words_zero
    sub x5, x5, x1
    subs x2, x2, 3
    mov x6, 0
    b.lo 2f
0:
    mov x7, x3
1:
    sub x8, x6, x3, lsl 4
    load_line v1, x1, x8, x4, x5, x9
    load_line v2, x1, x6, x4, x5, x9
    ext v0.16b, v1.16b, v2.16b, 12
    ext v1.16b, v1.16b, v2.16b, 14

    uhadd v3.8h, v0.8h, v2.8h
    uhadd v3.8h, v3.8h, v1.8h
    uhadd v0.8h, v0.8h, v3.8h
    uhadd v2.8h, v2.8h, v3.8h
    urhadd v0.8h, v0.8h, v1.8h
    urhadd v2.8h, v2.8h, v1.8h
    zip1 v1.8h, v0.8h, v2.8h
    zip2 v2.8h, v0.8h, v2.8h
    add x9, x0, x3, lsl 4
    st1 {v1.8h}, [x0]
    st1 {v2.8h}, [x9]

    subs x7, x7, 1
    add x0, x0, 16
    add x6, x6, 16
    b.ne 1b
    subs x2, x2, 8
    add x0, x0, x3, lsl 4
    b.hs 0b
2:
    tst x2, 4
    b.eq 4f
    mov x7, x3
3:
    sub x8, x6, x3, lsl 4
    load_line v1, x1, x8, x4, x5, x9
    load_line v2, x1, x6, x4, x5, x9
    ext v0.16b, v1.16b, v2.16b, 12
    ext v1.16b, v1.16b, v2.16b, 14

    uhadd v3.8h, v0.8h, v2.8h
    uhadd v3.8h, v3.8h, v1.8h
    uhadd v0.8h, v0.8h, v3.8h
    uhadd v2.8h, v2.8h, v3.8h
    urhadd v0.8h, v0.8h, v1.8h
    urhadd v2.8h, v2.8h, v1.8h
    zip1 v1.8h, v0.8h, v2.8h
    st1 {v1.8h}, [x0], 16

    subs x7, x7, 1
    add x6, x6, 16
    b.ne 3b
4:
    ret
endfunc

/*
 * void expand_vert(int16_t *dst, const int16_t *src,
 *                  size_t src_width, size_t src_height);
 */
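
/*
 * Hedged reference: same two-phase kernel as expand_horz, but along
 * columns; each loaded row emits two output rows from a sliding window
 * of rows (z0 = two rows back, z1 = one row back, z2 = current row),
 * per column k, using expand2 from the expand_horz sketch above:
 *
 *     expand2(&out_row0[k], &out_row1[k], z0[k], z1[k], z2[k]);
 */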

function expand_vert16_neon, export=1
    lsl x3, x3, 4
    movrel x4, words_zero
    sub x4, x4, x1
0:
    add x5, x3, 32
    movi v0.8h, 0
    movi v1.8h, 0
    mov x6, 0
1:
    load_line v2, x1, x6, x3, x4, x7
    add x6, x6, 16

    uhadd v3.8h, v0.8h, v2.8h
    uhadd v3.8h, v3.8h, v1.8h
    uhadd v0.8h, v0.8h, v3.8h
    uhadd v3.8h, v2.8h, v3.8h
    urhadd v0.8h, v0.8h, v1.8h
    urhadd v3.8h, v3.8h, v1.8h
    st1 {v0.8h}, [x0], 16
    st1 {v3.8h}, [x0], 16

    subs x5, x5, 16
    mov v0.16b, v1.16b
    mov v1.16b, v2.16b
    b.ne 1b
    subs x2, x2, 8
    add x1, x1, x3
    sub x4, x4, x3
    b.hi 0b
    ret
endfunc

/*
 * calc_diff
 * Select the 8 pixels at position \pos (counting backwards from line2)
 * out of the line0:line1:line2 window and subtract the center line
 */

.macro calc_diff dst, line0, line1, line2, pos, center
.if \pos == 0
    sub \dst\().8h, \line2\().8h, \center\().8h
.elseif \pos > 0 && \pos < 8
    ext \dst\().16b, \line1\().16b, \line2\().16b, 16 - 2 * \pos
    sub \dst\().8h, \dst\().8h, \center\().8h
.elseif \pos == 8
    sub \dst\().8h, \line1\().8h, \center\().8h
.elseif \pos > 8 && \pos < 16
    ext \dst\().16b, \line0\().16b, \line1\().16b, 32 - 2 * \pos
    sub \dst\().8h, \dst\().8h, \center\().8h
.elseif \pos == 16
    sub \dst\().8h, \line0\().8h, \center\().8h
.else
.error "invalid pos"
.endif
.endm

/*
 * calc_blur
 * Calculate one filtered line: accumulate param-weighted differences
 * around the center line in 32-bit fixed point, then add the rounded
 * high halves back onto the center line (see the scalar sketch before
 * blur_horz below)
 */

.macro calc_blur dst, line0, line1, line2, n, center, params, vtmp1, vtmp2, vtmp3
    movi \vtmp1\().4s, 0x80, lsl 8
    movi \vtmp2\().4s, 0x80, lsl 8
.set pos, 0
.rept \n
    calc_diff \vtmp3, \line0, \line1, \line2, (\n - pos - 1), \center
    smlal \vtmp1\().4s, \vtmp3\().4h, \params\().h[pos]
    smlal2 \vtmp2\().4s, \vtmp3\().8h, \params\().h[pos]
    calc_diff \vtmp3, \line0, \line1, \line2, (\n + pos + 1), \center
    smlal \vtmp1\().4s, \vtmp3\().4h, \params\().h[pos]
    smlal2 \vtmp2\().4s, \vtmp3\().8h, \params\().h[pos]
.set pos, pos + 1
.endr
    uzp2 \vtmp1\().8h, \vtmp1\().8h, \vtmp2\().8h
    add \vtmp1\().8h, \vtmp1\().8h, \center\().8h
    st1 {\vtmp1\().8h}, [\dst], 16
.endm

/*
 * void blur_horz(int16_t *dst, const int16_t *src,
 *                size_t src_width, size_t src_height,
 *                const int16_t *param);
 */
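
/*
 * Hedged reference: scalar model of one output pixel, inferred from
 * calc_diff/calc_blur above. param holds n positive Q16 fractional
 * weights (weight k = param[k] / 65536); the center pixel implicitly
 * keeps the remaining weight. The sketch flattens the image to one row;
 * the asm walks the stripe layout instead, where horizontally adjacent
 * pixels may live one stripe over:
 *
 *     static void blur_row_ref(int16_t *dst, const int16_t *z, size_t len,
 *                              const int16_t *param, int n)
 *     {
 *         for (ptrdiff_t i = 0; i < (ptrdiff_t) len; i++) {
 *             int32_t acc = 0x8000;  // rounding bias for the final >> 16
 *             for (int k = 0; k < n; k++) {
 *                 // out-of-range taps must read zero padding
 *                 acc += (int32_t) param[k] * ((z[i - k - 1] - z[i])
 *                                            + (z[i + k + 1] - z[i]));
 *             }
 *             dst[i] = (int16_t) (z[i] + (acc >> 16));
 *         }
 *     }
 */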

.macro blur_horz n
function blur\n\()_horz16_neon, export=1
    ld1 {v0.8h}, [x4]
    lsl x4, x2, 1
    add x4, x4, 15
    bic x4, x4, 15
    mul x4, x4, x3
    movrel x5, words_zero
    sub x5, x5, x1
    add x2, x2, 2 * \n
    mov x6, 0
0:
    mov x7, x3
1:
.if \n > 4
    sub x8, x6, x3, lsl 5
    load_line v1, x1, x8, x4, x5, x9
.endif
    sub x8, x6, x3, lsl 4
    load_line v2, x1, x8, x4, x5, x9
    load_line v3, x1, x6, x4, x5, x9

.if \n < 8
    ext v7.16b, v2.16b, v3.16b, 16 - 2 * \n
    calc_blur x0, v1, v2, v3, \n, v7, v0, v4, v5, v6
.else
    calc_blur x0, v1, v2, v3, \n, v2, v0, v4, v5, v6
.endif

    subs x7, x7, 1
    add x6, x6, 16
    b.ne 1b
    subs x2, x2, 8
    b.hi 0b
    ret
endfunc
.endm

blur_horz 4
blur_horz 5
blur_horz 6
blur_horz 7
blur_horz 8

/*
 * void blur_vert(int16_t *dst, const int16_t *src,
 *                size_t src_width, size_t src_height,
 *                const int16_t *param);
 */
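
/*
 * Hedged reference: same accumulation as the blur_row_ref sketch above,
 * with the taps stepping vertically through the stripe, one 8-pixel row
 * (16 bytes) per step:
 *
 *     acc += (int32_t) param[k] * ((z[i - (k + 1) * STRIPE_WIDTH] - z[i])
 *                                + (z[i + (k + 1) * STRIPE_WIDTH] - z[i]));
 */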

.macro blur_vert n
function blur\n\()_vert16_neon, export=1
    ld1 {v0.8h}, [x4]
    lsl x3, x3, 4
    movrel x4, words_zero
    sub x4, x4, x1
0:
    add x5, x3, 32 * \n
    mov x6, -16 * \n
1:
    load_line v1, x1, x6, x3, x4, x7
    movi v2.4s, 0x80, lsl 8
    movi v3.4s, 0x80, lsl 8
.set pos, 0
.rept \n
    sub x8, x6, 16 * (pos + 1)
    load_line v4, x1, x8, x3, x4, x7
    sub v4.8h, v4.8h, v1.8h
    smlal v2.4s, v4.4h, v0.h[pos]
    smlal2 v3.4s, v4.8h, v0.h[pos]
    add x8, x6, 16 * (pos + 1)
    load_line v4, x1, x8, x3, x4, x7
    sub v4.8h, v4.8h, v1.8h
    smlal v2.4s, v4.4h, v0.h[pos]
    smlal2 v3.4s, v4.8h, v0.h[pos]
.set pos, pos + 1
.endr
    uzp2 v2.8h, v2.8h, v3.8h
    add v2.8h, v2.8h, v1.8h
    st1 {v2.8h}, [x0], 16

    subs x5, x5, 16
    add x6, x6, 16
    b.ne 1b
    subs x2, x2, 8
    add x1, x1, x3
    sub x4, x4, x3
    b.hi 0b
    ret
endfunc
.endm

blur_vert 4
blur_vert 5
blur_vert 6
blur_vert 7
blur_vert 8