summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  libvo/osd.c                  4
-rw-r--r--  libvo/osd_template.c         4
-rw-r--r--  postproc/rgb2rgb.c           8
-rw-r--r--  postproc/rgb2rgb_template.c  8
-rw-r--r--  postproc/swscale.c           12
-rw-r--r--  postproc/swscale_template.c  12
6 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/libvo/osd.c b/libvo/osd.c
index 2452f3b7a5..55c23dfb32 100644
--- a/libvo/osd.c
+++ b/libvo/osd.c
@@ -86,7 +86,7 @@ void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, i
"pxor %%mm7, %%mm7 \n\t"
"xorl %%eax, %%eax \n\t"
"pcmpeqb %%mm6, %%mm6 \n\t" // F..F
- ".align 16\n\t"
+ ".balign 16\n\t"
"1: \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // dstbase
"movq %%mm0, %%mm1 \n\t"
@@ -121,7 +121,7 @@ void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, i
"xorl %%eax, %%eax \n\t"
"xorl %%ebx, %%ebx \n\t"
"xorl %%edx, %%edx \n\t"
- ".align 16\n\t"
+ ".balign 16\n\t"
"1: \n\t"
"movb (%1, %%eax), %%bl \n\t"
"cmpb $0, %%bl \n\t"
diff --git a/libvo/osd_template.c b/libvo/osd_template.c
index 2452f3b7a5..55c23dfb32 100644
--- a/libvo/osd_template.c
+++ b/libvo/osd_template.c
@@ -86,7 +86,7 @@ void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, i
"pxor %%mm7, %%mm7 \n\t"
"xorl %%eax, %%eax \n\t"
"pcmpeqb %%mm6, %%mm6 \n\t" // F..F
- ".align 16\n\t"
+ ".balign 16\n\t"
"1: \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // dstbase
"movq %%mm0, %%mm1 \n\t"
@@ -121,7 +121,7 @@ void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, i
"xorl %%eax, %%eax \n\t"
"xorl %%ebx, %%ebx \n\t"
"xorl %%edx, %%edx \n\t"
- ".align 16\n\t"
+ ".balign 16\n\t"
"1: \n\t"
"movb (%1, %%eax), %%bl \n\t"
"cmpb $0, %%bl \n\t"
diff --git a/postproc/rgb2rgb.c b/postproc/rgb2rgb.c
index 2232343883..67f33de38b 100644
--- a/postproc/rgb2rgb.c
+++ b/postproc/rgb2rgb.c
@@ -585,7 +585,7 @@ void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
#ifdef HAVE_MMX
asm volatile (
"xorl %%eax, %%eax \n\t"
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 32(%0, %%eax) \n\t"
"movq (%0, %%eax), %%mm0 \n\t"
@@ -636,7 +636,7 @@ void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, u
//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
asm volatile(
"xorl %%eax, %%eax \n\t"
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 32(%1, %%eax, 2) \n\t"
PREFETCH" 32(%2, %%eax) \n\t"
@@ -710,7 +710,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
"xorl %%eax, %%eax \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
@@ -760,7 +760,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
asm volatile(
"xorl %%eax, %%eax \n\t"
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
diff --git a/postproc/rgb2rgb_template.c b/postproc/rgb2rgb_template.c
index 2232343883..67f33de38b 100644
--- a/postproc/rgb2rgb_template.c
+++ b/postproc/rgb2rgb_template.c
@@ -585,7 +585,7 @@ void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
#ifdef HAVE_MMX
asm volatile (
"xorl %%eax, %%eax \n\t"
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 32(%0, %%eax) \n\t"
"movq (%0, %%eax), %%mm0 \n\t"
@@ -636,7 +636,7 @@ void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, u
//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
asm volatile(
"xorl %%eax, %%eax \n\t"
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 32(%1, %%eax, 2) \n\t"
PREFETCH" 32(%2, %%eax) \n\t"
@@ -710,7 +710,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
"xorl %%eax, %%eax \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
@@ -760,7 +760,7 @@ void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
asm volatile(
"xorl %%eax, %%eax \n\t"
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%eax, 4) \n\t"
"movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
diff --git a/postproc/swscale.c b/postproc/swscale.c
index 103c47d612..56e53f8bf6 100644
--- a/postproc/swscale.c
+++ b/postproc/swscale.c
@@ -143,7 +143,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
@@ -197,7 +197,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, asm_uvalpha1 \n\t"\
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -262,7 +262,7 @@ static int canMMX2BeUsed=0;
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
@@ -311,7 +311,7 @@ static int canMMX2BeUsed=0;
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -1310,7 +1310,7 @@ FUNNY_Y_CODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
@@ -1442,7 +1442,7 @@ FUNNYUVCODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c
index 103c47d612..56e53f8bf6 100644
--- a/postproc/swscale_template.c
+++ b/postproc/swscale_template.c
@@ -143,7 +143,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
@@ -197,7 +197,7 @@ static int canMMX2BeUsed=0;
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, asm_uvalpha1 \n\t"\
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -262,7 +262,7 @@ static int canMMX2BeUsed=0;
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
@@ -311,7 +311,7 @@ static int canMMX2BeUsed=0;
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
- ".align 16 \n\t"\
+ ".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
@@ -1310,7 +1310,7 @@ FUNNY_Y_CODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
@@ -1442,7 +1442,7 @@ FUNNYUVCODE
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
- ".align 16 \n\t"
+ ".balign 16 \n\t"
"1: \n\t"
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]