diff options
author | reimar <reimar@b3059339-0415-0410-9bf9-f77b7e298cf2> | 2009-01-16 09:21:21 +0000 |
---|---|---|
committer | reimar <reimar@b3059339-0415-0410-9bf9-f77b7e298cf2> | 2009-01-16 09:21:21 +0000 |
commit | 93d598c7b2d6f71fa6381829dc6ec3610ed322e8 (patch) | |
tree | cfb70dc87a86bbb17b02e26e9ff614fe53345463 /libvo | |
parent | 452cda5332e48c77ae5a5594eff3f57d54ded30c (diff) | |
download | mpv-93d598c7b2d6f71fa6381829dc6ec3610ed322e8.tar.bz2 mpv-93d598c7b2d6f71fa6381829dc6ec3610ed322e8.tar.xz |
Lots and lots of #ifdef ARCH_... -> #if ARCH_...
and #ifdef HAVE_MMX etc -> #if HAVE_MMX.
There might still be more that need to be fixed.
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@28325 b3059339-0415-0410-9bf9-f77b7e298cf2
Diffstat (limited to 'libvo')
-rw-r--r-- | libvo/aclib.c | 79 | ||||
-rw-r--r-- | libvo/aclib_template.c | 25 | ||||
-rw-r--r-- | libvo/osd.c | 86 | ||||
-rw-r--r-- | libvo/osd_template.c | 42 |
4 files changed, 142 insertions, 90 deletions
diff --git a/libvo/aclib.c b/libvo/aclib.c index 68548f579e..ca06b84ad5 100644 --- a/libvo/aclib.c +++ b/libvo/aclib.c @@ -18,7 +18,7 @@ //Feel free to fine-tune the above 2, it might be possible to get some speedup with them :) //#define STATISTICS -#ifdef ARCH_X86 +#if ARCH_X86 #define CAN_COMPILE_X86_ASM #endif @@ -30,19 +30,19 @@ #ifdef CAN_COMPILE_X86_ASM -#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT) +#if (HAVE_MMX && !HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT) #define COMPILE_MMX #endif -#if (defined (HAVE_MMX2) && !defined (HAVE_SSE2)) || defined (RUNTIME_CPUDETECT) +#if (HAVE_MMX2 && !HAVE_SSE2) || defined (RUNTIME_CPUDETECT) #define COMPILE_MMX2 #endif -#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT) +#if (HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT) #define COMPILE_3DNOW #endif -#if defined (HAVE_SSE2) || defined (RUNTIME_CPUDETECT) +#if HAVE_SSE2 || defined (RUNTIME_CPUDETECT) #define COMPILE_SSE #endif @@ -51,12 +51,23 @@ #undef HAVE_3DNOW #undef HAVE_SSE #undef HAVE_SSE2 +#define HAVE_MMX 0 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 /* #ifdef COMPILE_C #undef HAVE_MMX #undef HAVE_MMX2 #undef HAVE_3DNOW -#undef ARCH_X86 +#undef HAVE_SSE +#undef HAVE_SSE2 +#define HAVE_MMX 0 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 #define RENAME(a) a ## _C #include "aclib_template.c" #endif @@ -64,11 +75,16 @@ //MMX versions #ifdef COMPILE_MMX #undef RENAME -#define HAVE_MMX +#undef HAVE_MMX #undef HAVE_MMX2 #undef HAVE_3DNOW #undef HAVE_SSE #undef HAVE_SSE2 +#define HAVE_MMX 1 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 #define RENAME(a) a ## _MMX #include "aclib_template.c" #endif @@ -76,11 +92,18 @@ //MMX2 versions #ifdef COMPILE_MMX2 #undef RENAME -#define HAVE_MMX -#define HAVE_MMX2 +#undef HAVE_MMX +#undef HAVE_MMX2 #undef 
HAVE_3DNOW #undef HAVE_SSE #undef HAVE_SSE2 +#define HAVE_MMX 1 +#define HAVE_MMX2 1 +#define HAVE_3DNOW 0 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 +#define HAVE_MMX +#define HAVE_MMX2 #define RENAME(a) a ## _MMX2 #include "aclib_template.c" #endif @@ -88,11 +111,16 @@ //3DNOW versions #ifdef COMPILE_3DNOW #undef RENAME -#define HAVE_MMX +#undef HAVE_MMX #undef HAVE_MMX2 -#define HAVE_3DNOW +#undef HAVE_3DNOW #undef HAVE_SSE #undef HAVE_SSE2 +#define HAVE_MMX 1 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 1 +#define HAVE_SSE 0 +#define HAVE_SSE2 0 #define RENAME(a) a ## _3DNow #include "aclib_template.c" #endif @@ -100,11 +128,16 @@ //SSE versions (only used on SSE2 cpus) #ifdef COMPILE_SSE #undef RENAME -#define HAVE_MMX -#define HAVE_MMX2 +#undef HAVE_MMX +#undef HAVE_MMX2 #undef HAVE_3DNOW -#define HAVE_SSE -#define HAVE_SSE2 +#undef HAVE_SSE +#undef HAVE_SSE2 +#define HAVE_MMX 1 +#define HAVE_MMX2 1 +#define HAVE_3DNOW 0 +#define HAVE_SSE 1 +#define HAVE_SSE2 1 #define RENAME(a) a ## _SSE #include "aclib_template.c" #endif @@ -130,13 +163,13 @@ void * fast_memcpy(void * to, const void * from, size_t len) #endif //CAN_COMPILE_X86_ASM memcpy(to, from, len); // prior to mmx we use the standart memcpy #else -#ifdef HAVE_SSE2 +#if HAVE_SSE2 fast_memcpy_SSE(to, from, len); -#elif defined (HAVE_MMX2) +#elif HAVE_MMX2 fast_memcpy_MMX2(to, from, len); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW fast_memcpy_3DNow(to, from, len); -#elif defined (HAVE_MMX) +#elif HAVE_MMX fast_memcpy_MMX(to, from, len); #else memcpy(to, from, len); // prior to mmx we use the standart memcpy @@ -164,13 +197,13 @@ void * mem2agpcpy(void * to, const void * from, size_t len) #endif //CAN_COMPILE_X86_ASM memcpy(to, from, len); // prior to mmx we use the standart memcpy #else -#ifdef HAVE_SSE2 +#if HAVE_SSE2 mem2agpcpy_SSE(to, from, len); -#elif defined (HAVE_MMX2) +#elif HAVE_MMX2 mem2agpcpy_MMX2(to, from, len); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW mem2agpcpy_3DNow(to, from, len); -#elif 
defined (HAVE_MMX) +#elif HAVE_MMX mem2agpcpy_MMX(to, from, len); #else memcpy(to, from, len); // prior to mmx we use the standart memcpy diff --git a/libvo/aclib_template.c b/libvo/aclib_template.c index 7288831172..74f0371a63 100644 --- a/libvo/aclib_template.c +++ b/libvo/aclib_template.c @@ -3,7 +3,7 @@ This file contains functions which improve and expand standard C-library */ -#ifndef HAVE_SSE2 +#if !HAVE_SSE2 /* P3 processor has only one SSE decoder so can execute only 1 sse insn per cpu clock, but it has 3 mmx decoders (include load/store unit) @@ -13,6 +13,7 @@ I have doubts. Anyway SSE2 version of this code can be written better. */ #undef HAVE_SSE +#define HAVE_SSE 0 #endif @@ -65,7 +66,7 @@ If you have questions please contact with me: Nick Kurshev: nickols_k@mail.ru. #undef HAVE_ONLY_MMX1 -#if defined(HAVE_MMX) && !defined(HAVE_MMX2) && !defined(HAVE_3DNOW) && !defined(HAVE_SSE) +#if HAVE_MMX && !HAVE_MMX2 && !HAVE_3DNOW && !HAVE_SSE /* means: mmx v.1. Note: Since we added alignment of destinition it speedups of memory copying on PentMMX, Celeron-1 and P2 upto 12% versus standard (non MMX-optimized) version. @@ -76,7 +77,7 @@ If you have questions please contact with me: Nick Kurshev: nickols_k@mail.ru. #undef HAVE_K6_2PLUS -#if !defined( HAVE_MMX2) && defined( HAVE_3DNOW) +#if !HAVE_MMX2 && HAVE_3DNOW #define HAVE_K6_2PLUS #endif @@ -95,7 +96,7 @@ __asm__ volatile(\ } #undef MMREG_SIZE -#ifdef HAVE_SSE +#if HAVE_SSE #define MMREG_SIZE 16 #else #define MMREG_SIZE 64 //8 @@ -104,23 +105,23 @@ __asm__ volatile(\ #undef PREFETCH #undef EMMS -#ifdef HAVE_MMX2 +#if HAVE_MMX2 #define PREFETCH "prefetchnta" -#elif defined ( HAVE_3DNOW ) +#elif HAVE_3DNOW #define PREFETCH "prefetch" #else #define PREFETCH " # nop" #endif /* On K6 femms is faster of emms. On K7 femms is directly mapped on emms. 
*/ -#ifdef HAVE_3DNOW +#if HAVE_3DNOW #define EMMS "femms" #else #define EMMS "emms" #endif #undef MOVNTQ -#ifdef HAVE_MMX2 +#if HAVE_MMX2 #define MOVNTQ "movntq" #else #define MOVNTQ "movq" @@ -183,7 +184,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len) perform reading and writing to be multiple to a number of processor's decoders, but it's not always possible. */ -#ifdef HAVE_SSE /* Only P3 (may be Cyrix3) */ +#if HAVE_SSE /* Only P3 (may be Cyrix3) */ if(((unsigned long)from) & 15) /* if SRC is misaligned */ for(; i>0; i--) @@ -343,12 +344,12 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len) } #endif /* Have SSE */ -#ifdef HAVE_MMX2 +#if HAVE_MMX2 /* since movntq is weakly-ordered, a "sfence" * is needed to become ordered again. */ __asm__ volatile ("sfence":::"memory"); #endif -#ifndef HAVE_SSE +#if !HAVE_SSE /* enables to use FPU */ __asm__ volatile (EMMS:::"memory"); #endif @@ -427,7 +428,7 @@ static void * RENAME(mem2agpcpy)(void * to, const void * from, size_t len) from=((const unsigned char *)from)+64; to=((unsigned char *)to)+64; } -#ifdef HAVE_MMX2 +#if HAVE_MMX2 /* since movntq is weakly-ordered, a "sfence" * is needed to become ordered again. 
*/ __asm__ volatile ("sfence":::"memory"); diff --git a/libvo/osd.c b/libvo/osd.c index da7d61fb50..df91ffbae0 100644 --- a/libvo/osd.c +++ b/libvo/osd.c @@ -11,7 +11,7 @@ #include <inttypes.h> #include "cpudetect.h" -#ifdef ARCH_X86 +#if ARCH_X86 #define CAN_COMPILE_X86_ASM #endif @@ -23,21 +23,21 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF //Note: we have C, X86-nommx, MMX, MMX2, 3DNOW version therse no 3DNOW+MMX2 one //Plain C versions -#if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT) +#if !HAVE_MMX || defined (RUNTIME_CPUDETECT) #define COMPILE_C #endif #ifdef CAN_COMPILE_X86_ASM -#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT) +#if (HAVE_MMX && !HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT) #define COMPILE_MMX #endif -#if defined (HAVE_MMX2) || defined (RUNTIME_CPUDETECT) +#if HAVE_MMX2 || defined (RUNTIME_CPUDETECT) #define COMPILE_MMX2 #endif -#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT) +#if (HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT) #define COMPILE_3DNOW #endif #endif //CAN_COMPILE_X86_ASM @@ -45,6 +45,9 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF #undef HAVE_MMX #undef HAVE_MMX2 #undef HAVE_3DNOW +#define HAVE_MMX 0 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 0 #ifndef CAN_COMPILE_X86_ASM @@ -52,6 +55,9 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF #undef HAVE_MMX #undef HAVE_MMX2 #undef HAVE_3DNOW +#define HAVE_MMX 0 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 0 #define RENAME(a) a ## _C #include "osd_template.c" #endif @@ -64,6 +70,9 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF #undef HAVE_MMX #undef HAVE_MMX2 #undef HAVE_3DNOW +#define HAVE_MMX 0 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 0 #define RENAME(a) a ## _X86 #include "osd_template.c" #endif @@ -71,9 +80,12 @@ static const 
unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF //MMX versions #ifdef COMPILE_MMX #undef RENAME -#define HAVE_MMX +#undef HAVE_MMX #undef HAVE_MMX2 #undef HAVE_3DNOW +#define HAVE_MMX 1 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 0 #define RENAME(a) a ## _MMX #include "osd_template.c" #endif @@ -81,9 +93,12 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF //MMX2 versions #ifdef COMPILE_MMX2 #undef RENAME -#define HAVE_MMX -#define HAVE_MMX2 +#undef HAVE_MMX +#undef HAVE_MMX2 #undef HAVE_3DNOW +#define HAVE_MMX 1 +#define HAVE_MMX2 1 +#define HAVE_3DNOW 0 #define RENAME(a) a ## _MMX2 #include "osd_template.c" #endif @@ -91,9 +106,12 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF //3DNOW versions #ifdef COMPILE_3DNOW #undef RENAME -#define HAVE_MMX +#undef HAVE_MMX #undef HAVE_MMX2 -#define HAVE_3DNOW +#undef HAVE_3DNOW +#define HAVE_MMX 1 +#define HAVE_MMX2 0 +#define HAVE_3DNOW 1 #define RENAME(a) a ## _3DNow #include "osd_template.c" #endif @@ -116,13 +134,13 @@ void vo_draw_alpha_yv12(int w,int h, unsigned char* src, unsigned char *srca, in vo_draw_alpha_yv12_C(w, h, src, srca, srcstride, dstbase, dststride); #endif #else //RUNTIME_CPUDETECT -#ifdef HAVE_MMX2 +#if HAVE_MMX2 vo_draw_alpha_yv12_MMX2(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW vo_draw_alpha_yv12_3DNow(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_MMX) +#elif HAVE_MMX vo_draw_alpha_yv12_MMX(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined(ARCH_X86) +#elif ARCH_X86 vo_draw_alpha_yv12_X86(w, h, src, srca, srcstride, dstbase, dststride); #else vo_draw_alpha_yv12_C(w, h, src, srca, srcstride, dstbase, dststride); @@ -146,13 +164,13 @@ void vo_draw_alpha_yuy2(int w,int h, unsigned char* src, unsigned char *srca, in vo_draw_alpha_yuy2_C(w, h, src, srca, srcstride, dstbase, dststride); #endif #else //RUNTIME_CPUDETECT -#ifdef 
HAVE_MMX2 +#if HAVE_MMX2 vo_draw_alpha_yuy2_MMX2(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW vo_draw_alpha_yuy2_3DNow(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_MMX) +#elif HAVE_MMX vo_draw_alpha_yuy2_MMX(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined(ARCH_X86) +#elif ARCH_X86 vo_draw_alpha_yuy2_X86(w, h, src, srca, srcstride, dstbase, dststride); #else vo_draw_alpha_yuy2_C(w, h, src, srca, srcstride, dstbase, dststride); @@ -176,13 +194,13 @@ void vo_draw_alpha_uyvy(int w,int h, unsigned char* src, unsigned char *srca, in vo_draw_alpha_uyvy_C(w, h, src, srca, srcstride, dstbase, dststride); #endif #else //RUNTIME_CPUDETECT -#ifdef HAVE_MMX2 +#if HAVE_MMX2 vo_draw_alpha_uyvy_MMX2(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW vo_draw_alpha_uyvy_3DNow(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_MMX) +#elif HAVE_MMX vo_draw_alpha_uyvy_MMX(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined(ARCH_X86) +#elif ARCH_X86 vo_draw_alpha_uyvy_X86(w, h, src, srca, srcstride, dstbase, dststride); #else vo_draw_alpha_uyvy_C(w, h, src, srca, srcstride, dstbase, dststride); @@ -206,13 +224,13 @@ void vo_draw_alpha_rgb24(int w,int h, unsigned char* src, unsigned char *srca, i vo_draw_alpha_rgb24_C(w, h, src, srca, srcstride, dstbase, dststride); #endif #else //RUNTIME_CPUDETECT -#ifdef HAVE_MMX2 +#if HAVE_MMX2 vo_draw_alpha_rgb24_MMX2(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW vo_draw_alpha_rgb24_3DNow(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_MMX) +#elif HAVE_MMX vo_draw_alpha_rgb24_MMX(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined(ARCH_X86) +#elif ARCH_X86 vo_draw_alpha_rgb24_X86(w, h, src, srca, srcstride, dstbase, dststride); #else vo_draw_alpha_rgb24_C(w, h, src, srca, srcstride, dstbase, dststride); @@ 
-236,13 +254,13 @@ void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, i vo_draw_alpha_rgb32_C(w, h, src, srca, srcstride, dstbase, dststride); #endif #else //RUNTIME_CPUDETECT -#ifdef HAVE_MMX2 +#if HAVE_MMX2 vo_draw_alpha_rgb32_MMX2(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW vo_draw_alpha_rgb32_3DNow(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined (HAVE_MMX) +#elif HAVE_MMX vo_draw_alpha_rgb32_MMX(w, h, src, srca, srcstride, dstbase, dststride); -#elif defined(ARCH_X86) +#elif ARCH_X86 vo_draw_alpha_rgb32_X86(w, h, src, srca, srcstride, dstbase, dststride); #else vo_draw_alpha_rgb32_C(w, h, src, srca, srcstride, dstbase, dststride); @@ -281,13 +299,13 @@ void vo_draw_alpha_init(void){ mp_msg(MSGT_OSD,MSGL_INFO,"Using Unoptimized OnScreenDisplay\n"); #endif #else //RUNTIME_CPUDETECT -#ifdef HAVE_MMX2 +#if HAVE_MMX2 mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX (with tiny bit MMX2) Optimized OnScreenDisplay\n"); -#elif defined (HAVE_3DNOW) +#elif HAVE_3DNOW mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX (with tiny bit 3DNow) Optimized OnScreenDisplay\n"); -#elif defined (HAVE_MMX) +#elif HAVE_MMX mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX Optimized OnScreenDisplay\n"); -#elif defined(ARCH_X86) +#elif ARCH_X86 mp_msg(MSGT_OSD,MSGL_INFO,"Using X86 Optimized OnScreenDisplay\n"); #else mp_msg(MSGT_OSD,MSGL_INFO,"Using Unoptimized OnScreenDisplay\n"); diff --git a/libvo/osd_template.c b/libvo/osd_template.c index c44f060a5d..8700a176c6 100644 --- a/libvo/osd_template.c +++ b/libvo/osd_template.c @@ -7,11 +7,11 @@ #undef PREFETCHW #undef PAVGB -#ifdef HAVE_3DNOW +#if HAVE_3DNOW #define PREFETCH "prefetch" #define PREFETCHW "prefetchw" #define PAVGB "pavgusb" -#elif defined ( HAVE_MMX2 ) +#elif HAVE_MMX2 #define PREFETCH "prefetchnta" #define PREFETCHW "prefetcht0" #define PAVGB "pavgb" @@ -20,7 +20,7 @@ #define PREFETCHW " # nop" #endif -#ifdef HAVE_3DNOW +#if HAVE_3DNOW /* On K6 femms is faster of 
emms. On K7 femms is directly mapped on emms. */ #define EMMS "femms" #else @@ -29,10 +29,10 @@ static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){ int y; -#if defined(FAST_OSD) && !defined(HAVE_MMX) +#if defined(FAST_OSD) && !HAVE_MMX w=w>>1; #endif -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile( "pcmpeqb %%mm5, %%mm5\n\t" // F..F "movq %%mm5, %%mm4\n\t" @@ -43,7 +43,7 @@ static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, u #endif for(y=0;y<h;y++){ register int x; -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile( PREFETCHW" %0\n\t" PREFETCH" %1\n\t" @@ -91,7 +91,7 @@ static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, u srca+=srcstride; dstbase+=dststride; } -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile(EMMS:::"memory"); #endif return; @@ -99,10 +99,10 @@ static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, u static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){ int y; -#if defined(FAST_OSD) && !defined(HAVE_MMX) +#if defined(FAST_OSD) && !HAVE_MMX w=w>>1; #endif -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile( "pxor %%mm7, %%mm7\n\t" "pcmpeqb %%mm5, %%mm5\n\t" // F..F @@ -114,7 +114,7 @@ static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, u #endif for(y=0;y<h;y++){ register int x; -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile( PREFETCHW" %0\n\t" PREFETCH" %1\n\t" @@ -163,7 +163,7 @@ static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, u srca+=srcstride; dstbase+=dststride; } -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile(EMMS:::"memory"); #endif return; @@ -195,7 +195,7 @@ static inline void RENAME(vo_draw_alpha_uyvy)(int w,int h, unsigned char* src, u static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* 
src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){ int y; -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile( "pxor %%mm7, %%mm7\n\t" "pcmpeqb %%mm6, %%mm6\n\t" // F..F @@ -204,8 +204,8 @@ static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src, for(y=0;y<h;y++){ register unsigned char *dst = dstbase; register int x; -#if defined(ARCH_X86) && (!defined(ARCH_X86_64) || defined(HAVE_MMX)) -#ifdef HAVE_MMX +#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX) +#if HAVE_MMX __asm__ volatile( PREFETCHW" %0\n\t" PREFETCH" %1\n\t" @@ -295,7 +295,7 @@ static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src, srca+=srcstride; dstbase+=dststride; } -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile(EMMS:::"memory"); #endif return; @@ -306,8 +306,8 @@ static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src, #ifdef WORDS_BIGENDIAN dstbase++; #endif -#ifdef HAVE_MMX -#ifdef HAVE_3DNOW +#if HAVE_MMX +#if HAVE_3DNOW __asm__ volatile( "pxor %%mm7, %%mm7\n\t" "pcmpeqb %%mm6, %%mm6\n\t" // F..F @@ -324,9 +324,9 @@ static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src, #endif /* HAVE_MMX */ for(y=0;y<h;y++){ register int x; -#if defined(ARCH_X86) && (!defined(ARCH_X86_64) || defined(HAVE_MMX)) -#ifdef HAVE_MMX -#ifdef HAVE_3DNOW +#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX) +#if HAVE_MMX +#if HAVE_3DNOW __asm__ volatile( PREFETCHW" %0\n\t" PREFETCH" %1\n\t" @@ -460,7 +460,7 @@ static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src, srca+=srcstride; dstbase+=dststride; } -#ifdef HAVE_MMX +#if HAVE_MMX __asm__ volatile(EMMS:::"memory"); #endif return; |