author     Uoti Urpala <uau@glyph.nonexistent.invalid>    2009-01-19 00:04:43 +0200
committer  Uoti Urpala <uau@glyph.nonexistent.invalid>    2009-01-19 00:04:43 +0200
commit     79e1aa7cc7a9ac695c3af361bca72013ef272885 (patch)
tree       cc52764dfa4fab2cfc74c6aee5d71b27edbf014b /libvo
parent     3ba97da297e1e1a5ca9dabdda60cc6a62f77affc (diff)
parent     39cb032fd184a62416f1e62d26e05576cfd8dbe4 (diff)
Merge svn changes up to r28341
Conflicts:
	configure
	libmpcodecs/native/rtjpegn.c
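The libvo changes below follow one pattern: preprocessor feature tests are
converted from "#ifdef HAVE_X" to "#if HAVE_X", with every HAVE_* macro
explicitly defined to 0 or 1 before each template inclusion, and
vo_macosx.m drops the screen_force flag in favour of a screen_id default
of -1. A minimal sketch of the 0/1 convention, assuming a hypothetical
HAVE_MMX value normally provided by configure (illustration only, not part
of this commit):

    /* Old style: the macro is either defined or absent, so a typo in the
     * name silently selects the fallback path. */
    #ifdef HAVE_MMX
        /* MMX code path */
    #endif

    /* New style: the macro is always defined, to 0 or 1. Plain #if works,
     * and a misspelled or missing name is flagged by -Wundef instead of
     * quietly evaluating to false. */
    #define HAVE_MMX 1   /* hypothetical value; set by the build system */

    #if HAVE_MMX
        /* MMX code path */
    #else
        /* plain C fallback */
    #endif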
Diffstat (limited to 'libvo')
-rw-r--r--  libvo/aclib.c            79
-rw-r--r--  libvo/aclib_template.c   25
-rw-r--r--  libvo/fastmemcpy.h        4
-rw-r--r--  libvo/osd.c              86
-rw-r--r--  libvo/osd_template.c     42
-rw-r--r--  libvo/vo_macosx.m        13
6 files changed, 149 insertions(+), 100 deletions(-)
diff --git a/libvo/aclib.c b/libvo/aclib.c
index 68548f579e..4c51709b2a 100644
--- a/libvo/aclib.c
+++ b/libvo/aclib.c
@@ -18,31 +18,31 @@
//Feel free to fine-tune the above 2, it might be possible to get some speedup with them :)
//#define STATISTICS
-#ifdef ARCH_X86
+#if ARCH_X86
#define CAN_COMPILE_X86_ASM
#endif
//Note: we have MMX, MMX2, 3DNOW version there is no 3DNOW+MMX2 one
//Plain C versions
-//#if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
+//#if !HAVE_MMX || defined (RUNTIME_CPUDETECT)
//#define COMPILE_C
//#endif
#ifdef CAN_COMPILE_X86_ASM
-#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
+#if (HAVE_MMX && !HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
#endif
-#if (defined (HAVE_MMX2) && !defined (HAVE_SSE2)) || defined (RUNTIME_CPUDETECT)
+#if (HAVE_MMX2 && !HAVE_SSE2) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX2
#endif
-#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
+#if (HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
#define COMPILE_3DNOW
#endif
-#if defined (HAVE_SSE2) || defined (RUNTIME_CPUDETECT)
+#if HAVE_SSE2 || defined (RUNTIME_CPUDETECT)
#define COMPILE_SSE
#endif
@@ -51,12 +51,23 @@
#undef HAVE_3DNOW
#undef HAVE_SSE
#undef HAVE_SSE2
+#define HAVE_MMX 0
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
/*
#ifdef COMPILE_C
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
-#undef ARCH_X86
+#undef HAVE_SSE
+#undef HAVE_SSE2
+#define HAVE_MMX 0
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
#define RENAME(a) a ## _C
#include "aclib_template.c"
#endif
@@ -64,11 +75,16 @@
//MMX versions
#ifdef COMPILE_MMX
#undef RENAME
-#define HAVE_MMX
+#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
#undef HAVE_SSE
#undef HAVE_SSE2
+#define HAVE_MMX 1
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
#define RENAME(a) a ## _MMX
#include "aclib_template.c"
#endif
@@ -76,11 +92,16 @@
//MMX2 versions
#ifdef COMPILE_MMX2
#undef RENAME
-#define HAVE_MMX
-#define HAVE_MMX2
+#undef HAVE_MMX
+#undef HAVE_MMX2
#undef HAVE_3DNOW
#undef HAVE_SSE
#undef HAVE_SSE2
+#define HAVE_MMX 1
+#define HAVE_MMX2 1
+#define HAVE_3DNOW 0
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
#define RENAME(a) a ## _MMX2
#include "aclib_template.c"
#endif
@@ -88,11 +109,16 @@
//3DNOW versions
#ifdef COMPILE_3DNOW
#undef RENAME
-#define HAVE_MMX
+#undef HAVE_MMX
#undef HAVE_MMX2
-#define HAVE_3DNOW
+#undef HAVE_3DNOW
#undef HAVE_SSE
#undef HAVE_SSE2
+#define HAVE_MMX 1
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 1
+#define HAVE_SSE 0
+#define HAVE_SSE2 0
#define RENAME(a) a ## _3DNow
#include "aclib_template.c"
#endif
@@ -100,11 +126,16 @@
//SSE versions (only used on SSE2 cpus)
#ifdef COMPILE_SSE
#undef RENAME
-#define HAVE_MMX
-#define HAVE_MMX2
+#undef HAVE_MMX
+#undef HAVE_MMX2
#undef HAVE_3DNOW
-#define HAVE_SSE
-#define HAVE_SSE2
+#undef HAVE_SSE
+#undef HAVE_SSE2
+#define HAVE_MMX 1
+#define HAVE_MMX2 1
+#define HAVE_3DNOW 0
+#define HAVE_SSE 1
+#define HAVE_SSE2 1
#define RENAME(a) a ## _SSE
#include "aclib_template.c"
#endif
@@ -130,13 +161,13 @@ void * fast_memcpy(void * to, const void * from, size_t len)
#endif //CAN_COMPILE_X86_ASM
memcpy(to, from, len); // prior to mmx we use the standart memcpy
#else
-#ifdef HAVE_SSE2
+#if HAVE_SSE2
fast_memcpy_SSE(to, from, len);
-#elif defined (HAVE_MMX2)
+#elif HAVE_MMX2
fast_memcpy_MMX2(to, from, len);
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
fast_memcpy_3DNow(to, from, len);
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
fast_memcpy_MMX(to, from, len);
#else
memcpy(to, from, len); // prior to mmx we use the standart memcpy
@@ -164,13 +195,13 @@ void * mem2agpcpy(void * to, const void * from, size_t len)
#endif //CAN_COMPILE_X86_ASM
memcpy(to, from, len); // prior to mmx we use the standart memcpy
#else
-#ifdef HAVE_SSE2
+#if HAVE_SSE2
mem2agpcpy_SSE(to, from, len);
-#elif defined (HAVE_MMX2)
+#elif HAVE_MMX2
mem2agpcpy_MMX2(to, from, len);
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
mem2agpcpy_3DNow(to, from, len);
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
mem2agpcpy_MMX(to, from, len);
#else
memcpy(to, from, len); // prior to mmx we use the standart memcpy
diff --git a/libvo/aclib_template.c b/libvo/aclib_template.c
index 7288831172..74f0371a63 100644
--- a/libvo/aclib_template.c
+++ b/libvo/aclib_template.c
@@ -3,7 +3,7 @@
This file contains functions which improve and expand standard C-library
*/
-#ifndef HAVE_SSE2
+#if !HAVE_SSE2
/*
P3 processor has only one SSE decoder so can execute only 1 sse insn per
cpu clock, but it has 3 mmx decoders (include load/store unit)
@@ -13,6 +13,7 @@
I have doubts. Anyway SSE2 version of this code can be written better.
*/
#undef HAVE_SSE
+#define HAVE_SSE 0
#endif
@@ -65,7 +66,7 @@ If you have questions please contact with me: Nick Kurshev: nickols_k@mail.ru.
#undef HAVE_ONLY_MMX1
-#if defined(HAVE_MMX) && !defined(HAVE_MMX2) && !defined(HAVE_3DNOW) && !defined(HAVE_SSE)
+#if HAVE_MMX && !HAVE_MMX2 && !HAVE_3DNOW && !HAVE_SSE
/* means: mmx v.1. Note: Since we added alignment of destinition it speedups
of memory copying on PentMMX, Celeron-1 and P2 upto 12% versus
standard (non MMX-optimized) version.
@@ -76,7 +77,7 @@ If you have questions please contact with me: Nick Kurshev: nickols_k@mail.ru.
#undef HAVE_K6_2PLUS
-#if !defined( HAVE_MMX2) && defined( HAVE_3DNOW)
+#if !HAVE_MMX2 && HAVE_3DNOW
#define HAVE_K6_2PLUS
#endif
@@ -95,7 +96,7 @@ __asm__ volatile(\
}
#undef MMREG_SIZE
-#ifdef HAVE_SSE
+#if HAVE_SSE
#define MMREG_SIZE 16
#else
#define MMREG_SIZE 64 //8
@@ -104,23 +105,23 @@ __asm__ volatile(\
#undef PREFETCH
#undef EMMS
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
#define PREFETCH "prefetchnta"
-#elif defined ( HAVE_3DNOW )
+#elif HAVE_3DNOW
#define PREFETCH "prefetch"
#else
#define PREFETCH " # nop"
#endif
/* On K6 femms is faster of emms. On K7 femms is directly mapped on emms. */
-#ifdef HAVE_3DNOW
+#if HAVE_3DNOW
#define EMMS "femms"
#else
#define EMMS "emms"
#endif
#undef MOVNTQ
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
#define MOVNTQ "movntq"
#else
#define MOVNTQ "movq"
@@ -183,7 +184,7 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
perform reading and writing to be multiple to a number of
processor's decoders, but it's not always possible.
*/
-#ifdef HAVE_SSE /* Only P3 (may be Cyrix3) */
+#if HAVE_SSE /* Only P3 (may be Cyrix3) */
if(((unsigned long)from) & 15)
/* if SRC is misaligned */
for(; i>0; i--)
@@ -343,12 +344,12 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
}
#endif /* Have SSE */
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
__asm__ volatile ("sfence":::"memory");
#endif
-#ifndef HAVE_SSE
+#if !HAVE_SSE
/* enables to use FPU */
__asm__ volatile (EMMS:::"memory");
#endif
@@ -427,7 +428,7 @@ static void * RENAME(mem2agpcpy)(void * to, const void * from, size_t len)
from=((const unsigned char *)from)+64;
to=((unsigned char *)to)+64;
}
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
__asm__ volatile ("sfence":::"memory");
diff --git a/libvo/fastmemcpy.h b/libvo/fastmemcpy.h
index be0b752558..69ded04bcf 100644
--- a/libvo/fastmemcpy.h
+++ b/libvo/fastmemcpy.h
@@ -24,8 +24,8 @@
#include <string.h>
#ifdef CONFIG_FASTMEMCPY
-#if defined(HAVE_MMX) || defined(HAVE_MMX2) || defined(HAVE_3DNOW) \
-/* || defined(HAVE_SSE) || defined(HAVE_SSE2) */
+#if HAVE_MMX || HAVE_MMX2 || HAVE_3DNOW \
+/* || HAVE_SSE || HAVE_SSE2 */
#include <stddef.h>
void * fast_memcpy(void * to, const void * from, size_t len);
diff --git a/libvo/osd.c b/libvo/osd.c
index da7d61fb50..df91ffbae0 100644
--- a/libvo/osd.c
+++ b/libvo/osd.c
@@ -11,7 +11,7 @@
#include <inttypes.h>
#include "cpudetect.h"
-#ifdef ARCH_X86
+#if ARCH_X86
#define CAN_COMPILE_X86_ASM
#endif
@@ -23,21 +23,21 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
//Note: we have C, X86-nommx, MMX, MMX2, 3DNOW version therse no 3DNOW+MMX2 one
//Plain C versions
-#if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
+#if !HAVE_MMX || defined (RUNTIME_CPUDETECT)
#define COMPILE_C
#endif
#ifdef CAN_COMPILE_X86_ASM
-#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
+#if (HAVE_MMX && !HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
#endif
-#if defined (HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
+#if HAVE_MMX2 || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX2
#endif
-#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
+#if (HAVE_3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
#define COMPILE_3DNOW
#endif
#endif //CAN_COMPILE_X86_ASM
@@ -45,6 +45,9 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
+#define HAVE_MMX 0
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 0
#ifndef CAN_COMPILE_X86_ASM
@@ -52,6 +55,9 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
+#define HAVE_MMX 0
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 0
#define RENAME(a) a ## _C
#include "osd_template.c"
#endif
@@ -64,6 +70,9 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
+#define HAVE_MMX 0
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 0
#define RENAME(a) a ## _X86
#include "osd_template.c"
#endif
@@ -71,9 +80,12 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
//MMX versions
#ifdef COMPILE_MMX
#undef RENAME
-#define HAVE_MMX
+#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
+#define HAVE_MMX 1
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 0
#define RENAME(a) a ## _MMX
#include "osd_template.c"
#endif
@@ -81,9 +93,12 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
//MMX2 versions
#ifdef COMPILE_MMX2
#undef RENAME
-#define HAVE_MMX
-#define HAVE_MMX2
+#undef HAVE_MMX
+#undef HAVE_MMX2
#undef HAVE_3DNOW
+#define HAVE_MMX 1
+#define HAVE_MMX2 1
+#define HAVE_3DNOW 0
#define RENAME(a) a ## _MMX2
#include "osd_template.c"
#endif
@@ -91,9 +106,12 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
//3DNOW versions
#ifdef COMPILE_3DNOW
#undef RENAME
-#define HAVE_MMX
+#undef HAVE_MMX
#undef HAVE_MMX2
-#define HAVE_3DNOW
+#undef HAVE_3DNOW
+#define HAVE_MMX 1
+#define HAVE_MMX2 0
+#define HAVE_3DNOW 1
#define RENAME(a) a ## _3DNow
#include "osd_template.c"
#endif
@@ -116,13 +134,13 @@ void vo_draw_alpha_yv12(int w,int h, unsigned char* src, unsigned char *srca, in
vo_draw_alpha_yv12_C(w, h, src, srca, srcstride, dstbase, dststride);
#endif
#else //RUNTIME_CPUDETECT
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
vo_draw_alpha_yv12_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
vo_draw_alpha_yv12_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
vo_draw_alpha_yv12_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined(ARCH_X86)
+#elif ARCH_X86
vo_draw_alpha_yv12_X86(w, h, src, srca, srcstride, dstbase, dststride);
#else
vo_draw_alpha_yv12_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -146,13 +164,13 @@ void vo_draw_alpha_yuy2(int w,int h, unsigned char* src, unsigned char *srca, in
vo_draw_alpha_yuy2_C(w, h, src, srca, srcstride, dstbase, dststride);
#endif
#else //RUNTIME_CPUDETECT
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
vo_draw_alpha_yuy2_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
vo_draw_alpha_yuy2_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
vo_draw_alpha_yuy2_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined(ARCH_X86)
+#elif ARCH_X86
vo_draw_alpha_yuy2_X86(w, h, src, srca, srcstride, dstbase, dststride);
#else
vo_draw_alpha_yuy2_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -176,13 +194,13 @@ void vo_draw_alpha_uyvy(int w,int h, unsigned char* src, unsigned char *srca, in
vo_draw_alpha_uyvy_C(w, h, src, srca, srcstride, dstbase, dststride);
#endif
#else //RUNTIME_CPUDETECT
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
vo_draw_alpha_uyvy_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
vo_draw_alpha_uyvy_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
vo_draw_alpha_uyvy_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined(ARCH_X86)
+#elif ARCH_X86
vo_draw_alpha_uyvy_X86(w, h, src, srca, srcstride, dstbase, dststride);
#else
vo_draw_alpha_uyvy_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -206,13 +224,13 @@ void vo_draw_alpha_rgb24(int w,int h, unsigned char* src, unsigned char *srca, i
vo_draw_alpha_rgb24_C(w, h, src, srca, srcstride, dstbase, dststride);
#endif
#else //RUNTIME_CPUDETECT
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
vo_draw_alpha_rgb24_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
vo_draw_alpha_rgb24_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
vo_draw_alpha_rgb24_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined(ARCH_X86)
+#elif ARCH_X86
vo_draw_alpha_rgb24_X86(w, h, src, srca, srcstride, dstbase, dststride);
#else
vo_draw_alpha_rgb24_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -236,13 +254,13 @@ void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, i
vo_draw_alpha_rgb32_C(w, h, src, srca, srcstride, dstbase, dststride);
#endif
#else //RUNTIME_CPUDETECT
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
vo_draw_alpha_rgb32_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
vo_draw_alpha_rgb32_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
vo_draw_alpha_rgb32_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined(ARCH_X86)
+#elif ARCH_X86
vo_draw_alpha_rgb32_X86(w, h, src, srca, srcstride, dstbase, dststride);
#else
vo_draw_alpha_rgb32_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -281,13 +299,13 @@ void vo_draw_alpha_init(void){
mp_msg(MSGT_OSD,MSGL_INFO,"Using Unoptimized OnScreenDisplay\n");
#endif
#else //RUNTIME_CPUDETECT
-#ifdef HAVE_MMX2
+#if HAVE_MMX2
mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX (with tiny bit MMX2) Optimized OnScreenDisplay\n");
-#elif defined (HAVE_3DNOW)
+#elif HAVE_3DNOW
mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX (with tiny bit 3DNow) Optimized OnScreenDisplay\n");
-#elif defined (HAVE_MMX)
+#elif HAVE_MMX
mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX Optimized OnScreenDisplay\n");
-#elif defined(ARCH_X86)
+#elif ARCH_X86
mp_msg(MSGT_OSD,MSGL_INFO,"Using X86 Optimized OnScreenDisplay\n");
#else
mp_msg(MSGT_OSD,MSGL_INFO,"Using Unoptimized OnScreenDisplay\n");
diff --git a/libvo/osd_template.c b/libvo/osd_template.c
index c44f060a5d..8700a176c6 100644
--- a/libvo/osd_template.c
+++ b/libvo/osd_template.c
@@ -7,11 +7,11 @@
#undef PREFETCHW
#undef PAVGB
-#ifdef HAVE_3DNOW
+#if HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#define PAVGB "pavgusb"
-#elif defined ( HAVE_MMX2 )
+#elif HAVE_MMX2
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#define PAVGB "pavgb"
@@ -20,7 +20,7 @@
#define PREFETCHW " # nop"
#endif
-#ifdef HAVE_3DNOW
+#if HAVE_3DNOW
/* On K6 femms is faster of emms. On K7 femms is directly mapped on emms. */
#define EMMS "femms"
#else
@@ -29,10 +29,10 @@
static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
int y;
-#if defined(FAST_OSD) && !defined(HAVE_MMX)
+#if defined(FAST_OSD) && !HAVE_MMX
w=w>>1;
#endif
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(
"pcmpeqb %%mm5, %%mm5\n\t" // F..F
"movq %%mm5, %%mm4\n\t"
@@ -43,7 +43,7 @@ static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, u
#endif
for(y=0;y<h;y++){
register int x;
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(
PREFETCHW" %0\n\t"
PREFETCH" %1\n\t"
@@ -91,7 +91,7 @@ static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, u
srca+=srcstride;
dstbase+=dststride;
}
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(EMMS:::"memory");
#endif
return;
@@ -99,10 +99,10 @@ static inline void RENAME(vo_draw_alpha_yv12)(int w,int h, unsigned char* src, u
static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
int y;
-#if defined(FAST_OSD) && !defined(HAVE_MMX)
+#if defined(FAST_OSD) && !HAVE_MMX
w=w>>1;
#endif
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(
"pxor %%mm7, %%mm7\n\t"
"pcmpeqb %%mm5, %%mm5\n\t" // F..F
@@ -114,7 +114,7 @@ static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, u
#endif
for(y=0;y<h;y++){
register int x;
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(
PREFETCHW" %0\n\t"
PREFETCH" %1\n\t"
@@ -163,7 +163,7 @@ static inline void RENAME(vo_draw_alpha_yuy2)(int w,int h, unsigned char* src, u
srca+=srcstride;
dstbase+=dststride;
}
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(EMMS:::"memory");
#endif
return;
@@ -195,7 +195,7 @@ static inline void RENAME(vo_draw_alpha_uyvy)(int w,int h, unsigned char* src, u
static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
int y;
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(
"pxor %%mm7, %%mm7\n\t"
"pcmpeqb %%mm6, %%mm6\n\t" // F..F
@@ -204,8 +204,8 @@ static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src,
for(y=0;y<h;y++){
register unsigned char *dst = dstbase;
register int x;
-#if defined(ARCH_X86) && (!defined(ARCH_X86_64) || defined(HAVE_MMX))
-#ifdef HAVE_MMX
+#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
+#if HAVE_MMX
__asm__ volatile(
PREFETCHW" %0\n\t"
PREFETCH" %1\n\t"
@@ -295,7 +295,7 @@ static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src,
srca+=srcstride;
dstbase+=dststride;
}
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(EMMS:::"memory");
#endif
return;
@@ -306,8 +306,8 @@ static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src,
#ifdef WORDS_BIGENDIAN
dstbase++;
#endif
-#ifdef HAVE_MMX
-#ifdef HAVE_3DNOW
+#if HAVE_MMX
+#if HAVE_3DNOW
__asm__ volatile(
"pxor %%mm7, %%mm7\n\t"
"pcmpeqb %%mm6, %%mm6\n\t" // F..F
@@ -324,9 +324,9 @@ static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src,
#endif /* HAVE_MMX */
for(y=0;y<h;y++){
register int x;
-#if defined(ARCH_X86) && (!defined(ARCH_X86_64) || defined(HAVE_MMX))
-#ifdef HAVE_MMX
-#ifdef HAVE_3DNOW
+#if ARCH_X86 && (!ARCH_X86_64 || HAVE_MMX)
+#if HAVE_MMX
+#if HAVE_3DNOW
__asm__ volatile(
PREFETCHW" %0\n\t"
PREFETCH" %1\n\t"
@@ -460,7 +460,7 @@ static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src,
srca+=srcstride;
dstbase+=dststride;
}
-#ifdef HAVE_MMX
+#if HAVE_MMX
__asm__ volatile(EMMS:::"memory");
#endif
return;
diff --git a/libvo/vo_macosx.m b/libvo/vo_macosx.m
index dcce691354..38a9c9dbe9 100644
--- a/libvo/vo_macosx.m
+++ b/libvo/vo_macosx.m
@@ -50,8 +50,7 @@ BOOL shared_buffer = false;
static char *buffer_name;
//Screen
-int screen_id;
-BOOL screen_force;
+int screen_id = -1;
NSRect screen_frame;
NSScreen *screen_handle;
NSArray *screen_array;
@@ -110,13 +109,13 @@ static int config(uint32_t width, uint32_t height, uint32_t d_width, uint32_t d_
screen_array = [NSScreen screens];
if(screen_id < [screen_array count])
{
- screen_handle = [screen_array objectAtIndex:screen_id];
+ screen_handle = [screen_array objectAtIndex:(screen_id < 0 ? 0 : screen_id)];
}
else
{
mp_msg(MSGT_VO, MSGL_FATAL, "Get device error: Device ID %d do not exist, falling back to main device.\n", screen_id);
screen_handle = [screen_array objectAtIndex:0];
- screen_id = 0;
+ screen_id = -1;
}
screen_frame = [screen_handle frame];
vo_screenwidth = screen_frame.size.width;
@@ -324,7 +323,7 @@ static void uninit(void)
}
static opt_t subopts[] = {
-{"device_id", OPT_ARG_INT, &screen_id, (opt_test_f)int_non_neg},
+{"device_id", OPT_ARG_INT, &screen_id, NULL},
{"shared_buffer", OPT_ARG_BOOL, &shared_buffer, NULL},
{"buffer_name", OPT_ARG_MSTRZ,&buffer_name, NULL},
{NULL}
@@ -334,7 +333,7 @@ static int preinit(const char *arg)
{
// set defaults
- screen_id = 0;
+ screen_id = -1;
shared_buffer = false;
buffer_name = NULL;
@@ -858,7 +857,7 @@ static int control(uint32_t request, void *data)
}
old_frame = [window frame]; //save main window size & position
- if(screen_force)
+ if(screen_id >= 0)
screen_frame = [screen_handle frame];
else {
screen_frame = [[window screen] frame];