Diffstat (limited to 'liba52')
-rw-r--r--  liba52/a52_internal.h  |  2
-rw-r--r--  liba52/bitstream.h     |  2
-rw-r--r--  liba52/downmix.c       |  4
-rw-r--r--  liba52/imdct.c         | 22
-rw-r--r--  liba52/imdct_3dnow.h   | 28
-rw-r--r--  liba52/resample.c      |  8
-rw-r--r--  liba52/resample_mmx.c  |  2
-rw-r--r--  liba52/srfftp_3dnow.h  |  4
8 files changed, 37 insertions, 35 deletions
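
The change is mechanical throughout: every #ifdef FOO or #if defined(FOO) test on an architecture or feature macro becomes a value test, #if FOO. This assumes the build system now defines each such macro to 0 or 1 unconditionally, in the FFmpeg/MPlayer config.h convention. A minimal sketch of the difference, with hypothetical values:

/* Sketch only; the 0/1 values normally come from the generated
 * config.h, these are hypothetical examples. */
#define ARCH_X86     0
#define ARCH_X86_64  1

#ifdef ARCH_X86      /* old style: true merely because the macro   */
/* 32-bit x86 code */ /* exists, even though its value is 0         */
#endif

#if ARCH_X86         /* new style: false, the value is tested      */
/* 32-bit x86 code */
#endif

A side benefit: an undefined macro evaluates to 0 in an #if, so compiling with -Wundef now flags any misspelled or never-defined feature macro instead of the test silently coming out false.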
diff --git a/liba52/a52_internal.h b/liba52/a52_internal.h
index 3854c16707..1f6d205931 100644
--- a/liba52/a52_internal.h
+++ b/liba52/a52_internal.h
@@ -107,7 +107,7 @@ struct a52_state_s {
#define DELTA_BIT_NONE (2)
#define DELTA_BIT_RESERVED (3)
-#ifdef ARCH_X86_64
+#if ARCH_X86_64
# define REG_a "rax"
# define REG_d "rdx"
# define REG_S "rsi"
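
The test in a52_internal.h guards the 64-bit register-name aliases (REG_a and friends), which let one inline-asm body serve both 32- and 64-bit builds. A minimal sketch of how such aliases are typically used; the clear_acc function is a hypothetical example, not from the patch:

#if ARCH_X86_64
# define REG_a "rax"
#else
# define REG_a "eax"
#endif

/* String pasting yields "xor %%rax, %%rax" or "xor %%eax, %%eax"
 * depending on the target, from one source line. */
static inline void clear_acc(void)
{
    __asm__ volatile("xor %%"REG_a", %%"REG_a ::: "%"REG_a);
}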
diff --git a/liba52/bitstream.h b/liba52/bitstream.h
index 63b1823b17..fcf2e47390 100644
--- a/liba52/bitstream.h
+++ b/liba52/bitstream.h
@@ -37,7 +37,7 @@
#define ALT_BITSTREAM_READER
/* used to avoid misaligned exceptions on some archs (alpha, ...) */
-#if defined (ARCH_X86) || defined(HAVE_ARMV6)
+#if ARCH_X86 || defined(HAVE_ARMV6)
# define unaligned32(a) (*(uint32_t*)(a))
#else
# ifdef __GNUC__
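
The macro guarded here reads a possibly misaligned 32-bit word: on x86 and ARMv6 a plain dereference is safe, so the fast path applies, while strict-alignment targets fall through to the __GNUC__ branch below the hunk. That fallback is conventionally the packed-struct idiom; a reconstructed sketch, with an illustrative struct name:

#include <stdint.h>

/* A packed struct forces GCC to emit an access that is legal even
 * when the pointer is not 4-byte aligned (e.g. byte loads on alpha). */
struct unaligned_32 { uint32_t l; } __attribute__((packed));

#define unaligned32(a) (((const struct unaligned_32 *)(a))->l)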
diff --git a/liba52/downmix.c b/liba52/downmix.c
index ffb149b760..c44317fd6f 100644
--- a/liba52/downmix.c
+++ b/liba52/downmix.c
@@ -56,7 +56,7 @@ void downmix_accel_init(uint32_t mm_accel)
{
a52_upmix= upmix_C;
a52_downmix= downmix_C;
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if ARCH_X86 || ARCH_X86_64
if(mm_accel & MM_ACCEL_X86_MMX) a52_upmix= upmix_MMX;
if(mm_accel & MM_ACCEL_X86_SSE) a52_downmix= downmix_SSE;
if(mm_accel & MM_ACCEL_X86_3DNOW) a52_downmix= downmix_3dnow;
@@ -686,7 +686,7 @@ void upmix_C (sample_t * samples, int acmod, int output)
}
}
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if ARCH_X86 || ARCH_X86_64
static void mix2to1_SSE (sample_t * dest, sample_t * src, sample_t bias)
{
__asm__ volatile(
diff --git a/liba52/imdct.c b/liba52/imdct.c
index 325461c6fd..9ad36249d6 100644
--- a/liba52/imdct.c
+++ b/liba52/imdct.c
@@ -54,6 +54,7 @@ void (*a52_imdct_512) (sample_t * data, sample_t * delay, sample_t bias);
#ifdef RUNTIME_CPUDETECT
#undef HAVE_3DNOWEX
+#define HAVE_3DNOWEX 0
#endif
typedef struct complex_s {
@@ -118,7 +119,7 @@ static complex_t __attribute__((aligned(16))) * w[7] = {w_1, w_2, w_4, w_8, w_16
static sample_t __attribute__((aligned(16))) xcos1[128];
static sample_t __attribute__((aligned(16))) xsin1[128];
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if ARCH_X86 || ARCH_X86_64
// NOTE: SSE needs 16byte alignment or it will segfault
//
static float __attribute__((aligned(16))) sseSinCos1c[256];
@@ -365,7 +366,7 @@ void imdct_do_512 (sample_t * data, sample_t * delay, sample_t bias)
}
}
-#ifdef HAVE_ALTIVEC
+#if HAVE_ALTIVEC
#ifdef HAVE_ALTIVEC_H
#include <altivec.h>
@@ -710,10 +711,9 @@ imdct_do_512_altivec(sample_t data[],sample_t delay[], sample_t bias)
// Stuff below this line is borrowed from libac3
#include "srfftp.h"
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
-#ifndef HAVE_3DNOW
+#if ARCH_X86 || ARCH_X86_64
+#undef HAVE_3DNOW
#define HAVE_3DNOW 1
-#endif
#include "srfftp_3dnow.h"
const i_cmplx_t x_plus_minus_3dnow __attribute__ ((aligned (8))) = {{ 0x00000000UL, 0x80000000UL }};
@@ -721,8 +721,10 @@ const i_cmplx_t x_minus_plus_3dnow __attribute__ ((aligned (8))) = {{ 0x80000000
const complex_t HSQRT2_3DNOW __attribute__ ((aligned (8))) = { 0.707106781188, 0.707106781188 };
#undef HAVE_3DNOWEX
+#define HAVE_3DNOWEX 0
#include "imdct_3dnow.h"
-#define HAVE_3DNOWEX
+#undef HAVE_3DNOWEX
+#define HAVE_3DNOWEX 1
#include "imdct_3dnow.h"
void
@@ -1202,7 +1204,7 @@ void a52_imdct_init (uint32_t mm_accel)
w[i][k].imag = sin (-M_PI * k / j);
}
}
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if ARCH_X86 || ARCH_X86_64
for (i = 0; i < 128; i++) {
sseSinCos1c[2*i+0]= xcos1[i];
sseSinCos1c[2*i+1]= -xcos1[i];
@@ -1256,7 +1258,7 @@ void a52_imdct_init (uint32_t mm_accel)
ifft128 = ifft128_c;
ifft64 = ifft64_c;
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if ARCH_X86 || ARCH_X86_64
if(mm_accel & MM_ACCEL_X86_SSE)
{
fprintf (stderr, "Using SSE optimized IMDCT transform\n");
@@ -1276,7 +1278,7 @@ void a52_imdct_init (uint32_t mm_accel)
}
else
#endif // ARCH_X86 || ARCH_X86_64
-#ifdef HAVE_ALTIVEC
+#if HAVE_ALTIVEC
if (mm_accel & MM_ACCEL_PPC_ALTIVEC)
{
fprintf(stderr, "Using AltiVec optimized IMDCT transform\n");
@@ -1285,7 +1287,7 @@ void a52_imdct_init (uint32_t mm_accel)
else
#endif
-#ifdef LIBA52_DJBFFT
+#if LIBA52_DJBFFT
if (mm_accel & MM_ACCEL_DJBFFT) {
fprintf (stderr, "Using djbfft for IMDCT transform\n");
ifft128 = (void (*) (complex_t *)) fftc4_un128;
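
The imdct.c hunks are why the bare #define HAVE_3DNOWEX (no value) had to go: the file force-defines HAVE_3DNOW to 1, then includes imdct_3dnow.h twice, once with HAVE_3DNOWEX set to 0 and once set to 1, so the same header expands into both imdct_do_512_3dnow and imdct_do_512_3dnowex. With value-based tests, each inclusion must see an explicit 0 or 1. A simplified sketch of the pattern, with hypothetical names:

/* --- template.h: no include guard, on purpose -------------------- */
#if HAVE_EXT
# define VARIANT(name) name ## _ext
#else
# define VARIANT(name) name ## _base
#endif

static void VARIANT(transform)(float *data)
{
#if HAVE_EXT
    /* extended-instruction-set body */
#else
    /* plain fallback body */
#endif
    (void)data;
}
#undef VARIANT

/* --- user.c: instantiate both variants --------------------------- */
#undef  HAVE_EXT
#define HAVE_EXT 0
#include "template.h"        /* defines transform_base() */

#undef  HAVE_EXT
#define HAVE_EXT 1
#include "template.h"        /* defines transform_ext()  */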
diff --git a/liba52/imdct_3dnow.h b/liba52/imdct_3dnow.h
index 2291158950..eeab33b27c 100644
--- a/liba52/imdct_3dnow.h
+++ b/liba52/imdct_3dnow.h
@@ -26,7 +26,7 @@
#undef FFT_ASMB16_3DNOW
#undef FFT_128P_3DNOW
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
#define FFT_4_3DNOW fft_4_3dnowex
#define FFT_8_3DNOW fft_8_3dnowex
#define FFT_ASMB_3DNOW fft_asmb_3dnowex
@@ -52,7 +52,7 @@ static void FFT_4_3DNOW(complex_t *x)
"pxor %3, %%mm1\n\t" /* -mm1.re | mm1.im */
"pfadd %%mm1, %%mm3\n\t" /* vi.im = x[3].re - x[1].re; */
"movq %%mm3, %%mm4\n\t" /* vi.re =-x[3].im + x[1].im; mm4 = vi */
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %%mm4, %%mm4\n\t"
#else
"punpckldq %%mm4, %%mm5\n\t"
@@ -129,7 +129,7 @@ static void FFT_8_3DNOW(complex_t *x)
"movq (%1), %%mm1\n\t"
"movq 16(%1), %%mm4\n\t"
"movq %%mm1, %%mm2\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %%mm3, %%mm3\n\t"
#else
"punpckldq %%mm3, %%mm6\n\t"
@@ -160,7 +160,7 @@ static void FFT_8_3DNOW(complex_t *x)
"movq %2, %%mm1\n\t"
"movq 56(%3), %%mm3\n\t"
"pfsub 40(%3), %%mm0\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %%mm1, %%mm1\n\t"
#else
"punpckldq %%mm1, %%mm2\n\t"
@@ -168,7 +168,7 @@ static void FFT_8_3DNOW(complex_t *x)
#endif
"pxor %%mm7, %%mm1\n\t"
"pfadd %%mm1, %%mm0\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %%mm3, %%mm3\n\t"
#else
"punpckldq %%mm3, %%mm2\n\t"
@@ -182,7 +182,7 @@ static void FFT_8_3DNOW(complex_t *x)
"pfmul %4, %%mm0\n\t"
"movq 40(%3), %%mm5\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %%mm5, %%mm5\n\t"
#else
"punpckldq %%mm5, %%mm1\n\t"
@@ -205,7 +205,7 @@ static void FFT_8_3DNOW(complex_t *x)
/* x[3] x[7] */
__asm__ volatile(
"movq %1, %%mm0\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %3, %%mm1\n\t"
#else
"movq %3, %%mm1\n\t"
@@ -218,7 +218,7 @@ static void FFT_8_3DNOW(complex_t *x)
"movq 56(%4), %%mm3\n\t"
"pxor %%mm7, %%mm3\n\t"
"pfadd %%mm3, %%mm2\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %%mm2, %%mm2\n\t"
#else
"punpckldq %%mm2, %%mm5\n\t"
@@ -331,7 +331,7 @@ static void FFT_128P_3DNOW(complex_t *a)
}
static void
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
imdct_do_512_3dnowex
#else
imdct_do_512_3dnow
@@ -371,14 +371,14 @@ imdct_do_512_3dnow
"punpckldq %4, %%mm1\n\t" /* mm1 = xcos[j] | xsin[j] */
"movq %%mm0, %%mm2\n\t"
"pfmul %%mm1, %%mm0\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd %%mm1, %%mm1\n\t"
#else
"punpckldq %%mm1, %%mm5\n\t"
"punpckhdq %%mm5, %%mm1\n\t"
#endif
"pfmul %%mm1, %%mm2\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pfpnacc %%mm2, %%mm0\n\t"
#else
"pxor %%mm7, %%mm0\n\t"
@@ -445,7 +445,7 @@ imdct_do_512_3dnow
__asm__ volatile (
"movq %1, %%mm0\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
"movq %%mm0, %%mm1\n\t" /* ac3_buf[i].re | ac3_buf[i].im */
-#ifndef HAVE_3DNOWEX
+#if !HAVE_3DNOWEX
"punpckldq %%mm1, %%mm2\n\t"
"punpckhdq %%mm2, %%mm1\n\t"
#else
@@ -455,7 +455,7 @@ imdct_do_512_3dnow
"punpckldq %2, %%mm3\n\t" /* ac3_xsin[i] | ac3_xcos[i] */
"pfmul %%mm3, %%mm0\n\t"
"pfmul %%mm3, %%mm1\n\t"
-#ifndef HAVE_3DNOWEX
+#if !HAVE_3DNOWEX
"pxor %%mm7, %%mm0\n\t"
"pfacc %%mm1, %%mm0\n\t"
"punpckldq %%mm0, %%mm1\n\t"
@@ -543,7 +543,7 @@ imdct_do_512_3dnow
"movd (%1), %%mm1\n\t"
"punpckldq (%2), %%mm0\n\t"
"punpckldq 508(%2), %%mm1\n\t"
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
"pswapd (%3), %%mm3\n\t"
"pswapd -512(%3), %%mm4\n\t"
#else
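
Every HAVE_3DNOWEX site in this header makes the same choice: extended 3DNow! (Athlon) has pswapd, which swaps the two 32-bit halves of an MMX register in one instruction, while plain 3DNow! (K6-2) needs a two-instruction unpack sequence through a scratch register. A sketch of the two forms as an asm-fragment macro; SWAP_MM and the operand names are illustrative:

#if HAVE_3DNOWEX
# define SWAP_MM(mm_base, mm_hlp) \
    "pswapd "mm_base","mm_base"\n\t"     /* one-op swap on 3DNow!ext */
#else
# define SWAP_MM(mm_base, mm_hlp) \
    "punpckldq "mm_base","mm_hlp"\n\t"   /* hlp.hi = base.lo         */ \
    "punpckhdq "mm_hlp","mm_base"\n\t"   /* base   = halves swapped  */
#endif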
diff --git a/liba52/resample.c b/liba52/resample.c
index cd87f2050c..7284f567f7 100644
--- a/liba52/resample.c
+++ b/liba52/resample.c
@@ -38,18 +38,18 @@ int (* a52_resample) (float * _f, int16_t * s16)=NULL;
#include "resample_c.c"
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if ARCH_X86 || ARCH_X86_64
#include "resample_mmx.c"
#endif
-#ifdef HAVE_ALTIVEC
+#if HAVE_ALTIVEC
#include "resample_altivec.c"
#endif
void* a52_resample_init(uint32_t mm_accel,int flags,int chans){
void* tmp;
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if ARCH_X86 || ARCH_X86_64
if(mm_accel&MM_ACCEL_X86_MMX){
tmp=a52_resample_MMX(flags,chans);
if(tmp){
@@ -59,7 +59,7 @@ void* tmp;
}
}
#endif
-#ifdef HAVE_ALTIVEC
+#if HAVE_ALTIVEC
if(mm_accel&MM_ACCEL_PPC_ALTIVEC){
tmp=a52_resample_altivec(flags,chans);
if(tmp){
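
resample.c dispatches through the a52_resample function pointer: a52_resample_init() probes the accelerated back ends in order and falls back to the C version, so per-sample calls pay no feature-detection cost. A reconstructed sketch of the selection logic, with the AltiVec branch and the diagnostic prints omitted and a52_resample_C assumed to come from resample_c.c:

void *a52_resample_init(uint32_t mm_accel, int flags, int chans)
{
    void *tmp;
#if ARCH_X86 || ARCH_X86_64
    if (mm_accel & MM_ACCEL_X86_MMX) {
        tmp = a52_resample_MMX(flags, chans);
        if (tmp) {                       /* MMX routine handles this layout */
            a52_resample = tmp;
            return tmp;
        }
    }
#endif
    tmp = a52_resample_C(flags, chans);  /* portable fallback */
    if (tmp)
        a52_resample = tmp;
    return tmp;
}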
diff --git a/liba52/resample_mmx.c b/liba52/resample_mmx.c
index 9a37fd4ad3..782d9cd3f0 100644
--- a/liba52/resample_mmx.c
+++ b/liba52/resample_mmx.c
@@ -76,7 +76,7 @@ static int a52_resample_MONO_to_5_MMX(float * _f, int16_t * s16){
static int a52_resample_STEREO_to_2_MMX(float * _f, int16_t * s16){
int32_t * f = (int32_t *) _f;
/* benchmark scores are 0.3% better with SSE but we would need to set bias=0 and premultiply it
-#ifdef HAVE_SSE
+#if HAVE_SSE
__asm__ volatile(
"mov $-1024, %%"REG_S" \n\t"
"1: \n\t"
diff --git a/liba52/srfftp_3dnow.h b/liba52/srfftp_3dnow.h
index 9f26c59b43..1d66c5b89c 100644
--- a/liba52/srfftp_3dnow.h
+++ b/liba52/srfftp_3dnow.h
@@ -46,7 +46,7 @@ typedef struct
"m"(x_minus_plus_3dnow)\
:"memory");
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
#define PSWAP_MM(mm_base,mm_hlp) "pswapd "mm_base","mm_base"\n\t"
#else
#define PSWAP_MM(mm_base,mm_hlp)\
@@ -54,7 +54,7 @@ typedef struct
"psrlq $32, "mm_base"\n\t"\
"punpckldq "mm_hlp","mm_base"\n\t"
#endif
-#ifdef HAVE_3DNOWEX
+#if HAVE_3DNOWEX
#define PFNACC_MM(mm_base,mm_hlp) "pfnacc "mm_base","mm_base"\n\t"
#else
#define PFNACC_MM(mm_base,mm_hlp)\