author     michael <michael@b3059339-0415-0410-9bf9-f77b7e298cf2>  2004-04-06 00:25:47 +0000
committer  michael <michael@b3059339-0415-0410-9bf9-f77b7e298cf2>  2004-04-06 00:25:47 +0000
commit     834569d109748c88bb3c725056599733a760afa4 (patch)
tree       181d150675d00db5ca39f71d4664c646fd075eb3 /postproc
parent     99b14d0fa38cecdc41cdd2fd3e007a84e0314a57 (diff)
download   mpv-834569d109748c88bb3c725056599733a760afa4.tar.bz2
           mpv-834569d109748c88bb3c725056599733a760afa4.tar.xz
AltiVec hScale for all filter sizes, patch by (Romain Dolbeau <dolbeaur at club-internet dot fr>)
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@12131 b3059339-0415-0410-9bf9-f77b7e298cf2
Diffstat (limited to 'postproc')
-rw-r--r--  postproc/swscale.c                    42
-rw-r--r--  postproc/swscale_altivec_template.c  186
-rw-r--r--  postproc/swscale_template.c            4
3 files changed, 222 insertions(+), 10 deletions(-)
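
For reference (not part of the patch): the horizontal scaler being vectorized here computes, for each destination pixel, a dot product of filterSize unsigned 8-bit source samples with signed 16-bit filter coefficients, then drops 7 fractional bits and clips the result to 15 bits. A minimal scalar sketch of that operation, equivalent to the existing C fallback in swscale_template.c (the function name and const qualifiers are illustrative, not taken from the patch):

    static void hScale_scalar_sketch(int16_t *dst, int dstW, const uint8_t *src,
                                     const int16_t *filter, const int16_t *filterPos,
                                     int filterSize)
    {
        int i, j;
        for (i = 0; i < dstW; i++) {
            const int srcPos = filterPos[i];   /* first source sample for pixel i */
            int val = 0;
            for (j = 0; j < filterSize; j++)   /* dot product with the i-th filter row */
                val += (int)src[srcPos + j] * filter[filterSize * i + j];
            val >>= 7;                         /* drop 7 fractional bits */
            if (val < 0) val = 0;              /* clip to [0, 2^15 - 1], */
            if (val > (1 << 15) - 1)           /* as the AltiVec paths below do */
                val = (1 << 15) - 1;
            dst[i] = val;
        }
    }

The AltiVec code below implements exactly this accumulation with vec_mule/vec_msums, with dedicated paths for filterSize 4, 8, 16 and a generic multiple-of-8 loop.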
diff --git a/postproc/swscale.c b/postproc/swscale.c
index 38284a19f5..dd10521582 100644
--- a/postproc/swscale.c
+++ b/postproc/swscale.c
@@ -1041,6 +1041,21 @@ static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *out
if(min>minFilterSize) minFilterSize= min;
}
+ if (flags & SWS_CPU_CAPS_ALTIVEC) {
+ // we can handle the special case 4,
+ // so we don't want to go to the full 8
+ if (minFilterSize < 5)
+ filterAlign = 4;
+
+ // we really don't want to waste our time
+ // doing useless computation, so fall back to
+ // the scalar C code for very small filters.
+ // vectorizing is worth it only if you have
+ // a decent-sized vector.
+ if (minFilterSize < 3)
+ filterAlign = 1;
+ }
+
ASSERT(minFilterSize > 0)
filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
ASSERT(filterSize > 0)
@@ -1947,7 +1962,10 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
/* precalculate horizontal scaler filter coefficients */
{
- const int filterAlign= (flags & SWS_CPU_CAPS_MMX) ? 4 : 1;
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_MMX) ? 4 :
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
srcW , dstW, filterAlign, 1<<14,
@@ -1976,14 +1994,20 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
/* precalculate vertical scaler filter coefficients */
- initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
- srcH , dstH, 1, (1<<12)-4,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
- srcFilter->lumV, dstFilter->lumV);
- initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
- c->chrSrcH, c->chrDstH, 1, (1<<12)-4,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
- srcFilter->chrV, dstFilter->chrV);
+ {
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
+
+ initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
+ srcH , dstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
+ srcFilter->lumV, dstFilter->lumV);
+ initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
+ c->chrSrcH, c->chrDstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+ srcFilter->chrV, dstFilter->chrV);
+ }
// Calculate Buffer Sizes so that they won't run out while handling these damn slices
c->vLumBufSize= c->vLumFilterSize;
diff --git a/postproc/swscale_altivec_template.c b/postproc/swscale_altivec_template.c
index 0f6a3cda34..5cd70683a5 100644
--- a/postproc/swscale_altivec_template.c
+++ b/postproc/swscale_altivec_template.c
@@ -20,7 +20,19 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-static const vector unsigned int altivec_vectorShiftInt19 = {19, 19, 19, 19};
+#ifdef CONFIG_DARWIN
+static const vector signed int vzero =
+ (vector signed int)(0, 0, 0, 0);
+static const vector unsigned int altivec_vectorShiftInt19 =
+ (vector unsigned int)(19, 19, 19, 19);
+#else
+static const vector signed int vzero =
+ (vector signed int){0,0,0,0};
+static const vector unsigned int altivec_vectorShiftInt19 =
+ (vector unsigned int){19, 19, 19, 19};
+
+#endif
+
static inline void
altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
register int i;
@@ -201,3 +213,175 @@ yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
altivec_packIntArrayToCharArray(v,vDest,chrDstW);
}
}
+
+static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) {
+ register int i;
+ int __attribute__ ((aligned (16))) tempo[4];
+
+ if (filterSize % 4) {
+ for(i=0; i<dstW; i++) {
+ register int j;
+ register int srcPos = filterPos[i];
+ register int val = 0;
+ for(j=0; j<filterSize; j++) {
+ val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+ }
+ dst[i] = MIN(MAX(0, val>>7), (1<<15)-1);
+ }
+ }
+ else
+ switch (filterSize) {
+ case 4:
+ {
+ for(i=0; i<dstW; i++) {
+ register int j;
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1;
+ if ((((int)src + srcPos)% 16) > 12) {
+ src_v1 = vec_ld(srcPos + 16, src);
+ }
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ vector signed short src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ // now put our elements in the even slots
+ src_v = vec_mergeh(src_v, (vector signed short)vzero);
+
+ vector signed short filter_v = vec_ld(i << 3, filter);
+ // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2)
+
+ // the neat trick: we only care about half the elements,
+ // high or low depending on (i<<3)%16 (it's 0 or 8 here),
+ // and we're going to use vec_mule, so we choose
+ // carefully how to "unpack" the elements into the even slots
+ if ((i << 3) % 16)
+ filter_v = vec_mergel(filter_v,(vector signed short)vzero);
+ else
+ filter_v = vec_mergeh(filter_v,(vector signed short)vzero);
+
+ vector signed int val_vEven = vec_mule(src_v, filter_v);
+ vector signed int val_s = vec_sums(val_vEven, vzero);
+ vec_st(val_s, 0, tempo);
+ dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+ }
+ break;
+
+ case 8:
+ {
+ for(i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1;
+ if ((((int)src + srcPos)% 16) > 8) {
+ src_v1 = vec_ld(srcPos + 16, src);
+ }
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ vector signed short src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ vector signed short filter_v = vec_ld(i << 4, filter);
+ // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
+
+ vector signed int val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
+ vector signed int val_s = vec_sums(val_v, vzero);
+ vec_st(val_s, 0, tempo);
+ dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+ }
+ break;
+
+ case 16:
+ {
+ for(i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ vector signed short src_vA = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ vector signed short src_vB = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+ vector signed short filter_v0 = vec_ld(i << 5, filter);
+ vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
+ // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
+
+ vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
+ vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+ vector signed int val_s = vec_sums(val_v, vzero);
+
+ vec_st(val_s, 0, tempo);
+ dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+ }
+ break;
+
+ default:
+ {
+ for(i=0; i<dstW; i++) {
+ register int j;
+ register int srcPos = filterPos[i];
+
+ vector signed int val_v = (vector signed int)vzero;
+ vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
+ vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char permS = vec_lvsl(srcPos, src);
+
+ for (j = 0 ; j < filterSize - 15; j += 16) {
+ vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+ vector signed short src_vA = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ vector signed short src_vB = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+ vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+ vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
+ vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
+ vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
+
+ vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
+ val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+ filter_v0R = filter_v2R;
+ src_v0 = src_v1;
+ }
+
+ if (j < (filterSize-7)) {
+ // loading src_v0 is useless, it's already done above
+ //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
+ vector unsigned char src_v1;
+ if ((((int)src + srcPos)% 16) > 8) {
+ src_v1 = vec_ld(srcPos + j + 16, src);
+ }
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+ vector signed short src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ // loading filter_v0R is useless, it's already done above
+ //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
+ vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+ vector signed short filter_v = vec_perm(filter_v0R, filter_v1R, permF);
+
+ val_v = vec_msums(src_v, filter_v, val_v);
+ }
+
+ vector signed int val_s = vec_sums(val_v, vzero);
+
+ vec_st(val_s, 0, tempo);
+ dst[i] = MIN(MAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+
+ }
+ }
+}
diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c
index 6a8117f2fb..2d266f5655 100644
--- a/postproc/swscale_template.c
+++ b/postproc/swscale_template.c
@@ -2154,6 +2154,9 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
);
}
#else
+#ifdef HAVE_ALTIVEC
+ hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
+#else
int i;
for(i=0; i<dstW; i++)
{
@@ -2171,6 +2174,7 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
// dst[i] = val>>7;
}
#endif
+#endif
}
// *** horizontal scale Y line to temp buffer
static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,