author     sdrik <sdrik@b3059339-0415-0410-9bf9-f77b7e298cf2>   2009-03-20 14:15:14 +0000
committer  sdrik <sdrik@b3059339-0415-0410-9bf9-f77b7e298cf2>   2009-03-20 14:15:14 +0000
commit     57fda37dbb84b6858b90e99a7d4ec9314d134fc7 (patch)
tree       1b21f420439b066e62d7b3687d700d585a3e6c4c
parent     6667f198c24c8cd20269c95e72210f73f93cf546 (diff)
Add alpha channel scaling
git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@29011 b3059339-0415-0410-9bf9-f77b7e298cf2
-rw-r--r--  libswscale/swscale.c            238
-rw-r--r--  libswscale/swscale_internal.h     5
-rw-r--r--  libswscale/swscale_template.c   256
3 files changed, 411 insertions(+), 88 deletions(-)
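What the patch does, in short: an optional alpha plane is threaded through the vertical scalers (alpSrc/abuf*/aDest and the new c->alpPixBuf ring of line buffers), so that when both source and destination formats carry alpha the channel is scaled like luma instead of being dropped or forced to opaque. A minimal caller-level sketch, assuming a libswscale build from around this revision with CONFIG_SWSCALE_ALPHA enabled; the dimensions, the SWS_BILINEAR flag and the function name are illustrative, not from the patch:

#include <stddef.h>
#include <stdint.h>
#include <libswscale/swscale.h>

/* Scale a packed RGBA image to a new size; with this revision and
 * CONFIG_SWSCALE_ALPHA enabled, the alpha channel is scaled along with the
 * colour channels instead of being dropped.  Error handling is minimal. */
int scale_rgba_frame(uint8_t *src, int srcW, int srcH,
                     uint8_t *dst, int dstW, int dstH)
{
    struct SwsContext *sws = sws_getContext(srcW, srcH, PIX_FMT_RGBA,
                                            dstW, dstH, PIX_FMT_RGBA,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    uint8_t *srcSlice[4] = { src, NULL, NULL, NULL };
    uint8_t *dstSlice[4] = { dst, NULL, NULL, NULL };
    int srcStride[4]     = { 4 * srcW, 0, 0, 0 };   /* packed RGBA: 4 bytes/pixel */
    int dstStride[4]     = { 4 * dstW, 0, 0, 0 };

    if (!sws)
        return -1;
    sws_scale(sws, srcSlice, srcStride, 0, srcH, dstSlice, dstStride);
    sws_freeContext(sws);
    return 0;
}

The context only allocates the alpha ring when isALPHA() holds for both source and destination format (see the sws_getContext hunk further down), so a conversion such as RGBA to RGB24 behaves as before.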
diff --git a/libswscale/swscale.c b/libswscale/swscale.c
index 17fe3c6fa7..d0cf6534e4 100644
--- a/libswscale/swscale.c
+++ b/libswscale/swscale.c
@@ -473,7 +473,7 @@ const char *sws_format_name(enum PixelFormat format)
static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+ int16_t **alpSrc, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, int dstW, int chrDstW)
{
//FIXME Optimize (just quickly written not optimized..)
int i;
@@ -502,6 +502,17 @@ static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilt
uDest[i]= av_clip_uint8(u>>19);
vDest[i]= av_clip_uint8(v>>19);
}
+
+ if (CONFIG_SWSCALE_ALPHA && aDest)
+ for (i=0; i<dstW; i++){
+ int val=1<<18;
+ int j;
+ for (j=0; j<lumFilterSize; j++)
+ val += alpSrc[j][i] * lumFilter[j];
+
+ aDest[i]= av_clip_uint8(val>>19);
+ }
+
}
static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
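The loop added to yuv2yuvXinC is the luma vertical filter applied verbatim to the alpha line buffers: a 1<<18 rounding bias, a multiply-accumulate over lumFilter, a >>19 and a clip to 8 bits. A standalone sketch of that step, with helper names of my own choosing:

#include <stdint.h>

/* Sketch of the vertical filtering the patch adds for the alpha plane:
 * each output sample is a weighted sum of the alpha intermediates of
 * lumFilterSize source lines, with 1<<18 as the rounding bias for the
 * >>19 that follows, then clipped to 8 bits. */
static uint8_t clip_uint8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

static void filter_alpha_line(const int16_t *const *alp_src, const int16_t *filter,
                              int filter_size, uint8_t *a_dest, int dst_w)
{
    for (int i = 0; i < dst_w; i++) {
        int val = 1 << 18;
        for (int j = 0; j < filter_size; j++)
            val += alp_src[j][i] * filter[j];
        a_dest[i] = clip_uint8(val >> 19);
    }
}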
@@ -555,13 +566,14 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
}
}
-#define YSCALE_YUV_2_PACKEDX_NOCLIP_C(type) \
+#define YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha) \
for (i=0; i<(dstW>>1); i++){\
int j;\
int Y1 = 1<<18;\
int Y2 = 1<<18;\
int U = 1<<18;\
int V = 1<<18;\
+ int av_unused A1, A2;\
type av_unused *r, *b, *g;\
const int i2= 2*i;\
\
@@ -579,9 +591,19 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
Y2>>=19;\
U >>=19;\
V >>=19;\
+ if (alpha){\
+ A1 = 1<<18;\
+ A2 = 1<<18;\
+ for (j=0; j<lumFilterSize; j++){\
+ A1 += alpSrc[j][i2 ] * lumFilter[j];\
+ A2 += alpSrc[j][i2+1] * lumFilter[j];\
+ }\
+ A1>>=19;\
+ A2>>=19;\
+ }\
-#define YSCALE_YUV_2_PACKEDX_C(type) \
- YSCALE_YUV_2_PACKEDX_NOCLIP_C(type)\
+#define YSCALE_YUV_2_PACKEDX_C(type,alpha) \
+ YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha)\
if ((Y1|Y2|U|V)&256)\
{\
if (Y1>255) Y1=255; \
@@ -592,14 +614,19 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
else if (U<0) U=0; \
if (V>255) V=255; \
else if (V<0) V=0; \
+ }\
+ if (alpha && ((A1|A2)&256)){\
+ A1=av_clip_uint8(A1);\
+ A2=av_clip_uint8(A2);\
}
-#define YSCALE_YUV_2_PACKEDX_FULL_C \
+#define YSCALE_YUV_2_PACKEDX_FULL_C(rnd,alpha) \
for (i=0; i<dstW; i++){\
int j;\
int Y = 0;\
int U = -128<<19;\
int V = -128<<19;\
+ int av_unused A;\
int R,G,B;\
\
for (j=0; j<lumFilterSize; j++){\
@@ -612,9 +639,17 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
Y >>=10;\
U >>=10;\
V >>=10;\
+ if (alpha){\
+ A = rnd;\
+ for (j=0; j<lumFilterSize; j++)\
+ A += alpSrc[j][i ] * lumFilter[j];\
+ A >>=19;\
+ if (A&256)\
+ A = av_clip_uint8(A);\
+ }\
-#define YSCALE_YUV_2_RGBX_FULL_C(rnd) \
- YSCALE_YUV_2_PACKEDX_FULL_C\
+#define YSCALE_YUV_2_RGBX_FULL_C(rnd,alpha) \
+ YSCALE_YUV_2_PACKEDX_FULL_C(rnd>>3,alpha)\
Y-= c->yuv2rgb_y_offset;\
Y*= c->yuv2rgb_y_coeff;\
Y+= rnd;\
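The alpha clipping added above, both the (A1|A2)&256 test in YSCALE_YUV_2_PACKEDX_C and the A&256 test in the FULL_C variant, reuses the trick already applied to Y/U/V: after the >>19 an in-range value has bit 8 clear, so the saturating clip only runs when the vertical filter actually over- or undershoots. A small sketch of the test, with my own helper names:

#include <stdint.h>

/* A value that stayed inside 0..255 after the >>19 has bit 8 clear, while
 * small overshoots (256..511) and small undershoots (-1..-256) set it, so
 * the clip is skipped in the common case. */
static int clip_uint8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

static void clip_pair_if_needed(int *a1, int *a2)
{
    if ((*a1 | *a2) & 256) {        /* at least one value left 0..255 */
        *a1 = clip_uint8(*a1);
        *a2 = clip_uint8(*a2);
    }
}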
@@ -656,19 +691,25 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
else if (Y2<0)Y2=0; \
}
-#define YSCALE_YUV_2_RGBX_C(type) \
- YSCALE_YUV_2_PACKEDX_C(type) /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/\
+#define YSCALE_YUV_2_RGBX_C(type,alpha) \
+ YSCALE_YUV_2_PACKEDX_C(type,alpha) /* FIXME fix tables so that clipping is not needed and then use _NOCLIP*/\
r = (type *)c->table_rV[V]; \
g = (type *)(c->table_gU[U] + c->table_gV[V]); \
b = (type *)c->table_bU[U]; \
-#define YSCALE_YUV_2_PACKED2_C \
+#define YSCALE_YUV_2_PACKED2_C(type,alpha) \
for (i=0; i<(dstW>>1); i++){ \
const int i2= 2*i; \
int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19; \
int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19; \
int U= (uvbuf0[i ]*uvalpha1+uvbuf1[i ]*uvalpha)>>19; \
int V= (uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19; \
+ type av_unused *r, *b, *g; \
+ int av_unused A1, A2; \
+ if (alpha){\
+ A1= (abuf0[i2 ]*yalpha1+abuf1[i2 ]*yalpha)>>19; \
+ A2= (abuf0[i2+1]*yalpha1+abuf1[i2+1]*yalpha)>>19; \
+ }\
#define YSCALE_YUV_2_GRAY16_2_C \
for (i=0; i<(dstW>>1); i++){ \
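The YSCALE_YUV_2_PACKED2_C change above blends the alpha intermediates of two source lines, exactly as already done for Y, U and V in the bilinear path. A sketch of that blend in isolation, with illustrative names:

#include <stdint.h>

/* abuf0/abuf1 hold the alpha intermediates of the source lines above and
 * below the output line, yalpha is the 12-bit fractional position between
 * them and yalpha1 its complement (4095 - yalpha); the >>19 brings the
 * 15-bit-sample * 12-bit-weight product back to 8 bits.  The intermediates
 * are assumed in range, so no clip is needed, matching the macro. */
static void blend_alpha_lines(const int16_t *abuf0, const int16_t *abuf1,
                              int yalpha, uint8_t *dst, int dst_w)
{
    int yalpha1 = 4095 - yalpha;
    for (int i = 0; i < dst_w; i++)
        dst[i] = (uint8_t)((abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19);
}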
@@ -676,20 +717,25 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>11; \
int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>11; \
-#define YSCALE_YUV_2_RGB2_C(type) \
- YSCALE_YUV_2_PACKED2_C\
- type *r, *b, *g;\
+#define YSCALE_YUV_2_RGB2_C(type,alpha) \
+ YSCALE_YUV_2_PACKED2_C(type,alpha)\
r = (type *)c->table_rV[V];\
g = (type *)(c->table_gU[U] + c->table_gV[V]);\
b = (type *)c->table_bU[U];\
-#define YSCALE_YUV_2_PACKED1_C \
+#define YSCALE_YUV_2_PACKED1_C(type,alpha) \
for (i=0; i<(dstW>>1); i++){\
const int i2= 2*i;\
int Y1= buf0[i2 ]>>7;\
int Y2= buf0[i2+1]>>7;\
int U= (uvbuf1[i ])>>7;\
int V= (uvbuf1[i+VOFW])>>7;\
+ type av_unused *r, *b, *g;\
+ int av_unused A1, A2;\
+ if (alpha){\
+ A1= abuf0[i2 ]>>7;\
+ A2= abuf0[i2+1]>>7;\
+ }\
#define YSCALE_YUV_2_GRAY16_1_C \
for (i=0; i<(dstW>>1); i++){\
@@ -697,24 +743,28 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
int Y1= buf0[i2 ]<<1;\
int Y2= buf0[i2+1]<<1;\
-#define YSCALE_YUV_2_RGB1_C(type) \
- YSCALE_YUV_2_PACKED1_C\
- type *r, *b, *g;\
+#define YSCALE_YUV_2_RGB1_C(type,alpha) \
+ YSCALE_YUV_2_PACKED1_C(type,alpha)\
r = (type *)c->table_rV[V];\
g = (type *)(c->table_gU[U] + c->table_gV[V]);\
b = (type *)c->table_bU[U];\
-#define YSCALE_YUV_2_PACKED1B_C \
+#define YSCALE_YUV_2_PACKED1B_C(type,alpha) \
for (i=0; i<(dstW>>1); i++){\
const int i2= 2*i;\
int Y1= buf0[i2 ]>>7;\
int Y2= buf0[i2+1]>>7;\
int U= (uvbuf0[i ] + uvbuf1[i ])>>8;\
int V= (uvbuf0[i+VOFW] + uvbuf1[i+VOFW])>>8;\
+ type av_unused *r, *b, *g;\
+ int av_unused A1, A2;\
+ if (alpha){\
+ A1= abuf0[i2 ]>>7;\
+ A2= abuf0[i2+1]>>7;\
+ }\
-#define YSCALE_YUV_2_RGB1B_C(type) \
- YSCALE_YUV_2_PACKED1B_C\
- type *r, *b, *g;\
+#define YSCALE_YUV_2_RGB1B_C(type,alpha) \
+ YSCALE_YUV_2_PACKED1B_C(type,alpha)\
r = (type *)c->table_rV[V];\
g = (type *)(c->table_gU[U] + c->table_gV[V]);\
b = (type *)c->table_bU[U];\
@@ -772,17 +822,52 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
#define YSCALE_YUV_2_ANYRGB_C(func, func2, func_g16, func_monoblack)\
switch(c->dstFormat)\
{\
- case PIX_FMT_RGB32:\
- case PIX_FMT_BGR32:\
- case PIX_FMT_RGB32_1:\
- case PIX_FMT_BGR32_1:\
- func(uint32_t)\
- ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
- ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+ case PIX_FMT_RGBA:\
+ case PIX_FMT_BGRA:\
+ if (CONFIG_SMALL){\
+ int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
+ func(uint32_t,needAlpha)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? (A1<<24) : 0);\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? (A2<<24) : 0);\
+ }\
+ }else{\
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){\
+ func(uint32_t,1)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (A1<<24);\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (A2<<24);\
+ }\
+ }else{\
+ func(uint32_t,0)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+ }\
+ }\
+ }\
+ break;\
+ case PIX_FMT_ARGB:\
+ case PIX_FMT_ABGR:\
+ if (CONFIG_SMALL){\
+ int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
+ func(uint32_t,needAlpha)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? A1 : 0);\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? A2 : 0);\
+ }\
+ }else{\
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){\
+ func(uint32_t,1)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + A1;\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + A2;\
+ }\
+ }else{\
+ func(uint32_t,0)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+ }\
+ }\
} \
break;\
case PIX_FMT_RGB24:\
- func(uint8_t)\
+ func(uint8_t,0)\
((uint8_t*)dest)[0]= r[Y1];\
((uint8_t*)dest)[1]= g[Y1];\
((uint8_t*)dest)[2]= b[Y1];\
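In the 32-bit cases above, the colour tables (c->table_rV/gU/bU) already place R, G and B in their own byte lanes of the uint32_t, so alpha only has to be added into the remaining lane: the top byte for the RGBA/BGRA cases, the bottom byte for ARGB/ABGR. A sketch of just that packing step, assuming the lanes line up as on a little-endian host and using names of my own:

#include <stdint.h>

/* rgb_lanes is the value r[Y] + g[Y] + b[Y] already assembled by the table
 * lookups; a is the scaled alpha sample (0..255). */
static uint32_t pack_alpha_high(uint32_t rgb_lanes, unsigned a)
{
    return rgb_lanes + ((uint32_t)a << 24);   /* PIX_FMT_RGBA / PIX_FMT_BGRA */
}

static uint32_t pack_alpha_low(uint32_t rgb_lanes, unsigned a)
{
    return rgb_lanes + a;                     /* PIX_FMT_ARGB / PIX_FMT_ABGR */
}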
@@ -793,7 +878,7 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
}\
break;\
case PIX_FMT_BGR24:\
- func(uint8_t)\
+ func(uint8_t,0)\
((uint8_t*)dest)[0]= b[Y1];\
((uint8_t*)dest)[1]= g[Y1];\
((uint8_t*)dest)[2]= r[Y1];\
@@ -812,7 +897,7 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
const int dr2= dither_2x2_8[y&1 ][1];\
const int dg2= dither_2x2_4[y&1 ][1];\
const int db2= dither_2x2_8[(y&1)^1][1];\
- func(uint16_t)\
+ func(uint16_t,0)\
((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
}\
@@ -827,7 +912,7 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
const int dr2= dither_2x2_8[y&1 ][1];\
const int dg2= dither_2x2_8[y&1 ][0];\
const int db2= dither_2x2_8[(y&1)^1][1];\
- func(uint16_t)\
+ func(uint16_t,0)\
((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
}\
@@ -838,7 +923,7 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
{\
const uint8_t * const d64= dither_8x8_73[y&7];\
const uint8_t * const d32= dither_8x8_32[y&7];\
- func(uint8_t)\
+ func(uint8_t,0)\
((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\
((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\
}\
@@ -849,7 +934,7 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
{\
const uint8_t * const d64= dither_8x8_73 [y&7];\
const uint8_t * const d128=dither_8x8_220[y&7];\
- func(uint8_t)\
+ func(uint8_t,0)\
((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\
+ ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\
}\
@@ -860,7 +945,7 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
{\
const uint8_t * const d64= dither_8x8_73 [y&7];\
const uint8_t * const d128=dither_8x8_220[y&7];\
- func(uint8_t)\
+ func(uint8_t,0)\
((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\
((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\
}\
@@ -909,15 +994,15 @@ static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFil
static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, int dstW, int y)
+ int16_t **alpSrc, uint8_t *dest, int dstW, int y)
{
int i;
- YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGBX_C, YSCALE_YUV_2_PACKEDX_C(void), YSCALE_YUV_2_GRAY16_C, YSCALE_YUV_2_MONOX_C)
+ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGBX_C, YSCALE_YUV_2_PACKEDX_C(void,0), YSCALE_YUV_2_GRAY16_C, YSCALE_YUV_2_MONOX_C)
}
static inline void yuv2rgbXinC_full(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, int dstW, int y)
+ int16_t **alpSrc, uint8_t *dest, int dstW, int y)
{
int i;
int step= fmt_depth(c->dstFormat)/8;
@@ -930,12 +1015,33 @@ static inline void yuv2rgbXinC_full(SwsContext *c, int16_t *lumFilter, int16_t *
case PIX_FMT_RGB24:
aidx--;
case PIX_FMT_RGBA:
- YSCALE_YUV_2_RGBX_FULL_C(1<<21)
- dest[aidx]= 255;
- dest[0]= R>>22;
- dest[1]= G>>22;
- dest[2]= B>>22;
- dest+= step;
+ if (CONFIG_SMALL){
+ int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
+ YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
+ dest[aidx]= needAlpha ? A : 255;
+ dest[0]= R>>22;
+ dest[1]= G>>22;
+ dest[2]= B>>22;
+ dest+= step;
+ }
+ }else{
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
+ dest[aidx]= A;
+ dest[0]= R>>22;
+ dest[1]= G>>22;
+ dest[2]= B>>22;
+ dest+= step;
+ }
+ }else{
+ YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
+ dest[aidx]= 255;
+ dest[0]= R>>22;
+ dest[1]= G>>22;
+ dest[2]= B>>22;
+ dest+= step;
+ }
+ }
}
break;
case PIX_FMT_ABGR:
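The duplicated blocks above implement a size/speed trade-off: under CONFIG_SMALL a single instantiation of the inner loop tests needAlpha at run time, while otherwise the macro is expanded twice with a literal 1 or 0 so the compiler drops the dead alpha code from each specialised loop. A sketch of the pattern with illustrative names:

#include <stddef.h>
#include <stdint.h>

/* Passing a literal 0 or 1 lets the compiler delete the untaken branch
 * entirely; passing a runtime flag keeps a single copy of the loop. */
#define WRITE_PIXELS(alpha)                            \
    for (int i = 0; i < n; i++) {                      \
        dst[i] = base[i];                              \
        if (alpha)                                     \
            dst[i] |= (uint32_t)asrc[i] << 24;         \
    }

static void write_line(uint32_t *dst, const uint32_t *base,
                       const uint8_t *asrc, int n, int config_small)
{
    if (config_small) {            /* one loop, runtime test: smallest code */
        int need_alpha = asrc != NULL;
        WRITE_PIXELS(need_alpha)
    } else if (asrc) {             /* two specialised loops: fastest code */
        WRITE_PIXELS(1)
    } else {
        WRITE_PIXELS(0)
    }
}

The same three-way structure is repeated for the BGR ordering in the next hunk and in the packed writers earlier in the file.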
@@ -944,12 +1050,33 @@ static inline void yuv2rgbXinC_full(SwsContext *c, int16_t *lumFilter, int16_t *
case PIX_FMT_BGR24:
aidx--;
case PIX_FMT_BGRA:
- YSCALE_YUV_2_RGBX_FULL_C(1<<21)
- dest[aidx]= 255;
- dest[0]= B>>22;
- dest[1]= G>>22;
- dest[2]= R>>22;
- dest+= step;
+ if (CONFIG_SMALL){
+ int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
+ YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
+ dest[aidx]= needAlpha ? A : 255;
+ dest[0]= B>>22;
+ dest[1]= G>>22;
+ dest[2]= R>>22;
+ dest+= step;
+ }
+ }else{
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
+ dest[aidx]= A;
+ dest[0]= B>>22;
+ dest[1]= G>>22;
+ dest[2]= R>>22;
+ dest+= step;
+ }
+ }else{
+ YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
+ dest[aidx]= 255;
+ dest[0]= B>>22;
+ dest[1]= G>>22;
+ dest[2]= R>>22;
+ dest+= step;
+ }
+ }
}
break;
default:
@@ -2644,12 +2771,17 @@ SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int d
// allocate pixbufs (we use dynamic allocation because otherwise we would need to
c->lumPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
c->chrPixBuf= av_malloc(c->vChrBufSize*2*sizeof(int16_t*));
+ if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat))
+ c->alpPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
//Note we need at least one pixel more at the end because of the MMX code (just in case someone wanna replace the 4000/8000)
/* align at 16 bytes for AltiVec */
for (i=0; i<c->vLumBufSize; i++)
c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= av_mallocz(VOF+1);
for (i=0; i<c->vChrBufSize; i++)
c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= av_malloc((VOF+1)*2);
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
+ for (i=0; i<c->vLumBufSize; i++)
+ c->alpPixBuf[i]= c->alpPixBuf[i+c->vLumBufSize]= av_mallocz(VOF+1);
//try to avoid drawing green stuff between the right end and the stride end
for (i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, (VOF+1)*2);
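The new alpPixBuf ring copies the existing lumPixBuf idiom: 2*vLumBufSize pointer slots are allocated and slot i+vLumBufSize aliases slot i, so the vertical filter can read consecutive slots without a wrap-around test. A sketch of that allocation pattern; the names and the plain malloc/calloc calls are mine, the real code uses av_malloc/av_mallocz and a line of VOF+1 bytes, and error handling is elided:

#include <stdint.h>
#include <stdlib.h>

static int16_t **alloc_line_ring(int size, size_t line_bytes)
{
    int16_t **ring = malloc(2 * size * sizeof(*ring));
    for (int i = 0; i < size; i++) {
        ring[i]        = calloc(1, line_bytes);   /* zeroed, like av_mallocz */
        ring[i + size] = ring[i];                 /* aliased second half */
    }
    return ring;
}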
@@ -3200,6 +3332,12 @@ void sws_freeContext(SwsContext *c){
av_freep(&c->chrPixBuf);
}
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ for (i=0; i<c->vLumBufSize; i++)
+ av_freep(&c->alpPixBuf[i]);
+ av_freep(&c->alpPixBuf);
+ }
+
av_freep(&c->vLumFilter);
av_freep(&c->vChrFilter);
av_freep(&c->hLumFilter);
diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
index ca1528e0f1..7e385da93f 100644
--- a/libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@ -87,6 +87,7 @@ typedef struct SwsContext{
int16_t **lumPixBuf;
int16_t **chrPixBuf;
+ int16_t **alpPixBuf;
int16_t *hLumFilter;
int16_t *hLumFilterPos;
int16_t *hChrFilter;
@@ -156,6 +157,8 @@ typedef struct SwsContext{
#define VROUNDER_OFFSET "11*8+4*4*256*2+16"
#define U_TEMP "11*8+4*4*256*2+24"
#define V_TEMP "11*8+4*4*256*2+32"
+#define Y_TEMP "11*8+4*4*256*2+40"
+#define ALP_MMX_FILTER_OFFSET "11*8+4*4*256*2+48"
uint64_t redDither __attribute__((aligned(8)));
uint64_t greenDither __attribute__((aligned(8)));
@@ -176,6 +179,8 @@ typedef struct SwsContext{
uint64_t vRounder __attribute__((aligned(8)));
uint64_t u_temp __attribute__((aligned(8)));
uint64_t v_temp __attribute__((aligned(8)));
+ uint64_t y_temp __attribute__((aligned(8)));
+ int32_t alpMmxFilter[4*MAX_FILTER_SIZE];
#if HAVE_ALTIVEC
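The quoted strings are byte offsets used directly by the inline MMX asm in swscale_template.c; they are measured from the redDither field, whose address the asm blocks receive as an operand, so the two new constants have to track the struct layout by hand whenever fields are added. One hedged way to cross-check such constants is offsetof(); the struct below is a trimmed stand-in for the run of asm-visible fields, not the real SwsContext:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct asm_block {
    uint64_t coeffs[11];                /* redDither .. vOffset: the 11*8 part */
    int32_t  lumMmxFilter[4 * 256];     /* 4*4*256 bytes */
    int32_t  chrMmxFilter[4 * 256];     /* another 4*4*256 bytes */
    uint64_t dstW_esp[2];               /* fields at +0 and +8 past the arrays */
    uint64_t vRounder, u_temp, v_temp;  /* +16, +24, +32 */
    uint64_t y_temp;                    /* +40, new in this patch */
    int32_t  alpMmxFilter[4 * 256];     /* +48, new in this patch */
};

static void check_alpha_offsets(void)
{
    assert(offsetof(struct asm_block, y_temp)       == 11*8 + 4*4*256*2 + 40);
    assert(offsetof(struct asm_block, alpMmxFilter) == 11*8 + 4*4*256*2 + 48);
}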
diff --git a/libswscale/swscale_template.c b/libswscale/swscale_template.c
index 6d54fc35df..2730942352 100644
--- a/libswscale/swscale_template.c
+++ b/libswscale/swscale_template.c
@@ -644,6 +644,14 @@
#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
+#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
+ "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
+ "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
+ "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
+ "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
+ "packuswb %%mm1, %%mm7 \n\t"
+#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
+
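REAL_YSCALEYUV2RGB1_ALPHA loads eight 16-bit alpha intermediates from abuf0, shifts them down by 7 (psraw $7) and packs them to unsigned bytes with saturation (packuswb), leaving the result in mm7 for the 32-bit writer. A scalar sketch of the same eight-pixel step, with an illustrative helper name:

#include <stdint.h>

static void alpha_from_buf0(const int16_t *abuf0, int index, uint8_t out[8])
{
    for (int k = 0; k < 8; k++) {
        int v = abuf0[index + k] >> 7;              /* psraw $7 */
        out[k] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;   /* packuswb saturation */
    }
}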
#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
"movq "#b", "#q2" \n\t" /* B */\
"movq "#r", "#t" \n\t" /* R */\
@@ -909,8 +917,8 @@
static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, int16_t **alpSrc,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
#if HAVE_MMX
if(!(c->flags & SWS_BITEXACT)){
@@ -919,6 +927,9 @@ static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t *
YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
}
+ if (CONFIG_SWSCALE_ALPHA && aDest){
+ YSCALEYUV2YV12X_ACCURATE( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
+ }
YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
}else{
@@ -926,6 +937,9 @@ static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t *
YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
}
+ if (CONFIG_SWSCALE_ALPHA && aDest){
+ YSCALEYUV2YV12X( "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
+ }
YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
}
@@ -939,7 +953,7 @@ yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
#else //HAVE_ALTIVEC
yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
- dest, uDest, vDest, dstW, chrDstW);
+ alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
}
@@ -952,19 +966,20 @@ yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
dest, uDest, dstW, chrDstW, dstFormat);
}
-static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc, int16_t *alpSrc,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
int i;
#if HAVE_MMX
if(!(c->flags & SWS_BITEXACT)){
- long p= uDest ? 3 : 1;
- uint8_t *src[3]= {lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
- uint8_t *dst[3]= {dest, uDest, vDest};
- x86_reg counter[3] = {dstW, chrDstW, chrDstW};
+ long p= 4;
+ uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
+ uint8_t *dst[4]= {aDest, dest, uDest, vDest};
+ x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
if (c->flags & SWS_ACCURATE_RND){
while(p--){
+ if (dst[p]){
__asm__ volatile(
YSCALEYUV2YV121_ACCURATE
:: "r" (src[p]), "r" (dst[p] + counter[p]),
@@ -972,8 +987,10 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr
: "%"REG_a
);
}
+ }
}else{
while(p--){
+ if (dst[p]){
__asm__ volatile(
YSCALEYUV2YV121
:: "r" (src[p]), "r" (dst[p] + counter[p]),
@@ -981,6 +998,7 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr
: "%"REG_a
);
}
+ }
}
return;
}
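The MMX fast path of yuv2yuv1 now walks four planes (alpha, luma, U, V) instead of one or three, and the new dst[p] test simply skips any destination that is not wanted, e.g. aDest when the output has no alpha plane, or uDest/vDest for grayscale. A sketch of that loop structure, with my own names; the per-pixel store stands in for the >>7 plus saturation that the YSCALEYUV2YV121 asm performs:

#include <stddef.h>
#include <stdint.h>

struct plane { const int16_t *src; uint8_t *dst; long count; };

static void write_planes(struct plane p[4])
{
    for (int i = 0; i < 4; i++) {
        if (!p[i].dst)
            continue;                        /* plane not wanted by this format */
        for (long x = 0; x < p[i].count; x++) {
            int v = p[i].src[x] >> 7;
            p[i].dst[x] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
        }
    }
}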
@@ -1013,6 +1031,12 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr
uDest[i]= u;
vDest[i]= v;
}
+
+ if (CONFIG_SWSCALE_ALPHA && aDest)
+ for (i=0; i<dstW; i++){
+ int val= (alpSrc[i]+64)>>7;
+ aDest[i]= av_clip_uint8(val);
+ }
}
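The C fallback above converts the 15-bit alpha intermediates (8-bit sample << 7) back to bytes with rounding: adding 64 before the >>7 rounds to nearest, and the clip guards against overshoot from the horizontal scaler. A one-liner sketch of that conversion:

#include <stdint.h>

static inline uint8_t alpha15_to_8(int a15)
{
    int v = (a15 + 64) >> 7;                 /* round to nearest */
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}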
@@ -1021,7 +1045,7 @@ static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chr
*/
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, long dstW, long dstY)
+ int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
{
#if HAVE_MMX
x86_reg dummy=0;
@@ -1029,12 +1053,28 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
if (c->flags & SWS_ACCURATE_RND){
switch(c->dstFormat){
case PIX_FMT_RGB32:
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ "movq %%mm2, "U_TEMP"(%0) \n\t"
+ "movq %%mm4, "V_TEMP"(%0) \n\t"
+ "movq %%mm5, "Y_TEMP"(%0) \n\t"
+ YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
+ "movq "Y_TEMP"(%0), %%mm5 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ "packuswb %%mm7, %%mm1 \n\t"
+ WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
+
+ YSCALEYUV2PACKEDX_END
+ }else{
YSCALEYUV2PACKEDX_ACCURATE
YSCALEYUV2RGBX
"pcmpeqd %%mm7, %%mm7 \n\t"
WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
YSCALEYUV2PACKEDX_END
+ }
return;
case PIX_FMT_BGR24:
YSCALEYUV2PACKEDX_ACCURATE
@@ -1095,11 +1135,22 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
switch(c->dstFormat)
{
case PIX_FMT_RGB32:
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ "packuswb %%mm7, %%mm1 \n\t"
+ WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
+ YSCALEYUV2PACKEDX_END
+ }else{
YSCALEYUV2PACKEDX
YSCALEYUV2RGBX
"pcmpeqd %%mm7, %%mm7 \n\t"
WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
YSCALEYUV2PACKEDX_END
+ }
return;
case PIX_FMT_BGR24:
YSCALEYUV2PACKEDX
@@ -1161,7 +1212,7 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
#if HAVE_ALTIVEC
/* The following list of supported dstFormat values should
match what's found in the body of ff_yuv2packedX_altivec() */
- if (!(c->flags & SWS_BITEXACT) &&
+ if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
(c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB))
@@ -1172,14 +1223,14 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
#endif
yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
- dest, dstW, dstY);
+ alpSrc, dest, dstW, dstY);
}
/**
* vertical bilinear scale YV12 to RGB
*/
static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
- uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
+ uint16_t *abuf0, uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
int yalpha1=4095- yalpha;
int uvalpha1=4095-uvalpha;
@@ -1191,6 +1242,49 @@ static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *
{
//Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
case PIX_FMT_RGB32:
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+#if ARCH_X86_64
+ __asm__ volatile(
+ "mov %4, %%"REG_b" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ YSCALEYUV2RGB_YA(%%REGBP, %5, %6, %7)
+ "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+ "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+ "packuswb %%mm7, %%mm1 \n\t"
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ ,"r" (abuf0), "r" (abuf1)
+ : "%"REG_b, "%"REG_BP
+ );
+#else
+ *(uint16_t **)(&c->u_temp)=abuf0;
+ *(uint16_t **)(&c->v_temp)=abuf1;
+ __asm__ volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ "push %0 \n\t"
+ "push %1 \n\t"
+ "mov "U_TEMP"(%5), %0 \n\t"
+ "mov "V_TEMP"(%5), %1 \n\t"
+ YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
+ "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+ "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+ "packuswb %%mm7, %%mm1 \n\t"
+ "pop %1 \n\t"
+ "pop %0 \n\t"
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+#endif
+ }else{
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
@@ -1204,6 +1298,7 @@ static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
+ }
return;
case PIX_FMT_BGR24:
__asm__ volatile(
@@ -1279,14 +1374,14 @@ static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *
}
}
#endif //HAVE_MMX
-YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C, YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
+YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C(void,0), YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
}
/**
* YV12 to RGB without scaling or interpolating
*/
static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
- uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
+ uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
{
const int yalpha1=0;
int i;
@@ -1296,7 +1391,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
if (flags&SWS_FULL_CHR_H_INT)
{
- RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
+ RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
return;
}
@@ -1307,6 +1402,21 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
switch(dstFormat)
{
case PIX_FMT_RGB32:
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ __asm__ volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ YSCALEYUV2RGB1_ALPHA(%%REGBP)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ }else{
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
@@ -1320,6 +1430,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
+ }
return;
case PIX_FMT_BGR24:
__asm__ volatile(
@@ -1400,6 +1511,21 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
switch(dstFormat)
{
case PIX_FMT_RGB32:
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ __asm__ volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ YSCALEYUV2RGB1_ALPHA(%%REGBP)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ }else{
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
@@ -1413,6 +1539,7 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
+ }
return;
case PIX_FMT_BGR24:
__asm__ volatile(