summary | refs | log | tree | commit | diff | stats
path: root/libswscale/swscale.c
diff options
context:
space:
mode:
Diffstat (limited to 'libswscale/swscale.c')
-rw-r--r--  libswscale/swscale.c | 627 +++++++++++++++++++++++-----------------------
1 file changed, 278 insertions(+), 349 deletions(-)
diff --git a/libswscale/swscale.c b/libswscale/swscale.c
index 4742f2fc04..cdcc2a695b 100644
--- a/libswscale/swscale.c
+++ b/libswscale/swscale.c
@@ -561,8 +561,7 @@ static inline void yuv2yuvXinC(const int16_t *lumFilter, const int16_t **lumSrc,
{
//FIXME Optimize (just quickly written not optimized..)
int i;
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int val=1<<18;
int j;
for (j=0; j<lumFilterSize; j++)
@@ -572,13 +571,11 @@ static inline void yuv2yuvXinC(const int16_t *lumFilter, const int16_t **lumSrc,
}
if (uDest)
- for (i=0; i<chrDstW; i++)
- {
+ for (i=0; i<chrDstW; i++) {
int u=1<<18;
int v=1<<18;
int j;
- for (j=0; j<chrFilterSize; j++)
- {
+ for (j=0; j<chrFilterSize; j++) {
u += chrSrc[j][i] * chrFilter[j];
v += chrSrc[j][i + VOFW] * chrFilter[j];
}
@@ -588,7 +585,7 @@ static inline void yuv2yuvXinC(const int16_t *lumFilter, const int16_t **lumSrc,
}
if (CONFIG_SWSCALE_ALPHA && aDest)
- for (i=0; i<dstW; i++){
+ for (i=0; i<dstW; i++) {
int val=1<<18;
int j;
for (j=0; j<lumFilterSize; j++)
@@ -605,8 +602,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
{
//FIXME Optimize (just quickly written not optimized..)
int i;
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int val=1<<18;
int j;
for (j=0; j<lumFilterSize; j++)
@@ -619,13 +615,11 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
return;
if (dstFormat == PIX_FMT_NV12)
- for (i=0; i<chrDstW; i++)
- {
+ for (i=0; i<chrDstW; i++) {
int u=1<<18;
int v=1<<18;
int j;
- for (j=0; j<chrFilterSize; j++)
- {
+ for (j=0; j<chrFilterSize; j++) {
u += chrSrc[j][i] * chrFilter[j];
v += chrSrc[j][i + VOFW] * chrFilter[j];
}
@@ -634,13 +628,11 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
uDest[2*i+1]= av_clip_uint8(v>>19);
}
else
- for (i=0; i<chrDstW; i++)
- {
+ for (i=0; i<chrDstW; i++) {
int u=1<<18;
int v=1<<18;
int j;
- for (j=0; j<chrFilterSize; j++)
- {
+ for (j=0; j<chrFilterSize; j++) {
u += chrSrc[j][i] * chrFilter[j];
v += chrSrc[j][i + VOFW] * chrFilter[j];
}
@@ -651,7 +643,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
}
#define YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha) \
- for (i=0; i<(dstW>>1); i++){\
+ for (i=0; i<(dstW>>1); i++) {\
int j;\
int Y1 = 1<<18;\
int Y2 = 1<<18;\
@@ -661,13 +653,11 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
type av_unused *r, *b, *g;\
const int i2= 2*i;\
\
- for (j=0; j<lumFilterSize; j++)\
- {\
+ for (j=0; j<lumFilterSize; j++) {\
Y1 += lumSrc[j][i2] * lumFilter[j];\
Y2 += lumSrc[j][i2+1] * lumFilter[j];\
}\
- for (j=0; j<chrFilterSize; j++)\
- {\
+ for (j=0; j<chrFilterSize; j++) {\
U += chrSrc[j][i] * chrFilter[j];\
V += chrSrc[j][i+VOFW] * chrFilter[j];\
}\
@@ -675,10 +665,10 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
Y2>>=19;\
U >>=19;\
V >>=19;\
- if (alpha){\
+ if (alpha) {\
A1 = 1<<18;\
A2 = 1<<18;\
- for (j=0; j<lumFilterSize; j++){\
+ for (j=0; j<lumFilterSize; j++) {\
A1 += alpSrc[j][i2 ] * lumFilter[j];\
A2 += alpSrc[j][i2+1] * lumFilter[j];\
}\
@@ -688,8 +678,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
#define YSCALE_YUV_2_PACKEDX_C(type,alpha) \
YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha)\
- if ((Y1|Y2|U|V)&256)\
- {\
+ if ((Y1|Y2|U|V)&256) {\
if (Y1>255) Y1=255; \
else if (Y1<0)Y1=0; \
if (Y2>255) Y2=255; \
@@ -699,13 +688,13 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
if (V>255) V=255; \
else if (V<0) V=0; \
}\
- if (alpha && ((A1|A2)&256)){\
+ if (alpha && ((A1|A2)&256)) {\
A1=av_clip_uint8(A1);\
A2=av_clip_uint8(A2);\
}
#define YSCALE_YUV_2_PACKEDX_FULL_C(rnd,alpha) \
- for (i=0; i<dstW; i++){\
+ for (i=0; i<dstW; i++) {\
int j;\
int Y = 0;\
int U = -128<<19;\
@@ -713,17 +702,17 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
int av_unused A;\
int R,G,B;\
\
- for (j=0; j<lumFilterSize; j++){\
+ for (j=0; j<lumFilterSize; j++) {\
Y += lumSrc[j][i ] * lumFilter[j];\
}\
- for (j=0; j<chrFilterSize; j++){\
+ for (j=0; j<chrFilterSize; j++) {\
U += chrSrc[j][i ] * chrFilter[j];\
V += chrSrc[j][i+VOFW] * chrFilter[j];\
}\
Y >>=10;\
U >>=10;\
V >>=10;\
- if (alpha){\
+ if (alpha) {\
A = rnd;\
for (j=0; j<lumFilterSize; j++)\
A += alpSrc[j][i ] * lumFilter[j];\
@@ -740,7 +729,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
R= Y + V*c->yuv2rgb_v2r_coeff;\
G= Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;\
B= Y + U*c->yuv2rgb_u2b_coeff;\
- if ((R|G|B)&(0xC0000000)){\
+ if ((R|G|B)&(0xC0000000)) {\
if (R>=(256<<22)) R=(256<<22)-1; \
else if (R<0)R=0; \
if (G>=(256<<22)) G=(256<<22)-1; \
@@ -751,7 +740,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
#define YSCALE_YUV_2_GRAY16_C \
- for (i=0; i<(dstW>>1); i++){\
+ for (i=0; i<(dstW>>1); i++) {\
int j;\
int Y1 = 1<<18;\
int Y2 = 1<<18;\
@@ -760,15 +749,13 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
\
const int i2= 2*i;\
\
- for (j=0; j<lumFilterSize; j++)\
- {\
+ for (j=0; j<lumFilterSize; j++) {\
Y1 += lumSrc[j][i2] * lumFilter[j];\
Y2 += lumSrc[j][i2+1] * lumFilter[j];\
}\
Y1>>=11;\
Y2>>=11;\
- if ((Y1|Y2|U|V)&65536)\
- {\
+ if ((Y1|Y2|U|V)&65536) {\
if (Y1>65535) Y1=65535; \
else if (Y1<0)Y1=0; \
if (Y2>65535) Y2=65535; \
@@ -782,7 +769,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
b = (type *)c->table_bU[U]; \
#define YSCALE_YUV_2_PACKED2_C(type,alpha) \
- for (i=0; i<(dstW>>1); i++){ \
+ for (i=0; i<(dstW>>1); i++) { \
const int i2= 2*i; \
int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19; \
int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19; \
@@ -790,13 +777,13 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
int V= (uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19; \
type av_unused *r, *b, *g; \
int av_unused A1, A2; \
- if (alpha){\
+ if (alpha) {\
A1= (abuf0[i2 ]*yalpha1+abuf1[i2 ]*yalpha)>>19; \
A2= (abuf0[i2+1]*yalpha1+abuf1[i2+1]*yalpha)>>19; \
}\
#define YSCALE_YUV_2_GRAY16_2_C \
- for (i=0; i<(dstW>>1); i++){ \
+ for (i=0; i<(dstW>>1); i++) { \
const int i2= 2*i; \
int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>11; \
int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>11; \
@@ -808,7 +795,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
b = (type *)c->table_bU[U];\
#define YSCALE_YUV_2_PACKED1_C(type,alpha) \
- for (i=0; i<(dstW>>1); i++){\
+ for (i=0; i<(dstW>>1); i++) {\
const int i2= 2*i;\
int Y1= buf0[i2 ]>>7;\
int Y2= buf0[i2+1]>>7;\
@@ -816,13 +803,13 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
int V= (uvbuf1[i+VOFW])>>7;\
type av_unused *r, *b, *g;\
int av_unused A1, A2;\
- if (alpha){\
+ if (alpha) {\
A1= abuf0[i2 ]>>7;\
A2= abuf0[i2+1]>>7;\
}\
#define YSCALE_YUV_2_GRAY16_1_C \
- for (i=0; i<(dstW>>1); i++){\
+ for (i=0; i<(dstW>>1); i++) {\
const int i2= 2*i;\
int Y1= buf0[i2 ]<<1;\
int Y2= buf0[i2+1]<<1;\
@@ -834,7 +821,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
b = (type *)c->table_bU[U];\
#define YSCALE_YUV_2_PACKED1B_C(type,alpha) \
- for (i=0; i<(dstW>>1); i++){\
+ for (i=0; i<(dstW>>1); i++) {\
const int i2= 2*i;\
int Y1= buf0[i2 ]>>7;\
int Y2= buf0[i2+1]>>7;\
@@ -842,7 +829,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
int V= (uvbuf0[i+VOFW] + uvbuf1[i+VOFW])>>8;\
type av_unused *r, *b, *g;\
int av_unused A1, A2;\
- if (alpha){\
+ if (alpha) {\
A1= abuf0[i2 ]>>7;\
A2= abuf0[i2+1]>>7;\
}\
@@ -856,7 +843,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
#define YSCALE_YUV_2_MONO2_C \
const uint8_t * const d128=dither_8x8_220[y&7];\
uint8_t *g= c->table_gU[128] + c->table_gV[128];\
- for (i=0; i<dstW-7; i+=8){\
+ for (i=0; i<dstW-7; i+=8) {\
int acc;\
acc = g[((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19) + d128[0]];\
acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\
@@ -875,20 +862,18 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
const uint8_t * const d128=dither_8x8_220[y&7];\
uint8_t *g= c->table_gU[128] + c->table_gV[128];\
int acc=0;\
- for (i=0; i<dstW-1; i+=2){\
+ for (i=0; i<dstW-1; i+=2) {\
int j;\
int Y1=1<<18;\
int Y2=1<<18;\
\
- for (j=0; j<lumFilterSize; j++)\
- {\
+ for (j=0; j<lumFilterSize; j++) {\
Y1 += lumSrc[j][i] * lumFilter[j];\
Y2 += lumSrc[j][i+1] * lumFilter[j];\
}\
Y1>>=19;\
Y2>>=19;\
- if ((Y1|Y2)&256)\
- {\
+ if ((Y1|Y2)&256) {\
if (Y1>255) Y1=255;\
else if (Y1<0)Y1=0;\
if (Y2>255) Y2=255;\
@@ -896,7 +881,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
}\
acc+= acc + g[Y1+d128[(i+0)&7]];\
acc+= acc + g[Y2+d128[(i+1)&7]];\
- if ((i&7)==6){\
+ if ((i&7)==6) {\
((uint8_t*)dest)[0]= c->dstFormat == PIX_FMT_MONOBLACK ? acc : ~acc;\
dest++;\
}\
@@ -904,8 +889,7 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
#define YSCALE_YUV_2_ANYRGB_C(func, func2, func_g16, func_monoblack)\
- switch(c->dstFormat)\
- {\
+ switch(c->dstFormat) {\
case PIX_FMT_RGB48BE:\
case PIX_FMT_RGB48LE:\
func(uint8_t,0)\
@@ -926,19 +910,19 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
break;\
case PIX_FMT_RGBA:\
case PIX_FMT_BGRA:\
- if (CONFIG_SMALL){\
+ if (CONFIG_SMALL) {\
int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
func(uint32_t,needAlpha)\
((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? (A1<<24) : 0);\
((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? (A2<<24) : 0);\
}\
- }else{\
- if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){\
+ } else {\
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {\
func(uint32_t,1)\
((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (A1<<24);\
((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (A2<<24);\
}\
- }else{\
+ } else {\
func(uint32_t,0)\
((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
@@ -948,19 +932,19 @@ static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc
break;\
case PIX_FMT_ARGB:\
case PIX_FMT_ABGR:\
- if (CONFIG_SMALL){\
+ if (CONFIG_SMALL) {\
int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
func(uint32_t,needAlpha)\
((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? A1 : 0);\
((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? A2 : 0);\
}\
- }else{\
- if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){\
+ } else {\
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {\
func(uint32_t,1)\
((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + A1;\
((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + A2;\
}\
- }else{\
+ } else {\
func(uint32_t,0)\
((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
@@ -1110,14 +1094,14 @@ static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, con
int step= fmt_depth(c->dstFormat)/8;
int aidx= 3;
- switch(c->dstFormat){
+ switch(c->dstFormat) {
case PIX_FMT_ARGB:
dest++;
aidx= 0;
case PIX_FMT_RGB24:
aidx--;
case PIX_FMT_RGBA:
- if (CONFIG_SMALL){
+ if (CONFIG_SMALL) {
int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
dest[aidx]= needAlpha ? A : 255;
@@ -1126,8 +1110,8 @@ static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, con
dest[2]= B>>22;
dest+= step;
}
- }else{
- if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ } else {
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
dest[aidx]= A;
dest[0]= R>>22;
@@ -1135,7 +1119,7 @@ static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, con
dest[2]= B>>22;
dest+= step;
}
- }else{
+ } else {
YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
dest[aidx]= 255;
dest[0]= R>>22;
@@ -1152,7 +1136,7 @@ static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, con
case PIX_FMT_BGR24:
aidx--;
case PIX_FMT_BGRA:
- if (CONFIG_SMALL){
+ if (CONFIG_SMALL) {
int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
dest[aidx]= needAlpha ? A : 255;
@@ -1161,8 +1145,8 @@ static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, con
dest[2]= R>>22;
dest+= step;
}
- }else{
- if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
+ } else {
+ if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
dest[aidx]= A;
dest[0]= B>>22;
@@ -1170,7 +1154,7 @@ static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, con
dest[2]= R>>22;
dest+= step;
}
- }else{
+ } else {
YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
dest[aidx]= 255;
dest[0]= B>>22;
@@ -1186,10 +1170,11 @@ static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, con
}
}
-static void fillPlane(uint8_t* plane, int stride, int width, int height, int y, uint8_t val){
+static void fillPlane(uint8_t* plane, int stride, int width, int height, int y, uint8_t val)
+{
int i;
uint8_t *ptr = plane + stride*y;
- for (i=0; i<height; i++){
+ for (i=0; i<height; i++) {
memset(ptr, val, width);
ptr += stride;
}
@@ -1241,8 +1226,7 @@ static inline void rgb48ToUV_half(uint8_t *dstU, uint8_t *dstV,
static inline void name(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)\
{\
int i;\
- for (i=0; i<width; i++)\
- {\
+ for (i=0; i<width; i++) {\
int b= (((const type*)src)[i]>>shb)&maskb;\
int g= (((const type*)src)[i]>>shg)&maskg;\
int r= (((const type*)src)[i]>>shr)&maskr;\
@@ -1258,9 +1242,10 @@ BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY
BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY , GY<<5, BY<<10, RGB2YUV_SHIFT+7)
-static inline void abgrToA(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused){
+static inline void abgrToA(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
int i;
- for (i=0; i<width; i++){
+ for (i=0; i<width; i++) {
dst[i]= src[4*i];
}
}
@@ -1269,8 +1254,7 @@ static inline void abgrToA(uint8_t *dst, const uint8_t *src, long width, uint32_
static inline void name(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, const uint8_t *dummy, long width, uint32_t *unused)\
{\
int i;\
- for (i=0; i<width; i++)\
- {\
+ for (i=0; i<width; i++) {\
int b= (((const type*)src)[i]&maskb)>>shb;\
int g= (((const type*)src)[i]&maskg)>>shg;\
int r= (((const type*)src)[i]&maskr)>>shr;\
@@ -1282,8 +1266,7 @@ static inline void name(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, const
static inline void name ## _half(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, const uint8_t *dummy, long width, uint32_t *unused)\
{\
int i;\
- for (i=0; i<width; i++)\
- {\
+ for (i=0; i<width; i++) {\
int pix0= ((const type*)src)[2*i+0];\
int pix1= ((const type*)src)[2*i+1];\
int g= (pix0&~(maskr|maskb))+(pix1&~(maskr|maskb));\
@@ -1308,8 +1291,7 @@ BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU
static inline void palToY(uint8_t *dst, const uint8_t *src, long width, uint32_t *pal)
{
int i;
- for (i=0; i<width; i++)
- {
+ for (i=0; i<width; i++) {
int d= src[i];
dst[i]= pal[d] & 0xFF;
@@ -1322,8 +1304,7 @@ static inline void palToUV(uint8_t *dstU, uint8_t *dstV,
{
int i;
assert(src1 == src2);
- for (i=0; i<width; i++)
- {
+ for (i=0; i<width; i++) {
int p= pal[src1[i]];
dstU[i]= p>>8;
@@ -1334,7 +1315,7 @@ static inline void palToUV(uint8_t *dstU, uint8_t *dstV,
static inline void monowhite2Y(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
int i, j;
- for (i=0; i<width/8; i++){
+ for (i=0; i<width/8; i++) {
int d= ~src[i];
for(j=0; j<8; j++)
dst[8*i+j]= ((d>>(7-j))&1)*255;
@@ -1344,7 +1325,7 @@ static inline void monowhite2Y(uint8_t *dst, const uint8_t *src, long width, uin
static inline void monoblack2Y(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
int i, j;
- for (i=0; i<width/8; i++){
+ for (i=0; i<width/8; i++) {
int d= src[i];
for(j=0; j<8; j++)
dst[8*i+j]= ((d>>(7-j))&1)*255;
@@ -1471,53 +1452,44 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
// NOTE: the +1 is for the MMX scaler which reads over the end
*filterPos = av_malloc((dstW+1)*sizeof(int16_t));
- if (FFABS(xInc - 0x10000) <10) // unscaled
- {
+ if (FFABS(xInc - 0x10000) <10) { // unscaled
int i;
filterSize= 1;
filter= av_mallocz(dstW*sizeof(*filter)*filterSize);
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
filter[i*filterSize]= fone;
(*filterPos)[i]=i;
}
- }
- else if (flags&SWS_POINT) // lame looking point sampling mode
- {
+ } else if (flags&SWS_POINT) { // lame looking point sampling mode
int i;
int xDstInSrc;
filterSize= 1;
filter= av_malloc(dstW*sizeof(*filter)*filterSize);
xDstInSrc= xInc/2 - 0x8000;
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
(*filterPos)[i]= xx;
filter[i]= fone;
xDstInSrc+= xInc;
}
- }
- else if ((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) // bilinear upscale
- {
+ } else if ((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) { // bilinear upscale
int i;
int xDstInSrc;
filterSize= 2;
filter= av_malloc(dstW*sizeof(*filter)*filterSize);
xDstInSrc= xInc/2 - 0x8000;
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
int j;
(*filterPos)[i]= xx;
//bilinear upscale / linear interpolate / area averaging
- for (j=0; j<filterSize; j++)
- {
+ for (j=0; j<filterSize; j++) {
int64_t coeff= fone - FFABS((xx<<16) - xDstInSrc)*(fone>>16);
if (coeff<0) coeff=0;
filter[i*filterSize + j]= coeff;
@@ -1525,9 +1497,7 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
}
xDstInSrc+= xInc;
}
- }
- else
- {
+ } else {
int xDstInSrc;
int sizeFactor;
@@ -1552,13 +1522,11 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
filter= av_malloc(dstW*sizeof(*filter)*filterSize);
xDstInSrc= xInc - 0x10000;
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int xx= (xDstInSrc - ((filterSize-2)<<16)) / (1<<17);
int j;
(*filterPos)[i]= xx;
- for (j=0; j<filterSize; j++)
- {
+ for (j=0; j<filterSize; j++) {
int64_t d= ((int64_t)FFABS((xx<<17) - xDstInSrc))<<13;
double floatd;
int64_t coeff;
@@ -1567,8 +1535,7 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
d= d*dstW/srcW;
floatd= d * (1.0/(1<<30));
- if (flags & SWS_BICUBIC)
- {
+ if (flags & SWS_BICUBIC) {
int64_t B= (param[0] != SWS_PARAM_DEFAULT ? param[0] : 0) * (1<<24);
int64_t C= (param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6) * (1<<24);
int64_t dd = ( d*d)>>30;
@@ -1582,14 +1549,12 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
coeff=0.0;
coeff *= fone>>(30+24);
}
-/* else if (flags & SWS_X)
- {
+/* else if (flags & SWS_X) {
double p= param ? param*0.01 : 0.3;
coeff = d ? sin(d*PI)/(d*PI) : 1.0;
coeff*= pow(2.0, - p*d*d);
}*/
- else if (flags & SWS_X)
- {
+ else if (flags & SWS_X) {
double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
double c;
@@ -1600,42 +1565,29 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
if (c<0.0) c= -pow(-c, A);
else c= pow( c, A);
coeff= (c*0.5 + 0.5)*fone;
- }
- else if (flags & SWS_AREA)
- {
+ } else if (flags & SWS_AREA) {
int64_t d2= d - (1<<29);
if (d2*xInc < -(1LL<<(29+16))) coeff= 1.0 * (1LL<<(30+16));
else if (d2*xInc < (1LL<<(29+16))) coeff= -d2*xInc + (1LL<<(29+16));
else coeff=0.0;
coeff *= fone>>(30+16);
- }
- else if (flags & SWS_GAUSS)
- {
+ } else if (flags & SWS_GAUSS) {
double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
coeff = (pow(2.0, - p*floatd*floatd))*fone;
- }
- else if (flags & SWS_SINC)
- {
+ } else if (flags & SWS_SINC) {
coeff = (d ? sin(floatd*PI)/(floatd*PI) : 1.0)*fone;
- }
- else if (flags & SWS_LANCZOS)
- {
+ } else if (flags & SWS_LANCZOS) {
double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
coeff = (d ? sin(floatd*PI)*sin(floatd*PI/p)/(floatd*floatd*PI*PI/p) : 1.0)*fone;
if (floatd>p) coeff=0;
- }
- else if (flags & SWS_BILINEAR)
- {
+ } else if (flags & SWS_BILINEAR) {
coeff= (1<<30) - d;
if (coeff<0) coeff=0;
coeff *= fone >> 30;
- }
- else if (flags & SWS_SPLINE)
- {
+ } else if (flags & SWS_SPLINE) {
double p=-2.196152422706632;
coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, floatd) * fone;
- }
- else {
+ } else {
coeff= 0.0; //GCC warning killer
assert(0);
}
@@ -1657,16 +1609,15 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
assert(filter2Size>0);
filter2= av_mallocz(filter2Size*dstW*sizeof(*filter2));
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int j, k;
- if(srcFilter){
- for (k=0; k<srcFilter->length; k++){
+ if(srcFilter) {
+ for (k=0; k<srcFilter->length; k++) {
for (j=0; j<filterSize; j++)
filter2[i*filter2Size + k + j] += srcFilter->coeff[k]*filter[i*filterSize + j];
}
- }else{
+ } else {
for (j=0; j<filterSize; j++)
filter2[i*filter2Size + j]= filter[i*filterSize + j];
}
@@ -1679,15 +1630,13 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
/* try to reduce the filter-size (step1 find size and shift left) */
// Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not).
minFilterSize= 0;
- for (i=dstW-1; i>=0; i--)
- {
+ for (i=dstW-1; i>=0; i--) {
int min= filter2Size;
int j;
int64_t cutOff=0.0;
/* get rid off near zero elements on the left by shifting left */
- for (j=0; j<filter2Size; j++)
- {
+ for (j=0; j<filter2Size; j++) {
int k;
cutOff += FFABS(filter2[i*filter2Size]);
@@ -1705,8 +1654,7 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
cutOff=0;
/* count near zeros on the right */
- for (j=filter2Size-1; j>0; j--)
- {
+ for (j=filter2Size-1; j>0; j--) {
cutOff += FFABS(filter2[i*filter2Size + j]);
if (cutOff > SWS_MAX_REDUCE_CUTOFF*fone) break;
@@ -1748,12 +1696,10 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
if (flags&SWS_PRINT_INFO)
av_log(NULL, AV_LOG_VERBOSE, "SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize);
/* try to reduce the filter-size (step2 reduce it) */
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int j;
- for (j=0; j<filterSize; j++)
- {
+ for (j=0; j<filterSize; j++) {
if (j>=filter2Size) filter[i*filterSize + j]= 0;
else filter[i*filterSize + j]= filter2[i*filter2Size + j];
if((flags & SWS_BITEXACT) && j>=minFilterSize)
@@ -1765,14 +1711,11 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
//FIXME try to align filterPos if possible
//fix borders
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int j;
- if ((*filterPos)[i] < 0)
- {
+ if ((*filterPos)[i] < 0) {
// move filter coefficients left to compensate for filterPos
- for (j=1; j<filterSize; j++)
- {
+ for (j=1; j<filterSize; j++) {
int left= FFMAX(j + (*filterPos)[i], 0);
filter[i*filterSize + left] += filter[i*filterSize + j];
filter[i*filterSize + j]=0;
@@ -1780,12 +1723,10 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
(*filterPos)[i]= 0;
}
- if ((*filterPos)[i] + filterSize > srcW)
- {
+ if ((*filterPos)[i] + filterSize > srcW) {
int shift= (*filterPos)[i] + filterSize - srcW;
// move filter coefficients right to compensate for filterPos
- for (j=filterSize-2; j>=0; j--)
- {
+ for (j=filterSize-2; j>=0; j--) {
int right= FFMIN(j + shift, filterSize-1);
filter[i*filterSize +right] += filter[i*filterSize +j];
filter[i*filterSize +j]=0;
@@ -1799,19 +1740,16 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
*outFilter= av_mallocz(*outFilterSize*(dstW+1)*sizeof(int16_t));
/* normalize & store in outFilter */
- for (i=0; i<dstW; i++)
- {
+ for (i=0; i<dstW; i++) {
int j;
int64_t error=0;
int64_t sum=0;
- for (j=0; j<filterSize; j++)
- {
+ for (j=0; j<filterSize; j++) {
sum+= filter[i*filterSize + j];
}
sum= (sum + one/2)/ one;
- for (j=0; j<*outFilterSize; j++)
- {
+ for (j=0; j<*outFilterSize; j++) {
int64_t v= filter[i*filterSize + j] + error;
int intV= ROUNDED_DIV(v, sum);
(*outFilter)[i*(*outFilterSize) + j]= intV;
@@ -1820,8 +1758,7 @@ static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outF
}
(*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end
- for (i=0; i<*outFilterSize; i++)
- {
+ for (i=0; i<*outFilterSize; i++) {
int j= dstW*(*outFilterSize);
(*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)];
}
@@ -1933,12 +1870,10 @@ static int initMMX2HScaler(int dstW, int xInc, uint8_t *filterCode, int16_t *fil
xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
fragmentPos=0;
- for (i=0; i<dstW/numSplits; i++)
- {
+ for (i=0; i<dstW/numSplits; i++) {
int xx=xpos>>16;
- if ((i&3) == 0)
- {
+ if ((i&3) == 0) {
int a=0;
int b=((xpos+xInc)>>16) - xx;
int c=((xpos+xInc*2)>>16) - xx;
@@ -1968,8 +1903,7 @@ static int initMMX2HScaler(int dstW, int xInc, uint8_t *filterCode, int16_t *fil
if (i+4-inc>=dstW) shift=maxShift; //avoid overread
else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align
- if (shift && i>=shift)
- {
+ if (shift && i>=shift) {
filterCode[fragmentPos + imm8OfPShufW1]+= 0x55*shift;
filterCode[fragmentPos + imm8OfPShufW2]+= 0x55*shift;
filterPos[i/2]-=shift;
@@ -1990,10 +1924,11 @@ static int initMMX2HScaler(int dstW, int xInc, uint8_t *filterCode, int16_t *fil
}
#endif /* COMPILE_MMX2 */
-static void globalInit(void){
+static void globalInit(void)
+{
// generating tables:
int i;
- for (i=0; i<768; i++){
+ for (i=0; i<768; i++) {
int c= av_clip_uint8(i-256);
clip_table[i]=c;
}
@@ -2054,18 +1989,17 @@ static SwsFunc getSwsFunc(SwsContext *c)
}
static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
/* Copy Y plane */
if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
memcpy(dst, src[0], srcSliceH*dstStride[0]);
- else
- {
+ else {
int i;
const uint8_t *srcPtr= src[0];
uint8_t *dstPtr= dst;
- for (i=0; i<srcSliceH; i++)
- {
+ for (i=0; i<srcSliceH; i++) {
memcpy(dstPtr, srcPtr, c->srcW);
srcPtr+= srcStride[0];
dstPtr+= dstStride[0];
@@ -2081,7 +2015,8 @@ static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], i
}
static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
@@ -2090,7 +2025,8 @@ static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], i
}
static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
@@ -2099,7 +2035,8 @@ static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], i
}
static int YUV422PToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
yuv422ptoyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
@@ -2108,7 +2045,8 @@ static int YUV422PToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[],
}
static int YUV422PToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
-