summaryrefslogtreecommitdiffstats
path: root/video
diff options
context:
space:
mode:
authorwm4 <wm4@nowhere>2014-12-08 14:59:26 +0100
committerwm4 <wm4@nowhere>2014-12-08 14:59:57 +0100
commit4cae83db76bbcc82813825a3ae3f2a563d5ba809 (patch)
tree25cf136f3a3ab4931c063776fdc13e0f7f8386f8 /video
parent2e1daaff83a4fe7a213d18fba3adb31bb173dedb (diff)
downloadmpv-4cae83db76bbcc82813825a3ae3f2a563d5ba809.tar.bz2
mpv-4cae83db76bbcc82813825a3ae3f2a563d5ba809.tar.xz
vo_opengl: refactor: merge convolution function and sampler entrypoint
There's not much of a reason to have the actual convolution code in a separate function. Merging them actually simplifies the code a bit, and gets rid of the repetitious macro invocations to define the functions for each filter size. There should be no changes in behavior or output.
Diffstat (limited to 'video')
-rw-r--r--video/out/gl_video_shaders.glsl103
1 file changed, 36 insertions, 67 deletions
diff --git a/video/out/gl_video_shaders.glsl b/video/out/gl_video_shaders.glsl
index c4a70af2e4..f6b84fc00c 100644
--- a/video/out/gl_video_shaders.glsl
+++ b/video/out/gl_video_shaders.glsl
@@ -257,87 +257,56 @@ WEIGHTS_N(weights16, 16)
WEIGHTS_N(weights32, 32)
WEIGHTS_N(weights64, 64)
-#define CONVOLUTION_SEP_N(NAME, N) \
- vec4 NAME(VIDEO_SAMPLER tex, vec2 texcoord, vec2 pt, float weights[N]) {\
- vec4 res = vec4(0); \
- for (int n = 0; n < N; n++) { \
- res += weights[n] * texture(tex, texcoord + pt * n); \
- } \
- return res; \
- }
-
-CONVOLUTION_SEP_N(convolution_sep2, 2)
-CONVOLUTION_SEP_N(convolution_sep4, 4)
-CONVOLUTION_SEP_N(convolution_sep6, 6)
-CONVOLUTION_SEP_N(convolution_sep8, 8)
-CONVOLUTION_SEP_N(convolution_sep12, 12)
-CONVOLUTION_SEP_N(convolution_sep16, 16)
-CONVOLUTION_SEP_N(convolution_sep32, 32)
-CONVOLUTION_SEP_N(convolution_sep64, 64)
-
// The dir parameter is (0, 1) or (1, 0), and we expect the shader compiler to
// remove all the redundant multiplications and additions.
-#define SAMPLE_CONVOLUTION_SEP_N(NAME, N, SAMPLERT, CONV_FUNC, WEIGHTS_FUNC)\
+#define SAMPLE_CONVOLUTION_SEP_N(NAME, N, SAMPLERT, WEIGHTS_FUNC) \
vec4 NAME(vec2 dir, SAMPLERT lookup, VIDEO_SAMPLER tex, vec2 texsize, \
vec2 texcoord) { \
vec2 pt = (1 / texsize) * dir; \
float fcoord = dot(fract(texcoord * texsize - 0.5), dir); \
- vec2 base = texcoord - fcoord * pt; \
- return CONV_FUNC(tex, base - pt * (N / 2 - 1), pt, \
- WEIGHTS_FUNC(lookup, fcoord)); \
- }
-
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep2, 2, sampler1D, convolution_sep2, weights2)
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep4, 4, sampler1D, convolution_sep4, weights4)
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep6, 6, sampler2D, convolution_sep6, weights6)
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep8, 8, sampler2D, convolution_sep8, weights8)
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep12, 12, sampler2D, convolution_sep12, weights12)
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep16, 16, sampler2D, convolution_sep16, weights16)
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep32, 32, sampler2D, convolution_sep32, weights32)
-SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep64, 64, sampler2D, convolution_sep64, weights64)
-
-
-#define CONVOLUTION_N(NAME, N) \
- vec4 NAME(VIDEO_SAMPLER tex, vec2 texcoord, vec2 pt, float taps_x[N], \
- float taps_y[N]) { \
- vec4 res = vec4(0); \
- for (int y = 0; y < N; y++) { \
- vec4 line = vec4(0); \
- for (int x = 0; x < N; x++) \
- line += taps_x[x] * texture(tex, texcoord + pt * vec2(x, y));\
- res += taps_y[y] * line; \
- } \
- return res; \
+ vec2 base = texcoord - fcoord * pt - pt * (N / 2 - 1); \
+ float weights[N] = WEIGHTS_FUNC(lookup, fcoord); \
+ vec4 res = vec4(0); \
+ for (int n = 0; n < N; n++) { \
+ res += weights[n] * texture(tex, base + pt * n); \
+ } \
+ return res; \
}
-CONVOLUTION_N(convolution2, 2)
-CONVOLUTION_N(convolution4, 4)
-CONVOLUTION_N(convolution6, 6)
-CONVOLUTION_N(convolution8, 8)
-CONVOLUTION_N(convolution12, 12)
-CONVOLUTION_N(convolution16, 16)
-CONVOLUTION_N(convolution32, 32)
-CONVOLUTION_N(convolution64, 64)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep2, 2, sampler1D, weights2)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep4, 4, sampler1D, weights4)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep6, 6, sampler2D, weights6)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep8, 8, sampler2D, weights8)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep12, 12, sampler2D, weights12)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep16, 16, sampler2D, weights16)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep32, 32, sampler2D, weights32)
+SAMPLE_CONVOLUTION_SEP_N(sample_convolution_sep64, 64, sampler2D, weights64)
-#define SAMPLE_CONVOLUTION_N(NAME, N, SAMPLERT, CONV_FUNC, WEIGHTS_FUNC) \
+#define SAMPLE_CONVOLUTION_N(NAME, N, SAMPLERT, WEIGHTS_FUNC) \
vec4 NAME(SAMPLERT lookup, VIDEO_SAMPLER tex, vec2 texsize, vec2 texcoord) {\
vec2 pt = 1 / texsize; \
vec2 fcoord = fract(texcoord * texsize - 0.5); \
- vec2 base = texcoord - fcoord * pt; \
- return CONV_FUNC(tex, base - pt * (N / 2 - 1), pt, \
- WEIGHTS_FUNC(lookup, fcoord.x), \
- WEIGHTS_FUNC(lookup, fcoord.y)); \
+ vec2 base = texcoord - fcoord * pt - pt * (N / 2 - 1); \
+ vec4 res = vec4(0); \
+ float w_x[N] = WEIGHTS_FUNC(lookup, fcoord.x); \
+ float w_y[N] = WEIGHTS_FUNC(lookup, fcoord.y); \
+ for (int y = 0; y < N; y++) { \
+ vec4 line = vec4(0); \
+ for (int x = 0; x < N; x++) \
+ line += w_x[x] * texture(tex, base + pt * vec2(x, y)); \
+ res += w_y[y] * line; \
+ } \
+ return res; \
}
-SAMPLE_CONVOLUTION_N(sample_convolution2, 2, sampler1D, convolution2, weights2)
-SAMPLE_CONVOLUTION_N(sample_convolution4, 4, sampler1D, convolution4, weights4)
-SAMPLE_CONVOLUTION_N(sample_convolution6, 6, sampler2D, convolution6, weights6)
-SAMPLE_CONVOLUTION_N(sample_convolution8, 8, sampler2D, convolution8, weights8)
-SAMPLE_CONVOLUTION_N(sample_convolution12, 12, sampler2D, convolution12, weights12)
-SAMPLE_CONVOLUTION_N(sample_convolution16, 16, sampler2D, convolution16, weights16)
-SAMPLE_CONVOLUTION_N(sample_convolution32, 32, sampler2D, convolution32, weights32)
-SAMPLE_CONVOLUTION_N(sample_convolution64, 64, sampler2D, convolution64, weights64)
-
+SAMPLE_CONVOLUTION_N(sample_convolution2, 2, sampler1D, weights2)
+SAMPLE_CONVOLUTION_N(sample_convolution4, 4, sampler1D, weights4)
+SAMPLE_CONVOLUTION_N(sample_convolution6, 6, sampler2D, weights6)
+SAMPLE_CONVOLUTION_N(sample_convolution8, 8, sampler2D, weights8)
+SAMPLE_CONVOLUTION_N(sample_convolution12, 12, sampler2D, weights12)
+SAMPLE_CONVOLUTION_N(sample_convolution16, 16, sampler2D, weights16)
+SAMPLE_CONVOLUTION_N(sample_convolution32, 32, sampler2D, weights32)
+SAMPLE_CONVOLUTION_N(sample_convolution64, 64, sampler2D, weights64)
// Unsharp masking
vec4 sample_sharpen3(VIDEO_SAMPLER tex, vec2 texsize, vec2 texcoord, float param1) {