author     wm4 <wm4@nowhere>    2012-12-27 18:07:37 +0100
committer  wm4 <wm4@nowhere>    2012-12-28 14:23:29 +0100
commit     d78bde15ab4be5f46a6fb5fc5a35d6acbc6c39cf (patch)
tree       52c0cec2b97bb84a07774edadc9b5bb33b46be23 /video/out/vo_opengl_old.c
parent     1e56e68701363f38ae008d2b243dc2476a2f4943 (diff)
download   mpv-d78bde15ab4be5f46a6fb5fc5a35d6acbc6c39cf.tar.bz2
           mpv-d78bde15ab4be5f46a6fb5fc5a35d6acbc6c39cf.tar.xz
vo_opengl_old: reject 9-15 bit formats if textures have less than 16 bit
For 9-15 bit material, cutting off the lower bits leads to significant
quality reduction, because these formats leave the most significant bits
unused (e.g. 10 bit padded to 16 bit, transferred as 8 bit -> only 2 bits
left). 16 bit formats can still be played like this, as cutting the lower
bits merely reduces quality in that case.

This problem was encountered with the following GPU/driver combination:

    OpenGL vendor string: Intel Open Source Technology Center
    OpenGL renderer string: Mesa DRI Intel(R) 915GM x86/MMX/SSE2
    OpenGL version string: 1.4 Mesa 9.0.1

It appears 16 bit support is rather common on GPUs, so testing the actual
texture depth wasn't needed until now. (There are some other Mesa
GPU/driver combinations which support 16 bit only when using RG textures
instead of LUMINANCE_ALPHA. This is due to OpenGL driver bugs.)
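The fix probes how many bits the driver actually stores per texture
component: it allocates a 16 bit texture and reads back the realized
component size. Below is a minimal standalone sketch of that probing
idea; probe_tex_component_depth is a hypothetical helper, it assumes a
current legacy OpenGL context, and it uses plain GL_LUMINANCE16 where
the actual patch goes through mpv's glFindFormat/glCreateClearTex
wrappers and picks GL_TEXTURE_RED_SIZE or GL_TEXTURE_INTENSITY_SIZE
depending on RG texture support:

    #include <GL/gl.h>

    /* Sketch: request a 16 bit luminance texture and ask the driver
     * how many bits per component it actually allocated. Drivers may
     * silently fall back to 8 bit; this query reveals that. */
    static GLint probe_tex_component_depth(void)
    {
        GLuint tex;
        GLint size = 8;  /* conservative default if the query fails */
        glGenTextures(1, &tex);
        glBindTexture(GL_TEXTURE_2D, tex);
        /* NULL pixel data is fine; only the internal format matters. */
        glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE16, 64, 64, 0,
                     GL_LUMINANCE, GL_UNSIGNED_SHORT, NULL);
        glGetTexLevelParameteriv(GL_TEXTURE_2D, 0,
                                 GL_TEXTURE_LUMINANCE_SIZE, &size);
        glDeleteTextures(1, &tex);
        return size;  /* < 16 means 9-15 bit formats must be rejected */
    }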
Diffstat (limited to 'video/out/vo_opengl_old.c')
-rw-r--r--  video/out/vo_opengl_old.c | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/video/out/vo_opengl_old.c b/video/out/vo_opengl_old.c
index a21db3a328..acbb205405 100644
--- a/video/out/vo_opengl_old.c
+++ b/video/out/vo_opengl_old.c
@@ -75,6 +75,7 @@ struct gl_priv {
     uint32_t image_format;
     int many_fmts;
     int have_texture_rg;
+    int max_tex_component_size;
     int ati_hack;
     int force_pbo;
     int use_glFinish;
@@ -347,6 +348,22 @@ static void autodetectGlExtensions(struct vo *vo)
     }
     p->video_eq.capabilities = eq_caps;
 
+    {
+        int target = p->use_rectangle == 1 ? GL_TEXTURE_RECTANGLE : GL_TEXTURE_2D;
+        GLint gl_texfmt;
+        GLenum gl_format, gl_type;
+        glFindFormat(IMGFMT_420P16, p->have_texture_rg, NULL, &gl_texfmt,
+                     &gl_format, &gl_type);
+        glCreateClearTex(gl, target, gl_texfmt, gl_format, gl_type,
+                         GL_LINEAR, 64, 64, 0);
+        int tex_size_token = p->have_texture_rg ? GL_TEXTURE_RED_SIZE
+                                                : GL_TEXTURE_INTENSITY_SIZE;
+        GLint size = 8;
+        gl->GetTexLevelParameteriv(target, 0, tex_size_token, &size);
+        mp_msg(MSGT_VO, MSGL_V, "[gl] 16 bit texture depth: %d.\n", size);
+        p->max_tex_component_size = size;
+    }
+
     if (is_ati && (p->lscale == 1 || p->lscale == 2 || p->cscale == 1 || p->cscale == 2))
         mp_msg(MSGT_VO, MSGL_WARN, "[gl] Selected scaling mode may be broken on"
                " ATI cards.\n"
@@ -838,7 +855,8 @@ static int query_format(struct vo *vo, uint32_t format)
     if (format == IMGFMT_RGB24 || format == IMGFMT_RGBA)
         return caps;
     if (p->use_yuv && mp_get_chroma_shift(format, NULL, NULL, &depth) &&
-        (depth == 8 || depth == 16 || glYUVLargeRange(p->use_yuv)) &&
+        (depth == 8 || depth == 16 ||
+         p->max_tex_component_size >= 16 && glYUVLargeRange(p->use_yuv)) &&
         (IMGFMT_IS_YUVP16_NE(format) || !IMGFMT_IS_YUVP16(format)))
         return caps;
     // HACK, otherwise we get only b&w with some filters (e.g. -vf eq)
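A reading note on the query_format() hunk above: in C, && binds tighter
than ||, so the new condition parses as intended even without inner
parentheses. A hypothetical standalone restatement, with depth,
max_tex_component_size and yuv_large_range standing in for the fields
and helper used in the diff:

    /* 8 and 16 bit planar YUV stays acceptable (16 bit merely loses
     * precision in the low bits); 9-15 bit formats additionally
     * require the GPU to really store >= 16 bits per component. */
    static int format_depth_ok(int depth, int max_tex_component_size,
                               int yuv_large_range)
    {
        return depth == 8 || depth == 16 ||
               (max_tex_component_size >= 16 && yuv_large_range);
    }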