From d78bde15ab4be5f46a6fb5fc5a35d6acbc6c39cf Mon Sep 17 00:00:00 2001 From: wm4 Date: Thu, 27 Dec 2012 18:07:37 +0100 Subject: vo_opengl_old: reject 9-15 bit formats if textures have less than 16 bits For 9-15 bit material, cutting off the lower bits leads to significant quality reduction, because these formats leave the most significant bits unused (e.g. 10 bit padded to 16 bit, transferred as 8 bit -> only 2 bits left). 16 bit formats still can be played like this, as cutting the lower bits merely reduces quality in this case. This problem was encountered with the following GPU/driver combination: OpenGL vendor string: Intel Open Source Technology Center OpenGL renderer string: Mesa DRI Intel(R) 915GM x86/MMX/SSE2 OpenGL version string: 1.4 Mesa 9.0.1 It appears 16 bit support is rather common on GPUs, so testing the actual texture depth wasn't needed until now. (There are some other Mesa GPU/driver combinations which support 16 bit only when using RG textures instead of LUMINANCE_ALPHA. This is due to OpenGL driver bugs.) 
--- video/out/gl_common.c | 1 + video/out/gl_common.h | 2 ++ video/out/vo_opengl_old.c | 20 +++++++++++++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) (limited to 'video') diff --git a/video/out/gl_common.c b/video/out/gl_common.c index 00e21ff312..42d035337f 100644 --- a/video/out/gl_common.c +++ b/video/out/gl_common.c @@ -364,6 +364,7 @@ struct gl_functions gl_functions[] = { DEF_FN_HARD(DrawArrays), DEF_FN_HARD(GetString), DEF_FN_HARD(GetError), + DEF_FN_HARD(GetTexLevelParameteriv), {0} }, }, diff --git a/video/out/gl_common.h b/video/out/gl_common.h index de893966df..4afc192343 100644 --- a/video/out/gl_common.h +++ b/video/out/gl_common.h @@ -318,6 +318,8 @@ struct GL { void (GLAPIENTRY *EnableClientState)(GLenum); void (GLAPIENTRY *DisableClientState)(GLenum); GLenum (GLAPIENTRY *GetError)(void); + void (GLAPIENTRY *GetTexLevelParameteriv)(GLenum, GLint, GLenum, GLint *); + void (GLAPIENTRY *GenBuffers)(GLsizei, GLuint *); void (GLAPIENTRY *DeleteBuffers)(GLsizei, const GLuint *); diff --git a/video/out/vo_opengl_old.c b/video/out/vo_opengl_old.c index a21db3a328..acbb205405 100644 --- a/video/out/vo_opengl_old.c +++ b/video/out/vo_opengl_old.c @@ -75,6 +75,7 @@ struct gl_priv { uint32_t image_format; int many_fmts; int have_texture_rg; + int max_tex_component_size; int ati_hack; int force_pbo; int use_glFinish; @@ -347,6 +348,22 @@ static void autodetectGlExtensions(struct vo *vo) } p->video_eq.capabilities = eq_caps; + { + int target = p->use_rectangle == 1 ? GL_TEXTURE_RECTANGLE : GL_TEXTURE_2D; + GLint gl_texfmt; + GLenum gl_format, gl_type; + glFindFormat(IMGFMT_420P16, p->have_texture_rg, NULL, &gl_texfmt, + &gl_format, &gl_type); + glCreateClearTex(gl, target, gl_texfmt, gl_format, gl_type, + GL_LINEAR, 64, 64, 0); + int tex_size_token = p->have_texture_rg ? 
GL_TEXTURE_RED_SIZE + : GL_TEXTURE_INTENSITY_SIZE; + GLint size = 8; + gl->GetTexLevelParameteriv(target, 0, tex_size_token, &size); + mp_msg(MSGT_VO, MSGL_V, "[gl] 16 bit texture depth: %d.\n", size); + p->max_tex_component_size = size; + } + if (is_ati && (p->lscale == 1 || p->lscale == 2 || p->cscale == 1 || p->cscale == 2)) mp_msg(MSGT_VO, MSGL_WARN, "[gl] Selected scaling mode may be broken on" " ATI cards.\n" @@ -838,7 +855,8 @@ static int query_format(struct vo *vo, uint32_t format) if (format == IMGFMT_RGB24 || format == IMGFMT_RGBA) return caps; if (p->use_yuv && mp_get_chroma_shift(format, NULL, NULL, &depth) && - (depth == 8 || depth == 16 || glYUVLargeRange(p->use_yuv)) && + (depth == 8 || depth == 16 || + p->max_tex_component_size >= 16 && glYUVLargeRange(p->use_yuv)) && (IMGFMT_IS_YUVP16_NE(format) || !IMGFMT_IS_YUVP16(format))) return caps; // HACK, otherwise we get only b&w with some filters (e.g. -vf eq) -- cgit v1.2.3