From c5094206ce6ff1a557540ed6e0d8505bc6db0031 Mon Sep 17 00:00:00 2001 From: stepshal Date: Sun, 26 Jun 2016 01:07:38 +0700 Subject: Fix misspellings --- video/out/opengl/video.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index f46fdc1c9f..4387208ead 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2564,7 +2564,7 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t, if (t->still) gl_video_reset_surfaces(p); - // First of all, figure out if we have a frame availble at all, and draw + // First of all, figure out if we have a frame available at all, and draw // it manually + reset the queue if not if (p->surfaces[p->surface_now].pts == MP_NOPTS_VALUE) { if (!gl_video_upload_image(p, t->current)) -- cgit v1.2.3 From 9278ce98f7198e61116043fe69f885bfca54ec1c Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 26 Jun 2016 19:04:36 +0200 Subject: vo_opengl: implement ARIB STD-B68 (HLG) HDR TRC This HDR function is unique in that it's still display-referred, it just allows for values above the reference peak (super-highlights). The official standard doesn't actually document this very well, but the nominal peak turns out to be exactly 12.0 - so we normalize to this value internally in mpv. (This lets us preserve the property that the textures are encoded in the range [0,1], preventing clipping and making the best use of an integer texture's range) This was grouped together with SMPTE ST2084 when checking libavutil compatibility since they were added in the same release window, in a similar timeframe. --- video/out/opengl/video.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 4387208ead..6fd92ddb29 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2172,12 +2172,15 @@ static void pass_colormanage(struct gl_video *p, float peak_src, enum mp_csp_prim prim_orig = p->image_params.primaries; enum mp_csp_trc trc_orig = p->image_params.gamma; - // One exception: SMPTE ST.2084 is not implemented by LittleCMS - // for technical limitation reasons, so we use a gamma 2.2 input curve - // here instead. We could pick any value we want here, the difference - // is just coding efficiency. - if (trc_orig == MP_CSP_TRC_SMPTE_ST2084) + // One exception: HDR is not implemented by LittleCMS for technical + // limitation reasons, so we use a gamma 2.2 input curve here instead. + // We could pick any value we want here, the difference is just coding + // efficiency. 
+ if (trc_orig == MP_CSP_TRC_SMPTE_ST2084 || + trc_orig == MP_CSP_TRC_ARIB_STD_B67) + { trc_orig = MP_CSP_TRC_GAMMA22; + } if (gl_video_get_lut3d(p, prim_orig, trc_orig)) { prim_dst = prim_orig; @@ -2216,6 +2219,11 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // If the source has no information known, it's display-referred // (and should be treated relative to the specified desired peak_dst) peak_src = peak_dst; + + // Exception: ARIB STD-B67's nominal peak is exactly 12 times the + // target's reference peak + if (trc_src == MP_CSP_TRC_ARIB_STD_B67) + peak_src = 12 * peak_dst; } // All operations from here on require linear light as a starting point, -- cgit v1.2.3 From f3b6966d14e8cb34477474b85c83beb46e542e70 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 26 Jun 2016 19:28:06 +0200 Subject: vo_opengl: implement the Panasonic V-Log function User request and not that hard. Closes #3157. Note that FFmpeg doesn't support this and there's no signalling in HEVC etc., so the only way users can access it is by using vf_format manually. Mind: This encoding uses full range values, not TV range. --- video/out/opengl/video.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 6fd92ddb29..a870fe0e4f 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2177,7 +2177,8 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // We could pick any value we want here, the difference is just coding // efficiency. if (trc_orig == MP_CSP_TRC_SMPTE_ST2084 || - trc_orig == MP_CSP_TRC_ARIB_STD_B67) + trc_orig == MP_CSP_TRC_ARIB_STD_B67 || + trc_orig == MP_CSP_TRC_V_LOG) { trc_orig = MP_CSP_TRC_GAMMA22; } @@ -2224,6 +2225,10 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // target's reference peak if (trc_src == MP_CSP_TRC_ARIB_STD_B67) peak_src = 12 * peak_dst; + + // Similar deal for V-Log + if (trc_src == MP_CSP_TRC_V_LOG) + peak_src = 46.0855 * peak_dst; } // All operations from here on require linear light as a starting point, -- cgit v1.2.3 From f9fe5d06ad8ba5f8aa6068dd3c6a1a9fc6332707 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Tue, 28 Jun 2016 01:18:55 +0200 Subject: vo_opengl: use image_params instead of *_src for autoconfig I'm not even sure why we ever consulted *_src to begin with, since that just describes the current image format - and not the original metadata. (And in fact, we specifically had logic to work around the impliciations this had on linear scaling) image_params is *the* authoritative source on the intended (i.e. reference) image metadata, whereas *_src may be changed by previous passes already. So only consult image_params for picking auto-generated values. Also, add some more missing "wide gamut" and "non-gamma" curves to the autoconfig blacklist. (Maybe it would make sense to move this list to csputils in the future? 
Or perhaps even auto-detect it based on the associated primaries) --- video/out/opengl/video.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index a870fe0e4f..89030a8952 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2194,26 +2194,29 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // Some exceptions apply to source spaces that even hardcore technoluddites // would probably not enjoy viewing unaltered if (prim_dst == MP_CSP_PRIM_AUTO) { - prim_dst = prim_src; + prim_dst = p->image_params.primaries; // Avoid outputting very wide gamut content automatically, since the // majority target audience has standard gamut displays - if (prim_dst == MP_CSP_PRIM_BT_2020 || prim_dst == MP_CSP_PRIM_PRO_PHOTO) + if (prim_dst == MP_CSP_PRIM_BT_2020 || + prim_dst == MP_CSP_PRIM_PRO_PHOTO || + prim_dst == MP_CSP_PRIM_V_GAMUT) + { prim_dst = MP_CSP_PRIM_BT_709; + } } if (trc_dst == MP_CSP_TRC_AUTO) { - trc_dst = trc_src; - // Avoid outputting linear light at all costs. First try - // falling back to the image gamma (e.g. in the case that the input - // was linear light due to linear-scaling) - if (trc_dst == MP_CSP_TRC_LINEAR) - trc_dst = p->image_params.gamma; - - // Failing that, pick gamma 2.2 as a reasonable default. This is also - // picked as a default for outputting HDR content - if (trc_dst == MP_CSP_TRC_LINEAR || trc_dst == MP_CSP_TRC_SMPTE_ST2084) + trc_dst = p->image_params.gamma; + + // Avoid outputting linear light or HDR content "by default" + if (trc_dst == MP_CSP_TRC_LINEAR || + trc_dst == MP_CSP_TRC_SMPTE_ST2084 || + trc_dst == MP_CSP_TRC_ARIB_STD_B67 || + trc_dst == MP_CSP_TRC_V_LOG) + { trc_dst = MP_CSP_TRC_GAMMA22; + } } if (!peak_src) { @@ -2223,11 +2226,11 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // Exception: ARIB STD-B67's nominal peak is exactly 12 times the // target's reference peak - if (trc_src == MP_CSP_TRC_ARIB_STD_B67) + if (p->image_params.gamma == MP_CSP_TRC_ARIB_STD_B67) peak_src = 12 * peak_dst; // Similar deal for V-Log - if (trc_src == MP_CSP_TRC_V_LOG) + if (p->image_params.gamma == MP_CSP_TRC_V_LOG) peak_src = 46.0855 * peak_dst; } -- cgit v1.2.3 From 6e6c32ed6cc70f21e99c882e57944e272906c368 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Tue, 28 Jun 2016 13:55:10 +0200 Subject: vo_opengl: revise the logic for picking the default color space Too many "exceptions" these days, it's easier to just hard-code a whitelist instead of a blacklist. And besides, it only really makes sense to avoid adaptation for BT.601 specifically, since that's the one we auto-guess based on the resolution. --- video/out/opengl/video.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 89030a8952..fff41a1b91 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2189,20 +2189,19 @@ static void pass_colormanage(struct gl_video *p, float peak_src, } } - // When auto-guessing the output color params, just pick the source color - // params to preserve the authentic "look and feel" of wrong/naive players. 
- // Some exceptions apply to source spaces that even hardcore technoluddites - // would probably not enjoy viewing unaltered if (prim_dst == MP_CSP_PRIM_AUTO) { - prim_dst = p->image_params.primaries; + // The vast majority of people are on sRGB or BT.709 displays, so pick + // this as the default output color space. + prim_dst = MP_CSP_PRIM_BT_709; - // Avoid outputting very wide gamut content automatically, since the - // majority target audience has standard gamut displays - if (prim_dst == MP_CSP_PRIM_BT_2020 || - prim_dst == MP_CSP_PRIM_PRO_PHOTO || - prim_dst == MP_CSP_PRIM_V_GAMUT) + if (p->image_params.primaries == MP_CSP_PRIM_BT_601_525 || + p->image_params.primaries == MP_CSP_PRIM_BT_601_625) { - prim_dst = MP_CSP_PRIM_BT_709; + // Since we auto-pick BT.601 and BT.709 based on the dimensions, + // combined with the fact that they're very similar to begin with, + // and to avoid confusing the average user, just don't adapt BT.601 + // content automatically at all. + prim_dst = p->image_params.primaries; } } -- cgit v1.2.3 From dc9a5cbfd7c30d4f0597ec0aad91dadf63defbba Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Tue, 28 Jun 2016 14:28:32 +0200 Subject: vo_opengl: revise the transfer curve logic Instead of hard-coding a big list, move some of the functionality to csputils. Affects both the auto-guess blacklist and the peak estimation. Also update the comments. --- video/out/opengl/video.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index fff41a1b91..519857f7f9 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2206,31 +2206,24 @@ static void pass_colormanage(struct gl_video *p, float peak_src, } if (trc_dst == MP_CSP_TRC_AUTO) { + // Most people seem to complain when the image is darker or brighter + // than what they're "used to", so just avoid changing the gamma + // altogether by default. The only exceptions to this rule apply to + // very unusual TRCs, which even hardcode technoluddites would probably + // not enjoy viewing unaltered. trc_dst = p->image_params.gamma; - // Avoid outputting linear light or HDR content "by default" - if (trc_dst == MP_CSP_TRC_LINEAR || - trc_dst == MP_CSP_TRC_SMPTE_ST2084 || - trc_dst == MP_CSP_TRC_ARIB_STD_B67 || - trc_dst == MP_CSP_TRC_V_LOG) - { + // Avoid outputting linear light or HDR content "by default". 
For these + // just pick gamma 2.2 as a default, since it's a good estimate for + // the response of typical displays + if (trc_dst == MP_CSP_TRC_LINEAR || mp_trc_is_hdr(trc_dst)) trc_dst = MP_CSP_TRC_GAMMA22; - } } if (!peak_src) { // If the source has no information known, it's display-referred // (and should be treated relative to the specified desired peak_dst) - peak_src = peak_dst; - - // Exception: ARIB STD-B67's nominal peak is exactly 12 times the - // target's reference peak - if (p->image_params.gamma == MP_CSP_TRC_ARIB_STD_B67) - peak_src = 12 * peak_dst; - - // Similar deal for V-Log - if (p->image_params.gamma == MP_CSP_TRC_V_LOG) - peak_src = 46.0855 * peak_dst; + peak_src = peak_dst * mp_csp_trc_rel_peak(p->image_params.gamma); } // All operations from here on require linear light as a starting point, -- cgit v1.2.3 From 40c9b380232acdb159952bd8926851bb50ef78ea Mon Sep 17 00:00:00 2001 From: wm4 Date: Tue, 28 Jun 2016 19:41:40 +0200 Subject: vo_opengl: minor typo and coding style fixes --- video/out/opengl/video.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 519857f7f9..a704a261df 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2585,7 +2585,7 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t, } // Find the right frame for this instant - if (t->current&& t->current->pts != MP_NOPTS_VALUE) { + if (t->current && t->current->pts != MP_NOPTS_VALUE) { int next = fbosurface_wrap(p->surface_now + 1); while (p->surfaces[next].pts != MP_NOPTS_VALUE && p->surfaces[next].pts > p->surfaces[p->surface_now].pts && @@ -2623,7 +2623,7 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t, // this should be done before the step where we find the right frame, but // it only barely matters at the very beginning of playback, and this way // makes the code much more linear. - int surface_dst = fbosurface_wrap(p->surface_idx+1); + int surface_dst = fbosurface_wrap(p->surface_idx + 1); for (int i = 0; i < t->num_frames; i++) { // Avoid overwriting data we might still need if (surface_dst == surface_bse - 1) @@ -2642,7 +2642,7 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t, vp_w, vp_h, FBOTEX_FUZZY); p->surfaces[surface_dst].pts = f->pts; p->surface_idx = surface_dst; - surface_dst = fbosurface_wrap(surface_dst+1); + surface_dst = fbosurface_wrap(surface_dst + 1); } } @@ -2653,7 +2653,7 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t, // end of playback or start of playback. 
bool valid = true; for (int i = surface_bse, ii; valid && i != surface_end; i = ii) { - ii = fbosurface_wrap(i+1); + ii = fbosurface_wrap(i + 1); if (p->surfaces[i].pts == MP_NOPTS_VALUE || p->surfaces[ii].pts == MP_NOPTS_VALUE) { @@ -2781,7 +2781,7 @@ void gl_video_render_frame(struct gl_video *p, struct vo_frame *frame, int fbo) goto done; pass_render_frame(p); - // For the non-interplation case, we draw to a single "cache" + // For the non-interpolation case, we draw to a single "cache" // FBO to speed up subsequent re-draws (if any exist) int dest_fbo = fbo; if (frame->num_vsyncs > 1 && frame->display_synced && -- cgit v1.2.3 From d5615102d5bce7c506279a2578a34da25f91301f Mon Sep 17 00:00:00 2001 From: Muhammad Faiz Date: Fri, 24 Jun 2016 00:49:10 +0700 Subject: vo_opengl: add output_size uniform to custom shader logically, scaler should know its input and output size Signed-off-by: wm4 --- video/out/opengl/video.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index a704a261df..592dfa6e56 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -1239,6 +1239,9 @@ static void load_shader(struct gl_video *p, struct bstr body) gl_sc_uniform_f(p->sc, "frame", p->frames_uploaded); gl_sc_uniform_vec2(p->sc, "image_size", (GLfloat[]){p->image_params.w, p->image_params.h}); + gl_sc_uniform_vec2(p->sc, "target_size", + (GLfloat[]){p->dst_rect.x1 - p->dst_rect.x0, + p->dst_rect.y1 - p->dst_rect.y0}); } static const char *get_custom_shader_fn(struct gl_video *p, const char *body) -- cgit v1.2.3 From 1b71cfccba06a11c58be7be075701b5c79e95331 Mon Sep 17 00:00:00 2001 From: wm4 Date: Fri, 1 Jul 2016 19:47:31 +0200 Subject: vo_opengl: remove OSD bitmap packing It's packed in the OSD common layer already. --- video/out/opengl/video.c | 1 - 1 file changed, 1 deletion(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 592dfa6e56..d8343698ca 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -41,7 +41,6 @@ #include "user_shaders.h" #include "video/out/filter_kernels.h" #include "video/out/aspect.h" -#include "video/out/bitmap_packer.h" #include "video/out/dither.h" #include "video/out/vo.h" -- cgit v1.2.3 From 823c353faaab569264f6d6d8c3d335bb0173b9e1 Mon Sep 17 00:00:00 2001 From: wm4 Date: Sun, 3 Jul 2016 16:00:51 +0200 Subject: vo_opengl: move PBO upload handling to shared code This introduces a gl_pbo_upload_tex() function, which works almost like our gl_upload_tex() glTexSubImage2D() wrapper, except it takes a struct which caches the PBO handles. It also takes the full texture size (to make allocating an ideal buffer size easier), and a parameter to disable PBOs (so that the caller doesn't have to duplicate the gl_upload_tex() call if PBOs are disabled or unavailable). This also removes warnings and fallbacks on PBO failure. We just silently try using PBOs on every frame, and if that fails at some point, revert to normal texture uploads. Probably doesn't matter. 
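For illustration, here is a minimal sketch of the orphaned-PBO upload pattern that a helper along these lines wraps, with a silent fallback to a plain glTexSubImage2D() when mapping fails. This is an assumption-level sketch against raw desktop GL (3.0+ entry points assumed to be resolved by a loader), not mpv's actual gl_pbo_upload_tex() body; the cache struct and function names are invented for the example, and the caller is assumed to have already bound the destination texture.

    #include <string.h>
    #include <stdbool.h>
    #include <GL/gl.h>   /* assumes a loader has resolved GL 3.0 functions */

    struct pbo_cache {
        GLuint buffer;   /* cached PBO handle, 0 until first use */
    };

    static void upload_tex_sketch(struct pbo_cache *c, bool use_pbo,
                                  GLenum target, GLenum format, GLenum type,
                                  int w, int h, const void *pixels, size_t stride)
    {
        size_t size = (size_t)h * stride;   /* assumes tightly packed rows */

        if (use_pbo) {
            if (!c->buffer)
                glGenBuffers(1, &c->buffer);
            glBindBuffer(GL_PIXEL_UNPACK_BUFFER, c->buffer);
            /* Orphan the old storage so an upload still in flight can't stall us. */
            glBufferData(GL_PIXEL_UNPACK_BUFFER, size, NULL, GL_STREAM_DRAW);
            void *dst = glMapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, size,
                                         GL_MAP_WRITE_BIT |
                                         GL_MAP_INVALIDATE_BUFFER_BIT);
            if (dst) {
                memcpy(dst, pixels, size);
                glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
                /* With a PBO bound, the final pointer argument is an offset into it. */
                glTexSubImage2D(target, 0, 0, 0, w, h, format, type, (const void *)0);
                glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
                return;
            }
            /* Mapping failed: quietly fall through to the non-PBO path. */
            glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
        }

        glTexSubImage2D(target, 0, 0, 0, w, h, format, type, pixels);
    }
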
--- video/out/opengl/video.c | 79 +++++------------------------------------------- 1 file changed, 8 insertions(+), 71 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index d8343698ca..271725aaeb 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -96,8 +96,8 @@ struct texplane { GLenum gl_format; GLenum gl_type; GLuint gl_texture; - int gl_buffer; char swizzle[5]; + struct gl_pbo_upload pbo; }; struct video_image { @@ -878,7 +878,7 @@ static void uninit_video(struct gl_video *p) struct texplane *plane = &vimg->planes[n]; gl->DeleteTextures(1, &plane->gl_texture); - gl->DeleteBuffers(1, &plane->gl_buffer); + gl_pbo_upload_uninit(&plane->pbo); } *vimg = (struct video_image){0}; @@ -2890,54 +2890,6 @@ struct voctrl_performance_data gl_video_perfdata(struct gl_video *p) }; } -static bool unmap_image(struct gl_video *p, struct mp_image *mpi) -{ - GL *gl = p->gl; - bool ok = true; - struct video_image *vimg = &p->image; - for (int n = 0; n < p->plane_count; n++) { - struct texplane *plane = &vimg->planes[n]; - gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, plane->gl_buffer); - ok = gl->UnmapBuffer(GL_PIXEL_UNPACK_BUFFER) && ok; - mpi->planes[n] = NULL; // PBO offset 0 - } - gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); - return ok; -} - -static bool map_image(struct gl_video *p, struct mp_image *mpi) -{ - GL *gl = p->gl; - - if (!p->opts.pbo) - return false; - - struct video_image *vimg = &p->image; - - for (int n = 0; n < p->plane_count; n++) { - struct texplane *plane = &vimg->planes[n]; - mpi->stride[n] = mp_image_plane_w(mpi, n) * p->image_desc.bytes[n]; - size_t buffer_size = mp_image_plane_h(mpi, n) * mpi->stride[n]; - if (!plane->gl_buffer) { - gl->GenBuffers(1, &plane->gl_buffer); - gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, plane->gl_buffer); - gl->BufferData(GL_PIXEL_UNPACK_BUFFER, buffer_size, - NULL, GL_DYNAMIC_DRAW); - } - gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, plane->gl_buffer); - mpi->planes[n] = gl->MapBufferRange(GL_PIXEL_UNPACK_BUFFER, 0, - buffer_size, GL_MAP_WRITE_BIT | - GL_MAP_INVALIDATE_BUFFER_BIT); - gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); - if (!mpi->planes[n]) { - unmap_image(p, mpi); - return false; - } - } - memset(mpi->bufs, 0, sizeof(mpi->bufs)); - return true; -} - // This assumes nv12, with textures set to GL_NEAREST filtering. static void reinterleave_vdpau(struct gl_video *p, struct gl_hwdec_frame *frame) { @@ -3034,32 +2986,17 @@ static bool gl_video_upload_image(struct gl_video *p, struct mp_image *mpi) gl_timer_start(p->upload_timer); - mp_image_t pbo_mpi = *mpi; - bool pbo = map_image(p, &pbo_mpi); - if (pbo) { - mp_image_copy(&pbo_mpi, mpi); - if (unmap_image(p, &pbo_mpi)) { - mpi = &pbo_mpi; - } else { - MP_FATAL(p, "Video PBO upload failed. 
Disabling PBOs.\n"); - pbo = false; - p->opts.pbo = 0; - } - } - vimg->image_flipped = mpi->stride[0] < 0; for (int n = 0; n < p->plane_count; n++) { struct texplane *plane = &vimg->planes[n]; - if (pbo) - gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, plane->gl_buffer); - gl->ActiveTexture(GL_TEXTURE0 + n); + gl->BindTexture(plane->gl_target, plane->gl_texture); - gl_upload_tex(gl, plane->gl_target, plane->gl_format, plane->gl_type, - mpi->planes[n], mpi->stride[n], 0, 0, plane->w, plane->h); + gl_pbo_upload_tex(&plane->pbo, gl, p->opts.pbo, plane->gl_target, + plane->gl_format, plane->gl_type, plane->w, plane->h, + mpi->planes[n], mpi->stride[n], + 0, 0, plane->w, plane->h); + gl->BindTexture(plane->gl_target, 0); } - gl->ActiveTexture(GL_TEXTURE0); - if (pbo) - gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); gl_timer_stop(p->upload_timer); -- cgit v1.2.3 From 8c7f9dc1a89e90e44b2b7dfb13fa899ad52352ee Mon Sep 17 00:00:00 2001 From: wm4 Date: Sun, 3 Jul 2016 16:09:26 +0200 Subject: vo_opengl: support inconsistent negative strides per plane GL generally does not support flipping the image on upload, meaning negative strides are not supported. vo_opengl handles this by flipping rendering if the stride is inverted, and gl_pbo_upload() "ignores" negative strides by uploading without flipping the image. If individual planes had strides with different signs, this broke. The flipping affected the entire image, and only the sign of the first plane was respected. This is just a crazy corner case that will never happen, but it turns out this is quite simple to support, and actually improves the code somewhat. --- video/out/opengl/video.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 271725aaeb..bd1eb893af 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -97,12 +97,12 @@ struct texplane { GLenum gl_type; GLuint gl_texture; char swizzle[5]; + bool flipped; struct gl_pbo_upload pbo; }; struct video_image { struct texplane planes[4]; - bool image_flipped; struct mp_image *mpi; // original input image bool hwdec_mapped; }; @@ -675,7 +675,7 @@ static int pass_bind(struct gl_video *p, struct img_tex tex) } // Rotation by 90° and flipping. -static void get_plane_source_transform(struct gl_video *p, int w, int h, +static void get_plane_source_transform(struct gl_video *p, struct texplane *t, struct gl_transform *out_tr) { struct gl_transform tr = identity_trans; @@ -688,11 +688,11 @@ static void get_plane_source_transform(struct gl_video *p, int w, int h, // basically, recenter to keep the whole image in view float b[2] = {1, 1}; gl_transform_vec(rot, &b[0], &b[1]); - tr.t[0] += b[0] < 0 ? w : 0; - tr.t[1] += b[1] < 0 ? h : 0; + tr.t[0] += b[0] < 0 ? t->w : 0; + tr.t[1] += b[1] < 0 ? 
t->h : 0; - if (p->image.image_flipped) { - struct gl_transform flip = {{{1, 0}, {0, -1}}, {0, h}}; + if (t->flipped) { + struct gl_transform flip = {{{1, 0}, {0, -1}}, {0, t->h}}; gl_transform_trans(flip, &tr); } @@ -763,7 +763,7 @@ static void pass_get_img_tex(struct gl_video *p, struct video_image *vimg, .components = p->image_desc.components[n], }; snprintf(tex[n].swizzle, sizeof(tex[n].swizzle), "%s", t->swizzle); - get_plane_source_transform(p, t->w, t->h, &tex[n].transform); + get_plane_source_transform(p, t, &tex[n].transform); if (p->image_params.rotate % 180 == 90) MPSWAP(int, tex[n].w, tex[n].h); @@ -2986,10 +2986,12 @@ static bool gl_video_upload_image(struct gl_video *p, struct mp_image *mpi) gl_timer_start(p->upload_timer); - vimg->image_flipped = mpi->stride[0] < 0; + for (int n = 0; n < p->plane_count; n++) { struct texplane *plane = &vimg->planes[n]; + plane->flipped = mpi->stride[0] < 0; + gl->BindTexture(plane->gl_target, plane->gl_texture); gl_pbo_upload_tex(&plane->pbo, gl, p->opts.pbo, plane->gl_target, plane->gl_format, plane->gl_type, plane->w, plane->h, -- cgit v1.2.3 From 3abf9c9204e2fcbc1910deb102efab4ab9d8c149 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Wed, 8 Jun 2016 17:32:40 +0200 Subject: vo_opengl: don't constantly resize the output FBO Commit 883d3114 seems to have (accidentally?) dropped the FBOTEX_FUZZY from the output_fbo resize, which means that current master will keep resizing and resizing the FBO as you change the window size, introducing severe memory leaking after a while. (Not sure why that would cause memory leaks, but I blame nvidia) Either way, it's bad for performance too, so it's worth fixing. --- video/out/opengl/video.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index bd1eb893af..6c7646bef4 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2791,7 +2791,7 @@ void gl_video_render_frame(struct gl_video *p, struct vo_frame *frame, int fbo) { fbotex_change(&p->output_fbo, p->gl, p->log, p->vp_w, abs(p->vp_h), - p->opts.fbo_format, 0); + p->opts.fbo_format, FBOTEX_FUZZY); dest_fbo = p->output_fbo.fbo; p->output_fbo_valid = true; } -- cgit v1.2.3 From d81fb97f4587f73f62a760b99f686139f9b8d966 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Wed, 29 Jun 2016 09:16:13 +0200 Subject: mp_image: split colorimetry metadata into its own struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This has two reasons: 1. I tend to add new fields to this metadata, and every time I've done so I've consistently forgotten to update all of the dozens of places in which this colorimetry metadata might end up getting used. While most usages don't really care about most of the metadata, sometimes the intend was simply to “copy” the colorimetry metadata from one struct to another. With this being inside a substruct, those lines of code can now simply read a.color = b.color without having to care about added or removed fields. 2. It makes the type definitions nicer for upcoming refactors. In going through all of the usages, I also expanded a few where I felt that omitting the “young” fields was a bug. 
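A tiny sketch of the shape of this refactor, with an abridged, assumed member list (the real struct lives in csputils.h and carries more fields): once the colorimetry sits in one nested struct, forwarding it is a single assignment and cannot silently drop a newly added field.

    struct mp_colorspace_sketch {        /* abridged stand-in for struct mp_colorspace */
        int space;                       /* MP_CSP_* */
        int primaries;                   /* MP_CSP_PRIM_* */
        int gamma;                       /* MP_CSP_TRC_* */
        float peak;                      /* reference peak, if known */
    };

    struct image_params_sketch {
        int w, h;
        struct mp_colorspace_sketch color;  /* was: loose colorspace/primaries/gamma/peak fields */
    };

    /* Propagating metadata between images is now one line: */
    static void copy_color(struct image_params_sketch *dst,
                           const struct image_params_sketch *src)
    {
        dst->color = src->color;         /* picks up future fields automatically */
    }
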
--- video/out/opengl/video.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 6c7646bef4..59dd64cb65 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -729,7 +729,7 @@ static void pass_get_img_tex(struct gl_video *p, struct video_image *vimg, // The existing code assumes we just have a single tex multiplier for // all of the planes. This may change in the future - float tex_mul = 1.0 / mp_get_csp_mul(p->image_params.colorspace, + float tex_mul = 1.0 / mp_get_csp_mul(p->image_params.color.space, p->image_desc.component_bits, p->image_desc.component_full_bits); @@ -793,7 +793,7 @@ static void init_video(struct gl_video *p) mp_image_params_guess_csp(&p->image_params); int eq_caps = MP_CSP_EQ_CAPS_GAMMA; - if (p->image_params.colorspace != MP_CSP_BT_2020_C) + if (p->image_params.color.space != MP_CSP_BT_2020_C) eq_caps |= MP_CSP_EQ_CAPS_COLORMATRIX; if (p->image_desc.flags & MP_IMGFLAG_XYZ) eq_caps |= MP_CSP_EQ_CAPS_BRIGHTNESS; @@ -1985,7 +1985,7 @@ static void pass_convert_yuv(struct gl_video *p) GLSLF("color = color.%s;\n", p->color_swizzle); // Pre-colormatrix input gamma correction - if (cparams.colorspace == MP_CSP_XYZ) + if (cparams.color.space == MP_CSP_XYZ) GLSL(color.rgb = pow(color.rgb, vec3(2.6));) // linear light // We always explicitly normalize the range in pass_read_video @@ -2000,7 +2000,7 @@ static void pass_convert_yuv(struct gl_video *p) GLSL(color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;) - if (p->image_params.colorspace == MP_CSP_BT_2020_C) { + if (p->image_params.color.space == MP_CSP_BT_2020_C) { // Conversion for C'rcY'cC'bc via the BT.2020 CL system: // C'bc = (B'-Y'c) / 1.9404 | C'bc <= 0 // = (B'-Y'c) / 1.5816 | C'bc > 0 @@ -2111,7 +2111,7 @@ static void pass_scale_main(struct gl_video *p) // Pre-conversion, like linear light/sigmoidization GLSLF("// scaler pre-conversion\n"); if (p->use_linear) { - pass_linearize(p->sc, p->image_params.gamma); + pass_linearize(p->sc, p->image_params.color.gamma); pass_opt_hook_point(p, "LINEAR", NULL); } @@ -2171,8 +2171,8 @@ static void pass_colormanage(struct gl_video *p, float peak_src, if (p->use_lut_3d) { // The 3DLUT is always generated against the original source space - enum mp_csp_prim prim_orig = p->image_params.primaries; - enum mp_csp_trc trc_orig = p->image_params.gamma; + enum mp_csp_prim prim_orig = p->image_params.color.primaries; + enum mp_csp_trc trc_orig = p->image_params.color.gamma; // One exception: HDR is not implemented by LittleCMS for technical // limitation reasons, so we use a gamma 2.2 input curve here instead. @@ -2196,14 +2196,14 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // this as the default output color space. prim_dst = MP_CSP_PRIM_BT_709; - if (p->image_params.primaries == MP_CSP_PRIM_BT_601_525 || - p->image_params.primaries == MP_CSP_PRIM_BT_601_625) + if (p->image_params.color.primaries == MP_CSP_PRIM_BT_601_525 || + p->image_params.color.primaries == MP_CSP_PRIM_BT_601_625) { // Since we auto-pick BT.601 and BT.709 based on the dimensions, // combined with the fact that they're very similar to begin with, // and to avoid confusing the average user, just don't adapt BT.601 // content automatically at all. 
- prim_dst = p->image_params.primaries; + prim_dst = p->image_params.color.primaries; } } @@ -2213,7 +2213,7 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // altogether by default. The only exceptions to this rule apply to // very unusual TRCs, which even hardcode technoluddites would probably // not enjoy viewing unaltered. - trc_dst = p->image_params.gamma; + trc_dst = p->image_params.color.gamma; // Avoid outputting linear light or HDR content "by default". For these // just pick gamma 2.2 as a default, since it's a good estimate for @@ -2225,7 +2225,7 @@ static void pass_colormanage(struct gl_video *p, float peak_src, if (!peak_src) { // If the source has no information known, it's display-referred // (and should be treated relative to the specified desired peak_dst) - peak_src = peak_dst * mp_csp_trc_rel_peak(p->image_params.gamma); + peak_src = peak_dst * mp_csp_trc_rel_peak(p->image_params.color.gamma); } // All operations from here on require linear light as a starting point, @@ -2513,7 +2513,7 @@ static void pass_render_frame(struct gl_video *p) rect.mt *= scale[1]; rect.mb *= scale[1]; // We should always blend subtitles in non-linear light if (p->use_linear) { - pass_delinearize(p->sc, p->image_params.gamma); + pass_delinearize(p->sc, p->image_params.color.gamma); p->use_linear = false; } finish_pass_fbo(p, &p->blend_subs_fbo, p->texture_w, p->texture_h, @@ -2542,8 +2542,8 @@ static void pass_draw_to_screen(struct gl_video *p, int fbo) GLSL(color.rgb = pow(color.rgb, vec3(user_gamma));) } - pass_colormanage(p, p->image_params.peak, p->image_params.primaries, - p->use_linear ? MP_CSP_TRC_LINEAR : p->image_params.gamma); + pass_colormanage(p, p->image_params.color.peak, p->image_params.color.primaries, + p->use_linear ? MP_CSP_TRC_LINEAR : p->image_params.color.gamma); // Draw checkerboard pattern to indicate transparency if (p->has_alpha && p->opts.alpha_mode == ALPHA_BLEND_TILES) { -- cgit v1.2.3 From 923e3c7b20f0a238062b0ac538a751c6c363a8cb Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Wed, 29 Jun 2016 09:28:17 +0200 Subject: vo_opengl: generalize HDR tone mapping mechanism This involves multiple changes: 1. Brightness metadata is split into nominal peak and signal peak. For a quick and dirty explanation: nominal peak is the brightest value that your color space can represent (i.e. the brightness of an encoded 1.0), and signal peak is the brightest value that actually occurs in the video (i.e. the brightest thing that's displayed). 2. vo_opengl uses a new decision logic to figure out the right nom_peak and sig_peak for all situations. It also does a better job of picking the right target gamut/colorspace to use for the OSD. (Which still is and still should be treated as sRGB). This change in logic also fixes #3293 en passant. 3. Since it was growing rapidly, the logic for auto-guessing / inferring the right colorimetry configuration (in pass_colormanage) was split from the logic for actually performing the adaptation (now pass_color_map). Right now, the new logic doesn't do a whole lot since HDR metadata is still ignored (but not for long). 
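As a rough numeric illustration of the nominal-peak/signal-peak split (an assumption-level CPU-side sketch using extended Reinhard as a stand-in operator, not mpv's pass_color_map GLSL or its chosen curve): 1.0 in the decoded signal corresponds to the source's nominal peak, so an encoded HLG 1.0 lands 12x above the reference level of a display-referred target per the earlier commit, while the signal peak bounds how much of that headroom tone mapping actually has to compress.

    #include <math.h>

    /* v: linear-light sample normalized so 1.0 == the source's nominal peak.
     * nom_src/sig_src/nom_dst: peaks in the same absolute unit (e.g. cd/m^2).
     * Names and the operator choice are illustrative assumptions only. */
    static float map_hdr_sample_sketch(float v, float nom_src, float sig_src,
                                       float nom_dst)
    {
        /* Rescale so that 1.0 now means the target's reference white.
         * For HLG, nom_src == 12 * reference, so an encoded 1.0 lands at 12.0. */
        v *= nom_src / nom_dst;

        /* The signal peak says how far above 1.0 the content can actually go;
         * never let it fall below the nominal peak, or valid highlights clip. */
        float peak = fmaxf(sig_src, nom_src) / nom_dst;

        if (peak > 1.0f) {
            /* Extended Reinhard: maps 'peak' to 1.0, leaves shadows mostly alone. */
            v = v * (1.0f + v / (peak * peak)) / (1.0f + v);
        }
        return fminf(v, 1.0f);
    }
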
--- video/out/opengl/video.c | 130 ++++++++++++++++++++++------------------------- 1 file changed, 61 insertions(+), 69 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 59dd64cb65..a4cc6cfac8 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2158,19 +2158,29 @@ static void pass_scale_main(struct gl_video *p) } } -// Adapts the colors from the given color space to the display device's native -// gamut. -static void pass_colormanage(struct gl_video *p, float peak_src, - enum mp_csp_prim prim_src, - enum mp_csp_trc trc_src) -{ - GLSLF("// color management\n"); - enum mp_csp_trc trc_dst = p->opts.target_trc; - enum mp_csp_prim prim_dst = p->opts.target_prim; - float peak_dst = p->opts.target_brightness; +// Adapts the colors to the right output color space. (Final pass during +// rendering) +// If OSD is true, ignore any changes that may have been made to the video +// by previous passes (i.e. linear scaling) +static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool osd) +{ + struct mp_colorspace ref = src; + + if (p->use_linear && !osd) + src.gamma = MP_CSP_TRC_LINEAR; + + // Figure out the target color space from the options, or auto-guess if + // none were set + struct mp_colorspace dst = { + .gamma = p->opts.target_trc, + .primaries = p->opts.target_prim, + .nom_peak = mp_csp_trc_nom_peak(p->opts.target_trc, p->opts.target_brightness), + }; if (p->use_lut_3d) { - // The 3DLUT is always generated against the original source space + // The 3DLUT is always generated against the video's original source + // space, *not* the reference space. (To avoid having to regenerate + // the 3DLUT for the OSD on every frame) enum mp_csp_prim prim_orig = p->image_params.color.primaries; enum mp_csp_trc trc_orig = p->image_params.color.gamma; @@ -2186,87 +2196,66 @@ static void pass_colormanage(struct gl_video *p, float peak_src, } if (gl_video_get_lut3d(p, prim_orig, trc_orig)) { - prim_dst = prim_orig; - trc_dst = trc_orig; + dst.primaries = prim_orig; + dst.gamma = trc_orig; } } - if (prim_dst == MP_CSP_PRIM_AUTO) { + if (dst.primaries == MP_CSP_PRIM_AUTO) { // The vast majority of people are on sRGB or BT.709 displays, so pick // this as the default output color space. - prim_dst = MP_CSP_PRIM_BT_709; + dst.primaries = MP_CSP_PRIM_BT_709; - if (p->image_params.color.primaries == MP_CSP_PRIM_BT_601_525 || - p->image_params.color.primaries == MP_CSP_PRIM_BT_601_625) + if (ref.primaries == MP_CSP_PRIM_BT_601_525 || + ref.primaries == MP_CSP_PRIM_BT_601_625) { // Since we auto-pick BT.601 and BT.709 based on the dimensions, // combined with the fact that they're very similar to begin with, // and to avoid confusing the average user, just don't adapt BT.601 // content automatically at all. - prim_dst = p->image_params.color.primaries; + dst.primaries = ref.gamma; } } - if (trc_dst == MP_CSP_TRC_AUTO) { + if (dst.gamma == MP_CSP_TRC_AUTO) { // Most people seem to complain when the image is darker or brighter // than what they're "used to", so just avoid changing the gamma // altogether by default. The only exceptions to this rule apply to // very unusual TRCs, which even hardcode technoluddites would probably // not enjoy viewing unaltered. - trc_dst = p->image_params.color.gamma; + dst.gamma = ref.gamma; // Avoid outputting linear light or HDR content "by default". 
For these // just pick gamma 2.2 as a default, since it's a good estimate for // the response of typical displays - if (trc_dst == MP_CSP_TRC_LINEAR || mp_trc_is_hdr(trc_dst)) - trc_dst = MP_CSP_TRC_GAMMA22; + if (dst.gamma == MP_CSP_TRC_LINEAR || mp_trc_is_hdr(dst.gamma)) + dst.gamma = MP_CSP_TRC_GAMMA22; } - if (!peak_src) { - // If the source has no information known, it's display-referred - // (and should be treated relative to the specified desired peak_dst) - peak_src = peak_dst * mp_csp_trc_rel_peak(p->image_params.color.gamma); - } - - // All operations from here on require linear light as a starting point, - // so we linearize even if trc_src == trc_dst when one of the other - // operations needs it - bool need_gamma = trc_src != trc_dst || prim_src != prim_dst || - peak_src != peak_dst; - if (need_gamma) - pass_linearize(p->sc, trc_src); - - // Adapt and tone map for a different reference peak brightness - if (peak_src != peak_dst) - { - GLSLF("// HDR tone mapping\n"); - float rel_peak = peak_src / peak_dst; - // Normalize such that 1 is the target brightness (and values above - // 1 are out of range) - GLSLF("color.rgb *= vec3(%f);\n", rel_peak); - // Tone map back down to the range [0,1] - pass_tone_map(p->sc, rel_peak, p->opts.hdr_tone_mapping, - p->opts.tone_mapping_param); + // For the src peaks, the correct brightness metadata may be present for + // sig_peak, nom_peak, both, or neither. To handle everything in a generic + // way, it's important to never automatically infer a sig_peak that is + // below the nom_peak (since we don't know what bits the image contains, + // doing so would potentially badly clip). The only time in which this + // may be the case is when the mastering metadata explicitly says so, i.e. + // the sig_peak was already set. So to simplify the logic as much as + // possible, make sure the nom_peak is present and correct first, and just + // set sig_peak = nom_peak if missing. 
+ if (!src.nom_peak) { + // For display-referred colorspaces, we treat it as relative to + // target_brightness + src.nom_peak = mp_csp_trc_nom_peak(src.gamma, p->opts.target_brightness); } - // Adapt to the right colorspace if necessary - if (prim_src != prim_dst) { - struct mp_csp_primaries csp_src = mp_get_csp_primaries(prim_src), - csp_dst = mp_get_csp_primaries(prim_dst); - float m[3][3] = {{0}}; - mp_get_cms_matrix(csp_src, csp_dst, MP_INTENT_RELATIVE_COLORIMETRIC, m); - gl_sc_uniform_mat3(p->sc, "cms_matrix", true, &m[0][0]); - GLSL(color.rgb = cms_matrix * color.rgb;) - } + if (!src.sig_peak) + src.sig_peak = src.nom_peak; - if (need_gamma) { - // If the target encoding function has a fixed peak, we need to - // un-normalize back to the encoding signal range - if (trc_dst == MP_CSP_TRC_SMPTE_ST2084) - GLSLF("color.rgb *= vec3(%f);\n", peak_dst / 10000); + MP_DBG(p, "HDR src nom: %f sig: %f, dst: %f\n", + src.nom_peak, src.sig_peak, dst.nom_peak); - pass_delinearize(p->sc, trc_dst); - } + // Adapt from src to dst as necessary + pass_color_map(p->sc, src, dst, p->opts.hdr_tone_mapping, + p->opts.tone_mapping_param); if (p->use_lut_3d) { gl_sc_uniform_sampler(p->sc, "lut_3d", GL_TEXTURE_3D, TEXUNIT_3DLUT); @@ -2407,11 +2396,15 @@ static void pass_draw_osd(struct gl_video *p, int draw_flags, double pts, default: abort(); } - // Subtitle color management, they're assumed to be display-referred - // sRGB by default + // When subtitles need to be color managed, assume they're in sRGB + // (for lack of anything saner to do) if (cms) { - pass_colormanage(p, p->opts.target_brightness, - MP_CSP_PRIM_BT_709, MP_CSP_TRC_SRGB); + static const struct mp_colorspace csp_srgb = { + .primaries = MP_CSP_PRIM_BT_709, + .gamma = MP_CSP_TRC_SRGB, + }; + + pass_colormanage(p, csp_srgb, true); } gl_sc_set_vao(p->sc, mpgl_osd_get_vao(p->osd)); gl_sc_gen_shader_and_reset(p->sc); @@ -2542,8 +2535,7 @@ static void pass_draw_to_screen(struct gl_video *p, int fbo) GLSL(color.rgb = pow(color.rgb, vec3(user_gamma));) } - pass_colormanage(p, p->image_params.color.peak, p->image_params.color.primaries, - p->use_linear ? MP_CSP_TRC_LINEAR : p->image_params.color.gamma); + pass_colormanage(p, p->image_params.color, false); // Draw checkerboard pattern to indicate transparency if (p->has_alpha && p->opts.alpha_mode == ALPHA_BLEND_TILES) { -- cgit v1.2.3 From be230d16e57d948a990d16f06d4da11cfea97701 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 3 Jul 2016 19:23:03 +0200 Subject: vo_opengl: move eval_szexpr to user_shaders.c This moves some of the bulky user-shader specific logic into the file dedicated to it. Rather than expose video.c state, variable lookup is now done via a simulated closure. --- video/out/opengl/video.c | 132 +++++++++++------------------------------------ 1 file changed, 30 insertions(+), 102 deletions(-) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index a4cc6cfac8..7ae91eed0d 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -1544,112 +1544,40 @@ static void user_hook_old(struct gl_video *p, struct img_tex tex, GLSLF("color = %s(HOOKED_raw, HOOKED_pos, HOOKED_size);\n", fn_name); } -// Returns whether successful. 
'result' is left untouched on failure -static bool eval_szexpr(struct gl_video *p, struct img_tex tex, - struct szexp expr[MAX_SZEXP_SIZE], - float *result) -{ - float stack[MAX_SZEXP_SIZE] = {0}; - int idx = 0; // points to next element to push - - for (int i = 0; i < MAX_SZEXP_SIZE; i++) { - switch (expr[i].tag) { - case SZEXP_END: - goto done; - - case SZEXP_CONST: - // Since our SZEXPs are bound by MAX_SZEXP_SIZE, it should be - // impossible to overflow the stack - assert(idx < MAX_SZEXP_SIZE); - stack[idx++] = expr[i].val.cval; - continue; - - case SZEXP_OP1: - if (idx < 1) { - MP_WARN(p, "Stack underflow in RPN expression!\n"); - return false; - } - - switch (expr[i].val.op) { - case SZEXP_OP_NOT: stack[idx-1] = !stack[idx-1]; break; - default: abort(); - } - continue; - - case SZEXP_OP2: - if (idx < 2) { - MP_WARN(p, "Stack underflow in RPN expression!\n"); - return false; - } - - // Pop the operands in reverse order - float op2 = stack[--idx]; - float op1 = stack[--idx]; - float res = 0.0; - switch (expr[i].val.op) { - case SZEXP_OP_ADD: res = op1 + op2; break; - case SZEXP_OP_SUB: res = op1 - op2; break; - case SZEXP_OP_MUL: res = op1 * op2; break; - case SZEXP_OP_DIV: res = op1 / op2; break; - case SZEXP_OP_GT: res = op1 > op2; break; - case SZEXP_OP_LT: res = op1 < op2; break; - default: abort(); - } - - if (!isfinite(res)) { - MP_WARN(p, "Illegal operation in RPN expression!\n"); - return false; - } - - stack[idx++] = res; - continue; - - case SZEXP_VAR_W: - case SZEXP_VAR_H: { - struct bstr name = expr[i].val.varname; - struct img_tex var_tex; - - // The size of OUTPUT is determined. It could be useful for certain - // user shaders to skip passes. - if (bstr_equals0(name, "OUTPUT")) { - int vp_w = p->dst_rect.x1 - p->dst_rect.x0; - int vp_h = p->dst_rect.y1 - p->dst_rect.y0; - stack[idx++] = (expr[i].tag == SZEXP_VAR_W) ? vp_w : vp_h; - continue; - } - - // HOOKED is a special case - if (bstr_equals0(name, "HOOKED")) { - var_tex = tex; - goto found_tex; - } +struct szexp_ctx { + struct gl_video *p; + struct img_tex tex; +}; - for (int o = 0; o < p->saved_tex_num; o++) { - if (bstr_equals0(name, p->saved_tex[o].name)) { - var_tex = p->saved_tex[o].tex; - goto found_tex; - } - } +static bool szexp_lookup(void *priv, struct bstr var, float size[2]) +{ + struct szexp_ctx *ctx = priv; + struct gl_video *p = ctx->p; - MP_WARN(p, "Texture %.*s not found in RPN expression!\n", BSTR_P(name)); - return false; + // The size of OUTPUT is determined. It could be useful for certain + // user shaders to skip passes. + if (bstr_equals0(var, "OUTPUT")) { + size[0] = p->dst_rect.x1 - p->dst_rect.x0; + size[1] = p->dst_rect.y1 - p->dst_rect.y0; + return true; + } -found_tex: - stack[idx++] = (expr[i].tag == SZEXP_VAR_W) ? 
var_tex.w : var_tex.h; - continue; - } - } + // HOOKED is a special case + if (bstr_equals0(var, "HOOKED")) { + size[0] = ctx->tex.w; + size[1] = ctx->tex.h; + return true; } -done: - // Return the single stack element - if (idx != 1) { - MP_WARN(p, "Malformed stack after RPN expression!\n"); - return false; + for (int o = 0; o < p->saved_tex_num; o++) { + if (bstr_equals0(var, p->saved_tex[o].name)) { + size[0] = p->saved_tex[o].tex.w; + size[1] = p->saved_tex[o].tex.h; + return true; + } } - *result = stack[0]; - return true; + return false; } static bool user_hook_cond(struct gl_video *p, struct img_tex tex, void *priv) @@ -1658,7 +1586,7 @@ static bool user_hook_cond(struct gl_video *p, struct img_tex tex, void *priv) assert(shader); float res = false; - eval_szexpr(p, tex, shader->cond, &res); + eval_szexpr(p->log, &(struct szexp_ctx){p, tex}, szexp_lookup, shader->cond, &res); return res; } @@ -1676,8 +1604,8 @@ static void user_hook(struct gl_video *p, struct img_tex tex, // to do this and display an error message than just crash OpenGL float w = 1.0, h = 1.0; - eval_szexpr(p, tex, shader->width, &w); - eval_szexpr(p, tex, shader->height, &h); + eval_szexpr(p->log, &(struct szexp_ctx){p, tex}, szexp_lookup, shader->width, &w); + eval_szexpr(p->log, &(struct szexp_ctx){p, tex}, szexp_lookup, shader->height, &h); *trans = (struct gl_transform){{{w / tex.w, 0}, {0, h / tex.h}}}; gl_transform_trans(shader->offset, trans); -- cgit v1.2.3 From f98e1b0b966d11b50df79d04dbab54ca20a08319 Mon Sep 17 00:00:00 2001 From: wm4 Date: Mon, 4 Jul 2016 13:34:31 +0200 Subject: vo_opengl: error out gracefully when trying to use FBOs without FBO API This can for example happen with vo_opengl_cb, if it is used with a GL implementation that does not supports FBOs. (mpv itself should never attempt to use FBOs if they're not available.) Without this check it would trigger an assert() in our dummy glBindFramebuffer wrapper. Suspected cause of #3308, although it's still unlikely. --- video/out/opengl/video.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'video/out/opengl/video.c') diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 7ae91eed0d..468bee90b5 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2667,6 +2667,11 @@ void gl_video_render_frame(struct gl_video *p, struct vo_frame *frame, int fbo) GL *gl = p->gl; struct video_image *vimg = &p->image; + if (fbo && !(gl->mpgl_caps & MPGL_CAP_FB)) { + MP_FATAL(p, "Rendering to FBO requested, but no FBO extension found!\n"); + return; + } + p->broken_frame = false; gl->BindFramebuffer(GL_FRAMEBUFFER, fbo); -- cgit v1.2.3
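As a closing illustration of the "simulated closure" used for the user-shader size expressions above (names, types, and signatures here are simplified assumptions, not the actual user_shaders.c interface): C has no closures, so the caller packs whatever state the lookup needs into a context struct and hands it to the evaluator as an opaque void * next to a function pointer.

    #include <stdbool.h>
    #include <string.h>

    struct lookup_ctx {
        int hooked_w, hooked_h;          /* state the callback needs */
    };

    typedef bool (*size_lookup_fn)(void *priv, const char *var, float size[2]);

    static bool my_lookup(void *priv, const char *var, float size[2])
    {
        struct lookup_ctx *ctx = priv;   /* unpack the simulated closure */
        if (strcmp(var, "HOOKED") == 0) {
            size[0] = ctx->hooked_w;
            size[1] = ctx->hooked_h;
            return true;
        }
        return false;                    /* unknown variable */
    }

    /* The evaluator only sees the callback plus the opaque pointer, never
     * any renderer state directly. */
    static bool eval_with_lookup(size_lookup_fn lookup, void *priv)
    {
        float size[2];
        return lookup(priv, "HOOKED", size) && size[0] > 0;
    }

    /* Call site, mirroring the shape of
     * eval_szexpr(p->log, &(struct szexp_ctx){p, tex}, szexp_lookup, ...):
     *
     *     eval_with_lookup(my_lookup, &(struct lookup_ctx){1920, 1080});
     */
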