From 05f0980b9610c3d0f75f8004578ae61d3e3145e4 Mon Sep 17 00:00:00 2001
From: Kotori Itsuka
Date: Thu, 17 Jan 2019 20:20:37 +1000
Subject: vo_gpu: allow resetting target-peak to the trc default

Add "auto" to the possible values of target-peak. The default value for
target_peak is to calculate the target using mp_trc_nom_peak.
Unfortunately, this default was outside the acceptable range of 10-10000
nits, which prevented its later reassignment. So add an "auto" choice to
target-peak which lets clients and scripts go back to using the trc
default after assigning a value.
---
 video/out/gpu/video.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index b0fa9eb4d9..c12fb8536c 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -351,7 +351,8 @@ const struct m_sub_options gl_video_conf = {
     OPT_FLAG("gamma-auto", gamma_auto, 0),
     OPT_CHOICE_C("target-prim", target_prim, 0, mp_csp_prim_names),
     OPT_CHOICE_C("target-trc", target_trc, 0, mp_csp_trc_names),
-    OPT_INTRANGE("target-peak", target_peak, 0, 10, 10000),
+    OPT_CHOICE_OR_INT("target-peak", target_peak, 0, 10, 10000,
+                      ({"auto", 0})),
     OPT_CHOICE("tone-mapping", tone_mapping, 0,
                ({"clip", TONE_MAPPING_CLIP},
                 {"mobius", TONE_MAPPING_MOBIUS},
--
cgit v1.2.3


From 3fe882d4ae80fa060a71dad0d6d1605afcfe98b6 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Thu, 27 Dec 2018 18:34:19 +0100
Subject: vo_gpu: improve tone mapping desaturation

Instead of desaturating towards luma, we desaturate towards the
per-channel tone mapped version. This essentially provides a smooth
roll-off towards the "hollywood"-style (non-chromatic) tone mapping
algorithm, which works better for bright content, while continuing to
use the "linear" style (chromatic) tone mapping algorithm for primarily
in-gamut content.

We also split up the desaturation algorithm into strength and exponent,
which allows users to use less aggressive desaturation settings without
affecting the overall curve.
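
As a rough illustration of the new roll-off, here is a minimal C sketch;
it is not the GLSL that pass_color_map() actually emits, and the
tone_map_curve() placeholder, the overshoot term, and the mixing formula
are all assumptions made for the example:

    #include <math.h>

    /* Placeholder for whichever curve is selected (hable, mobius, ...). */
    static float tone_map_curve(float x)
    {
        return x / (x + 1.0f);
    }

    /* Desaturate towards the per-channel tone mapped color: pure "linear"
     * (chromatic) mapping for in-gamut signals, rolling off smoothly towards
     * "hollywood"-style (per-channel) mapping as the signal exceeds the
     * target peak. */
    static void tone_map_pixel(float rgb[3], float dst_peak,
                               float desat_strength, float desat_exp)
    {
        float sig = fmaxf(fmaxf(rgb[0], rgb[1]), fmaxf(rgb[2], 1e-6f));

        float chromatic[3], per_channel[3];
        float scale = tone_map_curve(sig) / sig;
        for (int i = 0; i < 3; i++) {
            chromatic[i] = rgb[i] * scale;            /* preserves saturation */
            per_channel[i] = tone_map_curve(rgb[i]);  /* desaturates highlights */
        }

        /* Mixing coefficient: zero while the signal fits the target peak,
         * then growing with the overshoot; the strength scales it and the
         * exponent shapes the curve. */
        float overshoot = fmaxf(sig - dst_peak, 0.0f) / sig;
        float coeff = fminf(desat_strength * powf(overshoot, desat_exp), 1.0f);

        for (int i = 0; i < 3; i++)
            rgb[i] = chromatic[i] + coeff * (per_channel[i] - chromatic[i]);
    }
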
---
 video/out/gpu/video.c | 41 +++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index c12fb8536c..9ffdc62d20 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -313,9 +313,12 @@ static const struct gl_video_opts gl_video_opts_def = {
     .alpha_mode = ALPHA_BLEND_TILES,
     .background = {0, 0, 0, 255},
     .gamma = 1.0f,
-    .tone_mapping = TONE_MAPPING_HABLE,
-    .tone_mapping_param = NAN,
-    .tone_mapping_desat = 0.5,
+    .tone_map = {
+        .curve = TONE_MAPPING_HABLE,
+        .curve_param = NAN,
+        .desat = 0.75,
+        .desat_exp = 1.5,
+    },
     .early_flush = -1,
     .hwdec_interop = "auto",
 };
@@ -353,20 +356,22 @@ const struct m_sub_options gl_video_conf = {
     OPT_CHOICE_C("target-trc", target_trc, 0, mp_csp_trc_names),
     OPT_CHOICE_OR_INT("target-peak", target_peak, 0, 10, 10000,
                       ({"auto", 0})),
-    OPT_CHOICE("tone-mapping", tone_mapping, 0,
+    OPT_CHOICE("tone-mapping", tone_map.curve, 0,
                ({"clip", TONE_MAPPING_CLIP},
                 {"mobius", TONE_MAPPING_MOBIUS},
                 {"reinhard", TONE_MAPPING_REINHARD},
                 {"hable", TONE_MAPPING_HABLE},
                 {"gamma", TONE_MAPPING_GAMMA},
                 {"linear", TONE_MAPPING_LINEAR})),
-    OPT_CHOICE("hdr-compute-peak", compute_hdr_peak, 0,
+    OPT_CHOICE("hdr-compute-peak", tone_map.compute_peak, 0,
                ({"auto", 0},
                 {"yes", 1},
                 {"no", -1})),
-    OPT_FLOAT("tone-mapping-param", tone_mapping_param, 0),
-    OPT_FLOAT("tone-mapping-desaturate", tone_mapping_desat, 0),
-    OPT_FLAG("gamut-warning", gamut_warning, 0),
+    OPT_FLOAT("tone-mapping-param", tone_map.curve_param, 0),
+    OPT_FLOAT("tone-mapping-desaturate", tone_map.desat, 0),
+    OPT_FLOATRANGE("tone-mapping-desaturate-exponent",
+                   tone_map.desat_exp, 0, 0.0, 20.0),
+    OPT_FLAG("gamut-warning", tone_map.gamut_warning, 0),
     OPT_FLAG("opengl-pbo", pbo, 0),
     SCALER_OPTS("scale",  SCALER_SCALE),
     SCALER_OPTS("dscale", SCALER_DSCALE),
@@ -2472,7 +2477,8 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
     if (!dst.sig_peak)
         dst.sig_peak = mp_trc_nom_peak(dst.gamma);
 
-    bool detect_peak = p->opts.compute_hdr_peak >= 0 && mp_trc_is_hdr(src.gamma);
+    struct gl_tone_map_opts tone_map = p->opts.tone_map;
+    bool detect_peak = tone_map.compute_peak >= 0 && mp_trc_is_hdr(src.gamma);
     if (detect_peak && !p->hdr_peak_ssbo) {
         struct {
             uint32_t counter;
@@ -2493,8 +2499,8 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
         p->hdr_peak_ssbo = ra_buf_create(ra, &params);
         if (!p->hdr_peak_ssbo) {
             MP_WARN(p, "Failed to create HDR peak detection SSBO, disabling.\n");
+            tone_map.compute_peak = p->opts.tone_map.compute_peak = -1;
             detect_peak = false;
-            p->opts.compute_hdr_peak = -1;
         }
     }
@@ -2515,9 +2521,7 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
     }
 
     // Adapt from src to dst as necessary
-    pass_color_map(p->sc, src, dst, p->opts.tone_mapping,
-                   p->opts.tone_mapping_param, p->opts.tone_mapping_desat,
-                   detect_peak, p->opts.gamut_warning, p->use_linear && !osd);
+    pass_color_map(p->sc, p->use_linear && !osd, src, dst, &tone_map);
 
     if (p->use_lut_3d) {
         gl_sc_uniform_texture(p->sc, "lut_3d", p->lut_3d_texture);
@@ -3583,12 +3587,12 @@ static void check_gl_features(struct gl_video *p)
     }
 
     bool have_compute_peak = have_compute && have_ssbo;
-    if (!have_compute_peak && p->opts.compute_hdr_peak >= 0) {
-        int msgl = p->opts.compute_hdr_peak == 1 ? MSGL_WARN : MSGL_V;
+    if (!have_compute_peak && p->opts.tone_map.compute_peak >= 0) {
+        int msgl = p->opts.tone_map.compute_peak == 1 ? MSGL_WARN : MSGL_V;
         MP_MSG(p, msgl, "Disabling HDR peak computation (one or more of the "
                "following is not supported: compute shaders=%d, "
                "SSBO=%d).\n", have_compute, have_ssbo);
-        p->opts.compute_hdr_peak = -1;
+        p->opts.tone_map.compute_peak = -1;
     }
 
     p->forced_dumb_mode = p->opts.dumb_mode > 0 || !have_fbo || !have_texrg;
@@ -3610,7 +3614,6 @@ static void check_gl_features(struct gl_video *p)
         .alpha_mode = p->opts.alpha_mode,
         .use_rectangle = p->opts.use_rectangle,
         .background = p->opts.background,
-        .compute_hdr_peak = p->opts.compute_hdr_peak,
         .dither_algo = p->opts.dither_algo,
         .dither_depth = p->opts.dither_depth,
         .dither_size = p->opts.dither_size,
@@ -3618,9 +3621,7 @@ static void check_gl_features(struct gl_video *p)
         .temporal_dither_period = p->opts.temporal_dither_period,
         .tex_pad_x = p->opts.tex_pad_x,
         .tex_pad_y = p->opts.tex_pad_y,
-        .tone_mapping = p->opts.tone_mapping,
-        .tone_mapping_param = p->opts.tone_mapping_param,
-        .tone_mapping_desat = p->opts.tone_mapping_desat,
+        .tone_map = p->opts.tone_map,
         .early_flush = p->opts.early_flush,
         .icc_opts = p->opts.icc_opts,
         .hwdec_interop = p->opts.hwdec_interop,
--
cgit v1.2.3


From 6179dcbb798aa9e3501af82ae46975e881d80626 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Tue, 1 Jan 2019 07:30:00 +0100
Subject: vo_gpu: redesign peak detection algorithm

The previous approach of using an FIR with a tunable hard threshold for
scene changes had several problems:

- the FIR involved annoying hard-coded buffer sizes, high VRAM usage,
  and the FIR sum was prone to numerical overflow, which limited the
  number of frames we could average over.

- the hard scene change detection was prone to both false positives and
  false negatives, each with their own (annoying) issues.

Scrap this entirely and totally redesign the scene change detection:
switch to a dual approach of using a simple single-pole IIR low-pass
filter to smooth out noise, while using a softer scene change curve
(with tunable low and high thresholds), based on `smoothstep`.

The IIR filter is extremely simple in its implementation and has an
arbitrarily user-tunable cutoff frequency, while the smoothstep-based
scene change curve provides a good, tunable tradeoff between adaptation
speed and stability - without exhibiting either of the traditional
issues associated with the hard cutoff.

Another way to think about the new options is that the "low threshold"
provides a margin of error within which we don't care about small
fluctuations in the scene (which will therefore be smoothed out by the
IIR filter).
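
A minimal C sketch of the new averaging scheme, assuming an interpretation
of the options rather than reproducing the generated shader code (the
weights and units below are illustrative assumptions): the running average
is a single-pole IIR low-pass filter, and the smoothstep-based scene change
curve simply raises the filter weight once the current frame strays far
enough from that average.

    #include <math.h>

    /* smoothstep() as defined by GLSL. */
    static float smooth_step(float lo, float hi, float x)
    {
        float t = fminf(fmaxf((x - lo) / (hi - lo), 0.0f), 1.0f);
        return t * t * (3.0f - 2.0f * t);
    }

    /* One update step of the running brightness average. */
    static float update_average(float avg, float cur,
                                float decay_rate,     /* --hdr-peak-decay-rate */
                                float threshold_low,  /* scene change thresholds */
                                float threshold_high)
    {
        /* Single-pole IIR low pass: decay_rate acts here as a time constant
         * in frames (assumed interpretation), giving the tunable cutoff. */
        float weight = 1.0f - expf(-1.0f / decay_rate);

        /* Soft scene change curve: below threshold_low the deviation is
         * treated as noise and smoothed away; above threshold_high the
         * filter is effectively bypassed; in between we blend smoothly. */
        float deviation = fabsf(cur - avg);
        weight = fmaxf(weight, smooth_step(threshold_low, threshold_high, deviation));

        return avg + weight * (cur - avg);
    }
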
---
 video/out/gpu/video.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index 9ffdc62d20..a29f09bc3d 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -316,6 +316,9 @@ static const struct gl_video_opts gl_video_opts_def = {
     .tone_map = {
         .curve = TONE_MAPPING_HABLE,
         .curve_param = NAN,
+        .decay_rate = 100.0,
+        .scene_threshold_low = 50,
+        .scene_threshold_high = 200,
         .desat = 0.75,
         .desat_exp = 1.5,
     },
@@ -367,6 +370,11 @@ const struct m_sub_options gl_video_conf = {
                ({"auto", 0},
                 {"yes", 1},
                 {"no", -1})),
+    OPT_FLOATRANGE("hdr-peak-decay-rate", tone_map.decay_rate, 0, 1.0, 1000.0),
+    OPT_INTRANGE("hdr-scene-threshold-low",
+                 tone_map.scene_threshold_low, 0, 0, 10000),
+    OPT_INTRANGE("hdr-scene-threshold-high",
+                 tone_map.scene_threshold_high, 0, 0, 10000),
     OPT_FLOAT("tone-mapping-param", tone_map.curve_param, 0),
     OPT_FLOAT("tone-mapping-desaturate", tone_map.desat, 0),
     OPT_FLOATRANGE("tone-mapping-desaturate-exponent",
@@ -2478,17 +2486,18 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
         dst.sig_peak = mp_trc_nom_peak(dst.gamma);
 
     struct gl_tone_map_opts tone_map = p->opts.tone_map;
-    bool detect_peak = tone_map.compute_peak >= 0 && mp_trc_is_hdr(src.gamma);
+    bool detect_peak = tone_map.compute_peak >= 0 && mp_trc_is_hdr(src.gamma)
+                       && src.sig_peak > dst.sig_peak;
+
     if (detect_peak && !p->hdr_peak_ssbo) {
         struct {
+            float average[2];
+            uint32_t frame_sum;
+            uint32_t frame_max;
             uint32_t counter;
-            uint32_t frame_idx;
-            uint32_t frame_num;
-            uint32_t frame_max[PEAK_DETECT_FRAMES+1];
-            uint32_t frame_sum[PEAK_DETECT_FRAMES+1];
-            uint32_t total_max;
-            uint32_t total_sum;
-        } peak_ssbo = {0};
+        } peak_ssbo = {
+            .average = { 0.25, src.sig_peak },
+        };
 
         struct ra_buf_params params = {
             .type = RA_BUF_TYPE_SHADER_STORAGE,
@@ -2508,15 +2517,10 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
         pass_describe(p, "detect HDR peak");
         pass_is_compute(p, 8, 8, true); // 8x8 is good for performance
         gl_sc_ssbo(p->sc, "PeakDetect", p->hdr_peak_ssbo,
+            "vec2 average;"
+            "uint frame_sum;"
+            "uint frame_max;"
             "uint counter;"
-            "uint frame_idx;"
-            "uint frame_num;"
-            "uint frame_max[%d];"
-            "uint frame_avg[%d];"
-            "uint total_max;"
-            "uint total_avg;",
-            PEAK_DETECT_FRAMES + 1,
-            PEAK_DETECT_FRAMES + 1
         );
     }
--
cgit v1.2.3


From 12e58ff8a65c537a222a3fb954f88d98a3a5bfd2 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Wed, 2 Jan 2019 03:03:38 +0100
Subject: vo_gpu: allow boosting dark scenes when tone mapping

In theory our "eye adaptation" algorithm works both ways, darkening
bright scenes and brightening dark scenes. But I've always just
prevented the latter with a hard clamp, since I wanted to avoid blowing
up dark scenes into looking funny (and full of noise).

Still, allowing a tiny bit of over-exposure might be a good thing. I
won't change the default just yet (better let users test), but a
moderate value of 1.2 might be better than the current 1.0 limit. Needs
testing, especially on dark scenes.
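
For illustration, the change amounts to replacing a hard 1.0 ceiling on the
adaptation factor with the new option; the names below are assumptions for
the sketch, not the actual shader variables:

    #include <math.h>

    /* "Eye adaptation" scale factor mapping the detected scene brightness to
     * the desired target level. Previously clamped to 1.0, i.e. darken-only;
     * tone-mapping-max-boost > 1.0 permits mild brightening of dark scenes. */
    static float adaptation_factor(float target_avg, float scene_avg,
                                   float max_boost)
    {
        float factor = target_avg / fmaxf(scene_avg, 1e-6f);
        return fminf(factor, max_boost);   /* was: fminf(factor, 1.0f) */
    }
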
---
 video/out/gpu/video.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index a29f09bc3d..6bf0bb31a1 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -316,6 +316,7 @@ static const struct gl_video_opts gl_video_opts_def = {
     .tone_map = {
         .curve = TONE_MAPPING_HABLE,
         .curve_param = NAN,
+        .max_boost = 1.0,
         .decay_rate = 100.0,
         .scene_threshold_low = 50,
         .scene_threshold_high = 200,
@@ -376,6 +377,7 @@ const struct m_sub_options gl_video_conf = {
     OPT_INTRANGE("hdr-scene-threshold-high",
                  tone_map.scene_threshold_high, 0, 0, 10000),
     OPT_FLOAT("tone-mapping-param", tone_map.curve_param, 0),
+    OPT_FLOATRANGE("tone-mapping-max-boost", tone_map.max_boost, 0, 1.0, 10.0),
     OPT_FLOAT("tone-mapping-desaturate", tone_map.desat, 0),
     OPT_FLOATRANGE("tone-mapping-desaturate-exponent",
                    tone_map.desat_exp, 0, 0.0, 20.0),
--
cgit v1.2.3


From fdd671188d7edb8d150ec2c93656fb80bf031f12 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Wed, 2 Jan 2019 07:18:29 +0100
Subject: vo_gpu: improve accuracy of HDR brightness estimation

This change switches to a logarithmic mean to estimate the average
signal brightness. This handles dark scenes with isolated highlights
much more faithfully than the linear mean did, since the log of the
signal roughly corresponds to the perceptual brightness.
---
 video/out/gpu/video.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index 6bf0bb31a1..be49551dfb 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -2494,7 +2494,7 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
     if (detect_peak && !p->hdr_peak_ssbo) {
         struct {
             float average[2];
-            uint32_t frame_sum;
+            int32_t frame_sum;
             uint32_t frame_max;
             uint32_t counter;
         } peak_ssbo = {
             .average = { 0.25, src.sig_peak },
@@ -2520,7 +2520,7 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
         pass_is_compute(p, 8, 8, true); // 8x8 is good for performance
         gl_sc_ssbo(p->sc, "PeakDetect", p->hdr_peak_ssbo,
             "vec2 average;"
-            "uint frame_sum;"
+            "int frame_sum;"
             "uint frame_max;"
             "uint counter;"
         );
--
cgit v1.2.3


From 4e8022da269d02c3bb23e4e119e4b1dc9aa3f3e4 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Thu, 3 Jan 2019 00:44:15 +0100
Subject: vo_gpu: allow color management in dumb mode

There's no point in disallowing target-trc/prim in dumb mode, since they
still work fine.
---
 video/out/gpu/video.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index be49551dfb..5e805019ea 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -3513,9 +3513,9 @@ static bool check_dumb_mode(struct gl_video *p)
         return false;
 
     // otherwise, use auto-detection
-    if (o->target_prim || o->target_trc || o->correct_downscaling ||
-        o->linear_downscaling || o->linear_upscaling || o->sigmoid_upscaling ||
-        o->interpolation || o->blend_subs || o->deband || o->unsharp)
+    if (o->correct_downscaling || o->linear_downscaling ||
+        o->linear_upscaling || o->sigmoid_upscaling || o->interpolation ||
+        o->blend_subs || o->deband || o->unsharp)
         return false;
     // check remaining scalers (tscale is already implicitly excluded above)
     for (int i = 0; i < SCALER_COUNT; i++) {
@@ -3527,8 +3527,6 @@ static bool check_dumb_mode(struct gl_video *p)
     }
     if (o->user_shaders && o->user_shaders[0])
         return false;
-    if (p->use_lut_3d)
-        return false;
     return true;
 }
@@ -3631,6 +3629,9 @@ static void check_gl_features(struct gl_video *p)
         .early_flush = p->opts.early_flush,
         .icc_opts = p->opts.icc_opts,
         .hwdec_interop = p->opts.hwdec_interop,
+        .target_trc = p->opts.target_trc,
+        .target_prim = p->opts.target_prim,
+        .target_peak = p->opts.target_peak,
     };
     for (int n = 0; n < SCALER_COUNT; n++)
         p->opts.scaler[n] = gl_video_opts_def.scaler[n];
--
cgit v1.2.3


From b4b719e33748970a9bf98a82a017d8f149ecb557 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Thu, 3 Jan 2019 17:18:58 +0100
Subject: vo_gpu: clamp sigmoid function

The sigmoid function can otherwise explode on some clips.
---
 video/out/gpu/video.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index 5e805019ea..df357b3552 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -2381,6 +2381,7 @@ static void pass_scale_main(struct gl_video *p)
         // values at 1 and 0, and then scale/shift them, respectively.
         sig_offset = 1.0/(1+expf(sig_slope * sig_center));
         sig_scale = 1.0/(1+expf(sig_slope * (sig_center-1))) - sig_offset;
+        GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
         GLSLF("color.rgb = %f - log(1.0/(color.rgb * %f + %f) - 1.0) * 1.0/%f;\n",
               sig_center, sig_scale, sig_offset, sig_slope);
         pass_opt_hook_point(p, "SIGMOID", NULL);
@@ -2408,6 +2409,7 @@ static void pass_scale_main(struct gl_video *p)
     GLSLF("// scaler post-conversion\n");
     if (use_sigmoid) {
         // Inverse of the transformation above
+        GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
         GLSLF("color.rgb = (1.0/(1.0 + exp(%f * (%f - color.rgb))) - %f) * 1.0/%f;\n",
               sig_slope, sig_center, sig_offset, sig_scale);
     }
--
cgit v1.2.3


From 3f1bc25d4de6150b0acff7e92d3e3084a7d989f0 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Fri, 4 Jan 2019 16:46:38 +0100
Subject: vo_gpu: use dB units for scene change detection

Rather than the linear cd/m^2 units, these (relative) logarithmic units
lend themselves much better to actually detecting scene changes,
especially since the scene averaging was changed to also work
logarithmically.
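
A small C sketch of both ideas together, the logarithmic frame average from
the earlier commit and the dB-based scene change measure, with the caveat
that the exact formulas are assumptions rather than the generated shader
code:

    #include <math.h>

    /* Log-domain frame average: exp(mean(log(x))) tracks perceived brightness
     * and is far less skewed by isolated highlights than a linear mean. */
    static float log_average(const float *lum, int n)
    {
        double acc = 0.0;
        for (int i = 0; i < n; i++)
            acc += log(fmax(lum[i], 1e-6));
        return (float)exp(acc / n);
    }

    /* Relative difference between current frame and running average, in dB. */
    static float delta_db(float cur, float avg)
    {
        return fabsf(10.0f * log10f(fmaxf(cur, 1e-6f) / fmaxf(avg, 1e-6f)));
    }

    /* Scene change weight on the new dB scale: changes below the low threshold
     * (default 5.5 dB) are ignored, changes above the high threshold (default
     * 10 dB) count as a full scene cut, and the range in between is blended
     * with the same smoothstep curve as before. */
    static float scene_change_weight(float cur, float avg,
                                     float lo_db, float hi_db)
    {
        float d = delta_db(cur, avg);
        float t = fminf(fmaxf((d - lo_db) / (hi_db - lo_db), 0.0f), 1.0f);
        return t * t * (3.0f - 2.0f * t);
    }
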
---
 video/out/gpu/video.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index df357b3552..24e6990139 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -318,8 +318,8 @@ static const struct gl_video_opts gl_video_opts_def = {
         .curve_param = NAN,
         .max_boost = 1.0,
         .decay_rate = 100.0,
-        .scene_threshold_low = 50,
-        .scene_threshold_high = 200,
+        .scene_threshold_low = 5.5,
+        .scene_threshold_high = 10.0,
         .desat = 0.75,
         .desat_exp = 1.5,
     },
@@ -372,10 +372,10 @@ const struct m_sub_options gl_video_conf = {
                 {"yes", 1},
                 {"no", -1})),
     OPT_FLOATRANGE("hdr-peak-decay-rate", tone_map.decay_rate, 0, 1.0, 1000.0),
-    OPT_INTRANGE("hdr-scene-threshold-low",
-                 tone_map.scene_threshold_low, 0, 0, 10000),
-    OPT_INTRANGE("hdr-scene-threshold-high",
-                 tone_map.scene_threshold_high, 0, 0, 10000),
+    OPT_FLOATRANGE("hdr-scene-threshold-low",
+                   tone_map.scene_threshold_low, 0, 0, 20.0),
+    OPT_FLOATRANGE("hdr-scene-threshold-high",
+                   tone_map.scene_threshold_high, 0, 0, 20.0),
     OPT_FLOAT("tone-mapping-param", tone_map.curve_param, 0),
     OPT_FLOATRANGE("tone-mapping-max-boost", tone_map.max_boost, 0, 1.0, 10.0),
     OPT_FLOAT("tone-mapping-desaturate", tone_map.desat, 0),
--
cgit v1.2.3


From 8b563a034604ff5ab2ad92d12c63e806f45d1bb6 Mon Sep 17 00:00:00 2001
From: Niklas Haas
Date: Wed, 9 Jan 2019 17:14:19 +0100
Subject: vo_gpu: fix initial seeding of the peak detect ssbo

This solves some edge cases when using files with very weird metadata
(e.g. MaxCLL 10k and so forth). Instead of just blindly seeding it with
the tagged metadata, forcibly set the initial state from the detected
values.
---
 video/out/gpu/video.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index 24e6990139..593f5fb9c1 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -2499,9 +2499,7 @@ static void pass_colormanage(struct gl_video *p, struct mp_colorspace src, bool
             int32_t frame_sum;
             uint32_t frame_max;
             uint32_t counter;
-        } peak_ssbo = {
-            .average = { 0.25, src.sig_peak },
-        };
+        } peak_ssbo = {0};
 
         struct ra_buf_params params = {
             .type = RA_BUF_TYPE_SHADER_STORAGE,
--
cgit v1.2.3


From b3cbd4650984902548432f15be9f267f9cb2230e Mon Sep 17 00:00:00 2001
From: Bin Jin
Date: Thu, 7 Mar 2019 14:53:52 +0000
Subject: vo_gpu: make texture offset available to CHROMA hooks

Before this commit, the texture offset is set after all source textures
are finalized, which means CHROMA hooks won't be able to align with
luma planes. This could be problematic for chroma prescalers utilizing
information from the luma plane.

Fix this by finding the reference texture early, and setting the global
texture offset early.
---
 video/out/gpu/video.c | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

(limited to 'video/out/gpu/video.c')

diff --git a/video/out/gpu/video.c b/video/out/gpu/video.c
index 593f5fb9c1..416ba928d1 100644
--- a/video/out/gpu/video.c
+++ b/video/out/gpu/video.c
@@ -2072,6 +2072,23 @@ static void pass_read_video(struct gl_video *p)
         }
     }
 
+    // The basic idea is we assume the rgb/luma texture is the "reference" and
+    // scale everything else to match, after all planes are finalized.
+    // We find the reference texture first, in order to maintain texture offset
+    // between hooks on different type of planes.
+    int reference_tex_num = 0;
+    for (int n = 0; n < 4; n++) {
+        switch (img[n].type) {
+        case PLANE_RGB:
+        case PLANE_XYZ:
+        case PLANE_LUMA: break;
+        default: continue;
+        }
+
+        reference_tex_num = n;
+        break;
+    }
+
     // Dispatch the hooks for all of these textures, saving and perhaps
     // modifying them in the process
     for (int n = 0; n < 4; n++) {
@@ -2086,26 +2103,18 @@ static void pass_read_video(struct gl_video *p)
         }
 
         img[n] = pass_hook(p, name, img[n], &offsets[n]);
+
+        if (reference_tex_num == n) {
+            // The reference texture is finalized now.
+            p->texture_w = img[n].w;
+            p->texture_h = img[n].h;
+            p->texture_offset = offsets[n];
+        }
     }
 
     // At this point all planes are finalized but they may not be at the
     // required size yet. Furthermore, they may have texture offsets that
-    // require realignment. For lack of something better to do, we assume
-    // the rgb/luma texture is the "reference" and scale everything else
-    // to match.
-    for (int n = 0; n < 4; n++) {
-        switch (img[n].type) {
-        case PLANE_RGB:
-        case PLANE_XYZ:
-        case PLANE_LUMA: break;
-        default: continue;
-        }
-
-        p->texture_w = img[n].w;
-        p->texture_h = img[n].h;
-        p->texture_offset = offsets[n];
-        break;
-    }
+    // require realignment.
 
     // Compute the reference rect
     struct mp_rect_f src = {0.0, 0.0, p->image_params.w, p->image_params.h};
--
cgit v1.2.3