Diffstat (limited to 'video')
-rw-r--r--  video/csputils.c                |  16
-rw-r--r--  video/csputils.h                |   3
-rw-r--r--  video/decode/dec_video.c        |  27
-rw-r--r--  video/decode/dec_video.h        |   2
-rw-r--r--  video/decode/vd_lavc.c          |   3
-rw-r--r--  video/decode/vdpau.c            |  77
-rw-r--r--  video/filter/vf_vdpaurb.c       |   3
-rw-r--r--  video/hwdec.h                   |   1
-rw-r--r--  video/mp_image.c                |   8
-rw-r--r--  video/mp_image.h                |   2
-rw-r--r--  video/out/cocoa_common.m        |  11
-rw-r--r--  video/out/filter_kernels.c      |  49
-rw-r--r--  video/out/filter_kernels.h      |   1
-rw-r--r--  video/out/opengl/common.h       |   3
-rw-r--r--  video/out/opengl/context.c      |   2
-rw-r--r--  video/out/opengl/context_rpi.c  |   2
-rw-r--r--  video/out/opengl/header_fixes.h |  17
-rw-r--r--  video/out/opengl/video.c        | 103
-rw-r--r--  video/out/opengl/video.h        |   1
-rw-r--r--  video/out/vo.c                  |   5
-rw-r--r--  video/out/vo.h                  |   1
-rw-r--r--  video/out/vo_direct3d.c         |   2
-rw-r--r--  video/out/vo_opengl_cb.c        |  11
-rw-r--r--  video/out/vo_tct.c              | 306
-rw-r--r--  video/out/win_state.c           |   2
25 files changed, 583 insertions(+), 75 deletions(-)
diff --git a/video/csputils.c b/video/csputils.c
index 0c2e847186..8f6a9e351e 100644
--- a/video/csputils.c
+++ b/video/csputils.c
@@ -100,6 +100,22 @@ const struct m_opt_choice_alternatives mp_chroma_names[] = {
{0}
};
+void mp_colorspace_merge(struct mp_colorspace *orig, struct mp_colorspace *new)
+{
+ if (!orig->space)
+ orig->space = new->space;
+ if (!orig->levels)
+ orig->levels = new->levels;
+ if (!orig->primaries)
+ orig->primaries = new->primaries;
+ if (!orig->gamma)
+ orig->gamma = new->gamma;
+ if (!orig->nom_peak)
+ orig->nom_peak = new->nom_peak;
+ if (!orig->sig_peak)
+ orig->sig_peak = new->sig_peak;
+}
+
// The short name _must_ match with what vf_stereo3d accepts (if supported).
// The long name in comments is closer to the Matroska spec (StereoMode element).
// The numeric index matches the Matroska StereoMode value. If you add entries
diff --git a/video/csputils.h b/video/csputils.h
index 0406ddf35f..9eaafbe75d 100644
--- a/video/csputils.h
+++ b/video/csputils.h
@@ -125,6 +125,9 @@ struct mp_colorspace {
float sig_peak; // signal peak, highest value that occurs in the source
};
+// Replaces unknown values in the first struct by those of the second struct
+void mp_colorspace_merge(struct mp_colorspace *orig, struct mp_colorspace *new);
+
struct mp_csp_params {
struct mp_colorspace color; // input colorspace
enum mp_csp_levels levels_out; // output device
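(Usage sketch, not part of this patch; the variable names are illustrative. It mirrors the call added in dec_video.c further down: fields already set in the first struct win, and only unknown (zero) fields are taken from the second struct.)

    struct mp_image_params params = decoder_reported;      // hypothetical locals
    mp_colorspace_merge(&params.color, &container_color);  // fill only the unset fields
    mp_image_params_guess_csp(&params);                    // guess anything still unknown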
diff --git a/video/decode/dec_video.c b/video/decode/dec_video.c
index 7e144a72bc..dea5b594c7 100644
--- a/video/decode/dec_video.c
+++ b/video/decode/dec_video.c
@@ -64,6 +64,7 @@ void video_reset(struct dec_video *d_video)
d_video->decoded_pts = MP_NOPTS_VALUE;
d_video->codec_pts = MP_NOPTS_VALUE;
d_video->codec_dts = MP_NOPTS_VALUE;
+ d_video->has_broken_decoded_pts = 0;
d_video->last_format = d_video->fixed_format = (struct mp_image_params){0};
d_video->dropped_frames = 0;
d_video->current_state = DATA_AGAIN;
@@ -245,6 +246,7 @@ static void fix_image_params(struct dec_video *d_video,
p.stereo_out = opts->video_stereo_mode;
// Detect colorspace from resolution.
+ mp_colorspace_merge(&p.color, &c->color);
mp_image_params_guess_csp(&p);
d_video->last_format = *params;
@@ -316,8 +318,14 @@ static struct mp_image *decode_packet(struct dec_video *d_video,
pts = dts;
if (!opts->correct_pts || pts == MP_NOPTS_VALUE) {
- if (opts->correct_pts && !d_video->header->missing_timestamps)
- MP_WARN(d_video, "No video PTS! Making something up.\n");
+ if (opts->correct_pts && !d_video->header->missing_timestamps) {
+ if (d_video->has_broken_decoded_pts <= 1) {
+ MP_WARN(d_video, "No video PTS! Making something up.\n");
+ if (d_video->has_broken_decoded_pts == 1)
+ MP_WARN(d_video, "Ignoring further missing PTS warnings.\n");
+ d_video->has_broken_decoded_pts++;
+ }
+ }
double frame_time = 1.0f / (d_video->fps > 0 ? d_video->fps : 25);
double base = d_video->first_packet_pdts;
@@ -458,12 +466,15 @@ void video_work(struct dec_video *d_video)
struct demux_packet *new_segment = d_video->new_segment;
d_video->new_segment = NULL;
- // Could avoid decoder reinit; would still need flush.
- d_video->codec = new_segment->codec;
- if (d_video->vd_driver)
- d_video->vd_driver->uninit(d_video);
- d_video->vd_driver = NULL;
- video_init_best_codec(d_video);
+ if (d_video->codec == new_segment->codec) {
+ video_reset(d_video);
+ } else {
+ d_video->codec = new_segment->codec;
+ if (d_video->vd_driver)
+ d_video->vd_driver->uninit(d_video);
+ d_video->vd_driver = NULL;
+ video_init_best_codec(d_video);
+ }
d_video->start = new_segment->start;
d_video->end = new_segment->end;
diff --git a/video/decode/dec_video.h b/video/decode/dec_video.h
index f7bff61c9f..1d2b3f087e 100644
--- a/video/decode/dec_video.h
+++ b/video/decode/dec_video.h
@@ -62,6 +62,8 @@ struct dec_video {
// There was at least one packet with non-sense timestamps.
int has_broken_packet_pts; // <0: uninitialized, 0: no problems, 1: broken
+ int has_broken_decoded_pts;
+
// Final PTS of previously decoded image
double decoded_pts;
diff --git a/video/decode/vd_lavc.c b/video/decode/vd_lavc.c
index 12e60483ee..d25c99981e 100644
--- a/video/decode/vd_lavc.c
+++ b/video/decode/vd_lavc.c
@@ -125,6 +125,7 @@ const struct m_sub_options vd_lavc_conf = {
};
extern const struct vd_lavc_hwdec mp_vd_lavc_vdpau;
+extern const struct vd_lavc_hwdec mp_vd_lavc_vdpau_copy;
extern const struct vd_lavc_hwdec mp_vd_lavc_videotoolbox;
extern const struct vd_lavc_hwdec mp_vd_lavc_videotoolbox_copy;
extern const struct vd_lavc_hwdec mp_vd_lavc_vaapi;
@@ -177,6 +178,7 @@ static const struct vd_lavc_hwdec *const hwdec_list[] = {
#endif
#if HAVE_VDPAU_HWACCEL
&mp_vd_lavc_vdpau,
+ &mp_vd_lavc_vdpau_copy,
#endif
#if HAVE_VIDEOTOOLBOX_HWACCEL
&mp_vd_lavc_videotoolbox,
@@ -596,6 +598,7 @@ static void uninit_avctx(struct dec_video *vd)
if (ctx->hwdec && ctx->hwdec->uninit)
ctx->hwdec->uninit(ctx);
ctx->hwdec = NULL;
+ assert(ctx->hwdec_priv == NULL);
av_freep(&ctx->avctx);
diff --git a/video/decode/vdpau.c b/video/decode/vdpau.c
index 0003182dcb..93a1e6d9a3 100644
--- a/video/decode/vdpau.c
+++ b/video/decode/vdpau.c
@@ -21,6 +21,7 @@
#include "lavc.h"
#include "common/common.h"
+#include "video/mp_image_pool.h"
#include "video/vdpau.h"
#include "video/hwdec.h"
@@ -28,6 +29,9 @@ struct priv {
struct mp_log *log;
struct mp_vdpau_ctx *mpvdp;
uint64_t preemption_counter;
+ // vdpau-copy
+ Display *display;
+ struct mp_image_pool *sw_pool;
};
static int init_decoder(struct lavc_ctx *ctx, int w, int h)
@@ -76,9 +80,16 @@ static void uninit(struct lavc_ctx *ctx)
{
struct priv *p = ctx->hwdec_priv;
- talloc_free(p);
+ if (p->display) {
+ // for copy path: we own this stuff
+ mp_vdpau_destroy(p->mpvdp);
+ XCloseDisplay(p->display);
+ }
+
+ TA_FREEP(&ctx->hwdec_priv);
- av_freep(&ctx->avctx->hwaccel_context);
+ if (ctx->avctx)
+ av_freep(&ctx->avctx->hwaccel_context);
}
static int init(struct lavc_ctx *ctx)
@@ -102,6 +113,56 @@ static int probe(struct lavc_ctx *ctx, struct vd_lavc_hwdec *hwdec,
return 0;
}
+static int init_copy(struct lavc_ctx *ctx)
+{
+ struct priv *p = talloc_ptrtype(NULL, p);
+ *p = (struct priv) {
+ .log = mp_log_new(p, ctx->log, "vdpau"),
+ };
+
+ p->display = XOpenDisplay(NULL);
+ if (!p->display)
+ goto error;
+
+ p->mpvdp = mp_vdpau_create_device_x11(p->log, p->display, true);
+ if (!p->mpvdp)
+ goto error;
+
+ p->sw_pool = talloc_steal(p, mp_image_pool_new(17));
+
+ ctx->hwdec_priv = p;
+
+ mp_vdpau_handle_preemption(p->mpvdp, &p->preemption_counter);
+ return 0;
+
+error:
+ if (p->display)
+ XCloseDisplay(p->display);
+ talloc_free(p);
+ return -1;
+}
+
+static int probe_copy(struct lavc_ctx *ctx, struct vd_lavc_hwdec *hwdec,
+ const char *codec)
+{
+ assert(!ctx->hwdec_priv);
+ int r = init_copy(ctx);
+ if (ctx->hwdec_priv)
+ uninit(ctx);
+ ctx->hwdec_priv = NULL;
+
+ return r < 0 ? HWDEC_ERR_NO_CTX : 0;
+}
+
+static struct mp_image *copy_image(struct lavc_ctx *ctx, struct mp_image *img)
+{
+ struct priv *p = ctx->hwdec_priv;
+ struct mp_hwdec_ctx *hwctx = &p->mpvdp->hwctx;
+ struct mp_image *out = hwctx->download_image(hwctx, img, p->sw_pool);
+ talloc_free(img);
+ return out;
+}
+
const struct vd_lavc_hwdec mp_vd_lavc_vdpau = {
.type = HWDEC_VDPAU,
.image_format = IMGFMT_VDPAU,
@@ -112,3 +173,15 @@ const struct vd_lavc_hwdec mp_vd_lavc_vdpau = {
.allocate_image = allocate_image,
.process_image = update_format,
};
+
+const struct vd_lavc_hwdec mp_vd_lavc_vdpau_copy = {
+ .type = HWDEC_VDPAU_COPY,
+ .copying = true,
+ .image_format = IMGFMT_VDPAU,
+ .probe = probe_copy,
+ .init = init_copy,
+ .uninit = uninit,
+ .init_decoder = init_decoder,
+ .allocate_image = allocate_image,
+ .process_image = copy_image,
+};
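Like the other *-copy hwdecs, the new entry reuses the normal VDPAU decode path (frames still arrive as IMGFMT_VDPAU surfaces), but process_image downloads every frame into a software image from sw_pool, so the output works with any VO at the cost of an extra copy. It also opens its own X11 display and VDPAU device rather than borrowing the VO's, which is why uninit() now tears those down when p->display is set. It is selected explicitly, e.g.:

    mpv --hwdec=vdpau-copy file.mkv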
diff --git a/video/filter/vf_vdpaurb.c b/video/filter/vf_vdpaurb.c
index 2e6da79766..59067b54bf 100644
--- a/video/filter/vf_vdpaurb.c
+++ b/video/filter/vf_vdpaurb.c
@@ -87,6 +87,9 @@ static int vf_open(vf_instance_t *vf)
{
struct vf_priv_s *p = vf->priv;
+ MP_WARN(vf, "This filter is deprecated and will be removed.\n");
+ MP_WARN(vf, "Use --hwdec=vdpau-copy instead.\n");
+
vf->filter_ext = filter_ext;
vf->filter = NULL;
vf->reconfig = reconfig;
diff --git a/video/hwdec.h b/video/hwdec.h
index 34b65fe15c..857d07c894 100644
--- a/video/hwdec.h
+++ b/video/hwdec.h
@@ -11,6 +11,7 @@ enum hwdec_type {
HWDEC_AUTO,
HWDEC_AUTO_COPY,
HWDEC_VDPAU,
+ HWDEC_VDPAU_COPY,
HWDEC_VIDEOTOOLBOX,
HWDEC_VIDEOTOOLBOX_COPY,
HWDEC_VAAPI,
diff --git a/video/mp_image.c b/video/mp_image.c
index 694bea1bf0..b56c9e60b3 100644
--- a/video/mp_image.c
+++ b/video/mp_image.c
@@ -513,9 +513,15 @@ char *mp_image_params_to_str_buf(char *b, size_t bs,
mp_snprintf_cat(b, bs, " %s", mp_imgfmt_to_name(p->imgfmt));
if (p->hw_subfmt)
mp_snprintf_cat(b, bs, "[%s]", mp_imgfmt_to_name(p->hw_subfmt));
- mp_snprintf_cat(b, bs, " %s/%s",
+ mp_snprintf_cat(b, bs, " %s/%s/%s/%s",
m_opt_choice_str(mp_csp_names, p->color.space),
+ m_opt_choice_str(mp_csp_prim_names, p->color.primaries),
+ m_opt_choice_str(mp_csp_trc_names, p->color.gamma),
m_opt_choice_str(mp_csp_levels_names, p->color.levels));
+ if (p->color.nom_peak)
+ mp_snprintf_cat(b, bs, " NP=%f", p->color.nom_peak);
+ if (p->color.sig_peak)
+ mp_snprintf_cat(b, bs, " SP=%f", p->color.sig_peak);
mp_snprintf_cat(b, bs, " CL=%s",
m_opt_choice_str(mp_chroma_names, p->chroma_location));
if (p->rotate)
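For illustration only (the exact tokens come from the mp_csp_*_names and mp_chroma_names option tables, so treat the values as assumed rather than verbatim output), a parameter string for plain BT.709 content would now read roughly

    1920x1080 yuv420p bt.709/bt.709/bt.1886/limited CL=mpeg2/4/h264

with " NP=..." and " SP=..." appended only when a nominal or signal peak is actually set. The longer string is also why the stack buffer behind mp_image_params_to_str() grows from 80 to 99 bytes in the mp_image.h hunk below.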
diff --git a/video/mp_image.h b/video/mp_image.h
index 13e364ae24..7c0f7bad7e 100644
--- a/video/mp_image.h
+++ b/video/mp_image.h
@@ -133,7 +133,7 @@ void mp_image_params_guess_csp(struct mp_image_params *params);
char *mp_image_params_to_str_buf(char *b, size_t bs,
const struct mp_image_params *p);
-#define mp_image_params_to_str(p) mp_image_params_to_str_buf((char[80]){0}, 80, p)
+#define mp_image_params_to_str(p) mp_image_params_to_str_buf((char[99]){0}, 99, p)
bool mp_image_params_valid(const struct mp_image_params *p);
bool mp_image_params_equal(const struct mp_image_params *p1,
diff --git a/video/out/cocoa_common.m b/video/out/cocoa_common.m
index ba241f42cc..719169df48 100644
--- a/video/out/cocoa_common.m
+++ b/video/out/cocoa_common.m
@@ -121,9 +121,14 @@ static void run_on_main_thread(struct vo *vo, void(^block)(void))
static void queue_new_video_size(struct vo *vo, int w, int h)
{
struct vo_cocoa_state *s = vo->cocoa;
+ struct mp_vo_opts *opts = vo->opts;
if ([s->window conformsToProtocol: @protocol(MpvSizing)]) {
id<MpvSizing> win = (id<MpvSizing>) s->window;
- [win queueNewVideoSize:NSMakeSize(w, h)];
+ NSRect r = NSMakeRect(0, 0, w, h);
+ if(!opts->hidpi_window_scale) {
+ r = [s->current_screen convertRectFromBacking:r];
+ }
+ [win queueNewVideoSize:NSMakeSize(r.size.width, r.size.height)];
}
}
@@ -488,8 +493,10 @@ static void create_ui(struct vo *vo, struct mp_rect *win, int geo_flags)
if (s->embedded) {
parent = (NSView *) (intptr_t) opts->WinID;
} else {
- const NSRect wr =
+ NSRect wr =
NSMakeRect(win->x0, win->y0, win->x1 - win->x0, win->y1 - win->y0);
+ if(!opts->hidpi_window_scale)
+ wr = [s->current_screen convertRectFromBacking:wr];
s->window = create_window(wr, s->current_screen, opts->border, adapter);
parent = [s->window contentView];
}
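Concretely, assuming a screen with a 2x backing scale factor: with hidpi-window-scale disabled, a 1280x720 video now requests a 640x360-point window (still 1280x720 device pixels) instead of a 1280x720-point window as before; convertRectFromBacking: performs that pixels-to-points division.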
diff --git a/video/out/filter_kernels.c b/video/out/filter_kernels.c
index fe5265c70c..c5a12295f7 100644
--- a/video/out/filter_kernels.c
+++ b/video/out/filter_kernels.c
@@ -92,34 +92,45 @@ bool mp_init_filter(struct filter_kernel *filter, const int *sizes,
}
}
-// Sample from the blurred, windowed kernel. Note: The window is always
-// stretched to the true radius, regardless of the filter blur/scale.
-static double sample_filter(struct filter_kernel *filter,
- struct filter_window *window, double x)
+// Sample from a blurred and tapered window
+static double sample_window(struct filter_window *kernel, double x)
{
- double bk = filter->f.blur > 0.0 ? filter->f.blur : 1.0;
- double bw = window->blur > 0.0 ? window->blur : 1.0;
- double c = fabs(x) / (filter->inv_scale * bk);
- double w = window->weight ? window->weight(window, x/bw * window->radius
- / filter->f.radius)
- : 1.0;
- double v = c < filter->f.radius ? w * filter->f.weight(&filter->f, c) : 0.0;
- return filter->clamp ? fmax(0.0, fmin(1.0, v)) : v;
+ if (!kernel->weight)
+ return 1.0;
+
+ // All windows are symmetric, this makes life easier
+ x = fabs(x);
+ if (x >= kernel->radius)
+ return 0.0;
+
+ // Stretch and taper the window size as needed
+ x = kernel->blur > 0.0 ? x / kernel->blur : x;
+ x = x <= kernel->taper ? 0.0 : (x - kernel->taper) / (1 - kernel->taper);
+
+ return kernel->weight(kernel, x);
+}
+
+// Evaluate a filter's kernel and window at a given absolute position
+static double sample_filter(struct filter_kernel *filter, double x)
+{
+ // The window is always stretched to the entire kernel
+ double w = sample_window(&filter->w, x / filter->f.radius * filter->w.radius);
+ double k = sample_window(&filter->f, x / filter->inv_scale);
+ return filter->clamp ? fmax(0.0, fmin(1.0, w * k)) : w * k;
}
// Calculate the 1D filtering kernel for N sample points.
// N = number of samples, which is filter->size
// The weights will be stored in out_w[0] to out_w[N - 1]
// f = x0 - abs(x0), subpixel position in the range [0,1) or [0,1].
-static void mp_compute_weights(struct filter_kernel *filter,
- struct filter_window *window,
- double f, float *out_w)
+static void mp_compute_weights(struct filter_kernel *filter, double f,
+ float *out_w)
{
assert(filter->size > 0);
double sum = 0;
for (int n = 0; n < filter->size; n++) {
double x = f - (n - filter->size / 2 + 1);
- double w = sample_filter(filter, window, x);
+ double w = sample_filter(filter, x);
out_w[n] = w;
sum += w;
}
@@ -138,17 +149,16 @@ static void mp_compute_weights(struct filter_kernel *filter,
// [0.5 / count, 1.0 - 0.5 / count].
void mp_compute_lut(struct filter_kernel *filter, int count, float *out_array)
{
- struct filter_window *window = &filter->w;
if (filter->polar) {
// Compute a 1D array indexed by radius
for (int x = 0; x < count; x++) {
double r = x * filter->f.radius / (count - 1);
- out_array[x] = sample_filter(filter, window, r);
+ out_array[x] = sample_filter(filter, r);
}
} else {
// Compute a 2D array indexed by subpixel position
for (int n = 0; n < count; n++) {
- mp_compute_weights(filter, window, n / (double)(count - 1),
+ mp_compute_weights(filter, n / (double)(count - 1),
out_array + filter->size * n);
}
}
@@ -321,6 +331,7 @@ const struct filter_window mp_filter_windows[] = {
{"triangle", 1, triangle},
{"bartlett", 1, triangle},
{"hanning", 1, hanning},
+ {"tukey", 1, hanning, .taper = 0.5},
{"hamming", 1, hamming},
{"quadric", 1.5, quadric},
{"welch", 1, welch},
diff --git a/video/out/filter_kernels.h b/video/out/filter_kernels.h
index 2354ef4d0c..fc90a1cdde 100644
--- a/video/out/filter_kernels.h
+++ b/video/out/filter_kernels.h
@@ -22,6 +22,7 @@ struct filter_window {
double params[2]; // User-defined custom filter parameters. Not used by
// all filters
double blur; // Blur coefficient (sharpens or widens the filter)
+ double taper; // Taper coefficient (flattens the filter's center)
};
struct filter_kernel {
diff --git a/video/out/opengl/common.h b/video/out/opengl/common.h
index 5abe839b8d..afb5b61f7e 100644
--- a/video/out/opengl/common.h
+++ b/video/out/opengl/common.h
@@ -36,6 +36,9 @@
#include <OpenGL/gl.h>
#include <OpenGL/gl3.h>
#include <OpenGL/glext.h>
+#elif HAVE_IOS_GL
+#include <OpenGLES/ES2/glext.h>
+#include <OpenGLES/ES3/glext.h>
#elif HAVE_ANDROID_GL
#include <GLES3/gl3.h>
#else
diff --git a/video/out/opengl/context.c b/video/out/opengl/context.c
index 0f5b61e37c..fb3471cd3b 100644
--- a/video/out/opengl/context.c
+++ b/video/out/opengl/context.c
@@ -125,7 +125,7 @@ int mpgl_validate_backend_opt(struct mp_log *log, const struct m_option *opt,
#if HAVE_C11_TLS
#define MP_TLS _Thread_local
-#elif defined(__GNUC__)
+#elif HAVE_GCC_TLS
#define MP_TLS __thread
#endif
diff --git a/video/out/opengl/context_rpi.c b/video/out/opengl/context_rpi.c
index 96c8199ef4..fa19a6c205 100644
--- a/video/out/opengl/context_rpi.c
+++ b/video/out/opengl/context_rpi.c
@@ -42,7 +42,7 @@ struct priv {
EGL_DISPMANX_WINDOW_T egl_window;
int x, y, w, h;
double display_fps;
- atomic_bool reload_display;
+ atomic_int reload_display;
int win_params[4];
};
diff --git a/video/out/opengl/header_fixes.h b/video/out/opengl/header_fixes.h
index 9953f7e497..9a7108dcda 100644
--- a/video/out/opengl/header_fixes.h
+++ b/video/out/opengl/header_fixes.h
@@ -92,6 +92,23 @@
#define GL_UNSIGNED_SHORT_8_8_REV_APPLE 0x85BB
#endif
+#if HAVE_IOS_GL
+#define GL_WRITE_ONLY GL_WRITE_ONLY_OES
+#define GL_TEXTURE_1D 0x0DE0
+#define GL_R16 0x822A
+#define GL_RG16 0x822C
+#define GL_RGB10 0x8052
+#define GL_RGB16 0x8054
+#define GL_RGBA12 0x805A
+#define GL_RGBA16 0x805B
+#define GL_LUMINANCE8 GL_LUMINANCE8_EXT
+#define GL_LUMINANCE8_ALPHA8 GL_LUMINANCE8_ALPHA8_EXT
+#define GL_LUMINANCE16 0x8042
+#define GL_LUMINANCE16_ALPHA16 0x8048
+#define GL_TEXTURE_RED_SIZE 0x805C
+#define GL_TEXTURE_LUMINANCE_SIZE 0x8060
+#endif
+
// GL_ARB_timer_query and EXT_disjoint_timer_query
#ifndef GL_TIME_ELAPSED
// Same as GL_TIME_ELAPSED_EXT
diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c
index 9461153615..498d89259e 100644
--- a/video/out/opengl/video.c
+++ b/video/out/opengl/video.c
@@ -104,6 +104,7 @@ struct texplane {
struct video_image {
struct texplane planes[4];
struct mp_image *mpi; // original input image
+ uint64_t id; // unique ID identifying mpi contents
bool hwdec_mapped;
};
@@ -153,6 +154,7 @@ struct tex_hook {
struct fbosurface {
struct fbotex fbotex;
+ uint64_t id;
double pts;
};
@@ -322,6 +324,7 @@ static const struct gl_video_opts gl_video_opts_def = {
.target_brightness = 250,
.hdr_tone_mapping = TONE_MAPPING_HABLE,
.tone_mapping_param = NAN,
+ .early_flush = -1,
};
static int validate_scaler_opt(struct mp_log *log, const m_option_t *opt,
@@ -337,7 +340,10 @@ static int validate_window_opt(struct mp_log *log, const m_option_t *opt,
OPT_FLOAT(n"-param1", scaler[i].kernel.params[0], 0), \
OPT_FLOAT(n"-param2", scaler[i].kernel.params[1], 0), \
OPT_FLOAT(n"-blur", scaler[i].kernel.blur, 0), \
+ OPT_FLOATRANGE(n"-taper", scaler[i].kernel.taper, 0, 0.0, 1.0), \
OPT_FLOAT(n"-wparam", scaler[i].window.params[0], 0), \
+ OPT_FLOAT(n"-wblur", scaler[i].window.blur, 0), \
+ OPT_FLOATRANGE(n"-wtaper", scaler[i].window.taper, 0, 0.0, 1.0), \
OPT_FLAG(n"-clamp", scaler[i].clamp, 0), \
OPT_FLOATRANGE(n"-radius", scaler[i].radius, 0, 0.5, 16.0), \
OPT_FLOATRANGE(n"-antiring", scaler[i].antiring, 0, 0.0, 1.0), \
@@ -412,7 +418,8 @@ const struct m_sub_options gl_video_conf = {
OPT_INTRANGE("opengl-tex-pad-x", tex_pad_x, 0, 0, 4096),
OPT_INTRANGE("opengl-tex-pad-y", tex_pad_y, 0, 0, 4096),
OPT_SUBSTRUCT("", icc_opts, mp_icc_conf, 0),
- OPT_FLAG("opengl-early-flush", early_flush, 0),
+ OPT_CHOICE("opengl-early-flush", early_flush, 0,
+ ({"no", 0}, {"yes", 1}, {"auto", -1})),
{0}
},
@@ -515,7 +522,8 @@ static void uninit_scaler(struct gl_video *p, struct scaler *scaler);
static void check_gl_features(struct gl_video *p);
static bool init_format(struct gl_video *p, int fmt, bool test_only);
static void init_image_desc(struct gl_video *p, int fmt);
-static bool gl_video_upload_image(struct gl_video *p, struct mp_image *mpi);
+static bool gl_video_upload_image(struct gl_video *p, struct mp_image *mpi,
+ uint64_t id);
static const char *handle_scaler_opt(const char *name, bool tscale);
static void reinit_from_options(struct gl_video *p);
static void get_scale_factors(struct gl_video *p, bool transpose_rot, double xy[2]);
@@ -563,8 +571,10 @@ void gl_video_set_debug(struct gl_video *p, bool enable)
static void gl_video_reset_surfaces(struct gl_video *p)
{
- for (int i = 0; i < FBOSURFACES_MAX; i++)
+ for (int i = 0; i < FBOSURFACES_MAX; i++) {
+ p->surfaces[i].id = 0;
p->surfaces[i].pts = MP_NOPTS_VALUE;
+ }
p->surface_idx = 0;
p->surface_now = 0;
p->frames_drawn = 0;
@@ -951,6 +961,7 @@ static void unmap_current_image(struct gl_video *p)
p->hwdec->driver->unmap(p->hwdec);
memset(vimg->planes, 0, sizeof(vimg->planes));
vimg->hwdec_mapped = false;
+ vimg->id = 0; // needs to be mapped again
}
}
@@ -958,6 +969,7 @@ static void unref_current_image(struct gl_video *p)
{
unmap_current_image(p);
mp_image_unrefp(&p->image.mpi);
+ p->image.id = 0;
}
static void uninit_video(struct gl_video *p)
@@ -1349,7 +1361,8 @@ static bool scaler_fun_eq(struct scaler_fun a, struct scaler_fun b)
return ((!a.name && !b.name) || strcmp(a.name, b.name) == 0) &&
double_seq(a.params[0], b.params[0]) &&
double_seq(a.params[1], b.params[1]) &&
- a.blur == b.blur;
+ a.blur == b.blur &&
+ a.taper == b.taper;
}
static bool scaler_conf_eq(struct scaler_config a, struct scaler_config b)
@@ -1410,6 +1423,11 @@ static void reinit_scaler(struct gl_video *p, struct scaler *scaler,
if (conf->window.blur > 0.0)
scaler->kernel->w.blur = conf->window.blur;
+ if (conf->kernel.taper > 0.0)
+ scaler->kernel->f.taper = conf->kernel.taper;
+ if (conf->window.taper > 0.0)
+ scaler->kernel->w.taper = conf->window.taper;
+
if (scaler->kernel->f.resizable && conf->radius > 0.0)
scaler->kernel->f.radius = conf->radius;
@@ -1995,8 +2013,6 @@ static void pass_convert_yuv(struct gl_video *p)
p->components = 3;
if (!p->has_alpha || p->opts.alpha_mode == ALPHA_NO) {
GLSL(color.a = 1.0;)
- } else if (p->opts.alpha_mode == ALPHA_BLEND) {
- GLSL(color = vec4(color.rgb * color.a, 1.0);)
} else { // alpha present in image
p->components = 4;
GLSL(color = vec4(color.rgb * color.a, color.a);)
@@ -2519,12 +2535,20 @@ static void pass_draw_to_screen(struct gl_video *p, int fbo)
pass_colormanage(p, p->image_params.color, false);
- // Draw checkerboard pattern to indicate transparency
- if (p->has_alpha && p->opts.alpha_mode == ALPHA_BLEND_TILES) {
- GLSLF("// transparency checkerboard\n");
- GLSL(bvec2 tile = lessThan(fract(gl_FragCoord.xy / 32.0), vec2(0.5));)
- GLSL(vec3 background = vec3(tile.x == tile.y ? 1.0 : 0.75);)
- GLSL(color.rgb = mix(background, color.rgb, color.a);)
+ if (p->has_alpha){
+ if (p->opts.alpha_mode == ALPHA_BLEND_TILES) {
+ // Draw checkerboard pattern to indicate transparency
+ GLSLF("// transparency checkerboard\n");
+ GLSL(bvec2 tile = lessThan(fract(gl_FragCoord.xy / 32.0), vec2(0.5));)
+ GLSL(vec3 background = vec3(tile.x == tile.y ? 1.0 : 0.75);)
+ GLSL(color.rgb = mix(background, color.rgb, color.a);)
+ } else if (p->opts.alpha_mode == ALPHA_BLEND) {
+ // Blend into background color (usually black)
+ struct m_color c = p->opts.background;
+ GLSLF("vec4 background = vec4(%f, %f, %f, %f);\n",
+ c.r / 255.0, c.g / 255.0, c.b / 255.0, c.a / 255.0);
+ GLSL(color = mix(background, vec4(color.rgb, 1.0), color.a);)
+ }
}
pass_opt_hook_point(p, "OUTPUT", NULL);
@@ -2550,22 +2574,23 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t,
// First of all, figure out if we have a frame available at all, and draw
// it manually + reset the queue if not
- if (p->surfaces[p->surface_now].pts == MP_NOPTS_VALUE) {
- if (!gl_video_upload_image(p, t->current))
+ if (p->surfaces[p->surface_now].id == 0) {
+ if (!gl_video_upload_image(p, t->current, t->frame_id))
return;
pass_render_frame(p);
finish_pass_fbo(p, &p->surfaces[p->surface_now].fbotex,
vp_w, vp_h, FBOTEX_FUZZY);
+ p->surfaces[p->surface_now].id = p->image.id;
p->surfaces[p->surface_now].pts = p->image.mpi->pts;
p->surface_idx = p->surface_now;
}
// Find the right frame for this instant
- if (t->current && t->current->pts != MP_NOPTS_VALUE) {
+ if (t->current) {
int next = fbosurface_wrap(p->surface_now + 1);
- while (p->surfaces[next].pts != MP_NOPTS_VALUE &&
- p->surfaces[next].pts > p->surfaces[p->surface_now].pts &&
- p->surfaces[p->surface_now].pts < t->current->pts)
+ while (p->surfaces[next].id &&
+ p->surfaces[next].id > p->surfaces[p->surface_now].id &&
+ p->surfaces[p->surface_now].id < t->frame_id)
{
p->surface_now = next;
next = fbosurface_wrap(next + 1);
@@ -2607,16 +2632,17 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t,
break;
struct mp_image *f = t->frames[i];
- if (!mp_image_params_equal(&f->params, &p->real_image_params) ||
- f->pts == MP_NOPTS_VALUE)
+ uint64_t f_id = t->frame_id + i;
+ if (!mp_image_params_equal(&f->params, &p->real_image_params))
continue;
- if (f->pts > p->surfaces[p->surface_idx].pts) {
- if (!gl_video_upload_image(p, f))
+ if (f_id > p->surfaces[p->surface_idx].id) {
+ if (!gl_video_upload_image(p, f, f_id))
return;
pass_render_frame(p);
finish_pass_fbo(p, &p->surfaces[surface_dst].fbotex,
vp_w, vp_h, FBOTEX_FUZZY);
+ p->surfaces[surface_dst].id = f_id;
p->surfaces[surface_dst].pts = f->pts;
p->surface_idx = surface_dst;
surface_dst = fbosurface_wrap(surface_dst + 1);
@@ -2631,11 +2657,9 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t,
bool valid = true;
for (int i = surface_bse, ii; valid && i != surface_end; i = ii) {
ii = fbosurface_wrap(i + 1);
- if (p->surfaces[i].pts == MP_NOPTS_VALUE ||
- p->surfaces[ii].pts == MP_NOPTS_VALUE)
- {
+ if (p->surfaces[i].id == 0 || p->surfaces[ii].id == 0) {
valid = false;
- } else if (p->surfaces[ii].pts < p->surfaces[i].pts) {
+ } else if (p->surfaces[ii].id < p->surfaces[i].id) {