summaryrefslogtreecommitdiffstats
path: root/filters
diff options
context:
space:
mode:
Diffstat (limited to 'filters')
-rw-r--r--filters/f_async_queue.c375
-rw-r--r--filters/f_async_queue.h135
-rw-r--r--filters/f_auto_filters.c120
-rw-r--r--filters/f_auto_filters.h4
-rw-r--r--filters/f_autoconvert.c113
-rw-r--r--filters/f_autoconvert.h3
-rw-r--r--filters/f_decoder_wrapper.c670
-rw-r--r--filters/f_decoder_wrapper.h41
-rw-r--r--filters/f_demux_in.c14
-rw-r--r--filters/f_hwtransfer.c499
-rw-r--r--filters/f_hwtransfer.h17
-rw-r--r--filters/f_lavfi.c128
-rw-r--r--filters/f_lavfi.h3
-rw-r--r--filters/f_output_chain.c95
-rw-r--r--filters/f_output_chain.h5
-rw-r--r--filters/f_swresample.c73
-rw-r--r--filters/f_swresample.h4
-rw-r--r--filters/f_swscale.c14
-rw-r--r--filters/f_swscale.h3
-rw-r--r--filters/f_utils.c2
-rw-r--r--filters/f_utils.h6
-rw-r--r--filters/filter.c194
-rw-r--r--filters/filter.h82
-rw-r--r--filters/filter_internal.h5
-rw-r--r--filters/user_filters.c24
-rw-r--r--filters/user_filters.h2
26 files changed, 2101 insertions, 530 deletions
diff --git a/filters/f_async_queue.c b/filters/f_async_queue.c
new file mode 100644
index 0000000000..95db385d7f
--- /dev/null
+++ b/filters/f_async_queue.c
@@ -0,0 +1,375 @@
+#include <limits.h>
+#include <stdatomic.h>
+
+#include "audio/aframe.h"
+#include "common/common.h"
+#include "common/msg.h"
+#include "osdep/threads.h"
+
+#include "f_async_queue.h"
+#include "filter_internal.h"
+
// Opaque public handle handed out to API users.
struct mp_async_queue {
    // This is just a wrapper, so the API user can talloc_free() it, instead of
    // having to call a special unref function.
    struct async_queue *q;
};
+
// Shared, refcounted queue state. References are held by the public wrapper
// (struct mp_async_queue) and by each attached access filter.
struct async_queue {
    _Atomic uint64_t refcount;

    mp_mutex lock;

    // -- all fields below are protected by lock
    struct mp_async_queue_config cfg;
    bool active; // queue was resumed; consumer may request frames
    bool reading; // data flow: reading => consumer has requested frames
    int64_t samples_size; // queue size in the cfg.sample_unit
    size_t byte_size; // queue size in bytes (using approx. frame sizes)
    int num_frames;
    struct mp_frame *frames; // index 0 = newest, num_frames - 1 = oldest
    int eof_count; // number of MP_FRAME_EOF in frames[], for draining
    struct mp_filter *conn[2]; // filters: in/producer (0), out/consumer (1)
};
+
+static void reset_queue(struct async_queue *q)
+{
+ mp_mutex_lock(&q->lock);
+ q->active = q->reading = false;
+ for (int n = 0; n < q->num_frames; n++)
+ mp_frame_unref(&q->frames[n]);
+ q->num_frames = 0;
+ q->eof_count = 0;
+ q->samples_size = 0;
+ q->byte_size = 0;
+ for (int n = 0; n < 2; n++) {
+ if (q->conn[n])
+ mp_filter_wakeup(q->conn[n]);
+ }
+ mp_mutex_unlock(&q->lock);
+}
+
+static void unref_queue(struct async_queue *q)
+{
+ if (!q)
+ return;
+ int count = atomic_fetch_add(&q->refcount, -1) - 1;
+ assert(count >= 0);
+ if (count == 0) {
+ reset_queue(q);
+ mp_mutex_destroy(&q->lock);
+ talloc_free(q);
+ }
+}
+
+static void on_free_queue(void *p)
+{
+ struct mp_async_queue *q = p;
+ unref_queue(q->q);
+}
+
+struct mp_async_queue *mp_async_queue_create(void)
+{
+ struct mp_async_queue *r = talloc_zero(NULL, struct mp_async_queue);
+ r->q = talloc_zero(NULL, struct async_queue);
+ *r->q = (struct async_queue){
+ .refcount = 1,
+ };
+ mp_mutex_init(&r->q->lock);
+ talloc_set_destructor(r, on_free_queue);
+ mp_async_queue_set_config(r, (struct mp_async_queue_config){0});
+ return r;
+}
+
+static int64_t frame_get_samples(struct async_queue *q, struct mp_frame frame)
+{
+ int64_t res = 1;
+ if (frame.type == MP_FRAME_AUDIO && q->cfg.sample_unit == AQUEUE_UNIT_SAMPLES) {
+ struct mp_aframe *aframe = frame.data;
+ res = mp_aframe_get_size(aframe);
+ }
+ if (mp_frame_is_signaling(frame))
+ return 0;
+ return res;
+}
+
+static bool is_full(struct async_queue *q)
+{
+ if (q->samples_size >= q->cfg.max_samples || q->byte_size >= q->cfg.max_bytes)
+ return true;
+ if (q->num_frames >= 2 && q->cfg.max_duration > 0) {
+ double pts1 = mp_frame_get_pts(q->frames[q->num_frames - 1]);
+ double pts2 = mp_frame_get_pts(q->frames[0]);
+ if (pts1 != MP_NOPTS_VALUE && pts2 != MP_NOPTS_VALUE &&
+ pts2 - pts1 >= q->cfg.max_duration)
+ return true;
+ }
+ return false;
+}
+
+// Add or remove a frame from the accounted queue size.
+// dir==1: add, dir==-1: remove
+static void account_frame(struct async_queue *q, struct mp_frame frame,
+ int dir)
+{
+ assert(dir == 1 || dir == -1);
+
+ q->samples_size += dir * frame_get_samples(q, frame);
+ q->byte_size += dir * mp_frame_approx_size(frame);
+
+ if (frame.type == MP_FRAME_EOF)
+ q->eof_count += dir;
+}
+
+static void recompute_sizes(struct async_queue *q)
+{
+ q->eof_count = 0;
+ q->samples_size = 0;
+ q->byte_size = 0;
+ for (int n = 0; n < q->num_frames; n++)
+ account_frame(q, q->frames[n], 1);
+}
+
+void mp_async_queue_set_config(struct mp_async_queue *queue,
+ struct mp_async_queue_config cfg)
+{
+ struct async_queue *q = queue->q;
+
+ cfg.max_bytes = MPCLAMP(cfg.max_bytes, 1, (size_t)-1 / 2);
+
+ assert(cfg.sample_unit == AQUEUE_UNIT_FRAME ||
+ cfg.sample_unit == AQUEUE_UNIT_SAMPLES);
+
+ cfg.max_samples = MPMAX(cfg.max_samples, 1);
+
+ mp_mutex_lock(&q->lock);
+ bool recompute = q->cfg.sample_unit != cfg.sample_unit;
+ q->cfg = cfg;
+ if (recompute)
+ recompute_sizes(q);
+ mp_mutex_unlock(&q->lock);
+}
+
+void mp_async_queue_reset(struct mp_async_queue *queue)
+{
+ reset_queue(queue->q);
+}
+
+bool mp_async_queue_is_active(struct mp_async_queue *queue)
+{
+ struct async_queue *q = queue->q;
+ mp_mutex_lock(&q->lock);
+ bool res = q->active;
+ mp_mutex_unlock(&q->lock);
+ return res;
+}
+
+bool mp_async_queue_is_full(struct mp_async_queue *queue)
+{
+ struct async_queue *q = queue->q;
+ mp_mutex_lock(&q->lock);
+ bool res = is_full(q);
+ mp_mutex_unlock(&q->lock);
+ return res;
+}
+
+void mp_async_queue_resume(struct mp_async_queue *queue)
+{
+ struct async_queue *q = queue->q;
+
+ mp_mutex_lock(&q->lock);
+ if (!q->active) {
+ q->active = true;
+ // Possibly make the consumer request new frames.
+ if (q->conn[1])
+ mp_filter_wakeup(q->conn[1]);
+ }
+ mp_mutex_unlock(&q->lock);
+}
+
+void mp_async_queue_resume_reading(struct mp_async_queue *queue)
+{
+ struct async_queue *q = queue->q;
+
+ mp_mutex_lock(&q->lock);
+ if (!q->active || !q->reading) {
+ q->active = true;
+ q->reading = true;
+ // Possibly start producer/consumer.
+ for (int n = 0; n < 2; n++) {
+ if (q->conn[n])
+ mp_filter_wakeup(q->conn[n]);
+ }
+ }
+ mp_mutex_unlock(&q->lock);
+}
+
+int64_t mp_async_queue_get_samples(struct mp_async_queue *queue)
+{
+ struct async_queue *q = queue->q;
+ mp_mutex_lock(&q->lock);
+ int64_t res = q->samples_size;
+ mp_mutex_unlock(&q->lock);
+ return res;
+}
+
+int mp_async_queue_get_frames(struct mp_async_queue *queue)
+{
+ struct async_queue *q = queue->q;
+ mp_mutex_lock(&q->lock);
+ int res = q->num_frames;
+ mp_mutex_unlock(&q->lock);
+ return res;
+}
+
// Per-filter private state shared by the producer and consumer access filters.
struct priv {
    struct async_queue *q;      // attached queue (holds a reference to it)
    struct mp_filter *notify;   // optional filter woken on full/empty events
};
+
+static void destroy(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+ struct async_queue *q = p->q;
+
+ mp_mutex_lock(&q->lock);
+ for (int n = 0; n < 2; n++) {
+ if (q->conn[n] == f)
+ q->conn[n] = NULL;
+ }
+ mp_mutex_unlock(&q->lock);
+
+ unref_queue(q);
+}
+
+static void process_in(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+ struct async_queue *q = p->q;
+ assert(q->conn[0] == f);
+
+ mp_mutex_lock(&q->lock);
+ if (!q->reading) {
+ // mp_async_queue_reset()/reset_queue() is usually called asynchronously,
+ // so we might have requested a frame earlier, and now can't use it.
+ // Discard it; the expectation is that this is a benign logical race
+ // condition, and the filter graph will be reset anyway.
+ if (mp_pin_out_has_data(f->ppins[0])) {
+ struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
+ mp_frame_unref(&frame);
+ MP_DBG(f, "discarding frame due to async reset\n");
+ }
+ } else if (!is_full(q) && mp_pin_out_request_data(f->ppins[0])) {
+ struct mp_frame frame = mp_pin_out_read(f->ppins[0]);
+ account_frame(q, frame, 1);
+ MP_TARRAY_INSERT_AT(q, q->frames, q->num_frames, 0, frame);
+ // Notify reader that we have new frames.
+ if (q->conn[1])
+ mp_filter_wakeup(q->conn[1]);
+ bool full = is_full(q);
+ if (!full)
+ mp_pin_out_request_data_next(f->ppins[0]);
+ if (p->notify && full)
+ mp_filter_wakeup(p->notify);
+ }
+ if (p->notify && !q->num_frames)
+ mp_filter_wakeup(p->notify);
+ mp_mutex_unlock(&q->lock);
+}
+
+static void process_out(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+ struct async_queue *q = p->q;
+ assert(q->conn[1] == f);
+
+ if (!mp_pin_in_needs_data(f->ppins[0]))
+ return;
+
+ mp_mutex_lock(&q->lock);
+ if (q->active && !q->reading) {
+ q->reading = true;
+ mp_filter_wakeup(q->conn[0]);
+ }
+ if (q->active && q->num_frames) {
+ struct mp_frame frame = q->frames[q->num_frames - 1];
+ q->num_frames -= 1;
+ account_frame(q, frame, -1);
+ assert(q->samples_size >= 0);
+ mp_pin_in_write(f->ppins[0], frame);
+ // Notify writer that we need new frames.
+ if (q->conn[0])
+ mp_filter_wakeup(q->conn[0]);
+ }
+ mp_mutex_unlock(&q->lock);
+}
+
+static void reset(struct mp_filter *f)
+{
+ struct priv *p = f->priv;
+ struct async_queue *q = p->q;
+
+ mp_mutex_lock(&q->lock);
+ // If the queue is in reading state, it is logical that it should request
+ // input immediately.
+ if (mp_pin_get_dir(f->pins[0]) == MP_PIN_IN && q->reading)
+ mp_filter_wakeup(f);
+ mp_mutex_unlock(&q->lock);
+}
+
// Producer end ("in" filter): feeds frames into the queue.
static const struct mp_filter_info info_in = {
    .name = "async_queue_in",
    .priv_size = sizeof(struct priv),
    .destroy = destroy,
    .process = process_in,
    .reset = reset,
};

// Consumer end ("out" filter): reads frames out of the queue. Note there is
// deliberately no .reset handler: resetting the consumer filter does not
// affect the queue itself.
static const struct mp_filter_info info_out = {
    .name = "async_queue_out",
    .priv_size = sizeof(struct priv),
    .destroy = destroy,
    .process = process_out,
};
+
+void mp_async_queue_set_notifier(struct mp_filter *f, struct mp_filter *notify)
+{
+ assert(mp_filter_get_info(f) == &info_in);
+ struct priv *p = f->priv;
+ if (p->notify != notify) {
+ p->notify = notify;
+ if (notify)
+ mp_filter_wakeup(notify);
+ }
+}
+
+struct mp_filter *mp_async_queue_create_filter(struct mp_filter *parent,
+ enum mp_pin_dir dir,
+ struct mp_async_queue *queue)
+{
+ bool is_in = dir == MP_PIN_IN;
+ assert(queue);
+
+ struct mp_filter *f = mp_filter_create(parent, is_in ? &info_in : &info_out);
+ if (!f)
+ return NULL;
+
+ struct priv *p = f->priv;
+
+ struct async_queue *q = queue->q;
+
+ mp_filter_add_pin(f, dir, is_in ? "in" : "out");
+
+ atomic_fetch_add(&q->refcount, 1);
+ p->q = q;
+
+ mp_mutex_lock(&q->lock);
+ int slot = is_in ? 0 : 1;
+ assert(!q->conn[slot]); // fails if already connected on this end
+ q->conn[slot] = f;
+ mp_mutex_unlock(&q->lock);
+
+ return f;
+}
diff --git a/filters/f_async_queue.h b/filters/f_async_queue.h
new file mode 100644
index 0000000000..46dafcdc97
--- /dev/null
+++ b/filters/f_async_queue.h
@@ -0,0 +1,135 @@
+#pragma once
+
+#include <stdint.h>
+#include "filter.h"
+
+// A thread safe queue, which buffers a configurable number of frames like a
+// FIFO. It's part of the filter framework, and intended to provide such a
+// queue between filters. Since a filter graph can't be used from multiple
+// threads without synchronization, this provides 2 filters, which are
+// implicitly connected. (This seemed much saner than having special thread
+// safe mp_pins or such in the filter framework.)
+struct mp_async_queue;
+
+// Create a blank queue. Can be freed with talloc_free(). To use it, you need
+// to create input and output filters with mp_async_queue_create_filter().
+// Note that freeing it will only unref it. (E.g. you can free it once you've
+// created the input and output filters.)
+struct mp_async_queue *mp_async_queue_create(void);
+
+// Clear all queued data and make the queue "inactive". The latter prevents any
+// further communication until mp_async_queue_resume() is called.
+// further communication until mp_async_queue_resume() is called.
+// For correct operation, you also need to call reset on the access filters.
+
+// Put the queue into "active" mode. If it wasn't, then the consumer is woken
+// up (and if there is no data in the queue, this will in turn wake up the
+// producer, i.e. start transfers automatically).
+// If there is a writer end but no reader end, this will simply make the queue
+// fill up.
+void mp_async_queue_resume(struct mp_async_queue *queue);
+
+// Like mp_async_queue_resume(), but additionally allows the producer to write
+// to the queue, even if the consumer has not requested any data yet.
+void mp_async_queue_resume_reading(struct mp_async_queue *queue);
+
+// Returns true if out of mp_async_queue_reset()/mp_async_queue_resume(), the
+// latter was most recently called.
+bool mp_async_queue_is_active(struct mp_async_queue *queue);
+
+// Returns true if the queue reached its configured size, the input filter
+// accepts no further frames. Always returns false if not active (then it does
+// not accept any input at all).
+bool mp_async_queue_is_full(struct mp_async_queue *queue);
+
+// Get the total of samples buffered within the queue itself. This doesn't count
+// samples buffered in the access filters. mp_async_queue_config.sample_unit is
+// used to define what "1 sample" means.
+int64_t mp_async_queue_get_samples(struct mp_async_queue *queue);
+
+// Get the total number of frames buffered within the queue itself. Frames
+// buffered in the access filters are not included.
+int mp_async_queue_get_frames(struct mp_async_queue *queue);
+
+// Create a filter to access the queue, and connect it. It's not allowed to
+// connect an already connected end of the queue. The filter can be freed at
+// any time.
+//
+// The queue starts out in "inactive" mode, where the queue does not allow
+// the producer to write any data. You need to call mp_async_queue_resume() to
+// start communication. Actual transfers happen only once the consumer filter
+// has read requests on its mp_pin.
+// If the producer filter requested a new frame from its filter graph, and the
+// queue is asynchronously set to "inactive", then the requested frame will be
+// silently discarded once it reaches the producer filter.
+//
+// Resetting a queue filter does not affect the queue at all. Managing the
+// queue state is the API user's responsibility. Note that resetting an input
+// filter (dir==MP_PIN_IN) while the queue is active and in "reading" state
+// (the output filter requested data at any point before the last
+// mp_async_queue_reset(), or mp_async_queue_resume_reading() was called), the
+// filter will immediately request data after the reset.
+//
+// For proper global reset, this order should be preferred:
+// - mp_async_queue_reset()
+// - reset producer and consumer filters on their respective threads (in any
+// order)
+// - do whatever other reset work is required
+// - mp_async_queue_resume()
+//
+// parent: filter graph the filter should be part of (or for standalone use,
+// create one with mp_filter_create_root())
+// dir: MP_PIN_IN for a filter that writes to the queue, MP_PIN_OUT to read
+// queue: queue to attach to (which end of it depends on dir)
+// The returned filter will have exactly 1 pin with the requested dir.
+struct mp_filter *mp_async_queue_create_filter(struct mp_filter *parent,
+ enum mp_pin_dir dir,
+ struct mp_async_queue *queue);
+
+// Set a filter that should be woken up with mp_filter_wakeup() in the following
+// situations:
+// - mp_async_queue_is_full() changes to true (at least for a short moment)
+// - mp_async_queue_get_frames() changes to 0 (at least until new data is fed)
+// This is a workaround for the filter design, which does not allow you to write
+// to the queue in a "sequential" way (write, then check condition).
+// Calling this again on the same filter removes the previous notify filter.
+// f: must be a filter returned by mp_async_queue_create_filter(, MP_PIN_IN,)
+// notify: filter to be woken up
+void mp_async_queue_set_notifier(struct mp_filter *f, struct mp_filter *notify);
+
// Defines what one "sample" means for the queue's size accounting/limits.
enum mp_async_queue_sample_unit {
    AQUEUE_UNIT_FRAME = 0, // a frame counts as 1 sample
    AQUEUE_UNIT_SAMPLES, // number of audio samples (1 for other media types,
                         // 0 for signaling)
};
+
// Setting this struct to all-0 is equivalent to defaults.
struct mp_async_queue_config {
    // Maximum size of frames buffered. mp_frame_approx_size() is used. May be
    // overshot by up to 1 full frame. Clamped to [1, SIZE_MAX/2].
    int64_t max_bytes;

    // Defines what a "sample" is; affects the fields below.
    enum mp_async_queue_sample_unit sample_unit;

    // Maximum number of frames allowed to be buffered at a time (if
    // unit!=AQUEUE_UNIT_FRAME, can be overshot by the contents of 1 mp_frame).
    // 0 is treated as 1.
    // NOTE(review): despite "number of frames" above, this limit is counted
    // in whatever unit sample_unit selects — confirm wording.
    int64_t max_samples;

    // Maximum allowed timestamp difference between 2 frames. This still allows
    // at least 2 samples. Behavior is unclear on timestamp resets (even if EOF
    // frames are between them). A value of 0 disables this completely.
    double max_duration;
};
+
+// Configure the queue size. By default, the queue size is 1 frame.
+// The wakeup_threshold_* fields can be used to avoid too-frequent wakeups by
+// delaying wakeups, thereby letting the producer filter multiple frames at
+// once. (NOTE(review): no wakeup_threshold_* fields exist in
+// mp_async_queue_config — this appears to be a stale comment; confirm.)
+// In all cases, the filters can still read/write if the producer/consumer got
+// woken up by something else.
+// If the current queue contains more frames than the new config allows, the
+// queue will remain over-allocated until these frames have been read.
+void mp_async_queue_set_config(struct mp_async_queue *queue,
+ struct mp_async_queue_config cfg);
diff --git a/filters/f_auto_filters.c b/filters/f_auto_filters.c
index 944fe89eab..6fa38b96c2 100644
--- a/filters/f_auto_filters.c
+++ b/filters/f_auto_filters.c
@@ -1,9 +1,12 @@
#include <math.h>
+#include "audio/aframe.h"
+#include "audio/format.h"
#include "common/common.h"
#include "common/msg.h"
#include "options/m_config.h"
#include "options/options.h"
+#include "video/filter/refqueue.h"
#include "video/mp_image.h"
#include "video/mp_image_pool.h"
@@ -19,7 +22,7 @@
struct deint_priv {
struct mp_subfilter sub;
int prev_imgfmt;
- int prev_setting;
+ bool deinterlace_active;
struct m_config_cache *opts;
};
@@ -43,15 +46,18 @@ static void deint_process(struct mp_filter *f)
return;
}
+ struct mp_image *img = frame.data;
+ bool interlaced = img->fields & MP_IMGFIELD_INTERLACED;
+
m_config_cache_update(p->opts);
struct filter_opts *opts = p->opts->opts;
+ bool should_deinterlace = (opts->deinterlace == -1 && interlaced) ||
+ opts->deinterlace == 1;
- if (!opts->deinterlace)
+ if (!should_deinterlace)
mp_subfilter_destroy(&p->sub);
- struct mp_image *img = frame.data;
-
- if (img->imgfmt == p->prev_imgfmt && p->prev_setting == opts->deinterlace) {
+ if (img->imgfmt == p->prev_imgfmt && p->deinterlace_active == should_deinterlace) {
mp_subfilter_continue(&p->sub);
return;
}
@@ -62,24 +68,50 @@ static void deint_process(struct mp_filter *f)
assert(!p->sub.filter);
p->prev_imgfmt = img->imgfmt;
- p->prev_setting = opts->deinterlace;
- if (!p->prev_setting) {
+ p->deinterlace_active = should_deinterlace;
+ if (!p->deinterlace_active) {
mp_subfilter_continue(&p->sub);
return;
}
+ char *field_parity;
+ switch (opts->field_parity) {
+ case MP_FIELD_PARITY_TFF:
+ field_parity = "tff";
+ break;
+ case MP_FIELD_PARITY_BFF:
+ field_parity = "bff";
+ break;
+ default:
+ field_parity = "auto";
+ }
+
bool has_filter = true;
if (img->imgfmt == IMGFMT_VDPAU) {
- char *args[] = {"deint", "yes", NULL};
+ char *args[] = {"deint", "yes",
+ "parity", field_parity, NULL};
p->sub.filter =
mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "vdpaupp", args);
} else if (img->imgfmt == IMGFMT_D3D11) {
+ char *args[] = {"parity", field_parity, NULL};
p->sub.filter =
- mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "d3d11vpp", NULL);
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "d3d11vpp", args);
} else if (img->imgfmt == IMGFMT_CUDA) {
- char *args[] = {"mode", "send_field", NULL};
+ char *args[] = {"mode", "send_field",
+ "parity", field_parity, NULL};
+ p->sub.filter =
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "bwdif_cuda", args);
+ } else if (img->imgfmt == IMGFMT_VULKAN) {
+ char *args[] = {"mode", "send_field",
+ "parity", field_parity, NULL};
p->sub.filter =
- mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "yadif_cuda", args);
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "bwdif_vulkan", args);
+ } else if (img->imgfmt == IMGFMT_VAAPI) {
+ char *args[] = {"deint", "motion-adaptive",
+ "interlaced-only", "yes",
+ "parity", field_parity, NULL};
+ p->sub.filter =
+ mp_create_user_filter(f, MP_OUTPUT_CHAIN_VIDEO, "vavpp", args);
} else {
has_filter = false;
}
@@ -94,7 +126,7 @@ static void deint_process(struct mp_filter *f)
struct mp_autoconvert *ac = mp_autoconvert_create(subf);
if (ac) {
filters[0] = ac->f;
- // We know vf_yadif does not support hw inputs.
+ // We know vf_bwdif does not support hw inputs.
mp_autoconvert_add_all_sw_imgfmts(ac);
if (!mp_autoconvert_probe_input_video(ac, img)) {
@@ -106,9 +138,10 @@ static void deint_process(struct mp_filter *f)
}
}
- char *args[] = {"mode", "send_field", NULL};
+ char *args[] = {"mode", "send_field",
+ "parity", field_parity, NULL};
filters[1] =
- mp_create_user_filter(subf, MP_OUTPUT_CHAIN_VIDEO, "yadif", args);
+ mp_create_user_filter(subf, MP_OUTPUT_CHAIN_VIDEO, "bwdif", args);
mp_chain_filters(subf->ppins[0], subf->ppins[1], filters, 2);
p->sub.filter = subf;
@@ -152,6 +185,12 @@ static const struct mp_filter_info deint_filter = {
.destroy = deint_destroy,
};
+bool mp_deint_active(struct mp_filter *f)
+{
+ struct deint_priv *p = f->priv;
+ return p->deinterlace_active;
+}
+
struct mp_filter *mp_deint_create(struct mp_filter *parent)
{
struct mp_filter *f = mp_filter_create(parent, &deint_filter);
@@ -295,7 +334,8 @@ struct mp_filter *mp_autorotate_create(struct mp_filter *parent)
struct aspeed_priv {
struct mp_subfilter sub;
- double cur_speed;
+ double cur_speed, cur_speed_drop;
+ int current_filter;
};
static void aspeed_process(struct mp_filter *f)
@@ -305,26 +345,48 @@ static void aspeed_process(struct mp_filter *f)
if (!mp_subfilter_read(&p->sub))
return;
- if (fabs(p->cur_speed - 1.0) < 1e-8) {
+ if (!p->sub.filter)
+ p->current_filter = 0;
+
+ double speed = p->cur_speed * p->cur_speed_drop;
+
+ int req_filter = 0;
+ if (fabs(speed - 1.0) >= 1e-8) {
+ req_filter = p->cur_speed_drop == 1.0 ? 1 : 2;
+ if (p->sub.frame.type == MP_FRAME_AUDIO &&
+ !af_fmt_is_pcm(mp_aframe_get_format(p->sub.frame.data)))
+ req_filter = 2;
+ }
+
+ if (req_filter != p->current_filter) {
if (p->sub.filter)
- MP_VERBOSE(f, "removing scaletempo\n");
+ MP_VERBOSE(f, "removing audio speed filter\n");
if (!mp_subfilter_drain_destroy(&p->sub))
return;
- } else if (!p->sub.filter) {
- MP_VERBOSE(f, "adding scaletempo\n");
- p->sub.filter =
- mp_create_user_filter(f, MP_OUTPUT_CHAIN_AUDIO, "scaletempo", NULL);
- if (!p->sub.filter) {
- MP_ERR(f, "could not create scaletempo filter\n");
- mp_subfilter_continue(&p->sub);
- return;
+
+ if (req_filter) {
+ if (req_filter == 1) {
+ MP_VERBOSE(f, "adding scaletempo2\n");
+ p->sub.filter = mp_create_user_filter(f, MP_OUTPUT_CHAIN_AUDIO,
+ "scaletempo2", NULL);
+ } else if (req_filter == 2) {
+ MP_VERBOSE(f, "adding drop\n");
+ p->sub.filter = mp_create_user_filter(f, MP_OUTPUT_CHAIN_AUDIO,
+ "drop", NULL);
+ }
+ if (!p->sub.filter) {
+ MP_ERR(f, "could not create filter\n");
+ mp_subfilter_continue(&p->sub);
+ return;
+ }
+ p->current_filter = req_filter;
}
}
if (p->sub.filter) {
struct mp_filter_command cmd = {
.type = MP_FILTER_COMMAND_SET_SPEED,
- .speed = p->cur_speed,
+ .speed = speed,
};
mp_filter_command(p->sub.filter, &cmd);
}
@@ -341,6 +403,11 @@ static bool aspeed_command(struct mp_filter *f, struct mp_filter_command *cmd)
return true;
}
+ if (cmd->type == MP_FILTER_COMMAND_SET_SPEED_DROP) {
+ p->cur_speed_drop = cmd->speed;
+ return true;
+ }
+
if (cmd->type == MP_FILTER_COMMAND_IS_ACTIVE) {
cmd->is_active = !!p->sub.filter;
return true;
@@ -381,6 +448,7 @@ struct mp_filter *mp_autoaspeed_create(struct mp_filter *parent)
struct aspeed_priv *p = f->priv;
p->cur_speed = 1.0;
+ p->cur_speed_drop = 1.0;
p->sub.in = mp_filter_add_pin(f, MP_PIN_IN, "in");
p->sub.out = mp_filter_add_pin(f, MP_PIN_OUT, "out");
diff --git a/filters/f_auto_filters.h b/filters/f_auto_filters.h
index 98043c9301..f926f6e449 100644
--- a/filters/f_auto_filters.h
+++ b/filters/f_auto_filters.h
@@ -9,5 +9,7 @@ struct mp_filter *mp_deint_create(struct mp_filter *parent);
// Rotate according to mp_image.rotate and VO capabilities.
struct mp_filter *mp_autorotate_create(struct mp_filter *parent);
-// Insert a filter that inserts scaletempo depending on speed settings.
+// Insert a filter that inserts scaletempo2 depending on speed settings.
struct mp_filter *mp_autoaspeed_create(struct mp_filter *parent);
+
+bool mp_deint_active(struct mp_filter *parent);
diff --git a/filters/f_autoconvert.c b/filters/f_autoconvert.c
index 5e0caaf321..e045d74c96 100644
--- a/filters/f_autoconvert.c
+++ b/filters/f_autoconvert.c
@@ -1,5 +1,3 @@
-#include "config.h"
-
#include "audio/aframe.h"
#include "audio/chmap_sel.h"
#include "audio/format.h"
@@ -152,9 +150,14 @@ static bool build_image_converter(struct mp_autoconvert *c, struct mp_log *log,
for (int n = 0; n < p->num_imgfmts; n++) {
bool samefmt = img->params.imgfmt == p->imgfmts[n];
bool samesubffmt = img->params.hw_subfmt == p->subfmts[n];
- if (samefmt && (samesubffmt || !p->subfmts[n])) {
+ /*
+ * In practice, `p->subfmts` is not usually populated today, in which
+ * case we must actively probe formats below to establish if the VO can
+ * accept the subfmt being used by the hwdec.
+ */
+ if (samefmt && samesubffmt) {
if (p->imgparams_set) {
- if (!mp_image_params_equal(&p->imgparams, &img->params))
+ if (!mp_image_params_static_equal(&p->imgparams, &img->params))
break;
}
return true;
@@ -162,6 +165,9 @@ static bool build_image_converter(struct mp_autoconvert *c, struct mp_log *log,
}
struct mp_filter *conv = mp_filter_create(f, &convert_filter);
+ if (!conv)
+ return false;
+
mp_filter_add_pin(conv, MP_PIN_IN, "in");
mp_filter_add_pin(conv, MP_PIN_OUT, "out");
@@ -185,27 +191,77 @@ static bool build_image_converter(struct mp_autoconvert *c, struct mp_log *log,
bool dst_all_hw = true;
bool dst_have_sw = false;
+ bool has_src_hw_fmt = false;
for (int n = 0; n < num_fmts; n++) {
bool is_hw = IMGFMT_IS_HWACCEL(fmts[n]);
dst_all_hw &= is_hw;
dst_have_sw |= !is_hw;
+ has_src_hw_fmt |= is_hw && fmts[n] == imgpar.imgfmt;
}
- // Source is sw, all targets are hw -> try to upload.
- bool sw_to_hw = imgfmt_is_sw && dst_all_hw;
// Source is hw, some targets are sw -> try to download.
bool hw_to_sw = !imgfmt_is_sw && dst_have_sw;
- if (sw_to_hw && num_fmts > 0) {
- // We can probably use this! Very lazy and very approximate.
- struct mp_hwupload *upload = mp_hwupload_create(conv, fmts[0]);
- if (upload) {
- mp_info(log, "HW-uploading to %s\n", mp_imgfmt_to_name(fmts[0]));
- filters[2] = upload->f;
- hwupload_fmt = mp_hwupload_find_upload_format(upload, img->imgfmt);
-