summaryrefslogtreecommitdiffstats
path: root/video/filter
diff options
context:
space:
mode:
Diffstat (limited to 'video/filter')
-rw-r--r--video/filter/refqueue.c230
-rw-r--r--video/filter/refqueue.h36
-rw-r--r--video/filter/vf.c37
-rw-r--r--video/filter/vf.h5
-rw-r--r--video/filter/vf_crop.c31
-rw-r--r--video/filter/vf_d3d11vpp.c537
-rw-r--r--video/filter/vf_format.c4
-rw-r--r--video/filter/vf_vavpp.c256
-rw-r--r--video/filter/vf_vdpaupp.c176
-rw-r--r--video/filter/vf_vdpaurb.c30
10 files changed, 1054 insertions, 288 deletions
diff --git a/video/filter/refqueue.c b/video/filter/refqueue.c
new file mode 100644
index 0000000000..04de3124a4
--- /dev/null
+++ b/video/filter/refqueue.c
@@ -0,0 +1,230 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "common/common.h"
+#include "video/mp_image.h"
+
+#include "refqueue.h"
+
+// Sliding window of frames so a deinterlacer can access past/future
+// reference frames relative to a "current" frame.
+struct mp_refqueue {
+ int needed_past_frames; // min. older frames kept behind the current one
+ int needed_future_frames; // min. newer frames buffered ahead of it
+ int flags; // MP_MODE_* bitmask (see refqueue.h)
+
+ bool second_field; // current frame has to output a second field yet
+ bool eof; // set by mp_refqueue_add_input(q, NULL)
+
+ // Queue of input frames, used to determine past/current/future frames.
+ // queue[0] is the newest frame, queue[num_queue - 1] the oldest.
+ struct mp_image **queue;
+ int num_queue;
+ // queue[pos] is the current frame, unless pos is an invalid index.
+ int pos;
+};
+
+// Allocate an empty queue. Free with mp_refqueue_free().
+struct mp_refqueue *mp_refqueue_alloc(void)
+{
+ struct mp_refqueue *q = talloc_zero(NULL, struct mp_refqueue);
+ // flush() establishes the initial state (pos = -1, flags cleared).
+ mp_refqueue_flush(q);
+ return q;
+}
+
+// Release all queued frames and the queue itself.
+void mp_refqueue_free(struct mp_refqueue *q)
+{
+ mp_refqueue_flush(q); // drops every mp_image reference still held
+ talloc_free(q);
+}
+
+// The minimum number of frames required before and after the current frame.
+void mp_refqueue_set_refs(struct mp_refqueue *q, int past, int future)
+{
+ assert(past >= 0 && future >= 0);
+ q->needed_past_frames = past;
+ // Always buffer at least one future frame so the next frame's PTS is
+ // known; output_next_field() needs it to compute field timestamps.
+ q->needed_future_frames = MPMAX(future, 1); // at least 1 for determining PTS
+}
+
+// MP_MODE_* flags
+// Set the mode bitmask (declared in refqueue.h); read on each query below.
+void mp_refqueue_set_mode(struct mp_refqueue *q, int flags)
+{
+ q->flags = flags;
+}
+
+// Whether the current frame should be deinterlaced.
+bool mp_refqueue_should_deint(struct mp_refqueue *q)
+{
+ if (!mp_refqueue_has_output(q) || !(q->flags & MP_MODE_DEINT))
+ return false;
+
+ // Deinterlace if the frame is flagged interlaced, or unconditionally
+ // when MP_MODE_INTERLACED_ONLY is not set.
+ return (q->queue[q->pos]->fields & MP_IMGFIELD_INTERLACED) ||
+ !(q->flags & MP_MODE_INTERLACED_ONLY);
+}
+
+// Whether the current output frame is marked as interlaced.
+// Returns false if there is no current frame.
+bool mp_refqueue_is_interlaced(struct mp_refqueue *q)
+{
+ if (!mp_refqueue_has_output(q))
+ return false;
+
+ return q->queue[q->pos]->fields & MP_IMGFIELD_INTERLACED;
+}
+
+// Whether the current output frame (field) is the top field, bottom field
+// otherwise. (Assumes the caller forces deinterlacing.)
+bool mp_refqueue_is_top_field(struct mp_refqueue *q)
+{
+ if (!mp_refqueue_has_output(q))
+ return false;
+
+ // XOR with second_field: the second output field has opposite parity.
+ return !!(q->queue[q->pos]->fields & MP_IMGFIELD_TOP_FIRST) ^ q->second_field;
+}
+
+// Whether top-field-first mode is enabled.
+// (Frame-level flag; unaffected by which field is currently output.)
+bool mp_refqueue_top_field_first(struct mp_refqueue *q)
+{
+ if (!mp_refqueue_has_output(q))
+ return false;
+
+ return q->queue[q->pos]->fields & MP_IMGFIELD_TOP_FIRST;
+}
+
+// Discard all state.
+// Frees every queued frame and resets EOF/field bookkeeping; also used as
+// the initializer by mp_refqueue_alloc().
+void mp_refqueue_flush(struct mp_refqueue *q)
+{
+ for (int n = 0; n < q->num_queue; n++)
+ talloc_free(q->queue[n]);
+ q->num_queue = 0;
+ q->pos = -1;
+ q->second_field = false;
+ q->eof = false;
+}
+
+// Add a new frame to the queue. (Call mp_refqueue_next() to advance the
+// current frame and to discard unneeded past frames.)
+// Ownership goes to the mp_refqueue.
+// Passing NULL means EOF, in which case mp_refqueue_need_input() will return
+// false even if not enough future frames are available.
+void mp_refqueue_add_input(struct mp_refqueue *q, struct mp_image *img)
+{
+ // A real frame also clears a previously signaled EOF.
+ q->eof = !img;
+ if (!img)
+ return;
+
+ // Newest frame goes to index 0; shift the current position with it.
+ MP_TARRAY_INSERT_AT(q, q->queue, q->num_queue, 0, img);
+ q->pos++;
+
+ assert(q->pos >= 0 && q->pos < q->num_queue);
+}
+
+// True while more input frames are required before output can be produced
+// (pos equals the number of buffered future frames), unless EOF was set.
+bool mp_refqueue_need_input(struct mp_refqueue *q)
+{
+ return q->pos < q->needed_future_frames && !q->eof;
+}
+
+// True if a current frame exists and enough future frames are buffered.
+bool mp_refqueue_has_output(struct mp_refqueue *q)
+{
+ return q->pos >= 0 && !mp_refqueue_need_input(q);
+}
+
+// In field output mode, switch the current frame to its second field instead
+// of advancing to the next frame. Returns true if a second field was set up;
+// false means the caller should advance normally.
+static bool output_next_field(struct mp_refqueue *q)
+{
+ if (q->second_field)
+ return false;
+ if (!(q->flags & MP_MODE_OUTPUT_FIELDS))
+ return false;
+ if (!mp_refqueue_should_deint(q))
+ return false;
+
+ assert(q->pos >= 0);
+
+ // If there's no (reasonable) timestamp, also skip the field.
+ if (q->pos == 0)
+ return false;
+
+ double pts = q->queue[q->pos]->pts;
+ double next_pts = q->queue[q->pos - 1]->pts;
+ if (pts == MP_NOPTS_VALUE || next_pts == MP_NOPTS_VALUE)
+ return false;
+
+ // Reject non-monotonic or absurdly long (>= 1s) frame durations.
+ double frametime = next_pts - pts;
+ if (frametime <= 0.0 || frametime >= 1.0)
+ return false;
+
+ // The second field's PTS is the frame midpoint. Note: this mutates the
+ // queued frame's pts in place.
+ q->queue[q->pos]->pts = pts + frametime / 2;
+ q->second_field = true;
+ return true;
+}
+
+// Advance current field, depending on interlace flags.
+// Either moves to the current frame's second field, or to the next frame.
+void mp_refqueue_next_field(struct mp_refqueue *q)
+{
+ if (!mp_refqueue_has_output(q))
+ return;
+
+ if (!output_next_field(q))
+ mp_refqueue_next(q);
+}
+
+// Advance to next input frame (skips fields even in field output mode).
+void mp_refqueue_next(struct mp_refqueue *q)
+{
+ if (!mp_refqueue_has_output(q))
+ return;
+
+ q->pos--;
+ q->second_field = false;
+
+ assert(q->pos >= -1 && q->pos < q->num_queue);
+
+ // Discard unneeded past frames.
+ // Frames older than the current one live at the tail of the array;
+ // keep only needed_past_frames of them.
+ while (q->num_queue - (q->pos + 1) > q->needed_past_frames) {
+ assert(q->num_queue > 0);
+ talloc_free(q->queue[q->num_queue - 1]);
+ q->num_queue--;
+ }
+
+ assert(q->pos >= -1 && q->pos < q->num_queue);
+}
+
+// Return a frame by relative position:
+// -1: first past frame
+// 0: current frame
+// 1: first future frame
+// Caller doesn't get ownership. Return NULL if unavailable.
+struct mp_image *mp_refqueue_get(struct mp_refqueue *q, int pos)
+{
+ // queue[0] is newest, so future frames sit at lower indices than pos.
+ int i = q->pos - pos;
+ return i >= 0 && i < q->num_queue ? q->queue[i] : NULL;
+}
+
+// Same as mp_refqueue_get(), but return the frame which contains a field
+// relative to the current field's position (pos counts fields, not frames).
+struct mp_image *mp_refqueue_get_field(struct mp_refqueue *q, int pos)
+{
+ // If the current field is the second field (conceptually), then pos=1
+ // needs to get the next frame. Similarly, pos=-1 needs to get the current
+ // frame, so round towards negative infinity.
+ int round = mp_refqueue_top_field_first(q) != mp_refqueue_is_top_field(q);
+ int frame = (pos < 0 ? pos - (1 - round) : pos + round) / 2;
+ return mp_refqueue_get(q, frame);
+}
+
+// Whether the current output is the second field of its source frame.
+bool mp_refqueue_is_second_field(struct mp_refqueue *q)
+{
+ return mp_refqueue_has_output(q) && q->second_field;
+}
diff --git a/video/filter/refqueue.h b/video/filter/refqueue.h
new file mode 100644
index 0000000000..ef23bee906
--- /dev/null
+++ b/video/filter/refqueue.h
@@ -0,0 +1,36 @@
+#ifndef MP_REFQUEUE_H_
+#define MP_REFQUEUE_H_
+
+#include <stdbool.h>
+
+// A helper for deinterlacers which require past/future reference frames.
+
+struct mp_refqueue;
+
+struct mp_refqueue *mp_refqueue_alloc(void);
+void mp_refqueue_free(struct mp_refqueue *q);
+
+// Queue management. Frames passed to mp_refqueue_add_input() are owned by
+// the queue; NULL signals EOF.
+void mp_refqueue_set_refs(struct mp_refqueue *q, int past, int future);
+void mp_refqueue_flush(struct mp_refqueue *q);
+void mp_refqueue_add_input(struct mp_refqueue *q, struct mp_image *img);
+bool mp_refqueue_need_input(struct mp_refqueue *q);
+bool mp_refqueue_has_output(struct mp_refqueue *q);
+void mp_refqueue_next(struct mp_refqueue *q);
+void mp_refqueue_next_field(struct mp_refqueue *q);
+// Returned frames stay owned by the queue; may return NULL.
+struct mp_image *mp_refqueue_get(struct mp_refqueue *q, int pos);
+
+enum {
+ MP_MODE_DEINT = (1 << 0), // deinterlacing enabled
+ MP_MODE_OUTPUT_FIELDS = (1 << 1), // output fields separately
+ MP_MODE_INTERLACED_ONLY = (1 << 2), // only deinterlace marked frames
+};
+
+// Queries about the current frame/field; all return false without output.
+void mp_refqueue_set_mode(struct mp_refqueue *q, int flags);
+bool mp_refqueue_should_deint(struct mp_refqueue *q);
+bool mp_refqueue_is_interlaced(struct mp_refqueue *q);
+bool mp_refqueue_is_top_field(struct mp_refqueue *q);
+bool mp_refqueue_top_field_first(struct mp_refqueue *q);
+bool mp_refqueue_is_second_field(struct mp_refqueue *q);
+struct mp_image *mp_refqueue_get_field(struct mp_refqueue *q, int pos);
+
+#endif
diff --git a/video/filter/vf.c b/video/filter/vf.c
index d8e7f6b4c8..176ac95b70 100644
--- a/video/filter/vf.c
+++ b/video/filter/vf.c
@@ -61,6 +61,7 @@ extern const vf_info_t vf_info_vapoursynth_lazy;
extern const vf_info_t vf_info_vdpaupp;
extern const vf_info_t vf_info_vdpaurb;
extern const vf_info_t vf_info_buffer;
+extern const vf_info_t vf_info_d3d11vpp;
// list of available filters:
static const vf_info_t *const filter_list[] = {
@@ -99,6 +100,9 @@ static const vf_info_t *const filter_list[] = {
&vf_info_vdpaupp,
&vf_info_vdpaurb,
#endif
+#if HAVE_D3D_HWACCEL
+ &vf_info_d3d11vpp,
+#endif
NULL
};
@@ -244,7 +248,7 @@ static struct vf_instance *vf_open(struct vf_chain *c, const char *name,
*vf = (vf_instance_t) {
.info = desc.p,
.log = mp_log_new(vf, c->log, name),
- .hwdec = c->hwdec,
+ .hwdec_devs = c->hwdec_devs,
.query_format = vf_default_query_format,
.out_pool = talloc_steal(vf, mp_image_pool_new(16)),
.chain = c,
@@ -514,7 +518,23 @@ static void query_formats(uint8_t *fmts, struct vf_instance *vf)
static bool is_conv_filter(struct vf_instance *vf)
{
- return vf && strcmp(vf->info->name, "scale") == 0;
+ return vf && (strcmp(vf->info->name, "scale") == 0 || vf->autoinserted);
+}
+
+static const char *find_conv_filter(uint8_t *fmts_in, uint8_t *fmts_out)
+{
+ for (int n = 0; filter_list[n]; n++) {
+ if (filter_list[n]->test_conversion) {
+ for (int a = IMGFMT_START; a < IMGFMT_END; a++) {
+ for (int b = IMGFMT_START; b < IMGFMT_END; b++) {
+ if (fmts_in[a - IMGFMT_START] && fmts_out[b - IMGFMT_START] &&
+ filter_list[n]->test_conversion(a, b))
+ return filter_list[n]->name;
+ }
+ }
+ }
+ }
+ return "scale";
}
static void update_formats(struct vf_chain *c, struct vf_instance *vf,
@@ -535,7 +555,18 @@ static void update_formats(struct vf_chain *c, struct vf_instance *vf,
// filters after vf work, but vf can't output any format the filters
// after it accept), try to insert a conversion filter.
MP_INFO(c, "Using conversion filter.\n");
- struct vf_instance *conv = vf_open(c, "scale", NULL);
+ // Determine which output formats the filter _could_ accept. For this
+ // to work after the conversion filter is inserted, it is assumed that
+ // conversion filters have a single set of in/output formats that can
+ // be converted to each other.
+ uint8_t out_formats[IMGFMT_END - IMGFMT_START];
+ for (int n = IMGFMT_START; n < IMGFMT_END; n++) {
+ out_formats[n - IMGFMT_START] = vf->last_outfmts[n - IMGFMT_START];
+ vf->last_outfmts[n - IMGFMT_START] = 1;
+ }
+ query_formats(fmts, vf);
+ const char *filter = find_conv_filter(fmts, out_formats);
+ struct vf_instance *conv = vf_open(c, filter, NULL);
if (conv) {
conv->autoinserted = true;
conv->next = vf->next;
diff --git a/video/filter/vf.h b/video/filter/vf.h
index c982b612e1..49296fb9b2 100644
--- a/video/filter/vf.h
+++ b/video/filter/vf.h
@@ -37,6 +37,7 @@ typedef struct vf_info {
const void *priv_defaults;
const struct m_option *options;
void (*print_help)(struct mp_log *log);
+ bool (*test_conversion)(int in, int out);
} vf_info_t;
typedef struct vf_instance {
@@ -92,7 +93,7 @@ typedef struct vf_instance {
struct mp_image_pool *out_pool;
struct vf_priv_s *priv;
struct mp_log *log;
- struct mp_hwdec_info *hwdec;
+ struct mp_hwdec_devices *hwdec_devs;
struct mp_image **out_queued;
int num_out_queued;
@@ -120,7 +121,7 @@ struct vf_chain {
struct mp_log *log;
struct MPOpts *opts;
struct mpv_global *global;
- struct mp_hwdec_info *hwdec;
+ struct mp_hwdec_devices *hwdec_devs;
// Call when the filter chain wants new processing (for filters with
// asynchronous behavior) - must be immutable once filters are created,
diff --git a/video/filter/vf_crop.c b/video/filter/vf_crop.c
index 89b2b6fde1..6f9a788fef 100644
--- a/video/filter/vf_crop.c
+++ b/video/filter/vf_crop.c
@@ -51,10 +51,23 @@ static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
if(vf->priv->crop_y<0) vf->priv->crop_y=(height-vf->priv->crop_h)/2;
// rounding:
+ int orig_x = vf->priv->crop_x;
+ int orig_y = vf->priv->crop_y;
+
struct mp_imgfmt_desc fmt = mp_imgfmt_get_desc(in->imgfmt);
- vf->priv->crop_x = MP_ALIGN_DOWN(vf->priv->crop_x, fmt.align_x);
- vf->priv->crop_y = MP_ALIGN_DOWN(vf->priv->crop_y, fmt.align_y);
+ if (fmt.flags & MP_IMGFLAG_HWACCEL) {
+ vf->priv->crop_x = 0;
+ vf->priv->crop_y = 0;
+ } else {
+ vf->priv->crop_x = MP_ALIGN_DOWN(vf->priv->crop_x, fmt.align_x);
+ vf->priv->crop_y = MP_ALIGN_DOWN(vf->priv->crop_y, fmt.align_y);
+ }
+
+ if (vf->priv->crop_x != orig_x || vf->priv->crop_y != orig_y) {
+ MP_WARN(vf, "Adjusting crop origin to %d/%d for pixel format alignment.\n",
+ vf->priv->crop_x, vf->priv->crop_y);
+ }
// check:
if(vf->priv->crop_w+vf->priv->crop_x>width ||
@@ -71,17 +84,19 @@ static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
static struct mp_image *filter(struct vf_instance *vf, struct mp_image *mpi)
{
- mp_image_crop(mpi, vf->priv->crop_x, vf->priv->crop_y,
- vf->priv->crop_x + vf->priv->crop_w,
- vf->priv->crop_y + vf->priv->crop_h);
+ if (mpi->fmt.flags & MP_IMGFLAG_HWACCEL) {
+ mp_image_set_size(mpi, vf->fmt_out.w, vf->fmt_out.h);
+ } else {
+ mp_image_crop(mpi, vf->priv->crop_x, vf->priv->crop_y,
+ vf->priv->crop_x + vf->priv->crop_w,
+ vf->priv->crop_y + vf->priv->crop_h);
+ }
return mpi;
}
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
- if (!IMGFMT_IS_HWACCEL(fmt))
- return vf_next_query_format(vf, fmt);
- return 0;
+ return vf_next_query_format(vf, fmt);
}
static int vf_open(vf_instance_t *vf){
diff --git a/video/filter/vf_d3d11vpp.c b/video/filter/vf_d3d11vpp.c
new file mode 100644
index 0000000000..a0aa0edae2
--- /dev/null
+++ b/video/filter/vf_d3d11vpp.c
@@ -0,0 +1,537 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <initguid.h>
+#include <assert.h>
+#include <windows.h>
+#include <d3d11.h>
+
+#include "common/common.h"
+#include "osdep/timer.h"
+#include "osdep/windows_utils.h"
+#include "vf.h"
+#include "refqueue.h"
+#include "video/hwdec.h"
+#include "video/mp_image_pool.h"
+
+// missing in MinGW
+#define D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB 0x2
+
+struct vf_priv_s {
+ ID3D11Device *vo_dev; // device shared with the VO; AddRef'd in vf_open()
+
+ ID3D11DeviceContext *device_ctx;
+ ID3D11VideoDevice *video_dev;
+ ID3D11VideoContext *video_ctx;
+
+ // Created lazily/recreated by recreate_video_proc().
+ ID3D11VideoProcessor *video_proc;
+ ID3D11VideoProcessorEnumerator *vp_enum;
+ D3D11_VIDEO_FRAME_FORMAT d3d_frame_format;
+
+ DXGI_FORMAT out_format; // DXGI format of allocated output textures
+ bool out_shared; // allocate with D3D11_RESOURCE_MISC_SHARED
+ bool out_rgb; // output is RGB (no output colorspace set)
+
+ // true if in/out hw_subfmt differ, so frames must always be processed
+ bool require_filtering;
+
+ struct mp_image_params params, out_params;
+ int c_w, c_h; // input texture size the video processor was built for
+
+ struct mp_image_pool *pool; // pool of output textures (alloc_pool)
+
+ struct mp_refqueue *queue;
+
+ // user options
+ int deint_enabled;
+ int interlaced_only;
+};
+
+// Free callback for mp_image_new_custom_ref(): drop the texture reference
+// owned by the wrapping mp_image.
+static void release_tex(void *arg)
+{
+ ID3D11Texture2D *texture = arg;
+
+ ID3D11Texture2D_Release(texture);
+}
+
+// mp_image_pool allocator callback: create a D3D11 render-target texture in
+// p->out_format and wrap it in an mp_image. Returns NULL on failure.
+static struct mp_image *alloc_pool(void *pctx, int fmt, int w, int h)
+{
+ struct vf_instance *vf = pctx;
+ struct vf_priv_s *p = vf->priv;
+ HRESULT hr;
+
+ ID3D11Texture2D *texture = NULL;
+ D3D11_TEXTURE2D_DESC texdesc = {
+ .Width = w,
+ .Height = h,
+ .Format = p->out_format,
+ .MipLevels = 1,
+ .ArraySize = 1,
+ .SampleDesc = { .Count = 1 },
+ .Usage = D3D11_USAGE_DEFAULT,
+ .BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE,
+ .MiscFlags = p->out_shared ? D3D11_RESOURCE_MISC_SHARED : 0,
+ };
+ hr = ID3D11Device_CreateTexture2D(p->vo_dev, &texdesc, NULL, &texture);
+ if (FAILED(hr))
+ return NULL;
+
+ struct mp_image *mpi = mp_image_new_custom_ref(NULL, texture, release_tex);
+ if (!mpi)
+ abort();
+
+ mp_image_setfmt(mpi, p->out_params.imgfmt);
+ mp_image_set_size(mpi, w, h);
+ mpi->params.hw_subfmt = p->out_params.hw_subfmt;
+
+ // Hwaccel convention used by this filter: planes[1] carries the texture
+ // pointer, planes[2] the array slice index (always 0 for these textures).
+ mpi->planes[1] = (void *)texture;
+ mpi->planes[2] = (void *)(intptr_t)0;
+
+ return mpi;
+}
+
+// Drop all buffered input frames (seek reset / reconfig / uninit).
+static void flush_frames(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+ mp_refqueue_flush(p->queue);
+}
+
+// Input entry point: feed a frame (or NULL for EOF) into the refqueue.
+static int filter_ext(struct vf_instance *vf, struct mp_image *in)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ // No past/future references needed: this filter only does bob-style
+ // single-frame deinterlacing (see recreate_video_proc()).
+ mp_refqueue_set_refs(p->queue, 0, 0);
+ mp_refqueue_set_mode(p->queue,
+ (p->deint_enabled ? MP_MODE_DEINT : 0) |
+ MP_MODE_OUTPUT_FIELDS |
+ (p->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0));
+
+ mp_refqueue_add_input(p->queue, in);
+ return 0;
+}
+
+// Release the video processor and its enumerator (safe to call repeatedly).
+static void destroy_video_proc(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ if (p->video_proc)
+ ID3D11VideoProcessor_Release(p->video_proc);
+ p->video_proc = NULL;
+
+ if (p->vp_enum)
+ ID3D11VideoProcessorEnumerator_Release(p->vp_enum);
+ p->vp_enum = NULL;
+}
+
+// (Re)create the D3D11 video processor for the current input size and frame
+// format (p->c_w/c_h/d3d_frame_format must be set by the caller). Also
+// configures source rect, output rate and colorspace. Returns 0 or -1.
+static int recreate_video_proc(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+ HRESULT hr;
+
+ destroy_video_proc(vf);
+
+ D3D11_VIDEO_PROCESSOR_CONTENT_DESC vpdesc = {
+ .InputFrameFormat = p->d3d_frame_format,
+ .InputWidth = p->c_w,
+ .InputHeight = p->c_h,
+ .OutputWidth = p->params.w,
+ .OutputHeight = p->params.h,
+ };
+ hr = ID3D11VideoDevice_CreateVideoProcessorEnumerator(p->video_dev, &vpdesc,
+ &p->vp_enum);
+ if (FAILED(hr))
+ goto fail;
+
+ D3D11_VIDEO_PROCESSOR_CAPS caps;
+ hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorCaps(p->vp_enum, &caps);
+ if (FAILED(hr))
+ goto fail;
+
+ MP_VERBOSE(vf, "Found %d rate conversion caps.\n",
+ (int)caps.RateConversionCapsCount);
+
+ // Pick the first rate converter that advertises BOB deinterlacing.
+ int rindex = -1;
+ for (int n = 0; n < caps.RateConversionCapsCount; n++) {
+ D3D11_VIDEO_PROCESSOR_RATE_CONVERSION_CAPS rcaps;
+ hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorRateConversionCaps
+ (p->vp_enum, n, &rcaps);
+ if (FAILED(hr))
+ goto fail;
+ MP_VERBOSE(vf, " - %d: 0x%08x\n", n, (unsigned)rcaps.ProcessorCaps);
+ if (rcaps.ProcessorCaps & D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB)
+ {
+ MP_VERBOSE(vf, " (matching)\n");
+ if (rindex < 0)
+ rindex = n;
+ }
+ }
+
+ // Fall back to converter 0 so processing still works without deint caps.
+ if (rindex < 0) {
+ MP_WARN(vf, "No video deinterlacing processor found.\n");
+ rindex = 0;
+ }
+
+ hr = ID3D11VideoDevice_CreateVideoProcessor(p->video_dev, p->vp_enum, rindex,
+ &p->video_proc);
+ if (FAILED(hr)) {
+ MP_ERR(vf, "Failed to create D3D11 video processor.\n");
+ goto fail;
+ }
+
+ // Note: libavcodec does not support cropping left/top with hwaccel.
+ RECT src_rc = {
+ .right = p->params.w,
+ .bottom = p->params.h,
+ };
+ ID3D11VideoContext_VideoProcessorSetStreamSourceRect(p->video_ctx,
+ p->video_proc,
+ 0, TRUE, &src_rc);
+
+ // This is supposed to stop drivers from fucking up the video quality.
+ ID3D11VideoContext_VideoProcessorSetStreamAutoProcessingMode(p->video_ctx,
+ p->video_proc,
+ 0, FALSE);
+
+ ID3D11VideoContext_VideoProcessorSetStreamOutputRate(p->video_ctx,
+ p->video_proc,
+ 0,
+ D3D11_VIDEO_PROCESSOR_OUTPUT_RATE_NORMAL,
+ FALSE, 0);
+
+ // Anything that is not BT.601 is treated as BT.709; Nominal_Range maps
+ // TV levels to 1 and full range to 2 (per the D3D11 colorspace struct).
+ D3D11_VIDEO_PROCESSOR_COLOR_SPACE csp = {
+ .YCbCr_Matrix = p->params.colorspace != MP_CSP_BT_601,
+ .Nominal_Range = p->params.colorlevels == MP_CSP_LEVELS_TV ? 1 : 2,
+ };
+ ID3D11VideoContext_VideoProcessorSetStreamColorSpace(p->video_ctx,
+ p->video_proc,
+ 0, &csp);
+ if (p->out_rgb) {
+ // RGB output: the processor does the conversion, which is only
+ // correct for the two matrices representable above.
+ if (p->params.colorspace != MP_CSP_BT_601 &&
+ p->params.colorspace != MP_CSP_BT_709)
+ {
+ MP_WARN(vf, "Unsupported video colorspace (%s/%s). Consider "
+ "disabling hardware decoding, or using "
+ "--hwdec=d3d11va-copy to get correct output.\n",
+ m_opt_choice_str(mp_csp_names, p->params.colorspace),
+ m_opt_choice_str(mp_csp_levels_names, p->params.colorlevels));
+ }
+ } else {
+ ID3D11VideoContext_VideoProcessorSetOutputColorSpace(p->video_ctx,
+ p->video_proc,
+ &csp);
+ }
+
+ return 0;
+fail:
+ destroy_video_proc(vf);
+ return -1;
+}
+
+// Process the current refqueue frame (one field in field-output mode)
+// through the video processor into a freshly allocated output texture, and
+// queue the result as an output frame. Returns 0 on success, -1 on error.
+static int render(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+ int res = -1;
+ HRESULT hr;
+ ID3D11VideoProcessorInputView *in_view = NULL;
+ ID3D11VideoProcessorOutputView *out_view = NULL;
+ struct mp_image *in = NULL, *out = NULL;
+ out = mp_image_pool_get(p->pool, p->out_params.imgfmt, p->params.w, p->params.h);
+ if (!out)
+ goto cleanup;
+
+ ID3D11Texture2D *d3d_out_tex = (void *)out->planes[1];
+
+ // Borrowed reference; the refqueue keeps ownership of 'in'.
+ in = mp_refqueue_get(p->queue, 0);
+ if (!in)
+ goto cleanup;
+ ID3D11Texture2D *d3d_tex = (void *)in->planes[1];
+ int d3d_subindex = (intptr_t)in->planes[2];
+
+ mp_image_copy_attributes(out, in);
+
+ // Frame-level format (TFF/BFF/progressive) used as part of the video
+ // processor cache key below.
+ D3D11_VIDEO_FRAME_FORMAT d3d_frame_format;
+ if (!mp_refqueue_is_interlaced(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
+ } else if (mp_refqueue_top_field_first(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
+ } else {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
+ }
+
+ // Recreate the processor when input texture size or frame format change.
+ D3D11_TEXTURE2D_DESC texdesc;
+ ID3D11Texture2D_GetDesc(d3d_tex, &texdesc);
+ if (!p->video_proc || p->c_w != texdesc.Width || p->c_h != texdesc.Height ||
+ p->d3d_frame_format != d3d_frame_format)
+ {
+ p->c_w = texdesc.Width;
+ p->c_h = texdesc.Height;
+ p->d3d_frame_format = d3d_frame_format;
+ if (recreate_video_proc(vf) < 0)
+ goto cleanup;
+ }
+
+ // Recomputed per call with is_top_field(), i.e. based on the currently
+ // output field rather than the frame-level flag above.
+ if (!mp_refqueue_is_interlaced(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
+ } else if (mp_refqueue_is_top_field(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
+ } else {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
+ }
+
+ ID3D11VideoContext_VideoProcessorSetStreamFrameFormat(p->video_ctx,
+ p->video_proc,
+ 0, d3d_frame_format);
+
+ D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC indesc = {
+ .ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D,
+ .Texture2D = {
+ .ArraySlice = d3d_subindex,
+ },
+ };
+ hr = ID3D11VideoDevice_CreateVideoProcessorInputView(p->video_dev,
+ (ID3D11Resource *)d3d_tex,
+ p->vp_enum, &indesc,
+ &in_view);
+ if (FAILED(hr)) {
+ MP_ERR(vf, "Could not create ID3D11VideoProcessorInputView\n");
+ goto cleanup;
+ }
+
+ D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC outdesc = {
+ .ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D,
+ };
+ hr = ID3D11VideoDevice_CreateVideoProcessorOutputView(p->video_dev,
+ (ID3D11Resource *)d3d_out_tex,
+ p->vp_enum, &outdesc,
+ &out_view);
+ if (FAILED(hr))
+ goto cleanup;
+
+ D3D11_VIDEO_PROCESSOR_STREAM stream = {
+ .Enable = TRUE,
+ .pInputSurface = in_view,
+ };
+ // OutputFrame index: 0 for the first field of a frame, 1 for the second.
+ int frame = mp_refqueue_is_second_field(p->queue);
+ hr = ID3D11VideoContext_VideoProcessorBlt(p->video_ctx, p->video_proc,
+ out_view, frame, 1, &stream);
+ if (FAILED(hr)) {
+ MP_ERR(vf, "VideoProcessorBlt failed.\n");
+ goto cleanup;
+ }
+
+ // Make sure the texture is updated correctly on the shared context.
+ // (I'm not sure if this is correct, though it won't harm.)
+ if (p->out_shared)
+ ID3D11DeviceContext_Flush(p->device_ctx);
+
+ res = 0;
+cleanup:
+ if (in_view)
+ ID3D11VideoProcessorInputView_Release(in_view);
+ if (out_view)
+ ID3D11VideoProcessorOutputView_Release(out_view);
+ if (res >= 0) {
+ vf_add_output_frame(vf, out); // ownership of 'out' passes to the chain
+ } else {
+ talloc_free(out);
+ }
+ mp_refqueue_next_field(p->queue);
+ return res;
+}
+
+// Output entry point: produce at most one output frame from the refqueue.
+static int filter_out(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ if (!mp_refqueue_has_output(p->queue))
+ return 0;
+
+ // no filtering
+ // Fast path: no deinterlacing requested and no subformat conversion
+ // required, so pass the input frame through untouched (new reference).
+ if (!mp_refqueue_should_deint(p->queue) && !p->require_filtering) {
+ struct mp_image *in = mp_refqueue_get(p->queue, 0);
+ vf_add_output_frame(vf, mp_image_new_ref(in));
+ mp_refqueue_next(p->queue);
+ return 0;
+ }
+
+ return render(vf);
+}
+
+// Negotiate the output format with the next filter and rebuild all
+// size/format dependent state (pool, video processor).
+static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
+ struct mp_image_params *out)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ flush_frames(vf);
+ talloc_free(p->pool);
+ p->pool = NULL;
+
+ destroy_video_proc(vf);
+
+ *out = *in;
+
+ // Prefer native D3D11 NV12 output if the next filter accepts it;
+ // otherwise fall back to shared RGB textures.
+ if (vf_next_query_format(vf, IMGFMT_D3D11VA) ||
+ vf_next_query_format(vf, IMGFMT_D3D11NV12))
+ {
+ out->imgfmt = vf_next_query_format(vf, IMGFMT_D3D11VA)
+ ? IMGFMT_D3D11VA : IMGFMT_D3D11NV12;
+ out->hw_subfmt = IMGFMT_NV12;
+ p->out_format = DXGI_FORMAT_NV12;
+ p->out_shared = false;
+ p->out_rgb = false;
+ } else {
+ out->imgfmt = IMGFMT_D3D11RGB;
+ out->hw_subfmt = IMGFMT_RGB0;
+ p->out_format = DXGI_FORMAT_B8G8R8A8_UNORM;
+ p->out_shared = true;
+ p->out_rgb = true;
+ }
+
+ // If the subformat changes, every frame must run through the processor
+ // even when no deinterlacing is needed (see filter_out()).
+ p->require_filtering = in->hw_subfmt != out->hw_subfmt;
+
+ p->params = *in;
+ p->out_params = *out;
+
+ p->pool = mp_image_pool_new(20);
+ mp_image_pool_set_allocator(p->pool, alloc_pool, vf);
+ mp_image_pool_set_lru(p->pool);
+
+ return 0;
+}
+
+// Release all D3D11 objects and helper state. Also used as the error path
+// of vf_open(), so every release is guarded against NULL.
+static void uninit(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ destroy_video_proc(vf);
+
+ flush_frames(vf);
+ mp_refqueue_free(p->queue);
+ talloc_free(p->pool);
+
+ if (p->video_ctx)
+ ID3D11VideoContext_Release(p->video_ctx);
+
+ if (p->video_dev)
+ ID3D11VideoDevice_Release(p->video_dev);
+
+ if (p->device_ctx)
+ ID3D11DeviceContext_Release(p->device_ctx);
+
+ if (p->vo_dev)
+ ID3D11Device_Release(p->vo_dev);
+}
+
+// Accept any D3D11 surface format, provided the next filter takes at least
+// one of the D3D11 formats this filter can output.
+static int query_format(struct vf_instance *vf, unsigned int imgfmt)
+{
+ if (imgfmt == IMGFMT_D3D11VA ||
+ imgfmt == IMGFMT_D3D11NV12 ||
+ imgfmt == IMGFMT_D3D11RGB)
+ {
+ return vf_next_query_format(vf, IMGFMT_D3D11VA) ||
+ vf_next_query_format(vf, IMGFMT_D3D11NV12) ||
+ vf_next_query_format(vf, IMGFMT_D3D11RGB);
+ }
+ return 0;
+}
+
+// vf_info.test_conversion hook: this filter can convert between any pair of
+// D3D11 surface formats (used for automatic conversion-filter insertion).
+static bool test_conversion(int in, int out)
+{
+ return (in == IMGFMT_D3D11VA ||
+ in == IMGFMT_D3D11NV12 ||
+ in == IMGFMT_D3D11RGB) &&
+ (out == IMGFMT_D3D11VA ||
+ out == IMGFMT_D3D11NV12 ||
+ out == IMGFMT_D3D11RGB);
+}
+
+// Filter control dispatch: runtime deinterlace toggle and seek reset.
+static int control(struct vf_instance *vf, int request, void* data)
+{
+ struct vf_priv_s *p = vf->priv;
+ switch (request){
+ case VFCTRL_GET_DEINTERLACE:
+ *(int*)data = !!p->deint_enabled;
+ return true;
+ case VFCTRL_SET_DEINTERLACE:
+ // Takes effect on the next filter_ext() call via the mode flags.
+ p->deint_enabled = !!*(int*)data;
+ return true;
+ case VFCTRL_SEEK_RESET:
+ flush_frames(vf);
+ return true;
+ default:
+ return CONTROL_UNKNOWN;
+ }
+}
+
+// Filter constructor: grab the VO's D3D11 device and derive the video
+// device/context interfaces. Returns 1 on success, 0 on failure.
+static int vf_open(vf_instance_t *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ vf->reconfig = reconfig;
+ vf->filter_ext = filter_ext;
+ vf->filter_out = filter_out;
+ vf->query_format = query_format;
+ vf->uninit = uninit;
+ vf->control = control;
+
+ p->queue = mp_refqueue_alloc();
+
+ p->vo_dev = hwdec_devices_load(vf->hwdec_devs, HWDEC_D3D11VA);
+ if (!p->vo_dev)
+ return 0;
+
+ // The loaded pointer is not owned by us; keep our own reference
+ // (released again in uninit()).
+ ID3D11Device_AddRef(p->vo_dev);
+
+ HRESULT hr;
+
+ hr = ID3D11Device_QueryInterface(p->vo_dev, &IID_ID3D11VideoDevice,
+ (void **)&p->video_dev);
+ if (FAILED(hr))
+ goto fail;
+
+ ID3D11Device_GetImmediateContext(p->vo_dev, &p->device_ctx);
+ if (!p->device_ctx)
+ goto fail;
+ hr = ID3D11DeviceContext_QueryInterface(p->device_ctx, &IID_ID3D11VideoContext,
+ (void **)&p->video_ctx);
+ if (FAILED(hr))
+ goto fail;
+
+ return 1;
+
+fail:
+ uninit(vf);
+ return 0;
+}
+
+#define OPT_BASE_STRUCT struct vf_priv_s
+// Sub-options: "deint" enables deinterlacing, "interlaced-only" restricts
+// it to frames flagged as interlaced.
+static const m_option_t vf_opts_fields[] = {
+ OPT_FLAG("deint", deint_enabled, 0),
+ OPT_FLAG("interlaced-only", interlaced_only, 0),
+ {0}
+};