author     wm4 <wm4@nowhere>    2016-05-28 18:23:17 +0200
committer  wm4 <wm4@nowhere>    2016-05-28 19:28:08 +0200
commit     49f73eaf7b6f58e82376fc764ab0743c039d5278 (patch)
tree       1689158208b01a6a408d814c3d2643f1f416b2a3
parent     9eccedf4471621dcb82219a657141bf5960b77dd (diff)
download   mpv-49f73eaf7b6f58e82376fc764ab0743c039d5278.tar.bz2
           mpv-49f73eaf7b6f58e82376fc764ab0743c039d5278.tar.xz
vf_d3d11vpp: add a D3D11 video processor filter
Main use: deinterlacing.

I'm not sure how to select the deinterlacing mode at all. You can
enumerate the available video processors, but at least on Intel, all of
them either signal support for all deinterlacers, or none (the latter
is apparently used for IVTC). I haven't found anything that actually
tells the processor _which_ algorithm to use.

Another strange detail is how to select top/bottom fields and field
dominance. At least I'm getting quite similar results to vavpp on
Linux, so I'm content with it for now.

Future plans include removing the D3D11 video processor use from the
ANGLE interop code.
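As a rough illustration of the enumeration described above (not part of this
commit: list_deint_caps is a hypothetical helper, and video_dev/vpdesc are
assumed to be set up the same way recreate_video_proc() in the patch below
sets them up), the rate-conversion caps can be dumped to see which
deinterlacers each converter claims to support:

    #define COBJMACROS
    #include <stdio.h>
    #include <windows.h>
    #include <d3d11.h>

    // Print the ProcessorCaps bitmask of every rate converter exposed by the
    // video processor enumerator created for the given content description.
    static void list_deint_caps(ID3D11VideoDevice *video_dev,
                                const D3D11_VIDEO_PROCESSOR_CONTENT_DESC *vpdesc)
    {
        ID3D11VideoProcessorEnumerator *vp_enum = NULL;
        HRESULT hr = ID3D11VideoDevice_CreateVideoProcessorEnumerator(video_dev,
                                                                      vpdesc,
                                                                      &vp_enum);
        if (FAILED(hr))
            return;

        D3D11_VIDEO_PROCESSOR_CAPS caps;
        hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorCaps(vp_enum, &caps);
        if (SUCCEEDED(hr)) {
            for (UINT n = 0; n < caps.RateConversionCapsCount; n++) {
                D3D11_VIDEO_PROCESSOR_RATE_CONVERSION_CAPS rcaps;
                hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorRateConversionCaps(
                        vp_enum, n, &rcaps);
                if (FAILED(hr))
                    continue;
                // ProcessorCaps is a bitmask of
                // D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_* flags; on
                // the hardware described above it tends to be either all of
                // them or 0, which is why picking a specific algorithm is
                // unclear.
                printf("converter %u: ProcessorCaps=0x%x\n",
                       (unsigned)n, (unsigned)rcaps.ProcessorCaps);
            }
        }
        ID3D11VideoProcessorEnumerator_Release(vp_enum);
    }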
-rw-r--r--   DOCS/man/vf.rst                 9
-rw-r--r--   player/command.c                3
-rw-r--r--   video/filter/refqueue.c         5
-rw-r--r--   video/filter/refqueue.h         1
-rw-r--r--   video/filter/vf.c               4
-rw-r--r--   video/filter/vf_d3d11vpp.c    490
-rw-r--r--   wscript_build.py                1
7 files changed, 513 insertions, 0 deletions
diff --git a/DOCS/man/vf.rst b/DOCS/man/vf.rst
index e742bc4ada..40c499cb5a 100644
--- a/DOCS/man/vf.rst
+++ b/DOCS/man/vf.rst
@@ -801,6 +801,15 @@ Available filters are:
This filter must be specified before ``vdpaupp`` in the filter chain if
``vdpaupp`` is used.
+``d3d11vpp``
+ Direct3D 11 video post processing. Currently requires D3D11 hardware
+ decoding for use.
+
+ ``deint=<yes|no>``
+ Whether deinterlacing is enabled (default: no).
+ ``interlaced-only=<yes|no>``
+ If ``yes`` (default), only deinterlace frames marked as interlaced.
+
``buffer=<num>``
Buffer ``<num>`` frames in the filter chain. This filter is probably pretty
useless, except for debugging. (Note that this won't help to smooth out
diff --git a/player/command.c b/player/command.c
index db1e3e7703..ecd6252e26 100644
--- a/player/command.c
+++ b/player/command.c
@@ -2295,6 +2295,9 @@ static int probe_deint_filters(struct MPContext *mpctx)
if (check_output_format(mpctx, IMGFMT_VAAPI) &&
probe_deint_filter(mpctx, "vavpp"))
return 0;
+ if (check_output_format(mpctx, IMGFMT_D3D11VA) &&
+ probe_deint_filter(mpctx, "d3d11vpp"))
+ return 0;
if (probe_deint_filter(mpctx, "yadif"))
return 0;
return -1;
diff --git a/video/filter/refqueue.c b/video/filter/refqueue.c
index aea535cbb6..b8502695b7 100644
--- a/video/filter/refqueue.c
+++ b/video/filter/refqueue.c
@@ -222,3 +222,8 @@ struct mp_image *mp_refqueue_get_field(struct mp_refqueue *q, int pos)
int frame = (pos < 0 ? pos - (1 - round) : pos + round) / 2;
return mp_refqueue_get(q, frame);
}
+
+bool mp_refqueue_is_second_field(struct mp_refqueue *q)
+{
+ return mp_refqueue_has_output(q) && q->second_field;
+}
diff --git a/video/filter/refqueue.h b/video/filter/refqueue.h
index 62c0d4fecb..ef23bee906 100644
--- a/video/filter/refqueue.h
+++ b/video/filter/refqueue.h
@@ -30,6 +30,7 @@ bool mp_refqueue_should_deint(struct mp_refqueue *q);
bool mp_refqueue_is_interlaced(struct mp_refqueue *q);
bool mp_refqueue_is_top_field(struct mp_refqueue *q);
bool mp_refqueue_top_field_first(struct mp_refqueue *q);
+bool mp_refqueue_is_second_field(struct mp_refqueue *q);
struct mp_image *mp_refqueue_get_field(struct mp_refqueue *q, int pos);
#endif
diff --git a/video/filter/vf.c b/video/filter/vf.c
index 7ca1b08f34..9c741a4bfa 100644
--- a/video/filter/vf.c
+++ b/video/filter/vf.c
@@ -61,6 +61,7 @@ extern const vf_info_t vf_info_vapoursynth_lazy;
extern const vf_info_t vf_info_vdpaupp;
extern const vf_info_t vf_info_vdpaurb;
extern const vf_info_t vf_info_buffer;
+extern const vf_info_t vf_info_d3d11vpp;
// list of available filters:
static const vf_info_t *const filter_list[] = {
@@ -99,6 +100,9 @@ static const vf_info_t *const filter_list[] = {
&vf_info_vdpaupp,
&vf_info_vdpaurb,
#endif
+#if HAVE_D3D_HWACCEL
+ &vf_info_d3d11vpp,
+#endif
NULL
};
diff --git a/video/filter/vf_d3d11vpp.c b/video/filter/vf_d3d11vpp.c
new file mode 100644
index 0000000000..6b6441fecd
--- /dev/null
+++ b/video/filter/vf_d3d11vpp.c
@@ -0,0 +1,490 @@
+/*
+ * This file is part of mpv.
+ *
+ * mpv is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * mpv is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with mpv. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <initguid.h>
+#include <assert.h>
+#include <windows.h>
+#include <d3d11.h>
+
+#include "common/common.h"
+#include "osdep/timer.h"
+#include "osdep/windows_utils.h"
+#include "vf.h"
+#include "refqueue.h"
+#include "video/hwdec.h"
+#include "video/mp_image_pool.h"
+
+// missing in MinGW
+#define D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB 0x2
+
+struct vf_priv_s {
+ ID3D11Device *vo_dev;
+
+ ID3D11DeviceContext *device_ctx;
+ ID3D11VideoDevice *video_dev;
+ ID3D11VideoContext *video_ctx;
+
+ ID3D11VideoProcessor *video_proc;
+ ID3D11VideoProcessorEnumerator *vp_enum;
+ D3D11_VIDEO_FRAME_FORMAT d3d_frame_format;
+
+ struct mp_image_params params;
+ int c_w, c_h;
+
+ struct mp_image_pool *pool;
+
+ struct mp_refqueue *queue;
+
+ int deint_enabled;
+ int interlaced_only;
+};
+
+struct d3d11va_surface {
+ ID3D11Texture2D *texture;
+ int subindex;
+ ID3D11VideoDecoderOutputView *surface;
+};
+
+static void release_tex(void *arg)
+{
+ ID3D11Texture2D *texture = arg;
+
+ ID3D11Texture2D_Release(texture);
+}
+
+static struct mp_image *alloc_surface(ID3D11Device *dev, DXGI_FORMAT format,
+ int hw_subfmt, int w, int h, bool shared)
+{
+ HRESULT hr;
+
+ ID3D11Texture2D *texture = NULL;
+ D3D11_TEXTURE2D_DESC texdesc = {
+ .Width = w,
+ .Height = h,
+ .Format = format,
+ .MipLevels = 1,
+ .ArraySize = 1,
+ .SampleDesc = { .Count = 1 },
+ .Usage = D3D11_USAGE_DEFAULT,
+ .BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE,
+ .MiscFlags = shared ? D3D11_RESOURCE_MISC_SHARED : 0,
+ };
+ hr = ID3D11Device_CreateTexture2D(dev, &texdesc, NULL, &texture);
+ if (FAILED(hr))
+ return NULL;
+
+ struct mp_image *mpi = mp_image_new_custom_ref(NULL, texture, release_tex);
+ if (!mpi)
+ abort();
+
+ mp_image_setfmt(mpi, IMGFMT_D3D11VA);
+ mp_image_set_size(mpi, w, h);
+ mpi->params.hw_subfmt = hw_subfmt;
+
+ mpi->planes[1] = (void *)texture;
+ mpi->planes[2] = (void *)(intptr_t)0;
+
+ return mpi;
+}
+
+static struct mp_image *alloc_pool_nv12(void *pctx, int fmt, int w, int h)
+{
+ ID3D11Device *dev = pctx;
+ assert(fmt == IMGFMT_D3D11VA);
+
+ return alloc_surface(dev, DXGI_FORMAT_NV12, IMGFMT_NV12, w, h, false);
+}
+
+static void flush_frames(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+ mp_refqueue_flush(p->queue);
+}
+
+static int filter_ext(struct vf_instance *vf, struct mp_image *in)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ mp_refqueue_set_refs(p->queue, 0, 0);
+ mp_refqueue_set_mode(p->queue,
+ (p->deint_enabled ? MP_MODE_DEINT : 0) |
+ MP_MODE_OUTPUT_FIELDS |
+ (p->interlaced_only ? MP_MODE_INTERLACED_ONLY : 0));
+
+ mp_refqueue_add_input(p->queue, in);
+ return 0;
+}
+
+static void destroy_video_proc(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ if (p->video_proc)
+ ID3D11VideoProcessor_Release(p->video_proc);
+ p->video_proc = NULL;
+
+ if (p->vp_enum)
+ ID3D11VideoProcessorEnumerator_Release(p->vp_enum);
+ p->vp_enum = NULL;
+}
+
+static int recreate_video_proc(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+ HRESULT hr;
+
+ destroy_video_proc(vf);
+
+ // Note: we skip any deinterlacing considerations for now.
+ D3D11_VIDEO_PROCESSOR_CONTENT_DESC vpdesc = {
+ .InputFrameFormat = p->d3d_frame_format,
+ .InputWidth = p->c_w,
+ .InputHeight = p->c_h,
+ .OutputWidth = p->params.w,
+ .OutputHeight = p->params.h,
+ };
+ hr = ID3D11VideoDevice_CreateVideoProcessorEnumerator(p->video_dev, &vpdesc,
+ &p->vp_enum);
+ if (FAILED(hr))
+ goto fail;
+
+ D3D11_VIDEO_PROCESSOR_CAPS caps;
+ hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorCaps(p->vp_enum, &caps);
+ if (FAILED(hr))
+ goto fail;
+
+ int rindex = -1;
+ for (int n = 0; n < caps.RateConversionCapsCount; n++) {
+ D3D11_VIDEO_PROCESSOR_RATE_CONVERSION_CAPS rcaps;
+ hr = ID3D11VideoProcessorEnumerator_GetVideoProcessorRateConversionCaps
+ (p->vp_enum, n, &rcaps);
+ if (FAILED(hr))
+ goto fail;
+ if (rcaps.ProcessorCaps & D3D11_VIDEO_PROCESSOR_PROCESSOR_CAPS_DEINTERLACE_BOB)
+ {
+ rindex = n;
+ break;
+ }
+ }
+
+ if (rindex < 0) {
+ MP_ERR(vf, "No suitable video processor found.\n");
+ goto fail;
+ }
+
+ // Create the video processor using the BOB-capable rate converter found above.
+ hr = ID3D11VideoDevice_CreateVideoProcessor(p->video_dev, p->vp_enum, rindex,
+ &p->video_proc);
+ if (FAILED(hr)) {
+ MP_ERR(vf, "Failed to create D3D11 video processor.\n");
+ goto fail;
+ }
+
+ // Note: libavcodec does not support cropping left/top with hwaccel.
+ RECT src_rc = {
+ .right = p->params.w,
+ .bottom = p->params.h,
+ };
+ ID3D11VideoContext_VideoProcessorSetStreamSourceRect(p->video_ctx,
+ p->video_proc,
+ 0, TRUE, &src_rc);
+
+ // This is supposed to stop drivers from fucking up the video quality.
+ ID3D11VideoContext_VideoProcessorSetStreamAutoProcessingMode(p->video_ctx,
+ p->video_proc,
+ 0, FALSE);
+
+ ID3D11VideoContext_VideoProcessorSetStreamOutputRate(p->video_ctx,
+ p->video_proc,
+ 0,
+ D3D11_VIDEO_PROCESSOR_OUTPUT_RATE_NORMAL,
+ FALSE, 0);
+
+ D3D11_VIDEO_PROCESSOR_COLOR_SPACE csp = {
+ .YCbCr_Matrix = p->params.colorspace != MP_CSP_BT_601,
+ .Nominal_Range = p->params.colorlevels == MP_CSP_LEVELS_TV ? 1 : 2,
+ };
+ ID3D11VideoContext_VideoProcessorSetStreamColorSpace(p->video_ctx,
+ p->video_proc,
+ 0, &csp);
+ ID3D11VideoContext_VideoProcessorSetOutputColorSpace(p->video_ctx,
+ p->video_proc,
+ &csp);
+
+ return 0;
+fail:
+ destroy_video_proc(vf);
+ return -1;
+}
+
+static int render(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+ int res = -1;
+ HRESULT hr;
+ ID3D11VideoProcessorInputView *in_view = NULL;
+ ID3D11VideoProcessorOutputView *out_view = NULL;
+ struct mp_image *in = NULL, *out = NULL;
+ out = mp_image_pool_get(p->pool, IMGFMT_D3D11VA, p->params.w, p->params.h);
+ if (!out)
+ goto cleanup;
+
+ ID3D11Texture2D *d3d_out_tex = (void *)out->planes[1];
+
+ in = mp_refqueue_get(p->queue, 0);
+ if (!in)
+ goto cleanup;
+ ID3D11Texture2D *d3d_tex = (void *)in->planes[1];
+ int d3d_subindex = (intptr_t)in->planes[2];
+
+ mp_image_copy_attributes(out, in);
+
+ D3D11_VIDEO_FRAME_FORMAT d3d_frame_format;
+ if (!mp_refqueue_is_interlaced(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
+ } else if (mp_refqueue_top_field_first(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
+ } else {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
+ }
+
+ D3D11_TEXTURE2D_DESC texdesc;
+ ID3D11Texture2D_GetDesc(d3d_tex, &texdesc);
+ if (!p->video_proc || p->c_w != texdesc.Width || p->c_h != texdesc.Height ||
+ p->d3d_frame_format != d3d_frame_format)
+ {
+ p->c_w = texdesc.Width;
+ p->c_h = texdesc.Height;
+ p->d3d_frame_format = d3d_frame_format;
+ if (recreate_video_proc(vf) < 0)
+ goto cleanup;
+ }
+
+ if (!mp_refqueue_is_interlaced(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
+ } else if (mp_refqueue_is_top_field(p->queue)) {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
+ } else {
+ d3d_frame_format = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_BOTTOM_FIELD_FIRST;
+ }
+
+ ID3D11VideoContext_VideoProcessorSetStreamFrameFormat(p->video_ctx,
+ p->video_proc,
+ 0, d3d_frame_format);
+
+ D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC indesc = {
+ .ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D,
+ .Texture2D = {
+ .ArraySlice = d3d_subindex,
+ },
+ };
+ hr = ID3D11VideoDevice_CreateVideoProcessorInputView(p->video_dev,
+ (ID3D11Resource *)d3d_tex,
+ p->vp_enum, &indesc,
+ &in_view);
+ if (FAILED(hr)) {
+ MP_ERR(vf, "Could not create ID3D11VideoProcessorInputView\n");
+ goto cleanup;
+ }
+
+ D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC outdesc = {
+ .ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D,
+ };
+ hr = ID3D11VideoDevice_CreateVideoProcessorOutputView(p->video_dev,
+ (ID3D11Resource *)d3d_out_tex,
+ p->vp_enum, &outdesc,
+ &out_view);
+ if (FAILED(hr))
+ goto cleanup;
+
+ D3D11_VIDEO_PROCESSOR_STREAM stream = {
+ .Enable = TRUE,
+ .pInputSurface = in_view,
+ };
+ int frame = mp_refqueue_is_second_field(p->queue);
+ hr = ID3D11VideoContext_VideoProcessorBlt(p->video_ctx, p->video_proc,
+ out_view, frame, 1, &stream);
+ if (FAILED(hr)) {
+ MP_ERR(vf, "VideoProcessorBlt failed.\n");
+ goto cleanup;
+ }
+
+ res = 0;
+cleanup:
+ if (in_view)
+ ID3D11VideoProcessorInputView_Release(in_view);
+ if (out_view)
+ ID3D11VideoProcessorOutputView_Release(out_view);
+ if (res >= 0) {
+ vf_add_output_frame(vf, out);
+ } else {
+ talloc_free(out);
+ }
+ mp_refqueue_next_field(p->queue);
+ return res;
+}
+
+
+static int filter_out(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ if (!mp_refqueue_has_output(p->queue))
+ return 0;
+
+ // no filtering
+ if (!mp_refqueue_should_deint(p->queue)) {
+ struct mp_image *in = mp_refqueue_get(p->queue, 0);
+ vf_add_output_frame(vf, mp_image_new_ref(in));
+ mp_refqueue_next(p->queue);
+ return 0;
+ }
+
+ return render(vf);
+}
+
+static int reconfig(struct vf_instance *vf, struct mp_image_params *in,
+ struct mp_image_params *out)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ flush_frames(vf);
+ talloc_free(p->pool);
+ p->pool = NULL;
+
+ destroy_video_proc(vf);
+
+ p->params = *in;
+
+ p->pool = mp_image_pool_new(20);
+ mp_image_pool_set_allocator(p->pool, alloc_pool_nv12, p->vo_dev);
+ mp_image_pool_set_lru(p->pool);
+
+ *out = *in;
+ out->imgfmt = IMGFMT_D3D11VA;
+ out->hw_subfmt = IMGFMT_NV12;
+
+ return 0;
+}
+
+static void uninit(struct vf_instance *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ destroy_video_proc(vf);
+
+ flush_frames(vf);
+ mp_refqueue_free(p->queue);
+ talloc_free(p->pool);
+
+ if (p->video_ctx)
+ ID3D11VideoContext_Release(p->video_ctx);
+
+ if (p->video_dev)
+ ID3D11VideoDevice_Release(p->video_dev);
+
+ if (p->device_ctx)
+ ID3D11DeviceContext_Release(p->device_ctx);
+
+ if (p->vo_dev)
+ ID3D11Device_Release(p->vo_dev);
+}
+
+static int query_format(struct vf_instance *vf, unsigned int imgfmt)
+{
+ if (imgfmt == IMGFMT_D3D11VA)
+ return vf_next_query_format(vf, IMGFMT_D3D11VA);
+ return 0;
+}
+
+static int control(struct vf_instance *vf, int request, void* data)
+{
+ struct vf_priv_s *p = vf->priv;
+ switch (request){
+ case VFCTRL_GET_DEINTERLACE:
+ *(int*)data = !!p->deint_enabled;
+ return true;
+ case VFCTRL_SET_DEINTERLACE:
+ p->deint_enabled = !!*(int*)data;
+ return true;
+ case VFCTRL_SEEK_RESET:
+ flush_frames(vf);
+ return true;
+ default:
+ return CONTROL_UNKNOWN;
+ }
+}
+
+static int vf_open(vf_instance_t *vf)
+{
+ struct vf_priv_s *p = vf->priv;
+
+ vf->reconfig = reconfig;
+ vf->filter_ext = filter_ext;
+ vf->filter_out = filter_out;
+ vf->query_format = query_format;
+ vf->uninit = uninit;
+ vf->control = control;
+
+ p->queue = mp_refqueue_alloc();
+
+ p->vo_dev = hwdec_devices_load(vf->hwdec_devs, HWDEC_D3D11VA);
+ if (!p->vo_dev)
+ return 0;
+
+ ID3D11Device_AddRef(p->vo_dev);
+
+ HRESULT hr;
+
+ hr = ID3D11Device_QueryInterface(p->vo_dev, &IID_ID3D11VideoDevice,
+ (void **)&p->video_dev);
+ if (FAILED(hr))
+ goto fail;
+
+ ID3D11Device_GetImmediateContext(p->vo_dev, &p->device_ctx);
+ if (!p->device_ctx)
+ goto fail;
+ hr = ID3D11DeviceContext_QueryInterface(p->device_ctx, &IID_ID3D11VideoContext,
+ (void **)&p->video_ctx);
+ if (FAILED(hr))
+ goto fail;
+
+ return 1;
+
+fail:
+ uninit(vf);
+ return 0;
+}
+
+#define OPT_BASE_STRUCT struct vf_priv_s
+static const m_option_t vf_opts_fields[] = {
+ OPT_FLAG("deint", deint_enabled, 0),
+ OPT_FLAG("interlaced-only", interlaced_only, 0),
+ {0}
+};
+
+const vf_info_t vf_info_d3d11vpp = {
+ .description = "D3D11 Video Post-Process Filter",
+ .name = "d3d11vpp",
+ .open = vf_open,
+ .priv_size = sizeof(struct vf_priv_s),
+ .priv_defaults = &(const struct vf_priv_s) {
+ .deint_enabled = 1,
+ .interlaced_only = 1,
+ },
+ .options = vf_opts_fields,
+};
diff --git a/wscript_build.py b/wscript_build.py
index 179d810422..0227793688 100644
--- a/wscript_build.py
+++ b/wscript_build.py
@@ -301,6 +301,7 @@ def build(ctx):
( "video/filter/vf.c" ),
( "video/filter/vf_buffer.c" ),
( "video/filter/vf_crop.c" ),
+ ( "video/filter/vf_d3d11vpp.c", "d3d-hwaccel" ),
( "video/filter/vf_dlopen.c", "dlopen" ),
( "video/filter/vf_dsize.c" ),
( "video/filter/vf_eq.c" ),