summaryrefslogtreecommitdiffstats
path: root/video/out/opengl/hwdec_vaegl.c
diff options
context:
space:
mode:
authorwm4 <wm4@nowhere>2016-04-11 20:46:05 +0200
committerwm4 <wm4@nowhere>2016-04-11 22:03:26 +0200
commitf5ff2656e0d192a2e25fe5f65edf219972211a48 (patch)
tree4400fd5f98c38a7c0923a5f8b468335684334f29 /video/out/opengl/hwdec_vaegl.c
parent49431626cb1bde400ab6de6adc68ce39cdbbf6f8 (diff)
downloadmpv-f5ff2656e0d192a2e25fe5f65edf219972211a48.tar.bz2
mpv-f5ff2656e0d192a2e25fe5f65edf219972211a48.tar.xz
vaapi: determine surface format in decoder, not in renderer
Until now, we have made the assumption that a driver will use only 1 hardware surface format. The format is dictated by the driver (you don't create surfaces with a specific format - you just pass a rt_format and get a surface that will be in a specific driver-chosen format). In particular, the renderer created a dummy surface to probe the format, and hoped the decoder would produce the same format. Due to a driver bug this required a workaround to actually get the same format as the driver did. Change this so that the format is determined in the decoder. The format is then passed down as hw_subfmt, which allows the renderer to configure itself with the correct format. If the hardware surface changes its format midstream, the renderer can be reconfigured using the normal mechanisms. This calls va_surface_init_subformat() each time after the decoder returns a surface. Since libavcodec/AVFrame has no concept of sub-formats, this is unavoidable. It creates and destroys a derived VAImage, but this shouldn't have any bad performance effects (at least I didn't notice any measurable effects). Note that vaDeriveImage() failures are silently ignored as some drivers (the vdpau wrapper) support neither vaDeriveImage, nor EGL interop. In addition, we still probe whether we can map an image in the EGL interop code. This is important as it's the only way to determine whether EGL interop is supported at all. With respect to the driver bug mentioned above, it doesn't matter which format the test surface has. In vf_vavpp, also remove the rt_format guessing business. I think the existing logic was a bit meaningless anyway. It's not even a given that vavpp produces the same rt_format for output.
Diffstat (limited to 'video/out/opengl/hwdec_vaegl.c')
-rw-r--r-- video/out/opengl/hwdec_vaegl.c | 50
1 file changed, 13 insertions, 37 deletions
diff --git a/video/out/opengl/hwdec_vaegl.c b/video/out/opengl/hwdec_vaegl.c
index 7b34d6bb5c..d62a20a219 100644
--- a/video/out/opengl/hwdec_vaegl.c
+++ b/video/out/opengl/hwdec_vaegl.c
@@ -172,30 +172,6 @@ static void destroy(struct gl_hwdec *hw)
va_destroy(p->ctx);
}
-// Create an empty dummy VPP. This works around a weird bug that affects the
-// VA surface format, as it is reported by vaDeriveImage(). Before a VPP
-// context or a decoder context is created, the surface format will be reported
-// as YV12. Surfaces created after context creation will report NV12 (even
-// though surface creation does not take a context as argument!). Existing
-// surfaces will change their format from YV12 to NV12 as soon as the decoder
-// renders to them! Because we want know the surface format in advance (to
-// simplify our renderer configuration logic), we hope that this hack gives
-// us reasonable behavior.
-// See: https://bugs.freedesktop.org/show_bug.cgi?id=79848
-static void insane_hack(struct gl_hwdec *hw)
-{
- struct priv *p = hw->priv;
- VAConfigID config;
- if (vaCreateConfig(p->display, VAProfileNone, VAEntrypointVideoProc,
- NULL, 0, &config) == VA_STATUS_SUCCESS)
- {
- // We want to keep this until the VADisplay is destroyed. It will
- // implicitly free the context.
- VAContextID context;
- vaCreateContext(p->display, config, 0, 0, 0, NULL, 0, &context);
- }
-}
-
static int create(struct gl_hwdec *hw)
{
GL *gl = hw->gl;
@@ -248,7 +224,6 @@ static int create(struct gl_hwdec *hw)
MP_VERBOSE(p, "using VAAPI EGL interop\n");
- insane_hack(hw);
if (!test_format(hw)) {
destroy(hw);
return -1;
@@ -278,6 +253,18 @@ static int reinit(struct gl_hwdec *hw, struct mp_image_params *params)
}
gl->BindTexture(GL_TEXTURE_2D, 0);
+ hw->converted_imgfmt = va_fourcc_to_imgfmt(params->hw_subfmt);
+ if (hw->converted_imgfmt != IMGFMT_NV12 &&
+ hw->converted_imgfmt != IMGFMT_420P)
+ {
+ MP_FATAL(p, "unsupported VA image format %s\n",
+ mp_tag_str(params->hw_subfmt));
+ return -1;
+ }
+
+ MP_VERBOSE(p, "format: %s %s\n", mp_tag_str(params->hw_subfmt),
+ mp_imgfmt_to_name(hw->converted_imgfmt));
+
return 0;
}
@@ -308,18 +295,6 @@ static int map_image(struct gl_hwdec *hw, struct mp_image *hw_image,
goto err;
int mpfmt = va_fourcc_to_imgfmt(va_image->format.fourcc);
- if (mpfmt != IMGFMT_NV12 && mpfmt != IMGFMT_420P) {
- MP_FATAL(p, "unsupported VA image format %s\n",
- mp_tag_str(va_image->format.fourcc));
- goto err;
- }
-
- if (!hw->converted_imgfmt) {
- MP_VERBOSE(p, "format: %s %s\n", mp_tag_str(va_image->format.fourcc),
- mp_imgfmt_to_name(mpfmt));
- hw->converted_imgfmt = mpfmt;
- }
-
if (hw->converted_imgfmt != mpfmt) {
MP_FATAL(p, "mid-stream hwdec format change (%s -> %s) not supported\n",
mp_imgfmt_to_name(hw->converted_imgfmt), mp_imgfmt_to_name(mpfmt));
@@ -387,6 +362,7 @@ static bool test_format(struct gl_hwdec *hw)
va_pool_set_allocator(alloc, p->ctx, VA_RT_FORMAT_YUV420);
struct mp_image *surface = mp_image_pool_get(alloc, IMGFMT_VAAPI, 64, 64);
if (surface) {
+ va_surface_init_subformat(surface);
struct mp_image_params params = surface->params;
if (reinit(hw, &params) >= 0) {
GLuint textures[4];