summaryrefslogtreecommitdiffstats
path: root/video/decode
diff options
context:
space:
mode:
authorwm4 <wm4@nowhere>2014-03-10 22:36:23 +0100
committerwm4 <wm4@nowhere>2014-03-10 22:56:26 +0100
commitccce58d6d63538911fa7bdd216a32e8444ea36b8 (patch)
tree2a59a762d1242ea796655c42531de0d64765d55c /video/decode
parentfbddbce01dec878c072cd9dd00da9ff035f3350f (diff)
downloadmpv-ccce58d6d63538911fa7bdd216a32e8444ea36b8.tar.bz2
mpv-ccce58d6d63538911fa7bdd216a32e8444ea36b8.tar.xz
video: initialize hw decoder in get_format
Apparently the "right" place to initialize the hardware decoder is in the libavcodec get_format callback. This doesn't change vda.c and vdpau_old.c, because I don't have OSX, and vdpau_old.c is probably going to be removed soon (if Libav ever manages to release Libav 10). So for now the init_decoder callback added with this commit is optional. This also means vdpau.c and vaapi.c don't have to manage and check the image parameters anymore. This change is probably needed for when libavcodec VDA support gets a new iteration of its API.
Diffstat (limited to 'video/decode')
-rw-r--r--video/decode/lavc.h7
-rw-r--r--video/decode/vaapi.c34
-rw-r--r--video/decode/vd_lavc.c22
-rw-r--r--video/decode/vdpau.c25
4 files changed, 42 insertions, 46 deletions
diff --git a/video/decode/lavc.h b/video/decode/lavc.h
index 867488de95..71d568aa8a 100644
--- a/video/decode/lavc.h
+++ b/video/decode/lavc.h
@@ -38,6 +38,10 @@ typedef struct lavc_ctx {
// For free use by hwdec implementation
void *hwdec_priv;
+ int hwdec_fmt;
+ int hwdec_w;
+ int hwdec_h;
+
// Legacy
bool do_dr1;
struct FramePool *dr1_buffer_pool;
@@ -59,7 +63,10 @@ struct vd_lavc_hwdec {
int (*probe)(struct vd_lavc_hwdec *hwdec, struct mp_hwdec_info *info,
const char *decoder);
int (*init)(struct lavc_ctx *ctx);
+ int (*init_decoder)(struct lavc_ctx *ctx, int fmt, int w, int h);
void (*uninit)(struct lavc_ctx *ctx);
+ // Note: if init_decoder is set, this will always use the values from the
+ // last successful init_decoder call. Otherwise, it's up to you.
struct mp_image *(*allocate_image)(struct lavc_ctx *ctx, int fmt,
int w, int h);
// Process the image returned by the libavcodec decoder.
diff --git a/video/decode/vaapi.c b/video/decode/vaapi.c
index d4b1c41840..bdddfcc7ef 100644
--- a/video/decode/vaapi.c
+++ b/video/decode/vaapi.c
@@ -64,7 +64,6 @@ struct priv {
struct vaapi_context *va_context;
struct vaapi_context va_context_storage;
- int format, w, h;
VASurfaceID surfaces[MAX_SURFACES];
struct va_surface_pool *pool;
@@ -163,15 +162,15 @@ static int is_direct_mapping(VADisplay display)
// We achieve this by reserving surfaces in the pool as needed.
// Releasing surfaces is necessary after filling the surface id list so
// that reserved surfaces can be reused for decoding.
-static bool preallocate_surfaces(struct lavc_ctx *ctx, int num)
+static bool preallocate_surfaces(struct lavc_ctx *ctx, int num, int w, int h)
{
struct priv *p = ctx->hwdec_priv;
- if (!va_surface_pool_reserve(p->pool, num, p->w, p->h)) {
+ if (!va_surface_pool_reserve(p->pool, num, w, h)) {
MP_ERR(p, "Could not allocate surfaces.\n");
return false;
}
for (int i = 0; i < num; i++) {
- struct va_surface *s = va_surface_pool_get(p->pool, p->w, p->h);
+ struct va_surface *s = va_surface_pool_get(p->pool, w, h);
p->surfaces[i] = s->id;
va_surface_release(s);
}
@@ -205,7 +204,7 @@ static bool has_profile(VAProfile *va_profiles, int num_profiles, VAProfile p)
return false;
}
-static int create_decoder(struct lavc_ctx *ctx)
+static int init_decoder(struct lavc_ctx *ctx, int fmt, int w, int h)
{
void *tmp = talloc_new(NULL);
@@ -213,8 +212,6 @@ static int create_decoder(struct lavc_ctx *ctx)
VAStatus status;
int res = -1;
- assert(p->format == IMGFMT_VAAPI);
-
destroy_decoder(ctx);
const struct hwdec_profile_entry *pe = hwdec_find_profile(ctx, profiles);
@@ -254,7 +251,7 @@ static int create_decoder(struct lavc_ctx *ctx)
goto error;
}
- if (!preallocate_surfaces(ctx, num_surfaces)) {
+ if (!preallocate_surfaces(ctx, num_surfaces, w, h)) {
MP_ERR(p, "Could not allocate surfaces.\n");
goto error;
}
@@ -265,7 +262,7 @@ static int create_decoder(struct lavc_ctx *ctx)
if (!CHECK_VA_STATUS(p, "vaQueryConfigEntrypoints()"))
goto error;
- int entrypoint = find_entrypoint(p->format, ep, num_ep);
+ int entrypoint = find_entrypoint(IMGFMT_VAAPI, ep, num_ep);
if (entrypoint < 0) {
MP_ERR(p, "Could not find VA entrypoint.\n");
goto error;
@@ -289,7 +286,7 @@ static int create_decoder(struct lavc_ctx *ctx)
goto error;
status = vaCreateContext(p->display, p->va_context->config_id,
- p->w, p->h, VA_PROGRESSIVE,
+ w, h, VA_PROGRESSIVE,
p->surfaces, num_surfaces,
&p->va_context->context_id);
if (!CHECK_VA_STATUS(p, "vaCreateContext()"))
@@ -306,20 +303,7 @@ static struct mp_image *allocate_image(struct lavc_ctx *ctx, int format,
{
struct priv *p = ctx->hwdec_priv;
- if (format != IMGFMT_VAAPI)
- return NULL;
-
- if (format != p->format || w != p->w || h != p->h ||
- p->va_context->context_id == VA_INVALID_ID)
- {
- p->format = format;
- p->w = w;
- p->h = h;
- if (create_decoder(ctx) < 0)
- return NULL;
- }
-
- struct va_surface *s = va_surface_pool_get(p->pool, p->w, p->h);
+ struct va_surface *s = va_surface_pool_get(p->pool, w, h);
if (s) {
for (int n = 0; n < MAX_SURFACES; n++) {
if (p->surfaces[n] == s->id)
@@ -471,6 +455,7 @@ const struct vd_lavc_hwdec mp_vd_lavc_vaapi = {
.probe = probe,
.init = init,
.uninit = uninit,
+ .init_decoder = init_decoder,
.allocate_image = allocate_image,
};
@@ -480,6 +465,7 @@ const struct vd_lavc_hwdec mp_vd_lavc_vaapi_copy = {
.probe = probe_copy,
.init = init_copy,
.uninit = uninit,
+ .init_decoder = init_decoder,
.allocate_image = allocate_image,
.process_image = copy_image,
};
diff --git a/video/decode/vd_lavc.c b/video/decode/vd_lavc.c
index df0c098155..eed79e81b1 100644
--- a/video/decode/vd_lavc.c
+++ b/video/decode/vd_lavc.c
@@ -339,6 +339,7 @@ static void init_avctx(struct dec_video *vd, const char *decoder,
ctx->do_dr1 = ctx->do_hw_dr1 = 0;
ctx->pix_fmt = AV_PIX_FMT_NONE;
ctx->hwdec = hwdec;
+ ctx->hwdec_fmt = 0;
ctx->avctx = avcodec_alloc_context3(lavc_codec);
AVCodecContext *avctx = ctx->avctx;
avctx->opaque = vd;
@@ -511,8 +512,20 @@ static enum AVPixelFormat get_format_hwdec(struct AVCodecContext *avctx,
for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
const int *okfmt = ctx->hwdec->image_formats;
for (int n = 0; okfmt && okfmt[n]; n++) {
- if (imgfmt2pixfmt(okfmt[n]) == fmt[i])
+ if (imgfmt2pixfmt(okfmt[n]) == fmt[i]) {
+ ctx->hwdec_w = avctx->width;
+ ctx->hwdec_h = avctx->height;
+ ctx->hwdec_fmt = okfmt[n];
+ if (ctx->hwdec->init_decoder) {
+ if (ctx->hwdec->init_decoder(ctx, ctx->hwdec_fmt,
+ ctx->hwdec_w, ctx->hwdec_h) < 0)
+ {
+ ctx->hwdec_fmt = 0;
+ break;
+ }
+ }
return fmt[i];
+ }
}
}
@@ -535,7 +548,7 @@ static struct mp_image *get_surface_hwdec(struct dec_video *vd, AVFrame *pic)
* get_buffer callback.
*/
int imgfmt = pixfmt2imgfmt(pic->format);
- if (!IMGFMT_IS_HWACCEL(imgfmt))
+ if (!IMGFMT_IS_HWACCEL(imgfmt) || !ctx->hwdec)
return NULL;
// Using frame->width/height is bad. For non-mod 16 video (which would
@@ -545,6 +558,11 @@ static struct mp_image *get_surface_hwdec(struct dec_video *vd, AVFrame *pic)
int w = ctx->avctx->width;
int h = ctx->avctx->height;
+ if (ctx->hwdec->init_decoder) {
+ if (imgfmt != ctx->hwdec_fmt && w != ctx->hwdec_w && h != ctx->hwdec_h)
+ return NULL;
+ }
+
struct mp_image *mpi = ctx->hwdec->allocate_image(ctx, imgfmt, w, h);
if (mpi) {
diff --git a/video/decode/vdpau.c b/video/decode/vdpau.c
index 6c0c891ad4..995b5460e8 100644
--- a/video/decode/vdpau.c
+++ b/video/decode/vdpau.c
@@ -37,9 +37,6 @@ struct priv {
uint64_t preemption_counter;
AVVDPAUContext context;
-
- int vid_width;
- int vid_height;
};
struct profile_entry {
@@ -97,7 +94,7 @@ static int handle_preemption(struct lavc_ctx *ctx)
return 0;
}
-static bool create_vdp_decoder(struct lavc_ctx *ctx)
+static int init_decoder(struct lavc_ctx *ctx, int fmt, int w, int h)
{
struct priv *p = ctx->hwdec_priv;
struct vdp_functions *vdp = p->mpvdp->vdp;
@@ -125,17 +122,16 @@ static bool create_vdp_decoder(struct lavc_ctx *ctx)
MP_ERR(p, "Codec or profile not supported by hardware.\n");
goto fail;
}
- if (p->vid_width > maxw || p->vid_height > maxh) {
+ if (w > maxw || h > maxh) {
MP_ERR(p, "Video resolution(%dx%d) is larger than the maximum size(%dx%d) supported.\n",
- p->vid_width, p->vid_height, maxw, maxh);
+ w, h, maxw, maxh);
goto fail;
}
int maxrefs = hwdec_get_max_refs(ctx);
vdp_st = vdp->decoder_create(p->vdp_device, pe->hw_profile,
- p->vid_width, p->vid_height, maxrefs,
- &p->context.decoder);
+ w, h, maxrefs, &p->context.decoder);
CHECK_VDP_WARNING(p, "Failed creating VDPAU decoder");
if (vdp_st != VDP_STATUS_OK)
goto fail;
@@ -151,20 +147,8 @@ static struct mp_image *allocate_image(struct lavc_ctx *ctx, int fmt,
{
struct priv *p = ctx->hwdec_priv;
- if (fmt != IMGFMT_VDPAU)
- return NULL;
-
handle_preemption(ctx);
- if (w != p->vid_width || h != p->vid_height ||
- p->context.decoder == VDP_INVALID_HANDLE)
- {
- p->vid_width = w;
- p->vid_height = h;
- if (!create_vdp_decoder(ctx))
- return NULL;
- }
-
VdpChromaType chroma;
mp_vdpau_get_format(IMGFMT_VDPAU, &chroma, NULL);
@@ -226,5 +210,6 @@ const struct vd_lavc_hwdec mp_vd_lavc_vdpau = {
.probe = probe,
.init = init,
.uninit = uninit,
+ .init_decoder = init_decoder,
.allocate_image = allocate_image,
};