diff options
author | wm4 <wm4@nowhere> | 2017-01-17 10:56:16 +0100 |
---|---|---|
committer | wm4 <wm4@nowhere> | 2017-01-17 15:48:56 +0100 |
commit | cda31b71debdd52cfe9a36e8eea318899acdd0d2 (patch) | |
tree | 76f4abd393b15075b7922b1c02dd5c5cf5bfc1f9 /video/decode/vd_lavc.c | |
parent | a4cdd8bb823dbb8f981a4b5efcfcb0f9d118844e (diff) | |
download | mpv-cda31b71debdd52cfe9a36e8eea318899acdd0d2.tar.bz2 mpv-cda31b71debdd52cfe9a36e8eea318899acdd0d2.tar.xz |
vaapi: move AVHWFramesContext setup code to common code
In a way it can be reused. For now, sw_format and initial_pool_size
determination are still vaapi-specific. I'm hoping this can be eventually
moved to libavcodec in some way. Checking the supported_formats array is
not really vaapi-specific, and could be moved to the generic code path
too, but for now it would make things more complex.
hw_cuda.c can't use this, but hw_vdpau.c will in the following commit.
Diffstat (limited to 'video/decode/vd_lavc.c')
-rw-r--r-- | video/decode/vd_lavc.c | 54 |
1 file changed, 54 insertions, 0 deletions
diff --git a/video/decode/vd_lavc.c b/video/decode/vd_lavc.c index 10f9723db0..a5454056db 100644 --- a/video/decode/vd_lavc.c +++ b/video/decode/vd_lavc.c @@ -598,6 +598,7 @@ static void uninit_avctx(struct dec_video *vd) flush_all(vd); av_frame_free(&ctx->pic); + av_buffer_unref(&ctx->cached_hw_frames_ctx); if (ctx->avctx) { if (avcodec_close(ctx->avctx) < 0) @@ -648,6 +649,59 @@ static void update_image_params(struct dec_video *vd, AVFrame *frame, params->stereo_in = vd->codec->stereo_mode; } +// Allocate and set AVCodecContext.hw_frames_ctx. Also caches them on redundant +// calls (useful because seeks issue get_format, which clears hw_frames_ctx). +// device_ctx: reference to an AVHWDeviceContext +// av_sw_format: AV_PIX_FMT_ for the underlying hardware frame format +// initial_pool_size: number of frames in the memory pool on creation +// Return >=0 on success, <0 on error. +int hwdec_setup_hw_frames_ctx(struct lavc_ctx *ctx, AVBufferRef *device_ctx, + int av_sw_format, int initial_pool_size) +{ + int w = ctx->avctx->coded_width; + int h = ctx->avctx->coded_height; + int av_hw_format = imgfmt2pixfmt(ctx->hwdec_fmt); + + if (ctx->cached_hw_frames_ctx) { + AVHWFramesContext *fctx = (void *)ctx->cached_hw_frames_ctx->data; + if (fctx->width != w || fctx->height != h || + fctx->sw_format != av_sw_format || + fctx->format != av_hw_format) + { + av_buffer_unref(&ctx->cached_hw_frames_ctx); + } + } + + if (!ctx->cached_hw_frames_ctx) { + ctx->cached_hw_frames_ctx = av_hwframe_ctx_alloc(device_ctx); + if (!ctx->cached_hw_frames_ctx) + return -1; + + AVHWFramesContext *fctx = (void *)ctx->cached_hw_frames_ctx->data; + + fctx->format = av_hw_format; + fctx->sw_format = av_sw_format; + fctx->width = w; + fctx->height = h; + + fctx->initial_pool_size = initial_pool_size; + + hwdec_lock(ctx); + int res = av_hwframe_ctx_init(ctx->cached_hw_frames_ctx); + hwdec_unlock(ctx); + + if (res > 0) { + MP_ERR(ctx, "Failed to allocate hw frames.\n"); + 
av_buffer_unref(&ctx->cached_hw_frames_ctx); + return -1; + } + } + + assert(!ctx->avctx->hw_frames_ctx); + ctx->avctx->hw_frames_ctx = av_buffer_ref(ctx->cached_hw_frames_ctx); + return ctx->avctx->hw_frames_ctx ? 0 : -1; +} + static enum AVPixelFormat get_format_hwdec(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt) { |