summary | refs | log | tree | commit | diff | stats
path: root/player
diff options
context:
space:
mode:
authorwm4 <wm4@nowhere>2014-04-28 19:44:35 +0200
committerwm4 <wm4@nowhere>2014-04-28 22:23:31 +0200
commit42f65ce1083ca38605a8c775ee339cf0cc669cb8 (patch)
treef10e5c7b5e8646eb8b8c507f09866a76b68fd9d1 /player
parenta6dafb061fb4be0f02070acd92f8c6a0fde31823 (diff)
downloadmpv-42f65ce1083ca38605a8c775ee339cf0cc669cb8.tar.bz2
mpv-42f65ce1083ca38605a8c775ee339cf0cc669cb8.tar.xz
video: don't drop last frame when deinterlacing with yadif
Or in other words, add support for properly draining remaining frames from video filters. vf_yadif is buffering at least one frame, and the buffered frame was not retrieved on EOF. For most filters, ignore this for now, and just adjust them to the changed semantics of filter_ext. But for vf_lavfi (used by vf_yadif), real support is implemented. libavfilter handles this simply by passing a NULL frame to av_buffersrc_add_frame(), so we just have to make mp_to_av() handle NULL arguments. In load_next_vo_frame(), we first try to output a frame buffered in the VO, then the filter, and then (if EOF is reached and there's still no new frame) the VO again, with draining enabled. I guess this was implemented slightly incorrectly before, because the filter chain still could have had remaining output frames.
Diffstat (limited to 'player')
-rw-r--r--player/video.c12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/player/video.c b/player/video.c
index 8c607a7036..b995ed2575 100644
--- a/player/video.c
+++ b/player/video.c
@@ -301,12 +301,12 @@ void mp_force_video_refresh(struct MPContext *mpctx)
queue_seek(mpctx, MPSEEK_ABSOLUTE, mpctx->last_vo_pts, 1, true);
}
-static bool filter_output_queued_frame(struct MPContext *mpctx)
+static bool filter_output_queued_frame(struct MPContext *mpctx, bool eof)
{
struct dec_video *d_video = mpctx->d_video;
struct vo *video_out = mpctx->video_out;
- struct mp_image *img = vf_output_queued_frame(d_video->vfilter);
+ struct mp_image *img = vf_output_queued_frame(d_video->vfilter, eof);
if (img)
vo_queue_image(video_out, img);
talloc_free(img);
@@ -316,9 +316,11 @@ static bool filter_output_queued_frame(struct MPContext *mpctx)
static bool load_next_vo_frame(struct MPContext *mpctx, bool eof)
{
- if (vo_get_buffered_frame(mpctx->video_out, eof) >= 0)
+ if (vo_get_buffered_frame(mpctx->video_out, false) >= 0)
return true;
- if (filter_output_queued_frame(mpctx))
+ if (filter_output_queued_frame(mpctx, eof))
+ return true;
+ if (eof && vo_get_buffered_frame(mpctx->video_out, true) >= 0)
return true;
return false;
}
@@ -366,7 +368,7 @@ static void filter_video(struct MPContext *mpctx, struct mp_image *frame,
mp_image_set_params(frame, &d_video->vf_input); // force csp/aspect overrides
vf_filter_frame(d_video->vfilter, frame);
- filter_output_queued_frame(mpctx);
+ filter_output_queued_frame(mpctx, false);
}
// Reconfigure the video chain and the VO on a format change. This is separate,