author     wm4 <wm4@nowhere>    2015-01-03 03:01:58 +0100
committer  wm4 <wm4@nowhere>    2015-01-03 03:01:58 +0100
commit     f5ed13bcd4aacd362db9da3b5ad3f8747f13d944 (patch)
tree       289bf7a376236d2da9547f5bbcdb9e62529025b2 /player
parent     73ea0ddc080a53b5474205703ba3b189d0352d3a (diff)
video: better pipelining with vf_vapoursynth
Most of this is explained in the code comments. This change should improve performance with vapoursynth, especially if concurrent requests are used. It should change nothing if vf_vapoursynth is not in the filter chain, since non-threaded filters obviously cannot finish filtering frames asynchronously.
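The heart of the change is letting a threaded filter wake the player up when it finishes a frame asynchronously, rather than relying on the player to poll. Below is a minimal, self-contained sketch of that wakeup-callback pattern; all names and the pthread machinery are illustrative stand-ins, not mpv's internals (in mpv the callback simply calls mp_input_wakeup(), as the main.c hunk below shows).

/* Hypothetical sketch: a worker thread stands in for a threaded filter such
 * as vf_vapoursynth. When it finishes a frame, it invokes the callback the
 * player registered, which interrupts the playloop's wait. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct playloop {
    pthread_mutex_t lock;
    pthread_cond_t wakeup;
    bool frame_ready;
};

/* Registered as the filter's wakeup callback; may run on the filter thread. */
static void filter_wakeup_cb(void *ctx)
{
    struct playloop *p = ctx;
    pthread_mutex_lock(&p->lock);
    p->frame_ready = true;
    pthread_cond_signal(&p->wakeup);    /* interrupt the playloop's wait */
    pthread_mutex_unlock(&p->lock);
}

/* Stand-in for an asynchronous filter that finishes a frame some time later. */
static void *filter_thread(void *ctx)
{
    usleep(50 * 1000);                  /* pretend filtering takes a while */
    filter_wakeup_cb(ctx);              /* tell the player output is available */
    return NULL;
}

int main(void)
{
    struct playloop p = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .wakeup = PTHREAD_COND_INITIALIZER,
    };
    pthread_t th;
    pthread_create(&th, NULL, filter_thread, &p);

    /* Playloop: sleep until something (here, the filter) wakes us up. */
    pthread_mutex_lock(&p.lock);
    while (!p.frame_ready)
        pthread_cond_wait(&p.wakeup, &p.lock);
    pthread_mutex_unlock(&p.lock);

    printf("woken up: a filtered frame is ready\n");
    pthread_join(th, NULL);
    return 0;
}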
Diffstat (limited to 'player')
-rw-r--r--  player/core.h  |  1
-rw-r--r--  player/main.c  |  2
-rw-r--r--  player/video.c | 27
3 files changed, 26 insertions, 4 deletions
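For the consumer side, here is a rough sketch of the loop shape the video.c hunks below add: while the VO is not yet ready to accept the next frame, keep feeding input to the filter chain as long as it reports that it wants more, so a threaded filter can work ahead instead of idling. Every helper here is a hypothetical stub standing in for mpv's real functions (vo_is_ready_for_frame(), vf_needs_input(), video_decode_and_filter()); only the control flow mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

#define VD_ERROR    (-1)    /* mirrors the return codes used in the patch */
#define VD_PROGRESS 1

/* Hypothetical stubs; the real player queries the VO and the filter chain. */
static int  pending_requests = 3;    /* frames the async filter could still take */
static bool vo_ready(void)           { return false; }   /* VO still busy */
static int  filter_needs_input(void) { return pending_requests; }
static int  decode_and_feed(void)    { pending_requests--; return VD_PROGRESS; }

/* Mirrors the shape of video_feed_async_filter(): feed the chain only if it
 * actually wants input, and report progress so the caller retries soon. */
static int feed_async_filter(void)
{
    if (filter_needs_input() < 1)
        return 0;                    /* filter is saturated; nothing to do */
    return decode_and_feed();        /* keep the concurrent requests busy */
}

int main(void)
{
    while (!vo_ready()) {            /* waiting for the VO to take a frame */
        int r = feed_async_filter();
        if (r < 0)
            return 1;                /* treat VD_ERROR as fatal here */
        if (r == 0)
            break;                   /* chain is full; go back to waiting */
        printf("fed one more frame to the filter while waiting on the VO\n");
    }
    return 0;
}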
diff --git a/player/core.h b/player/core.h
index a28d2e169f..e23699127d 100644
--- a/player/core.h
+++ b/player/core.h
@@ -403,6 +403,7 @@ int mp_initialize(struct MPContext *mpctx);
struct MPContext *mp_create(void);
void mp_destroy(struct MPContext *mpctx);
void mp_print_version(struct mp_log *log, int always);
+void wakeup_playloop(void *ctx);
// misc.c
double get_start_time(struct MPContext *mpctx);
diff --git a/player/main.c b/player/main.c
index 732be8c1da..48bcab0bef 100644
--- a/player/main.c
+++ b/player/main.c
@@ -378,7 +378,7 @@ struct MPContext *mp_create(void)
return mpctx;
}
-static void wakeup_playloop(void *ctx)
+void wakeup_playloop(void *ctx)
{
struct MPContext *mpctx = ctx;
mp_input_wakeup(mpctx->input);
diff --git a/player/video.c b/player/video.c
index f9a6a1c9a0..2bb96abf5a 100644
--- a/player/video.c
+++ b/player/video.c
@@ -178,6 +178,8 @@ static void recreate_video_filters(struct MPContext *mpctx)
vf_destroy(d_video->vfilter);
d_video->vfilter = vf_new(mpctx->global);
d_video->vfilter->hwdec = d_video->hwdec_info;
+ d_video->vfilter->wakeup_callback = wakeup_playloop;
+ d_video->vfilter->wakeup_callback_ctx = mpctx;
vf_append_filter_list(d_video->vfilter, opts->vf_settings);
@@ -435,7 +437,8 @@ static int video_filter(struct MPContext *mpctx, bool eof)
return VD_ERROR;
// There is already a filtered frame available.
- if (vf_output_frame(vf, eof) > 0)
+ // If vf_needs_input() returns > 0, the filter wants input anyway.
+ if (vf_output_frame(vf, eof) > 0 && vf_needs_input(vf) < 1)
return VD_PROGRESS;
// Decoder output is different from filter input?
@@ -496,6 +499,20 @@ static int video_decode_and_filter(struct MPContext *mpctx)
return r;
}
+static int video_feed_async_filter(struct MPContext *mpctx)
+{
+ struct dec_video *d_video = mpctx->d_video;
+ struct vf_chain *vf = d_video->vfilter;
+
+ if (vf->initialized < 0)
+ return VD_ERROR;
+
+ if (vf_needs_input(vf) < 1)
+ return 0;
+ mpctx->sleeptime = 0; // retry until done
+ return video_decode_and_filter(mpctx);
+}
+
/* Modify video timing to match the audio timeline. There are two main
* reasons this is needed. First, video and audio can start from different
* positions at beginning of file or after a seek (MPlayer starts both
@@ -793,8 +810,12 @@ void write_video(struct MPContext *mpctx, double endpts)
double time_frame = MPMAX(mpctx->time_frame, -1);
int64_t pts = mp_time_us() + (int64_t)(time_frame * 1e6);
- if (!vo_is_ready_for_frame(vo, pts))
- return; // wait until VO wakes us up to get more frames
+ // wait until VO wakes us up to get more frames
+ if (!vo_is_ready_for_frame(vo, pts)) {
+ if (video_feed_async_filter(mpctx) < 0)
+ goto error;
+ return;
+ }
int64_t duration = -1;
double diff = -1;