author     wm4 <wm4@nowhere>    2014-05-01 23:53:18 +0200
committer  wm4 <wm4@nowhere>    2014-05-02 01:09:58 +0200
commit     eb9d2039460e413fe2f4f191ad0ddafde3be27df (patch)
tree       7e6458cad5655d209c5b6c5a3de53e3f302ad160
parent     346daec4038b74f8b2136e9950e34cec50e3d2ec (diff)
video: change everything
Change how the video decoding loop works. The structure should now be a bit easier to follow. The interactions on format changes are (probably) simpler. This also aligns the decoding loop with future planned changes, such as moving various things to separate threads.
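
At a glance, the new update_video() contract and how the playloop consumes it. This is a condensed sketch based on the player/playloop.c and player/video.c hunks below, not code from the commit; last-frame and attached-picture handling are left out:

    /* Sketch only: update_video() now reports a status instead of a frame time.
     *   r < 0   error (video chain could not be initialized)
     *   r == 0  EOF, or a pending format change that still needs draining
     *   r == 1  progress was made, but no new frame is ready yet
     *   r == 2  a new frame is ready and *frame_duration was set
     */
    double frame_time = 0;
    int r = update_video(mpctx, endpts, false, &frame_time);
    if (r == 0 && !mpctx->playing_last_frame) {
        // The previously queued video has finished displaying, so a pending
        // format change may now reconfigure the filters/VO ("unstucks" EOF).
        r = update_video(mpctx, endpts, true, &frame_time);
    }
    if (r < 0) {
        // fatal: uninitialize the video chain
    }
    video_left = r >= 1;
    if (r == 2 && !mpctx->restart_playback) {
        mpctx->time_frame += frame_time / opts->playback_speed;
        adjust_sync(mpctx, frame_time);
    }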
-rw-r--r--  player/core.h               3
-rw-r--r--  player/playloop.c          44
-rw-r--r--  player/video.c            357
-rw-r--r--  video/decode/dec_video.c    2
-rw-r--r--  video/decode/dec_video.h    6
-rw-r--r--  video/out/vo.c             58
-rw-r--r--  video/out/vo.h              7
7 files changed, 257 insertions, 220 deletions
diff --git a/player/core.h b/player/core.h
index bd5f174075..f9b55957db 100644
--- a/player/core.h
+++ b/player/core.h
@@ -466,7 +466,8 @@ void build_cue_timeline(struct MPContext *mpctx);
// video.c
int reinit_video_chain(struct MPContext *mpctx);
int reinit_video_filters(struct MPContext *mpctx);
-double update_video(struct MPContext *mpctx, double endpts);
+int update_video(struct MPContext *mpctx, double endpts, bool reconfig_ok,
+ double *frame_duration);
void mp_force_video_refresh(struct MPContext *mpctx);
void update_fps(struct MPContext *mpctx);
void video_execute_format_change(struct MPContext *mpctx);
diff --git a/player/playloop.c b/player/playloop.c
index 7edd498606..1ab9b9abea 100644
--- a/player/playloop.c
+++ b/player/playloop.c
@@ -983,34 +983,35 @@ void run_playloop(struct MPContext *mpctx)
struct vo *vo = mpctx->video_out;
update_fps(mpctx);
- video_left = vo->hasframe || vo->frame_loaded || mpctx->playing_last_frame;
- if (!vo->frame_loaded && (!mpctx->paused || mpctx->restart_playback)) {
-
- double frame_time = update_video(mpctx, endpts);
- if (frame_time < 0) {
+ int r = 1; // like update_video return value
+ video_left = vo->hasframe || mpctx->playing_last_frame;
+ if (!mpctx->paused || mpctx->restart_playback) {
+ double frame_time = 0;
+ r = update_video(mpctx, endpts, false, &frame_time);
+ MP_VERBOSE(mpctx, "update_video: %d\n", r);
+ if (r == 0) {
if (!mpctx->playing_last_frame && mpctx->last_frame_duration > 0) {
mpctx->time_frame += mpctx->last_frame_duration;
mpctx->last_frame_duration = 0;
mpctx->playing_last_frame = true;
+ MP_VERBOSE(mpctx, "showing last frame\n");
}
if (mpctx->playing_last_frame) {
- frame_time = 0; // don't stop playback yet
- } else if (mpctx->d_video->waiting_decoded_mpi) {
+ r = 1; // don't stop playback yet
+ MP_VERBOSE(mpctx, "still showing last frame\n");
+ } else if (r == 0) {
+ // We just displayed the previous frame, so display the
+ // new frame (if there's one) immediately.
+ mpctx->video_next_pts = MP_NOPTS_VALUE;
// Format changes behave like EOF, and this call "unstucks"
// the EOF condition (after waiting for the previous frame
// to finish displaying).
- video_execute_format_change(mpctx);
- frame_time = update_video(mpctx, endpts);
- // We just displayed the previous frame, so display the
- // new frame immediately.
- if (frame_time > 0)
- frame_time = 0;
+ r = update_video(mpctx, endpts, true, &frame_time);
+ MP_VERBOSE(mpctx, "second update_video: %d\n", r);
}
}
- MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time);
- if (mpctx->d_video->vfilter && mpctx->d_video->vfilter->initialized < 0)
- {
+ if (r < 0) {
MP_FATAL(mpctx, "Could not initialize video chain.\n");
int uninit = INITIALIZED_VCODEC;
if (!opts->force_vo)
@@ -1022,8 +1023,9 @@ void run_playloop(struct MPContext *mpctx)
handle_force_window(mpctx, true);
break;
}
- video_left = frame_time >= 0;
- if (video_left && !mpctx->restart_playback) {
+ MP_VERBOSE(mpctx, "frametime=%5.3f\n", frame_time);
+ video_left = r >= 1;
+ if (r == 2 && !mpctx->restart_playback) {
mpctx->time_frame += frame_time / opts->playback_speed;
adjust_sync(mpctx, frame_time);
}
@@ -1040,7 +1042,7 @@ void run_playloop(struct MPContext *mpctx)
if (!video_left || (mpctx->paused && !mpctx->restart_playback))
break;
- if (!vo->frame_loaded && !mpctx->playing_last_frame) {
+ if (r != 2 && !mpctx->playing_last_frame) {
sleeptime = 0;
break;
}
@@ -1088,7 +1090,7 @@ void run_playloop(struct MPContext *mpctx)
mpctx->playing_last_frame = false;
// last frame case (don't set video_left - consider format changes)
- if (!vo->frame_loaded)
+ if (r != 2)
break;
//=================== FLIP PAGE (VIDEO BLT): ======================
@@ -1121,7 +1123,7 @@ void run_playloop(struct MPContext *mpctx)
double time_frame = MPMAX(mpctx->time_frame, -1);
int64_t pts_us = mpctx->last_time + time_frame * 1e6;
int duration = -1;
- double pts2 = vo->next_pts2;
+ double pts2 = vo_get_next_pts(vo, 0); // this is the next frame PTS
if (mpctx->video_pts != MP_NOPTS_VALUE && pts2 == MP_NOPTS_VALUE) {
// Make up a frame duration. Using the frame rate is not a good
// choice, since the frame rate could be unset/broken/random.
diff --git a/player/video.c b/player/video.c
index 7679348243..e84befe1f7 100644
--- a/player/video.c
+++ b/player/video.c
@@ -83,14 +83,15 @@ static int try_filter(struct MPContext *mpctx, struct mp_image_params params,
return 0;
}
-static void reconfig_video(struct MPContext *mpctx,
- struct mp_image_params params,
- bool probe_only)
+// Reconfigure the filter chain according to decoder output.
+// probe_only: don't force fallback to software when doing hw decoding, and
+// the filter chain couldn't be configured
+static void filter_reconfig(struct MPContext *mpctx,
+ bool probe_only)
{
- struct MPOpts *opts = mpctx->opts;
struct dec_video *d_video = mpctx->d_video;
- d_video->decoder_output = params;
+ struct mp_image_params params = d_video->decoder_output;
set_allowed_vo_formats(d_video->vfilter, mpctx->video_out);
@@ -100,13 +101,16 @@ static void reconfig_video(struct MPContext *mpctx,
if (video_reconfig_filters(d_video, &params) < 0) {
// Most video filters don't work with hardware decoding, so this
- // might be the reason filter reconfig failed.
+ // might be the reason why filter reconfig failed.
if (!probe_only &&
video_vd_control(d_video, VDCTRL_FORCE_HWDEC_FALLBACK, NULL) == CONTROL_OK)
{
// Fallback active; decoder will return software format next
// time. Don't abort video decoding.
d_video->vfilter->initialized = 0;
+ mp_image_unrefp(&d_video->waiting_decoded_mpi);
+ d_video->decoder_output = (struct mp_image_params){0};
+ MP_VERBOSE(mpctx, "hwdec falback due to filters.\n");
}
return;
}
@@ -127,32 +131,6 @@ static void reconfig_video(struct MPContext *mpctx,
}
}
}
-
- if (d_video->vfilter->initialized < 1)
- return;
-
- struct mp_image_params p = d_video->vfilter->output_params;
- const struct vo_driver *info = mpctx->video_out->driver;
- MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n",
- info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt));
- MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);
-
- int r = vo_reconfig(mpctx->video_out, &p, 0);
- if (r < 0)
- d_video->vfilter->initialized = -1;
-
- if (r >= 0) {
- if (opts->gamma_gamma != 1000)
- video_set_colors(d_video, "gamma", opts->gamma_gamma);
- if (opts->gamma_brightness != 1000)
- video_set_colors(d_video, "brightness", opts->gamma_brightness);
- if (opts->gamma_contrast != 1000)
- video_set_colors(d_video, "contrast", opts->gamma_contrast);
- if (opts->gamma_saturation != 1000)
- video_set_colors(d_video, "saturation", opts->gamma_saturation);
- if (opts->gamma_hue != 1000)
- video_set_colors(d_video, "hue", opts->gamma_hue);
- }
}
static void recreate_video_filters(struct MPContext *mpctx)
@@ -186,7 +164,7 @@ int reinit_video_filters(struct MPContext *mpctx)
recreate_video_filters(mpctx);
if (need_reconfig)
- reconfig_video(mpctx, d_video->decoder_output, true);
+ filter_reconfig(mpctx, true);
if (!d_video->vfilter)
return 0;
@@ -301,30 +279,63 @@ void mp_force_video_refresh(struct MPContext *mpctx)
queue_seek(mpctx, MPSEEK_ABSOLUTE, mpctx->last_vo_pts, 1, true);
}
-static bool filter_output_queued_frame(struct MPContext *mpctx, bool eof)
+static int check_framedrop(struct MPContext *mpctx, double frame_time)
+{
+ struct MPOpts *opts = mpctx->opts;
+ struct track *t_audio = mpctx->current_track[0][STREAM_AUDIO];
+ struct sh_stream *sh_audio = t_audio ? t_audio->stream : NULL;
+ // check for frame-drop:
+ if (mpctx->d_audio && !ao_untimed(mpctx->ao) && sh_audio &&
+ !demux_stream_eof(sh_audio))
+ {
+ float delay = opts->playback_speed * ao_get_delay(mpctx->ao);
+ float d = delay - mpctx->delay;
+ float fps = mpctx->d_video->fps;
+ if (frame_time < 0)
+ frame_time = fps > 0 ? 1.0 / fps : 0;
+ // we should avoid dropping too many frames in sequence unless we
+ // are too late. and we allow 100ms A-V delay here:
+ if (d < -mpctx->dropped_frames * frame_time - 0.100 && !mpctx->paused
+ && !mpctx->restart_playback) {
+ mpctx->drop_frame_cnt++;
+ mpctx->dropped_frames++;
+ return mpctx->opts->frame_dropping;
+ } else
+ mpctx->dropped_frames = 0;
+ }
+ return 0;
+}
+
+// Read a packet, store decoded image into d_video->waiting_decoded_mpi
+// Return 0 if EOF was reached (though the decoder still can have frames buffered)
+static int decode_image(struct MPContext *mpctx)
{
struct dec_video *d_video = mpctx->d_video;
- struct vo *video_out = mpctx->video_out;
- struct mp_image *img = vf_output_queued_frame(d_video->vfilter, eof);
- if (img)
- vo_queue_image(video_out, img);
- talloc_free(img);
+ if (d_video->header->attached_picture) {
+ d_video->waiting_decoded_mpi =
+ video_decode(d_video, d_video->header->attached_picture, 0);
+ return 1;
+ }
- return !!img;
-}
+ struct demux_packet *pkt = demux_read_packet(d_video->header);
+ if (pkt && pkt->pts != MP_NOPTS_VALUE)
+ pkt->pts += mpctx->video_offset;
+ if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
+ d_video->has_broken_packet_pts)
+ {
+ mpctx->hrseek_framedrop = false;
+ }
+ int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
+ 1 : check_framedrop(mpctx, -1);
+ d_video->waiting_decoded_mpi =
+ video_decode(d_video, pkt, framedrop_type);
+ talloc_free(pkt);
-static bool load_next_vo_frame(struct MPContext *mpctx, bool eof)
-{
- if (vo_get_buffered_frame(mpctx->video_out, false) >= 0)
- return true;
- if (filter_output_queued_frame(mpctx, eof))
- return true;
- if (eof && vo_get_buffered_frame(mpctx->video_out, true) >= 0)
- return true;
- return false;
+ return !!pkt;
}
+
// Called after video reinit. This can be generally used to try to insert more
// filters using the filter chain edit functionality in command.c.
static void init_filter_params(struct MPContext *mpctx)
@@ -339,143 +350,172 @@ static void init_filter_params(struct MPContext *mpctx)
mp_property_do("deinterlace", M_PROPERTY_SET, &opts->deinterlace, mpctx);
}
-static void filter_video(struct MPContext *mpctx, struct mp_image *frame,
- bool reconfig_ok)
+// Make sure at least 1 filtered image is available.
+// Returns: -1: error, 0: EOF, 1: ok or progress was made
+// A return value of 1 doesn't necessarily output a frame, but makes the promise
+// that calling this function again will eventually do something.
+static int video_decode_and_filter(struct MPContext *mpctx)
{
struct dec_video *d_video = mpctx->d_video;
+ struct vf_chain *vf = d_video->vfilter;
- struct mp_image_params params;
- mp_image_params_from_image(&params, frame);
- if (!mp_image_params_equals(&d_video->decoder_output, &params) ||
- d_video->vfilter->initialized < 1)
- {
- // In case we want to wait until filter chain is drained
- if (!reconfig_ok) {
- talloc_free(d_video->waiting_decoded_mpi);
- d_video->waiting_decoded_mpi = frame;
- return;
- }
+ if (vf->initialized < 0)
+ return -1;
- reconfig_video(mpctx, params, false);
- if (d_video->vfilter->initialized > 0)
- init_filter_params(mpctx);
+ // There is already a filtered frame available.
+ if (vf_output_frame(vf, false) > 0)
+ return 1;
+
+ // Decoder output is different from filter input?
+ bool need_vf_reconfig = !vf->input_params.imgfmt || vf->initialized < 1 ||
+ !mp_image_params_equals(&d_video->decoder_output, &vf->input_params);
+
+ // (If imgfmt==0, nothing was decoded yet, and the format is unknown.)
+ if (need_vf_reconfig && d_video->decoder_output.imgfmt) {
+ // Drain the filter chain.
+ if (vf_output_frame(vf, true) > 0)
+ return 1;
+
+ // The filter chain is drained; execute the filter format change.
+ filter_reconfig(mpctx, false);
+ if (vf->initialized == 0)
+ return 1; // hw decoding fallback; try again
+ if (vf->initialized < 1)
+ return -1;
+ init_filter_params(mpctx);
+ return 1;
}
- if (d_video->vfilter->initialized < 1) {
- talloc_free(frame);
- return;
+ // If something was decoded, and the filter chain is ready, filter it.
+ if (!need_vf_reconfig && d_video->waiting_decoded_mpi) {
+ vf_filter_frame(vf, d_video->waiting_decoded_mpi);
+ d_video->waiting_decoded_mpi = NULL;
+ return 1;
}
- vf_filter_frame(d_video->vfilter, frame);
- filter_output_queued_frame(mpctx, false);
-}
+ if (!d_video->waiting_decoded_mpi) {
+ // Decode a new image, or at least feed the decoder a packet.
+ int r = decode_image(mpctx);
+ if (d_video->waiting_decoded_mpi)
+ d_video->decoder_output = d_video->waiting_decoded_mpi->params;
+ if (!d_video->waiting_decoded_mpi && r < 1)
+ return 0; // true EOF
+ }
-// Reconfigure the video chain and the VO on a format change. This is separate,
-// because we wait with the reconfig until the currently buffered video has
-// finished displaying. Otherwise, we'd resize the window and then wait for the
-// video finishing, which would result in a black window for that frame.
-// Does nothing if there was no pending change.
-void video_execute_format_change(struct MPContext *mpctx)
-{
- struct dec_video *d_video = mpctx->d_video;
- struct mp_image *decoded_frame = d_video->waiting_decoded_mpi;
- d_video->waiting_decoded_mpi = NULL;
- if (decoded_frame)
- filter_video(mpctx, decoded_frame, true);
+ // Image will be filtered on the next iteration.
+ return 1;
}
-static int check_framedrop(struct MPContext *mpctx, double frame_time)
+static void init_vo(struct MPContext *mpctx)
{
struct MPOpts *opts = mpctx->opts;
- struct track *t_audio = mpctx->current_track[0][STREAM_AUDIO];
- struct sh_stream *sh_audio = t_audio ? t_audio->stream : NULL;
- // check for frame-drop:
- if (mpctx->d_audio && !ao_untimed(mpctx->ao) && sh_audio &&
- !demux_stream_eof(sh_audio))
- {
- float delay = opts->playback_speed * ao_get_delay(mpctx->ao);
- float d = delay - mpctx->delay;
- float fps = mpctx->d_video->fps;
- if (frame_time < 0)
- frame_time = fps > 0 ? 1.0 / fps : 0;
- // we should avoid dropping too many frames in sequence unless we
- // are too late. and we allow 100ms A-V delay here:
- if (d < -mpctx->dropped_frames * frame_time - 0.100 && !mpctx->paused
- && !mpctx->restart_playback) {
- mpctx->drop_frame_cnt++;
- mpctx->dropped_frames++;
- return mpctx->opts->frame_dropping;
- } else
- mpctx->dropped_frames = 0;
- }
- return 0;
+ struct dec_video *d_video = mpctx->d_video;
+
+ if (opts->gamma_gamma != 1000)
+ video_set_colors(d_video, "gamma", opts->gamma_gamma);
+ if (opts->gamma_brightness != 1000)
+ video_set_colors(d_video, "brightness", opts->gamma_brightness);
+ if (opts->gamma_contrast != 1000)
+ video_set_colors(d_video, "contrast", opts->gamma_contrast);
+ if (opts->gamma_saturation != 1000)
+ video_set_colors(d_video, "saturation", opts->gamma_saturation);
+ if (opts->gamma_hue != 1000)
+ video_set_colors(d_video, "hue", opts->gamma_hue);
}
-static double update_video_attached_pic(struct MPContext *mpctx)
+// Fill the VO buffer with a newly filtered or decoded image.
+// Returns: -1: error, 0: EOF, 1: ok or progress was made
+static int video_output_image(struct MPContext *mpctx, bool reconfig_ok)
{
- struct dec_video *d_video = mpctx->d_video;
+ struct vf_chain *vf = mpctx->d_video->vfilter;
+ struct vo *vo = mpctx->video_out;
- // Try to decode the picture multiple times, until it is displayed.
- if (mpctx->video_out->hasframe)
- return -1;
+ // Already enough video buffered in VO?
+ // (This implies vo_has_next_frame(vo, false/true) returns true.)
+ if (!vo_needs_new_image(vo) && vo->params)
+ return 1;
- struct mp_image *decoded_frame =
- video_decode(d_video, d_video->header->attached_picture, 0);
- if (decoded_frame)
- filter_video(mpctx, decoded_frame, true);
- load_next_vo_frame(mpctx, true);
- mpctx->video_next_pts = MP_NOPTS_VALUE;
- return 0;
-}
+ // Filter a new frame.
+ int r = video_decode_and_filter(mpctx);
+ if (r < 0)
+ return r; // error
-double update_video(struct MPContext *mpctx, double endpts)
-{
- struct dec_video *d_video = mpctx->d_video;
- struct vo *video_out = mpctx->video_out;
+ // Filter output is different from VO input?
+ bool need_vo_reconfig = !vo->params ||
+ !mp_image_params_equals(&vf->output_params, vo->params);
+
+ if (need_vo_reconfig) {
+ // Draining VO buffers.
+ if (vo_has_next_frame(vo, true))
+ return 0; // EOF so that caller displays remaining VO frames
+
+ // There was no decoded image yet - must not signal fake EOF.
+ if (!vf->output_params.imgfmt)
+ return r;
- if (d_video->header->attached_picture)
- return update_video_attached_pic(mpctx);
+ // Force draining.
+ if (!reconfig_ok)
+ return 0;
- if (load_next_vo_frame(mpctx, false)) {
- // Use currently queued VO frame
- } else if (d_video->waiting_decoded_mpi) {
- // Draining on reconfig
- if (!load_next_vo_frame(mpctx, true))
+ struct mp_image_params p = vf->output_params;
+
+ const struct vo_driver *info = mpctx->video_out->driver;
+ MP_INFO(mpctx, "VO: [%s] %dx%d => %dx%d %s\n",
+ info->name, p.w, p.h, p.d_w, p.d_h, vo_format_name(p.imgfmt));
+ MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);
+
+ r = vo_reconfig(vo, &p, 0);
+ if (r < 0) {
+ vf->initialized = -1;
return -1;
- } else {
- // Decode a new frame
- struct demux_packet *pkt = demux_read_packet(d_video->header);
- if (pkt && pkt->pts != MP_NOPTS_VALUE)
- pkt->pts += mpctx->video_offset;
- if ((pkt && pkt->pts >= mpctx->hrseek_pts - .005) ||
- d_video->has_broken_packet_pts)
- {
- mpctx->hrseek_framedrop = false;
- }
- int framedrop_type = mpctx->hrseek_active && mpctx->hrseek_framedrop ?
- 1 : check_framedrop(mpctx, -1);
- struct mp_image *decoded_frame =
- video_decode(d_video, pkt, framedrop_type);
- talloc_free(pkt);
- if (decoded_frame) {
- filter_video(mpctx, decoded_frame, false);
- } else if (!pkt) {
- if (!load_next_vo_frame(mpctx, true))
- return -1;
}
+ init_vo(mpctx);
+ return 1;
}
- // Whether the VO has an image queued.
- // If it does, it will be used to time and display the next frame.
- if (!video_out->frame_loaded)
- return 0;
+ // Queue new frame, if there's one.
+ struct mp_image *img = vf_read_output_frame(vf);
+ if (img) {
+ vo_queue_image(vo, img);
+ talloc_free(img);
+ return 1;
+ }
- double pts = video_out->next_pts;
+ return r; // includes the true EOF case
+}
+
+// returns: <0 on error, 0: eof, 1: progress, but no output, 2: new frame
+int update_video(struct MPContext *mpctx, double endpts, bool reconfig_ok,
+ double *frame_duration)
+{
+ struct vo *video_out = mpctx->video_out;
+
+ if (mpctx->d_video->header->attached_picture) {
+ if (video_out->hasframe || vo_has_next_frame(video_out, true))
+ return 0;
+ }
+
+ int r = video_output_image(mpctx, reconfig_ok);
+ if (r < 0)
+ return r;
+
+ // On EOF (r==0), we always drain the VO; otherwise we must ensure that
+ // the VO will have enough frames buffered (matters especially for VO based
+ // frame dropping).
+ if (!vo_has_next_frame(video_out, !r))
+ return !!r;
+
+ if (mpctx->d_video->header->attached_picture) {
+ mpctx->video_next_pts = MP_NOPTS_VALUE;
+ return 2;
+ }
+
+ double pts = vo_get_next_pts(video_out, 0);
if (endpts == MP_NOPTS_VALUE || pts < endpts)
add_frame_pts(mpctx, pts);
if (mpctx->hrseek_active && pts < mpctx->hrseek_pts - .005) {
vo_skip_frame(video_out);
- return 0;
+ return 1;
}
mpctx->hrseek_active = false;
double last_pts = mpctx->video_next_pts;
@@ -490,5 +530,6 @@ double update_video(struct MPContext *mpctx, double endpts)
mpctx->video_next_pts = pts;
if (mpctx->d_audio)
mpctx->delay -= frame_time;
- return frame_time;
+ *frame_duration = frame_time;
+ return 2;
}
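
For reference, the status codes the new player/video.c helpers pass around, written out once. Purely illustrative; the commit itself keeps them as plain int values:

    /* Illustrative only -- not an enum in the commit. */
    enum video_status {
        VIDEO_ERROR     = -1, // filter chain or VO could not be (re)configured
        VIDEO_EOF       =  0, // true EOF, or the VO must drain before a reconfig
        VIDEO_PROGRESS  =  1, // something happened; calling again will
                              // eventually produce output
        VIDEO_NEW_FRAME =  2, // update_video() only: a frame was queued and
                              // timed, *frame_duration is valid
    };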
diff --git a/video/decode/dec_video.c b/video/decode/dec_video.c
index b59ded7ac3..f092e82a2e 100644
--- a/video/decode/dec_video.c
+++ b/video/decode/dec_video.c
@@ -436,8 +436,6 @@ int video_reconfig_filters(struct dec_video *d_video,
return -1;
}
- d_video->vf_input = *params;
-
return 0;
}
diff --git a/video/decode/dec_video.h b/video/decode/dec_video.h
index 7805d89c48..58ec99a86b 100644
--- a/video/decode/dec_video.h
+++ b/video/decode/dec_video.h
@@ -40,11 +40,9 @@ struct dec_video {
char *decoder_desc;
- struct mp_image_params decoder_output; // last output of the decoder
- struct mp_image_params vf_input; // video filter input params
-
- // Used temporarily during format changes
+ // Used temporarily during decoding (important for format changes)
struct mp_image *waiting_decoded_mpi;
+ struct mp_image_params decoder_output; // last output of the decoder
void *priv; // for free use by vd_driver
diff --git a/video/out/vo.c b/video/out/vo.c
index 2372ce36b0..a13922534e 100644
--- a/video/out/vo.c
+++ b/video/out/vo.c
@@ -174,8 +174,6 @@ static struct vo *vo_create(struct mpv_global *global,
.event_fd = -1,
.monitor_par = 1,
.max_video_queue = 1,
- .next_pts = MP_NOPTS_VALUE,
- .next_pts2 = MP_NOPTS_VALUE,
};
talloc_steal(vo, log);
if (vo->driver->encode != !!vo->encode_lavc_ctx)
@@ -358,27 +356,11 @@ int vo_control(struct vo *vo, uint32_t request, void *data)
return vo->driver->control(vo, request, data);
}
-static void update_video_queue_state(struct vo *vo, bool eof)
-{
- int num = vo->num_video_queue;
- // Normally, buffer 1 image ahead, except if the queue is limited to less
- // than 2 entries, or if EOF is reached and there aren't enough images left.
- int min = 2;
- if (vo->max_video_queue < 2 || (vo->num_video_queue < 2 && eof))
- min = 1;
- vo->frame_loaded = num >= min;
- if (!vo->frame_loaded)
- num = -1;
- vo->next_pts = num > 0 ? vo->video_queue[0]->pts : MP_NOPTS_VALUE;
- vo->next_pts2 = num > 1 ? vo->video_queue[1]->pts : MP_NOPTS_VALUE;
-}
-
static void forget_frames(struct vo *vo)
{
for (int n = 0; n < vo->num_video_queue; n++)
talloc_free(vo->video_queue[n]);
vo->num_video_queue = 0;
- update_video_queue_state(vo, false);
}
void vo_queue_image(struct vo *vo, struct mp_image *mpi)
@@ -397,7 +379,34 @@ void vo_queue_image(struct vo *vo, struct mp_image *mpi)
assert(vo->max_video_queue <= VO_MAX_QUEUE);
assert(vo->num_video_queue < vo->max_video_queue);
vo->video_queue[vo->num_video_queue++] = mpi;
- update_video_queue_state(vo, false);
+}
+
+// Return whether vo_queue_image() should be called.
+bool vo_needs_new_image(struct vo *vo)
+{
+ if (!vo->config_ok)
+ return false;
+ return vo->num_video_queue < vo->max_video_queue;
+}
+
+// Return whether a frame can be displayed.
+// eof==true: return true if at least one frame is queued
+// eof==false: return true if "enough" frames are queued
+bool vo_has_next_frame(struct vo *vo, bool eof)
+{
+ if (!vo->config_ok)
+ return false;
+ // Normally, buffer 1 image ahead, except if the queue is limited to less
+ // than 2 entries, or if EOF is reached and there aren't enough images left.
+ return eof ? vo->num_video_queue : vo->num_video_queue == vo->max_video_queue;
+}
+
+// Return the PTS of a future frame (where index==0 is the next frame)
+double vo_get_next_pts(struct vo *vo, int index)
+{
+ if (index < 0 || index >= vo->num_video_queue)
+ return MP_NOPTS_VALUE;
+ return vo->video_queue[index]->pts;
}
int vo_redraw_frame(struct vo *vo)
@@ -418,14 +427,6 @@ bool vo_get_want_redraw(struct vo *vo)
return vo->want_redraw;
}
-int vo_get_buffered_frame(struct vo *vo, bool eof)
-{
- if (!vo->config_ok)
- return -1;
- update_video_queue_state(vo, eof);
- return vo->frame_loaded ? 0 : -1;
-}
-
// Remove vo->video_queue[0]
static void shift_queue(struct vo *vo)
{
@@ -440,12 +441,10 @@ static void shift_queue(struct vo *vo)
void vo_skip_frame(struct vo *vo)
{
shift_queue(vo);
- vo->frame_loaded = false;
}
void vo_new_frame_imminent(struct vo *vo)
{
- assert(vo->frame_loaded);
assert(vo->num_video_queue > 0);
vo->driver->draw_image(vo, vo->video_queue[0]);
shift_queue(vo);
@@ -467,7 +466,6 @@ void vo_flip_page(struct vo *vo, int64_t pts_us, int duration)
else
vo->driver->flip_page(vo);
vo->hasframe = true;
- update_video_queue_state(vo, false);
}
void vo_check_events(struct vo *vo)
diff --git a/video/out/vo.h b/video/out/vo.h
index e3ab562839..77382d9f38 100644
--- a/video/out/vo.h
+++ b/video/out/vo.h
@@ -237,9 +237,6 @@ struct vo {
bool untimed; // non-interactive, don't do sleep calls in playloop
- bool frame_loaded; // Is there a next frame the VO could flip to?
- double next_pts; // pts value of the next frame if any
- double next_pts2; // optional pts of frame after that
bool want_redraw; // visible frame wrong (window resize), needs refresh
bool hasframe; // >= 1 frame has been drawn, so redraw is possible
double wakeup_period; // if > 0, this sets the maximum wakeup period for event polling
@@ -288,7 +285,9 @@ int vo_control(struct vo *vo, uint32_t request, void *data);
void vo_queue_image(struct vo *vo, struct mp_image *mpi);
int vo_redraw_frame(struct vo *vo);
bool vo_get_want_redraw(struct vo *vo);
-int vo_get_buffered_frame(struct vo *vo, bool eof);
+bool vo_has_next_frame(struct vo *vo, bool eof);
+double vo_get_next_pts(struct vo *vo, int index);
+bool vo_needs_new_image(struct vo *vo);
void vo_skip_frame(struct vo *vo);
void vo_new_frame_imminent(struct vo *vo);
void vo_draw_osd(struct vo *vo, struct osd_state *osd);
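
The frame_loaded/next_pts fields are gone; callers now query the VO frame queue instead. A minimal usage sketch of the new API declared above (the real consumers are player/video.c and the playloop; the helper name is made up for the example):

    /* Hypothetical helper, for illustration of the new vo.h API only. */
    static void show_one_frame(struct vo *vo, struct mp_image *mpi,
                               int64_t pts_us, int duration)
    {
        if (vo_needs_new_image(vo))      // VO configured and queue not full
            vo_queue_image(vo, mpi);     // VO refs the image; caller keeps mpi

        // eof == false: true only once "enough" frames are buffered, which
        // lets the caller look at upcoming PTS values for frame timing.
        if (vo_has_next_frame(vo, false)) {
            double pts = vo_get_next_pts(vo, 0); // PTS of video_queue[0]
            (void)pts;
            vo_new_frame_imminent(vo);   // draw_image() and shift the queue
            vo_flip_page(vo, pts_us, duration);
        }
    }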