Diffstat (limited to 'player/video.c')
-rw-r--r--  player/video.c  268
1 file changed, 174 insertions, 94 deletions
diff --git a/player/video.c b/player/video.c
index d8b7728fe8..f0372b69a5 100644
--- a/player/video.c
+++ b/player/video.c
@@ -21,7 +21,6 @@
#include <math.h>
#include <assert.h>
-#include "config.h"
#include "mpv_talloc.h"
#include "common/msg.h"
@@ -60,6 +59,7 @@ static const char av_desync_help_text[] =
"Audio/Video desynchronisation detected! Possible reasons include too slow\n"
"hardware, temporary CPU spikes, broken drivers, and broken files. Audio\n"
"position will not match to the video (see A-V status field).\n"
+"Consider trying `--profile=fast` and/or `--hwdec=auto-safe` as they may help.\n"
"\n";
static bool recreate_video_filters(struct MPContext *mpctx)
@@ -101,7 +101,7 @@ void reset_video_state(struct MPContext *mpctx)
vo_chain_reset_state(mpctx->vo_chain);
struct track *t = mpctx->vo_chain->track;
if (t && t->dec)
- t->dec->play_dir = mpctx->play_dir;
+ mp_decoder_wrapper_set_play_dir(t->dec, mpctx->play_dir);
}
for (int n = 0; n < mpctx->num_next_frames; n++)
@@ -119,6 +119,8 @@ void reset_video_state(struct MPContext *mpctx)
mpctx->mistimed_frames_total = 0;
mpctx->drop_message_shown = 0;
mpctx->display_sync_drift_dir = 0;
+ mpctx->display_sync_error = 0;
+ mpctx->display_sync_active = 0;
mpctx->video_status = mpctx->vo_chain ? STATUS_SYNCING : STATUS_EOF;
}
@@ -128,9 +130,9 @@ void uninit_video_out(struct MPContext *mpctx)
uninit_video_chain(mpctx);
if (mpctx->video_out) {
vo_destroy(mpctx->video_out);
+ mpctx->video_out = NULL;
mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL);
}
- mpctx->video_out = NULL;
}
static void vo_chain_uninit(struct vo_chain *vo_c)
@@ -176,9 +178,9 @@ int init_video_decoder(struct MPContext *mpctx, struct track *track)
// If possible, set this as parent so the decoder gets the hwdec and DR
// interfaces.
- // Note: at least mpv_opengl_cb_uninit_gl() relies on being able to get
- // rid of all references to the VO by destroying the VO chain. Thus,
- // decoders not linked to vo_chain must not use the hwdec context.
+ // Note: We rely on being able to get rid of all references to the VO by
+ // destroying the VO chain. Thus, decoders not linked to vo_chain
+ // must not use the hwdec context.
if (track->vo_c)
parent = track->vo_c->filter->f;
@@ -259,9 +261,13 @@ void reinit_video_chain_src(struct MPContext *mpctx, struct track *track)
goto err_out;
vo_c->dec_src = track->dec->f->pins[0];
- vo_c->filter->container_fps = track->dec->fps;
- vo_c->is_coverart = !!track->stream->attached_picture;
- vo_c->is_sparse = track->stream->still_image;
+ vo_c->filter->container_fps =
+ mp_decoder_wrapper_get_container_fps(track->dec);
+ vo_c->is_coverart = !!track->attached_picture;
+ vo_c->is_sparse = track->stream->still_image || vo_c->is_coverart;
+
+ if (vo_c->is_coverart)
+ mp_decoder_wrapper_set_coverart_flag(track->dec, true);
track->vo_c = vo_c;
vo_c->track = track;
@@ -272,16 +278,13 @@ void reinit_video_chain_src(struct MPContext *mpctx, struct track *track)
if (!recreate_video_filters(mpctx))
goto err_out;
+ update_content_type(mpctx, track);
update_screensaver_state(mpctx);
- vo_set_paused(vo_c->vo, mpctx->paused);
-
- // If we switch on video again, ensure audio position matches up.
- if (mpctx->ao_chain)
- mpctx->audio_status = STATUS_SYNCING;
+ vo_set_paused(vo_c->vo, get_internal_paused(mpctx));
reset_video_state(mpctx);
- reset_subtitle_state(mpctx);
+ term_osd_set_subs(mpctx, NULL);
return;
@@ -323,8 +326,8 @@ static void check_framedrop(struct MPContext *mpctx, struct vo_chain *vo_c)
return;
double frame_time = 1.0 / fps;
// try to drop as many frames as we appear to be behind
- vo_c->track->dec->attempt_framedrops =
- MPCLAMP((mpctx->last_av_difference - 0.010) / frame_time, 0, 100);
+ mp_decoder_wrapper_set_frame_drops(vo_c->track->dec,
+ MPCLAMP((mpctx->last_av_difference - 0.010) / frame_time, 0, 100));
}
}
@@ -384,8 +387,9 @@ static void handle_new_frame(struct MPContext *mpctx)
frame_time = 0;
}
}
- mpctx->delay -= frame_time;
mpctx->time_frame += frame_time / mpctx->video_speed;
+ if (mpctx->ao_chain && mpctx->ao_chain->audio_started)
+ mpctx->delay -= frame_time;
if (mpctx->video_status >= STATUS_PLAYING)
adjust_sync(mpctx, pts, frame_time);
MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time);
@@ -402,23 +406,28 @@ static void shift_frames(struct MPContext *mpctx)
mpctx->num_next_frames -= 1;
}
+static bool use_video_lookahead(struct MPContext *mpctx)
+{
+ return mpctx->video_out &&
+ !(mpctx->video_out->driver->caps & VO_CAP_NORETAIN) &&
+ !(mpctx->opts->untimed || mpctx->video_out->driver->untimed) &&
+ !mpctx->opts->video_latency_hacks;
+}
+
static int get_req_frames(struct MPContext *mpctx, bool eof)
{
// On EOF, drain all frames.
if (eof)
return 1;
- if (mpctx->video_out->driver->caps & VO_CAP_NORETAIN)
+ if (!use_video_lookahead(mpctx))
return 1;
if (mpctx->vo_chain && mpctx->vo_chain->is_sparse)
return 1;
- if (mpctx->opts->untimed || mpctx->video_out->driver->untimed)
- return 1;
-
// Normally require at least 2 frames, so we can compute a frame duration.
- int min = mpctx->opts->video_latency_hacks ? 1 : 2;
+ int min = 2;
// On the first frame, output a new frame as quickly as possible.
if (mpctx->video_pts == MP_NOPTS_VALUE)
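(The two-frame minimum exists because the next frame's duration is estimated from the queue itself, roughly next_frames[1]->pts - next_frames[0]->pts. When lookahead is off, i.e. VO_CAP_NORETAIN, --untimed or untimed VOs, or --video-latency-hacks, all now consolidated in use_video_lookahead(), get_req_frames() falls back to a single frame and accepts the less accurate timing.)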
@@ -452,6 +461,7 @@ static bool have_new_frame(struct MPContext *mpctx, bool eof)
}
// Fill mpctx->next_frames[] with a newly filtered or decoded image.
+// logical_eof: is set to true if there is EOF after currently queued frames
// returns VD_* code
static int video_output_image(struct MPContext *mpctx, bool *logical_eof)
{
@@ -471,6 +481,7 @@ static int video_output_image(struct MPContext *mpctx, bool *logical_eof)
}
if (vo_c->is_coverart) {
+ *logical_eof = true;
if (vo_has_frame(mpctx->video_out))
return VD_EOF;
hrseek = false;
@@ -522,6 +533,7 @@ static int video_output_image(struct MPContext *mpctx, bool *logical_eof)
}
mpctx->hrseek_backstep = false;
}
+ mp_image_unrefp(&mpctx->saved_frame);
add_new_frame(mpctx, img);
img = NULL;
}
@@ -529,11 +541,14 @@ static int video_output_image(struct MPContext *mpctx, bool *logical_eof)
}
}
- // If hr-seek went past EOF, use the last frame.
- if (r <= 0 && hrseek && mpctx->saved_frame && r == VD_EOF) {
- add_new_frame(mpctx, mpctx->saved_frame);
+ if (!hrseek)
+ mp_image_unrefp(&mpctx->saved_frame);
+
+ if (r == VD_EOF) {
+ // If hr-seek went past EOF, use the last frame.
+ if (mpctx->saved_frame)
+ add_new_frame(mpctx, mpctx->saved_frame);
mpctx->saved_frame = NULL;
- r = VD_EOF;
*logical_eof = true;
}
@@ -544,7 +559,7 @@ static bool check_for_hwdec_fallback(struct MPContext *mpctx)
{
struct vo_chain *vo_c = mpctx->vo_chain;
- if (!vo_c->filter->failed_output_conversion || !vo_c->track)
+ if (!vo_c->filter->failed_output_conversion || !vo_c->track || !vo_c->track->dec)
return false;
if (mp_decoder_wrapper_control(vo_c->track->dec,
@@ -555,6 +570,20 @@ static bool check_for_hwdec_fallback(struct MPContext *mpctx)
return true;
}
+static bool check_for_forced_eof(struct MPContext *mpctx)
+{
+ struct vo_chain *vo_c = mpctx->vo_chain;
+
+ if (!vo_c->track || !vo_c->track->dec)
+ return false;
+
+ struct mp_decoder_wrapper *dec = vo_c->track->dec;
+ bool forced_eof = false;
+
+ mp_decoder_wrapper_control(dec, VDCTRL_CHECK_FORCED_EOF, &forced_eof);
+ return forced_eof;
+}
+
/* Update avsync before a new video frame is displayed. Actually, this can be
* called arbitrarily often before the actual display.
* This adjusts the time of the next video frame */
@@ -565,7 +594,7 @@ static void update_avsync_before_frame(struct MPContext *mpctx)
if (mpctx->video_status < STATUS_READY) {
mpctx->time_frame = 0;
- } else if (mpctx->display_sync_active || opts->video_sync == VS_NONE) {
+ } else if (mpctx->display_sync_active || vo->opts->video_sync == VS_NONE) {
// don't touch the timing
} else if (mpctx->audio_status == STATUS_PLAYING &&
mpctx->video_status == STATUS_PLAYING &&
@@ -580,7 +609,7 @@ static void update_avsync_before_frame(struct MPContext *mpctx)
if (opts->autosync) {
/* Smooth reported playback position from AO by averaging
- * it with the value expected based on previus value and
+ * it with the value expected based on previous value and
* time elapsed since then. May help smooth video timing
* with audio output that have inaccurate position reporting.
* This is badly implemented; the behavior of the smoothing
@@ -617,8 +646,9 @@ static void update_av_diff(struct MPContext *mpctx, double offset)
if (mpctx->vo_chain && mpctx->vo_chain->is_sparse)
return;
- double a_pos = playing_audio_pts(mpctx);
+ double a_pos = written_audio_pts(mpctx);
if (a_pos != MP_NOPTS_VALUE && mpctx->video_pts != MP_NOPTS_VALUE) {
+ a_pos -= mpctx->audio_speed * ao_get_delay(mpctx->ao);
mpctx->last_av_difference = a_pos - mpctx->video_pts
+ opts->audio_delay + offset;
}
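(For context: written_audio_pts() reports the pts up to which audio has been fed to the AO, so subtracting audio_speed * ao_get_delay(), i.e. the audio still buffered in the AO scaled to timeline time, gives roughly the value playing_audio_pts() used to return here; the A-V difference formula below it is unchanged.)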
@@ -647,20 +677,16 @@ double calc_average_frame_duration(struct MPContext *mpctx)
// effective video FPS. If this is not possible, try to do it for multiples,
// which still leads to an improved end result.
// Both parameters are durations in seconds.
-static double calc_best_speed(double vsync, double frame)
+static double calc_best_speed(double vsync, double frame,
+ double max_change, int max_factor)
{
double ratio = frame / vsync;
- double best_scale = -1;
- double best_dev = INFINITY;
- for (int factor = 1; factor <= 5; factor++) {
+ for (int factor = 1; factor <= max_factor; factor++) {
double scale = ratio * factor / rint(ratio * factor);
- double dev = fabs(scale - 1);
- if (dev < best_dev) {
- best_scale = scale;
- best_dev = dev;
- }
+ if (fabs(scale - 1) <= max_change)
+ return scale;
}
- return best_scale;
+ return -1;
}
static double find_best_speed(struct MPContext *mpctx, double vsync)
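To illustrate the new early-exit search, here is a standalone sketch (not part of the patch; best_speed() is a hypothetical copy of the helper above, and the 1% tolerance and factor limit of 5 are assumed defaults for the sync_max_video_change and sync_max_factor options used below):

    // Sketch of the factor search in calc_best_speed() above.
    // Example: 23.976 fps content on a 60 Hz display gives ratio ~= 2.5025.
    // factor 1 misses the tolerance (2.5025/rint(2.5025) ~= 0.834), but
    // factor 2 gives 5.005/rint(5.005) = 1.001: speeding the video up by
    // 0.1% makes exactly 2 frames fit into 5 vsyncs.
    #include <math.h>
    #include <stdio.h>

    static double best_speed(double vsync, double frame,
                             double max_change, int max_factor)
    {
        double ratio = frame / vsync;
        for (int factor = 1; factor <= max_factor; factor++) {
            double scale = ratio * factor / rint(ratio * factor);
            if (fabs(scale - 1) <= max_change)
                return scale;
        }
        return -1; // no acceptable multiple within the allowed deviation
    }

    int main(void)
    {
        printf("23.976 fps @ 60 Hz -> %f\n",
               best_speed(1.0 / 60, 1.0 / 23.976, 0.01, 5));
        printf("25.000 fps @ 60 Hz -> %f\n",
               best_speed(1.0 / 60, 1.0 / 25, 0.01, 5));
        return 0;
    }

The second call shows why the factor loop exists: 25 fps does not divide 60 Hz within 1% at factors 1 through 4, but 5 frames fit 12 vsyncs exactly, so the video can still run at unchanged speed.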
@@ -671,9 +697,15 @@ static double find_best_speed(struct MPContext *mpctx, double vsync)
double dur = mpctx->past_frames[n].approx_duration;
if (dur <= 0)
continue;
- total += calc_best_speed(vsync, dur / mpctx->opts->playback_speed);
+ double best = calc_best_speed(vsync, dur / mpctx->opts->playback_speed,
+ mpctx->opts->sync_max_video_change / 100,
+ mpctx->opts->sync_max_factor);
+ if (best <= 0)
+ continue;
+ total += best;
num++;
}
+ // If it doesn't work, play at normal speed.
return num > 0 ? total / num : 1;
}
@@ -715,12 +747,14 @@ static double compute_audio_drift(struct MPContext *mpctx, double vsync)
return (sum_x * sum_y - num * sum_xy) / (sum_x * sum_x - num * sum_xx);
}
-static void adjust_audio_resample_speed(struct MPContext *mpctx, double vsync)
+static void adjust_audio_drift_compensation(struct MPContext *mpctx, double vsync)
{
struct MPOpts *opts = mpctx->opts;
- int mode = opts->video_sync;
+ int mode = mpctx->video_out->opts->video_sync;
- if (mode != VS_DISP_RESAMPLE || mpctx->audio_status != STATUS_PLAYING) {
+ if ((mode != VS_DISP_RESAMPLE && mode != VS_DISP_TEMPO) ||
+ mpctx->audio_status != STATUS_PLAYING)
+ {
mpctx->speed_factor_a = mpctx->speed_factor_v;
return;
}
@@ -778,7 +812,7 @@ static void handle_display_sync_frame(struct MPContext *mpctx,
{
struct MPOpts *opts = mpctx->opts;
struct vo *vo = mpctx->video_out;
- int mode = opts->video_sync;
+ int mode = vo->opts->video_sync;
if (!mpctx->display_sync_active) {
mpctx->display_sync_error = 0.0;
@@ -787,39 +821,30 @@ static void handle_display_sync_frame(struct MPContext *mpctx,
mpctx->display_sync_active = false;
- if (mode == VS_DISP_ADROP && !mpctx->audio_drop_deprecated_msg) {
- MP_WARN(mpctx, "video-sync=display-adrop mode is deprecated and will "
- "be removed in the future.\n");
- mpctx->audio_drop_deprecated_msg = true;
- }
-
if (!VS_IS_DISP(mode))
return;
bool resample = mode == VS_DISP_RESAMPLE || mode == VS_DISP_RESAMPLE_VDROP ||
mode == VS_DISP_RESAMPLE_NONE;
bool drop = mode == VS_DISP_VDROP || mode == VS_DISP_RESAMPLE ||
- mode == VS_DISP_ADROP || mode == VS_DISP_RESAMPLE_VDROP;
+ mode == VS_DISP_ADROP || mode == VS_DISP_RESAMPLE_VDROP ||
+ mode == VS_DISP_TEMPO;
drop &= frame->can_drop;
if (resample && using_spdif_passthrough(mpctx))
return;
- double vsync = vo_get_vsync_interval(vo) / 1e6;
+ double vsync = vo_get_vsync_interval(vo) / 1e9;
if (vsync <= 0)
return;
- double adjusted_duration = MPMAX(0, mpctx->past_frames[0].approx_duration);
- adjusted_duration /= opts->playback_speed;
+ double approx_duration = MPMAX(0, mpctx->past_frames[0].approx_duration);
+ double adjusted_duration = approx_duration / opts->playback_speed;
if (adjusted_duration > 0.5)
return;
mpctx->speed_factor_v = 1.0;
- if (mode != VS_DISP_VDROP) {
- double best = find_best_speed(mpctx, vsync);
- // If it doesn't work, play at normal speed.
- if (fabs(best - 1.0) <= opts->sync_max_video_change / 100)
- mpctx->speed_factor_v = best;
- }
+ if (mode != VS_DISP_VDROP)
+ mpctx->speed_factor_v = find_best_speed(mpctx, vsync);
// Determine for how many vsyncs a frame should be displayed. This can be
// e.g. 2 for 30hz on a 60hz display. It can also be 0 if the video
@@ -885,8 +910,8 @@ static void handle_display_sync_frame(struct MPContext *mpctx,
mpctx->past_frames[0].num_vsyncs = num_vsyncs;
mpctx->past_frames[0].av_diff = mpctx->last_av_difference;
- if (resample) {
- adjust_audio_resample_speed(mpctx, vsync);
+ if (resample || mode == VS_DISP_ADROP || mode == VS_DISP_TEMPO) {
+ adjust_audio_drift_compensation(mpctx, vsync);
} else {
mpctx->speed_factor_a = 1.0;
}
@@ -897,11 +922,21 @@ static void handle_display_sync_frame(struct MPContext *mpctx,
frame->vsync_interval = vsync;
frame->vsync_offset = -prev_error;
frame->ideal_frame_duration = frame_duration;
+ frame->ideal_frame_vsync = (-prev_error / frame_duration) * approx_duration;
+ frame->ideal_frame_vsync_duration = (vsync / frame_duration) * approx_duration;
frame->num_vsyncs = num_vsyncs;
frame->display_synced = true;
+ frame->approx_duration = approx_duration;
+
+ // Adjust frame virtual vsyncs by the repeat count
+ if (drop_repeat > 0)
+ frame->ideal_frame_vsync_duration /= drop_repeat;
mpctx->display_sync_active = true;
- update_playback_speed(mpctx);
+ // Try to avoid audio underruns that may occur if we update
+ // the playback speed while in the STATUS_SYNCING state.
+ if (mpctx->video_status != STATUS_SYNCING)
+ update_playback_speed(mpctx);
MP_STATS(mpctx, "value %f aspeed", mpctx->speed_factor_a - 1);
MP_STATS(mpctx, "value %f vspeed", mpctx->speed_factor_v - 1);
@@ -983,6 +1018,32 @@ static void calculate_frame_duration(struct MPContext *mpctx)
MP_STATS(mpctx, "value %f frame-duration-approx", MPMAX(0, approx_duration));
}
+static void apply_video_crop(struct MPContext *mpctx, struct vo *vo)
+{
+ for (int n = 0; n < mpctx->num_next_frames; n++) {
+ struct m_geometry *gm = &vo->opts->video_crop;
+ struct mp_image_params p = mpctx->next_frames[n]->params;
+ if (gm->xy_valid || (gm->wh_valid && (gm->w > 0 || gm->h > 0)))
+ {
+ m_rect_apply(&p.crop, p.w, p.h, gm);
+ }
+
+ if (p.crop.x1 == 0 && p.crop.y1 == 0)
+ return;
+
+ if (!mp_image_crop_valid(&p)) {
+ char *str = m_option_type_rect.print(NULL, gm);
+ MP_WARN(vo, "Ignoring invalid --video-crop=%s for %dx%d image\n",
+ str, p.w, p.h);
+ talloc_free(str);
+ *gm = (struct m_geometry){0};
+ mp_property_do("video-crop", M_PROPERTY_SET, gm, mpctx);
+ return;
+ }
+ mpctx->next_frames[n]->params.crop = p.crop;
+ }
+}
+
void write_video(struct MPContext *mpctx)
{
struct MPOpts *opts = mpctx->opts;
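(The crop applied here comes from the --video-crop option/property referenced in the warning; judging from the m_geometry type and the m_option_type_rect printer, it takes the usual WxH+x+y geometry form, e.g. something like --video-crop=1280x720+0+180, and a rectangle that does not fit the source image is rejected with the warning above and the property reset to an empty geometry.)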
@@ -1007,7 +1068,8 @@ void write_video(struct MPContext *mpctx)
bool logical_eof = false;
int r = video_output_image(mpctx, &logical_eof);
- MP_TRACE(mpctx, "video_output_image: %d\n", r);
+ MP_TRACE(mpctx, "video_output_image: r=%d/eof=%d/st=%s\n", r, logical_eof,
+ mp_status_str(mpctx->video_status));
if (r < 0)
goto error;
@@ -1027,6 +1089,11 @@ void write_video(struct MPContext *mpctx)
if (r == VD_EOF) {
if (check_for_hwdec_fallback(mpctx))
return;
+ if (check_for_forced_eof(mpctx)) {
+ uninit_video_chain(mpctx);
+ handle_force_window(mpctx, true);
+ return;
+ }
if (vo_c->filter->failed_output_conversion)
goto error;
@@ -1036,9 +1103,7 @@ void write_video(struct MPContext *mpctx)
if (mpctx->video_status <= STATUS_PLAYING) {
mpctx->video_status = STATUS_DRAINING;
get_relative_time(mpctx);
- if (mpctx->num_past_frames == 1 && mpctx->past_frames[0].pts == 0 &&
- !mpctx->ao_chain)
- {
+ if (vo_c->is_sparse && !mpctx->ao_chain) {
MP_VERBOSE(mpctx, "assuming this is an image\n");
mpctx->time_frame += opts->image_display_duration;
} else if (mpctx->last_frame_duration > 0) {
@@ -1063,11 +1128,14 @@ void write_video(struct MPContext *mpctx)
if (mpctx->time_frame <= 0 || !has_frame) {
MP_VERBOSE(mpctx, "video EOF reached\n");
mpctx->video_status = STATUS_EOF;
- encode_lavc_stream_eof(mpctx->encode_lavc_ctx, STREAM_VIDEO);
}
}
- MP_DBG(mpctx, "video EOF (status=%d)\n", mpctx->video_status);
+ // Avoid pointlessly spamming the logs every frame.
+ if (!vo_c->is_sparse || !vo_c->sparse_eof_signalled) {
+ MP_DBG(mpctx, "video EOF (status=%d)\n", mpctx->video_status);
+ vo_c->sparse_eof_signalled = vo_c->is_sparse;
+ }
return;
}
@@ -1079,25 +1147,44 @@ void write_video(struct MPContext *mpctx)
return;
}
+ if (logical_eof && !mpctx->num_past_frames && mpctx->num_next_frames == 1 &&
+ use_video_lookahead(mpctx) && !vo_c->is_sparse)
+ {
+ // Too much danger to accidentally mark video as sparse when e.g.
+ // seeking exactly to the last frame, so as a heuristic, do this only
+ // if it looks like the "first" video frame (unreliable, but often
+ // works out well). Helps with seeking with single-image video tracks,
+ // as well as detecting whether a video track is really an image.
+ if (mpctx->next_frames[0]->pts == 0) {
+ MP_VERBOSE(mpctx, "assuming single-image video stream\n");
+ vo_c->is_sparse = true;
+ }
+ }
+
+ // Inject vo crop to notify and reconfig if needed
+ apply_video_crop(mpctx, vo);
+
// Filter output is different from VO input?
- struct mp_image_params p = mpctx->next_frames[0]->params;
- if (!vo->params || !mp_image_params_equal(&p, vo->params)) {
+ struct mp_image_params *p = &mpctx->next_frames[0]->params;
+ if (!vo->params || !mp_image_params_static_equal(p, vo->params)) {
// Changing config deletes the current frame; wait until it's finished.
- if (vo_still_displaying(vo))
+ if (vo_still_displaying(vo)) {
+ vo_request_wakeup_on_done(vo);
return;
+ }
const struct vo_driver *info = mpctx->video_out->driver;
char extra[20] = {0};
- if (p.p_w != p.p_h) {
+ if (p->p_w != p->p_h) {
int d_w, d_h;
- mp_image_params_get_dsize(&p, &d_w, &d_h);
+ mp_image_params_get_dsize(p, &d_w, &d_h);
snprintf(extra, sizeof(extra), " => %dx%d", d_w, d_h);
}
char sfmt[20] = {0};
- if (p.hw_subfmt)
- snprintf(sfmt, sizeof(sfmt), "[%s]", mp_imgfmt_to_name(p.hw_subfmt));
+ if (p->hw_subfmt)
+ snprintf(sfmt, sizeof(sfmt), "[%s]", mp_imgfmt_to_name(p->hw_subfmt));
MP_INFO(mpctx, "VO: [%s] %dx%d%s %s%s\n",
- info->name, p.w, p.h, extra, mp_imgfmt_to_name(p.imgfmt), sfmt);
+ info->name, p->w, p->h, extra, mp_imgfmt_to_name(p->imgfmt), sfmt);
MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description);
int vo_r = vo_reconfig2(vo, mpctx->next_frames[0]);
@@ -1120,7 +1207,7 @@ void write_video(struct MPContext *mpctx)
}
double time_frame = MPMAX(mpctx->time_frame, -1);
- int64_t pts = mp_time_us() + (int64_t)(time_frame * 1e6);
+ int64_t pts = mp_time_ns() + (int64_t)(time_frame * 1e9);
// wait until VO wakes us up to get more frames
// (NB: in theory, the 1st frame after display sync mode change uses the
@@ -1162,11 +1249,10 @@ void write_video(struct MPContext *mpctx)
diff /= mpctx->video_speed;
if (mpctx->time_frame < 0)
diff += mpctx->time_frame;
- frame->duration = MPCLAMP(diff, 0, 10) * 1e6;
+ frame->duration = MP_TIME_S_TO_NS(MPCLAMP(diff, 0, 10));
}
mpctx->video_pts = mpctx->next_frames[0]->pts;
- mpctx->last_vo_pts = mpctx->video_pts;
mpctx->last_frame_duration =
mpctx->next_frames[0]->pkt_duration / mpctx->video_speed;
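(The 1e9 divisor for the vsync interval, mp_time_ns(), and MP_TIME_S_TO_NS() in the hunks above all stem from the same underlying change: mpv's internal timestamps moved from microseconds to nanoseconds, so the timing arithmetic is converted but otherwise unchanged.)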
@@ -1197,17 +1283,10 @@ void write_video(struct MPContext *mpctx)
mp_notify(mpctx, MPV_EVENT_TICK, NULL);
- if (vo_c->filter->got_output_eof && !mpctx->num_next_frames &&
- mpctx->ao_chain)
- {
- MP_VERBOSE(mpctx, "assuming this was the last video frame\n");
- // The main point of doing this is to prevent use of this for the
- // playback_pts if audio is still running (=> seek behavior).
- mpctx->video_status = STATUS_EOF;
- }
-
- // hr-seek past EOF -> returns last frame, but terminates playback.
- if (logical_eof)
+ // hr-seek past EOF -> returns last frame, but terminates playback. The
+ // early EOF is needed to trigger the exit before the next seek is executed.
+ // Always using early EOF breaks other cases, like images.
+ if (logical_eof && !mpctx->num_next_frames && mpctx->ao_chain)
mpctx->video_status = STATUS_EOF;
if (mpctx->video_status != STATUS_EOF) {
@@ -1224,7 +1303,8 @@ void write_video(struct MPContext *mpctx)
vo_c->underrun_signaled = false;
- mp_wakeup_core(mpctx);
+ if (mpctx->video_status == STATUS_EOF || mpctx->stop_play)
+ mp_wakeup_core(mpctx);
return;
error: