summary · refs · log · tree · commit · diff · stats
path: root/player/client.c
diff options
context:
space:
mode:
author	wm4 <wm4@nowhere>	2014-12-15 14:44:25 +0100
committer	Diogo Franco (Kovensky) <diogomfranco@gmail.com>	2015-01-25 17:00:13 +0900
commit	ead565afb31db72c97ba5b2cd17bec2bfa115eb7 (patch)
tree	be08ce78a23d69031c523da260f838ea03d265be /player/client.c
parent	9f80936ddc05b27945637b7944b8b6c894ff87ac (diff)
download	mpv-ead565afb31db72c97ba5b2cd17bec2bfa115eb7.tar.bz2
	mpv-ead565afb31db72c97ba5b2cd17bec2bfa115eb7.tar.xz
client API: be more lenient about mpv_suspend/resume mismatches
Before this commit, this was defined to trigger undefined behavior. This was nice because it required less code; but on the other hand, Lua as well as IPC support had to check these things manually. Do it directly in the API to avoid code duplication, and to make the API more robust. (The total code size still grows, though...) Since all of the failure cases were originally meant to ruin things forever, there is no way to return error codes. So just print the errors.
Diffstat (limited to 'player/client.c')
-rw-r--r--	player/client.c	| 47
1 file changed, 45 insertions(+), 2 deletions(-)
diff --git a/player/client.c b/player/client.c
index 7e9d9b5247..8cd47df4ab 100644
--- a/player/client.c
+++ b/player/client.c
@@ -108,6 +108,7 @@ struct mpv_handle {
uint64_t event_mask;
bool queued_wakeup;
bool choke_warning;
+ int suspend_count;
mpv_event *events; // ringbuffer of max_events entries
int max_events; // allocated number of entries in events
@@ -300,12 +301,47 @@ void mpv_set_wakeup_callback(mpv_handle *ctx, void (*cb)(void *d), void *d)
void mpv_suspend(mpv_handle *ctx)
{
- mp_dispatch_suspend(ctx->mpctx->dispatch);
+ bool do_suspend = false;
+
+ pthread_mutex_lock(&ctx->lock);
+ if (ctx->suspend_count == INT_MAX) {
+ MP_ERR(ctx, "suspend counter overflow");
+ } else {
+ do_suspend = ctx->suspend_count == 0;
+ ctx->suspend_count++;
+ }
+ pthread_mutex_unlock(&ctx->lock);
+
+ if (do_suspend)
+ mp_dispatch_suspend(ctx->mpctx->dispatch);
}
void mpv_resume(mpv_handle *ctx)
{
- mp_dispatch_resume(ctx->mpctx->dispatch);
+ bool do_resume = false;
+
+ pthread_mutex_lock(&ctx->lock);
+ if (ctx->suspend_count == 0) {
+ MP_ERR(ctx, "suspend counter underflow");
+ } else {
+ do_resume = ctx->suspend_count == 1;
+ ctx->suspend_count--;
+ }
+ pthread_mutex_unlock(&ctx->lock);
+
+ if (do_resume)
+ mp_dispatch_resume(ctx->mpctx->dispatch);
+}
+
+void mp_resume_all(mpv_handle *ctx)
+{
+ pthread_mutex_lock(&ctx->lock);
+ bool do_resume = ctx->suspend_count > 0;
+ ctx->suspend_count = 0;
+ pthread_mutex_unlock(&ctx->lock);
+
+ if (do_resume)
+ mp_dispatch_resume(ctx->mpctx->dispatch);
}
static void lock_core(mpv_handle *ctx)
@@ -325,6 +361,8 @@ void mpv_detach_destroy(mpv_handle *ctx)
if (!ctx)
return;
+ mp_resume_all(ctx);
+
pthread_mutex_lock(&ctx->lock);
// reserved_events equals the number of asynchronous requests that weren't
// yet replied. In order to avoid that trying to reply to a removed client
@@ -674,6 +712,11 @@ mpv_event *mpv_wait_event(mpv_handle *ctx, double timeout)
talloc_free_children(event);
while (1) {
+ // This will almost surely lead to a deadlock. (Polling is still ok.)
+ if (ctx->suspend_count && timeout > 0) {
+ MP_ERR(ctx, "attempting to wait while core is suspended");
+ break;
+ }
if (ctx->num_events) {
*event = ctx->events[ctx->first_event];
ctx->first_event = (ctx->first_event + 1) % ctx->max_events;