author     wm4 <wm4@nowhere>   2017-08-22 15:50:33 +0200
committer  wm4 <wm4@nowhere>   2017-08-22 15:50:33 +0200
commit     d2bdb72b69536370c3046107fc593896e74803c3
tree       09d5913f981ee5f5e0d43fbfd24b06f67eb8dd5e /misc
parent     5361c0b5d897ec7c969d2a75bb2dde345d62a59c
options: add a thread-safe way to notify option updates
So far, we had a thread-safe way to read options, but no option update notification mechanism. Everything was funneled through the main thread's central mp_option_change_callback() function. For example, if the panscan options were changed, the function called vo_control() with VOCTRL_SET_PANSCAN to manually notify the VO thread of updates. This worked, but is pretty inconvenient. Most of these problems come from the fact that MPlayer was written as a single-threaded program.

This commit works towards a more flexible mechanism. It adds an update callback to m_config_cache (the thing that is already used for thread-safe access of global options).

This alone would still be rather inconvenient, at least in the context of VOs. Add another mechanism on top of it that uses mp_dispatch_queue, and takes care of some annoying synchronization issues. We extend mp_dispatch_queue itself to make this easier and slightly more efficient.

As a first application, use this to reimplement certain VO scaling and renderer options. The update_opts() function translates these to the "old" VOCTRLs, though.

An annoyingly subtle issue is that m_config_cache's destructor now releases pending notifications, and must be released before the associated dispatch queue. Otherwise, it could happen that option updates during e.g. VO destruction would queue or run stale entries, which is not expected.

Rather untested. The singly-linked list code in dispatch.c is probably buggy, and I bet some aspects about synchronization are not entirely sane.
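A rough sketch of the intended usage pattern, based only on the dispatch API in this misc-only diff. The struct and helper names (struct my_vo, notify_options_changed, consumer_iteration) are hypothetical; update_opts() mirrors the callback mentioned in the commit message, and only the mp_dispatch_* calls are real.

#include "misc/dispatch.h"

// Hypothetical consumer state (illustrative only, not part of mpv).
struct my_vo {
    struct mp_dispatch_queue *queue;
    // ... thread-safe option cache, renderer state, etc.
};

// Runs on the consumer thread. Because it is enqueued with
// mp_dispatch_enqueue_notify(), multiple pending notifications for the same
// (fn, fn_data) pair collapse into a single call.
static void update_opts(void *ctx)
{
    struct my_vo *vo = ctx;
    // Re-read changed options here and apply them (e.g. translate to the
    // "old" VOCTRLs, as the commit message describes).
    (void)vo;
}

// Producer side (e.g. an option-change callback on another thread): enqueue a
// mergeable, asynchronous notification. If an identical one is still queued,
// the new item is silently dropped by mp_dispatch_append().
static void notify_options_changed(struct my_vo *vo)
{
    mp_dispatch_enqueue_notify(vo->queue, update_opts, vo);
}

// One iteration of the consumer's event loop: run queued callbacks,
// update_opts() among them.
static void consumer_iteration(struct my_vo *vo)
{
    mp_dispatch_queue_process(vo->queue, 0);
}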
Diffstat (limited to 'misc')
-rw-r--r--  misc/dispatch.c | 56
-rw-r--r--  misc/dispatch.h |  4
2 files changed, 60 insertions(+), 0 deletions(-)
diff --git a/misc/dispatch.c b/misc/dispatch.c
index 086896ba79..e625f196dc 100644
--- a/misc/dispatch.c
+++ b/misc/dispatch.c
@@ -59,6 +59,7 @@ struct mp_dispatch_item {
mp_dispatch_fn fn;
void *fn_data;
bool asynchronous;
+ bool mergeable;
bool completed;
struct mp_dispatch_item *next;
};
@@ -113,12 +114,25 @@ static void mp_dispatch_append(struct mp_dispatch_queue *queue,
struct mp_dispatch_item *item)
{
pthread_mutex_lock(&queue->lock);
+ if (item->mergeable) {
+ for (struct mp_dispatch_item *cur = queue->head; cur; cur = cur->next) {
+ if (cur->mergeable && cur->fn == item->fn &&
+ cur->fn_data == item->fn_data)
+ {
+ talloc_free(item);
+ pthread_mutex_unlock(&queue->lock);
+ return;
+ }
+ }
+ }
+
if (queue->tail) {
queue->tail->next = item;
} else {
queue->head = item;
}
queue->tail = item;
+
// Wake up the main thread; note that other threads might wait on this
// condition for reasons, so broadcast the condition.
pthread_cond_broadcast(&queue->cond);
@@ -127,6 +141,7 @@ static void mp_dispatch_append(struct mp_dispatch_queue *queue,
if (!queue->wakeup_fn)
queue->interrupted = true;
pthread_mutex_unlock(&queue->lock);
+
if (queue->wakeup_fn)
queue->wakeup_fn(queue->wakeup_ctx);
}
@@ -165,6 +180,47 @@ void mp_dispatch_enqueue_autofree(struct mp_dispatch_queue *queue,
mp_dispatch_append(queue, item);
}
+// Like mp_dispatch_enqueue(), but the new item is dropped if an item with the
+// same fn and fn_data (also enqueued with this function) is already queued
+// and has not run yet. The callback is always run asynchronously.
+void mp_dispatch_enqueue_notify(struct mp_dispatch_queue *queue,
+ mp_dispatch_fn fn, void *fn_data)
+{
+ struct mp_dispatch_item *item = talloc_ptrtype(NULL, item);
+ *item = (struct mp_dispatch_item){
+ .fn = fn,
+ .fn_data = fn_data,
+ .mergeable = true,
+ .asynchronous = true,
+ };
+ mp_dispatch_append(queue, item);
+}
+
+// Remove already queued item. Only items enqueued with the following functions
+// can be canceled:
+// - mp_dispatch_enqueue()
+// - mp_dispatch_enqueue_notify()
+// Items which were enqueued, and which are currently executing, can not be
+// canceled anymore. This function is mostly for being called from the same
+// context as mp_dispatch_queue_process(), where the "currently executing" case
+// can be excluded.
+void mp_dispatch_cancel_fn(struct mp_dispatch_queue *queue,
+ mp_dispatch_fn fn, void *fn_data)
+{
+ pthread_mutex_lock(&queue->lock);
+ struct mp_dispatch_item **pcur = &queue->head;
+ queue->tail = NULL;
+ while (*pcur) {
+ struct mp_dispatch_item *cur = *pcur;
+ if (cur->fn == fn && cur->fn_data == fn_data) {
+ *pcur = cur->next;
+ talloc_free(cur);
+ } else {
+ queue->tail = cur;
+ pcur = &cur->next;
+ }
+ }
+ pthread_mutex_unlock(&queue->lock);
+}
+
// Run fn(fn_data) on the target thread synchronously. This function enqueues
// the callback and waits until the target thread is done doing this.
// This is redundant to calling the function inside mp_dispatch_[un]lock(),
diff --git a/misc/dispatch.h b/misc/dispatch.h
index a762e47cd2..d850437934 100644
--- a/misc/dispatch.h
+++ b/misc/dispatch.h
@@ -12,6 +12,10 @@ void mp_dispatch_enqueue(struct mp_dispatch_queue *queue,
mp_dispatch_fn fn, void *fn_data);
void mp_dispatch_enqueue_autofree(struct mp_dispatch_queue *queue,
mp_dispatch_fn fn, void *fn_data);
+void mp_dispatch_enqueue_notify(struct mp_dispatch_queue *queue,
+ mp_dispatch_fn fn, void *fn_data);
+void mp_dispatch_cancel_fn(struct mp_dispatch_queue *queue,
+ mp_dispatch_fn fn, void *fn_data);
void mp_dispatch_run(struct mp_dispatch_queue *queue,
mp_dispatch_fn fn, void *fn_data);
void mp_dispatch_queue_process(struct mp_dispatch_queue *queue, double timeout);
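The ordering subtlety from the commit message can be sketched as follows, continuing the hypothetical consumer from the sketch above (my_vo_destroy is illustrative; only the dispatch call is real). Canceling from the thread that runs mp_dispatch_queue_process() excludes the "currently executing" case that mp_dispatch_cancel_fn() cannot handle.

// Hypothetical teardown for the consumer sketched earlier.
static void my_vo_destroy(struct my_vo *vo)
{
    // Drop any pending, not-yet-run update notification for this context,
    // so no stale entry fires during or after destruction.
    mp_dispatch_cancel_fn(vo->queue, update_opts, vo);

    // Only now free option caches / state that update_opts() would have
    // touched; the dispatch queue itself goes away last.
}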