Diffstat (limited to 'sub/dec_sub.c')
 sub/dec_sub.c | 362 +++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 318 insertions(+), 44 deletions(-)
diff --git a/sub/dec_sub.c b/sub/dec_sub.c
index e8b63986ab..77ef2af94e 100644
--- a/sub/dec_sub.c
+++ b/sub/dec_sub.c
@@ -30,6 +30,7 @@
#include "common/global.h"
#include "common/msg.h"
#include "osdep/threads.h"
+#include "video/mp_image.h"
extern const struct sd_functions sd_ass;
extern const struct sd_functions sd_lavc;
@@ -42,36 +43,195 @@ static const struct sd_functions *const sd_list[] = {
NULL
};
-struct dec_sub {
- pthread_mutex_t lock;
+// It's hard to put an upper bound on how far ahead rendering can get when
+// vo_opengl interpolation and blend-subtitles are in use.
+#define MAX_BUFFER (MAX_SUB_RENDER_AHEAD + 10)
+struct dec_sub {
+ // --- the following fields are invariant after init
struct mp_log *log;
struct MPOpts *opts;
struct sh_stream *sh;
- double last_pkt_pts;
+ bool threaded;
+ pthread_t thread;
+
+ // --- the following fields are protected by state_lock
+ pthread_mutex_t state_lock;
+ pthread_cond_t state_wakeup;
+ double last_pkt_pts;
+ struct mp_image_params last_video_fmt;
+ struct mp_osd_res last_osd_res;
+ struct cache_entry *entries[MAX_BUFFER];
+ int num_entries;
+ struct demux_packet **packets;
+ int num_packets;
+ bool preloaded;
+ struct cache_entry *cur;
+
+    // This field serializes accesses to sub_get_bitmaps(). The problem is
+    // that a call to sub_get_bitmaps() can invalidate the result of the
+    // previous call, so sub_get_bitmaps() "reserves" access to the renderer,
+    // and must wait until the previous reservation has expired. (See the
+    // usage sketch after sub_release_bitmaps() below.)
+ bool reserved;
+
+ // --- the following fields are protected by sd_lock
+ // (lock order: lock state_lock before sd_lock)
+ pthread_mutex_t sd_lock;
struct sd *sd;
};
-void sub_lock(struct dec_sub *sub)
+struct cache_entry {
+ // all fields, including refcount, are protected by state_lock, or immutable
+ double pts;
+ int refcount;
+ bool rendered;
+ struct sub_bitmaps data;
+ struct cache_entry *references;
+};
+
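+// Deep-copy a sub_bitmaps list. Only the formats the threaded path produces
+// are expected here; each part's bitmap copy is parented to the parts array.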
+static void copy_sub_bitmaps(struct sub_bitmaps *dst, struct sub_bitmaps *src)
+{
+ assert(src->format == SUBBITMAP_EMPTY || src->format == SUBBITMAP_LIBASS);
+ *dst = *src;
+ dst->parts =
+ talloc_memdup(NULL, src->parts, sizeof(src->parts[0]) * src->num_parts);
+ for (int n = 0; n < dst->num_parts; n++) {
+ struct sub_bitmap *p = &dst->parts[n];
+ p->bitmap = talloc_memdup(dst->parts, p->bitmap, p->stride * p->h);
+ }
+}
+
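+// Drop one reference. On the last unref, release the bitmap data: if the
+// entry borrowed its bitmaps from another entry (references set), unref that
+// entry instead of freeing the parts directly.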
+static void cache_entry_unref(struct cache_entry *e)
+{
+ if (e) {
+ e->refcount -= 1;
+ if (e->refcount == 0) {
+ if (e->references) {
+ cache_entry_unref(e->references);
+ } else {
+ talloc_free(e->data.parts);
+ }
+ talloc_free(e);
+ }
+ }
+}
+
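+// Worker thread: alternately decodes queued demuxer packets and renders the
+// oldest not-yet-rendered cache entry at the last known OSD resolution, and
+// sleeps on state_wakeup when there is nothing to do. If the renderer reports
+// no change against the previous entry (change_id == 0), the new entry just
+// references the previous entry's bitmaps instead of copying them.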
+static void *sub_thread(void *arg)
{
- pthread_mutex_lock(&sub->lock);
+ struct dec_sub *sub = arg;
+ mpthread_set_name("subrender");
+
+ pthread_mutex_lock(&sub->state_lock);
+ while (sub->threaded) {
+ struct demux_packet **packets = sub->packets;
+ int num_packets = sub->num_packets;
+ sub->packets = NULL;
+ sub->num_packets = 0;
+ if (num_packets) {
+ pthread_mutex_unlock(&sub->state_lock);
+ pthread_mutex_lock(&sub->sd_lock);
+
+ for (int n = 0; n < num_packets; n++) {
+ sub->sd->driver->decode(sub->sd, packets[n]);
+ talloc_free(packets[n]);
+ }
+
+ pthread_mutex_unlock(&sub->sd_lock);
+ pthread_mutex_lock(&sub->state_lock);
+ talloc_free(packets);
+ continue;
+ }
+ talloc_free(packets);
+
+ struct cache_entry *e = NULL;
+ struct cache_entry *prev = NULL;
+ for (int n = 0; n < sub->num_entries; n++) {
+ if (!sub->entries[n]->rendered) {
+ e = sub->entries[n];
+ if (n > 0)
+ prev = sub->entries[n - 1];
+ break;
+ }
+ }
+
+ struct mp_osd_res res = sub->last_osd_res;
+ if (e && res.w && res.h) {
+ assert(e->refcount);
+ e->refcount += 1;
+ if (prev && !prev->rendered)
+ prev = NULL;
+ if (prev)
+ prev->refcount += 1;
+ pthread_mutex_unlock(&sub->state_lock);
+ pthread_mutex_lock(&sub->sd_lock);
+
+ struct sub_bitmaps data = {0};
+ sub->sd->driver->get_bitmaps(sub->sd, res, e->pts, &data);
+ bool keep_ref = prev && data.change_id == 0;
+ if (keep_ref) {
+ e->data = prev->data;
+ e->data.change_id = 0;
+ e->references = prev;
+ } else {
+ copy_sub_bitmaps(&e->data, &data);
+ }
+
+ pthread_mutex_unlock(&sub->sd_lock);
+ pthread_mutex_lock(&sub->state_lock);
+ e->rendered = true;
+ if (!keep_ref)
+ cache_entry_unref(prev);
+ cache_entry_unref(e);
+            // Potentially wake up the VO thread waiting on this entry.
+ pthread_cond_broadcast(&sub->state_wakeup);
+ } else {
+ pthread_cond_wait(&sub->state_wakeup, &sub->state_lock);
+ }
+ }
+ pthread_mutex_unlock(&sub->state_lock);
+
+ return NULL;
+}
+
+static void flush_cache(struct dec_sub *sub)
+{
+ for (int n = 0; n < sub->num_entries; n++)
+ cache_entry_unref(sub->entries[n]);
+ sub->num_entries = 0;
}
-void sub_unlock(struct dec_sub *sub)
+static void flush_packets(struct dec_sub *sub)
{
- pthread_mutex_unlock(&sub->lock);
+ for (int n = 0; n < sub->num_packets; n++)
+ talloc_free(sub->packets[n]);
+ sub->num_packets = 0;
}
void sub_destroy(struct dec_sub *sub)
{
if (!sub)
return;
- sub_reset(sub);
- sub->sd->driver->uninit(sub->sd);
+ if (sub->threaded) {
+ pthread_mutex_lock(&sub->state_lock);
+ sub->threaded = false;
+ pthread_cond_broadcast(&sub->state_wakeup);
+ pthread_mutex_unlock(&sub->state_lock);
+        pthread_join(sub->thread, NULL);
+ }
+ flush_cache(sub);
+ flush_packets(sub);
+ assert(sub->cur == NULL);
+ if (sub->sd) {
+ sub_reset(sub);
+ sub->sd->driver->uninit(sub->sd);
+ }
talloc_free(sub->sd);
- pthread_mutex_destroy(&sub->lock);
+ talloc_free(sub->packets);
+ pthread_mutex_destroy(&sub->state_lock);
+ pthread_cond_destroy(&sub->state_wakeup);
+ pthread_mutex_destroy(&sub->sd_lock);
talloc_free(sub);
}
@@ -92,7 +252,9 @@ struct dec_sub *sub_create(struct mpv_global *global, struct demuxer *demuxer,
sub->opts = global->opts;
sub->sh = sh;
sub->last_pkt_pts = MP_NOPTS_VALUE;
- mpthread_mutex_init_recursive(&sub->lock);
+ pthread_mutex_init(&sub->state_lock, NULL);
+ pthread_cond_init(&sub->state_wakeup, NULL);
+ pthread_mutex_init(&sub->sd_lock, NULL);
sub->sd = talloc(NULL, struct sd);
*sub->sd = (struct sd){
@@ -104,12 +266,20 @@ struct dec_sub *sub_create(struct mpv_global *global, struct demuxer *demuxer,
.codec = sh->codec,
};
- if (sh->codec && sub->sd->driver->init(sub->sd) >= 0)
+ if (sh->codec && sub->sd->driver->init(sub->sd) >= 0) {
+ if (sub->opts->sub_render_ahead && !sub->sd->driver->accepts_packet)
+ {
+ sub->threaded = true;
+ if (pthread_create(&sub->thread, NULL, sub_thread, sub))
+ sub->threaded = false;
+ }
return sub;
+ }
ta_set_parent(log, NULL);
talloc_free(sub->sd);
- talloc_free(sub);
+ sub->sd = NULL;
+ sub_destroy(sub);
}
mp_err(log, "Could not find subtitle decoder for format '%s'.\n",
@@ -118,14 +288,24 @@ struct dec_sub *sub_create(struct mpv_global *global, struct demuxer *demuxer,
return NULL;
}
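+// Hand one packet to the decoder. In threaded mode the packet is merely
+// queued; the worker thread decodes and frees it later.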
+static void feed_packet(struct dec_sub *sub, struct demux_packet *pkt)
+{
+ if (sub->threaded) {
+ MP_TARRAY_APPEND(NULL, sub->packets, sub->num_packets, pkt);
+ } else {
+ sub->sd->driver->decode(sub->sd, pkt);
+ talloc_free(pkt);
+ }
+}
+
// Read all packets from the demuxer and decode/add them. Returns false if
// circumstances make this impossible.
bool sub_read_all_packets(struct dec_sub *sub)
{
- pthread_mutex_lock(&sub->lock);
+ pthread_mutex_lock(&sub->state_lock);
- if (!sub->sd->driver->accept_packets_in_advance) {
- pthread_mutex_unlock(&sub->lock);
+ if (sub->sd->driver->accepts_packet) {
+ pthread_mutex_unlock(&sub->state_lock);
return false;
}
@@ -133,27 +313,31 @@ bool sub_read_all_packets(struct dec_sub *sub)
struct demux_packet *pkt = demux_read_packet(sub->sh);
if (!pkt)
break;
- sub->sd->driver->decode(sub->sd, pkt);
- talloc_free(pkt);
+ feed_packet(sub, pkt);
}
- pthread_mutex_unlock(&sub->lock);
+ sub->preloaded = true;
+
+ pthread_cond_broadcast(&sub->state_wakeup);
+ pthread_mutex_unlock(&sub->state_lock);
return true;
}
// Read packets from the demuxer stream passed to sub_create(). Return true if
// enough packets were read, false if the player should wait until the demuxer
// signals new packets available (and then should retry).
+// This can also be used to trigger ahead-rendering of subtitles for the given
+// timestamp. (It's assumed that the player will want to render subtitles at
+// this point.)
bool sub_read_packets(struct dec_sub *sub, double video_pts)
{
bool r = true;
- pthread_mutex_lock(&sub->lock);
+ pthread_mutex_lock(&sub->state_lock);
while (1) {
- bool read_more = true;
- if (sub->sd->driver->accepts_packet)
- read_more = sub->sd->driver->accepts_packet(sub->sd);
-
- if (!read_more)
+ if (sub->preloaded)
+ break;
+ if (sub->sd->driver->accepts_packet &&
+ !sub->sd->driver->accepts_packet(sub->sd))
break;
struct demux_packet *pkt;
@@ -169,25 +353,100 @@ bool sub_read_packets(struct dec_sub *sub, double video_pts)
break;
}
- sub->sd->driver->decode(sub->sd, pkt);
sub->last_pkt_pts = pkt->pts;
- talloc_free(pkt);
+ feed_packet(sub, pkt);
+ }
+ if (sub->threaded && r) {
+ // xxx: can overflow if either
+ // - static readahead count is greater than MAX_BUFFER
+ // - VO is somehow not rendering subs
+ //assert(sub->num_entries < MAX_BUFFER);
+ if ((!sub->num_entries ||
+ video_pts > sub->entries[sub->num_entries - 1]->pts) &&
+ sub->num_entries < MAX_BUFFER)
+ {
+ // MP_WARN(sub,"add %f -> %d \n", video_pts, sub->num_entries);
+ struct cache_entry *e = talloc_ptrtype(NULL, e);
+ *e = (struct cache_entry){
+ .pts = video_pts,
+ .refcount = 1,
+ };
+ sub->entries[sub->num_entries++] = e;
+ }
+ // process packets, render ahead subtitles
+ pthread_cond_broadcast(&sub->state_wakeup);
}
- pthread_mutex_unlock(&sub->lock);
+ pthread_mutex_unlock(&sub->state_lock);
return r;
}
-// You must call sub_lock/sub_unlock if more than 1 thread access sub.
-// The issue is that *res will contain decoder allocated data, which might
-// be deallocated on the next decoder access.
+// Warning: you must call sub_release_bitmaps() when done. sub_get_bitmaps()
+// will block until the previous one has been released.
void sub_get_bitmaps(struct dec_sub *sub, struct mp_osd_res dim, double pts,
struct sub_bitmaps *res)
{
- struct MPOpts *opts = sub->opts;
-
*res = (struct sub_bitmaps) {0};
- if (opts->sub_visibility && sub->sd->driver->get_bitmaps)
- sub->sd->driver->get_bitmaps(sub->sd, dim, pts, res);
+
+ if (sub->threaded) {
+ pthread_mutex_lock(&sub->state_lock);
+ if (!osd_res_equals(sub->last_osd_res, dim)) {
+ sub->last_osd_res = dim;
+ flush_cache(sub);
+ pthread_cond_broadcast(&sub->state_wakeup);
+ }
+ assert(!sub->cur);
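+        // Look for an exact match, pruning all entries older than pts along
+        // the way.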
+ for (int n = 0; n < sub->num_entries; n++) {
+ struct cache_entry *e = sub->entries[n];
+ if (e->pts < pts) {
+ // Assume old entries are not needed anymore.
+// MP_WARN(sub, "prune %f at %f -> %d \n", e->pts, pts, sub->num_entries);
+ cache_entry_unref(e);
+ MP_TARRAY_REMOVE_AT(sub->entries, sub->num_entries, n);
+ n--;
+ } else if (e->pts == pts) {
+ e->refcount += 1;
+ sub->cur = e;
+ break;
+ }
+ }
+ bool done = false;
+ if (sub->cur) {
+ done = true;
+ while (!sub->cur->rendered)
+ pthread_cond_wait(&sub->state_wakeup, &sub->state_lock);
+ *res = sub->cur->data;
+ //xxx: if there's a cache miss, then it might happen that older
+ // subs are being rendered again => must add change_id
+ //MP_WARN(sub, "%f in cache\n", pts);
+ }
+ pthread_mutex_unlock(&sub->state_lock);
+ if (done)
+ return;
+ }
+
+ //MP_WARN(sub, "%f not in cache\n", pts);
+ pthread_mutex_lock(&sub->sd_lock);
+ assert(!sub->reserved);
+ sub->reserved = true;
+ sub->sd->driver->get_bitmaps(sub->sd, dim, pts, res);
+}
+
+void sub_release_bitmaps(struct dec_sub *sub)
+{
+ if (sub->threaded) {
+ pthread_mutex_lock(&sub->state_lock);
+ bool had_sub = !!sub->cur;
+ cache_entry_unref(sub->cur);
+ sub->cur = NULL;
+ pthread_mutex_unlock(&sub->state_lock);
+ if (had_sub)
+ return;
+ }
+
+ if (sub->reserved) {
+ sub->reserved = false;
+ pthread_mutex_unlock(&sub->sd_lock);
+ }
}
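+
+// A minimal usage sketch of the get/release pairing (hypothetical caller,
+// e.g. the code drawing the OSD; draw_subs() is made up):
+//
+//     struct sub_bitmaps imgs = {0};
+//     sub_get_bitmaps(sub, dim, pts, &imgs); // may block on a prior reservation
+//     draw_subs(&imgs);                      // imgs stays valid until released
+//     sub_release_bitmaps(sub);              // end the reservation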
// See sub_get_bitmaps() for locking requirements.
@@ -195,38 +454,53 @@ void sub_get_bitmaps(struct dec_sub *sub, struct mp_osd_res dim, double pts,
// It can be called unlocked, but then only 1 thread must call this function
// at a time (unless exclusive access is guaranteed).
char *sub_get_text(struct dec_sub *sub, double pts)
{
- pthread_mutex_lock(&sub->lock);
+ pthread_mutex_lock(&sub->sd_lock);
struct MPOpts *opts = sub->opts;
char *text = NULL;
if (opts->sub_visibility && sub->sd->driver->get_text)
text = sub->sd->driver->get_text(sub->sd, pts);
- pthread_mutex_unlock(&sub->lock);
+ pthread_mutex_unlock(&sub->sd_lock);
return text;
}
void sub_reset(struct dec_sub *sub)
{
- pthread_mutex_lock(&sub->lock);
- if (sub->sd->driver->reset)
+ pthread_mutex_lock(&sub->state_lock);
+ pthread_mutex_lock(&sub->sd_lock);
+ if (sub->sd && sub->sd->driver->reset)
sub->sd->driver->reset(sub->sd);
+ pthread_mutex_unlock(&sub->sd_lock);
sub->last_pkt_pts = MP_NOPTS_VALUE;
- pthread_mutex_unlock(&sub->lock);
+ flush_cache(sub);
+ flush_packets(sub);
+ pthread_mutex_unlock(&sub->state_lock);
}
void sub_select(struct dec_sub *sub, bool selected)
{
- pthread_mutex_lock(&sub->lock);
+ pthread_mutex_lock(&sub->sd_lock);
if (sub->sd->driver->select)
sub->sd->driver->select(sub->sd, selected);
- pthread_mutex_unlock(&sub->lock);
+ pthread_mutex_unlock(&sub->sd_lock);
}
int sub_control(struct dec_sub *sub, enum sd_ctrl cmd, void *arg)
{
int r = CONTROL_UNKNOWN;
- pthread_mutex_lock(&sub->lock);
+ pthread_mutex_lock(&sub->sd_lock);
if (sub->sd->driver->control)
r = sub->sd->driver->control(sub->sd, cmd, arg);
- pthread_mutex_unlock(&sub->lock);
+ pthread_mutex_unlock(&sub->sd_lock);
return r;
}
+
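+// Tell the decoder about the current video format. Cheap to call repeatedly:
+// SD_CTRL_SET_VIDEO_PARAMS is forwarded only when the format actually changes.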
+void sub_set_video_fmt(struct dec_sub *sub, struct mp_image_params *fmt)
+{
+ pthread_mutex_lock(&sub->state_lock);
+ if (!mp_image_params_equal(&sub->last_video_fmt, fmt)) {
+ sub->last_video_fmt = *fmt;
+ sub_control(sub, SD_CTRL_SET_VIDEO_PARAMS, &sub->last_video_fmt);
+ }
+ pthread_mutex_unlock(&sub->state_lock);
+}
+