summary refs log tree commit diff stats
path: root/video/mp_image_pool.c
diff options
context:
space:
mode:
Diffstat (limited to 'video/mp_image_pool.c')
-rw-r--r--  video/mp_image_pool.c  78
1 file changed, 60 insertions(+), 18 deletions(-)
diff --git a/video/mp_image_pool.c b/video/mp_image_pool.c
index 6b907706c5..c2d1fca080 100644
--- a/video/mp_image_pool.c
+++ b/video/mp_image_pool.c
@@ -43,6 +43,12 @@ struct mp_image_pool {
struct mp_image **images;
int num_images;
+
+ mp_image_allocator allocator;
+ void *allocator_ctx;
+
+ bool use_lru;
+ unsigned int lru_counter;
};
// Used to gracefully handle the case when the pool is freed while image
@@ -51,6 +57,7 @@ struct image_flags {
// If both of these are false, the image must be freed.
bool referenced; // outside mp_image reference exists
bool pool_alive; // the mp_image_pool references this
+ unsigned int order; // for LRU allocation (basically a timestamp)
};
static void image_pool_destructor(void *ptr)
@@ -102,43 +109,65 @@ static void unref_image(void *ptr)
talloc_free(img);
}
-// Return a new image of given format/size. The only difference to
-// mp_image_alloc() is that there is a transparent mechanism to recycle image
-// data allocations through this pool.
-// The image can be free'd with talloc_free().
-struct mp_image *mp_image_pool_get(struct mp_image_pool *pool, int fmt,
- int w, int h)
+// Return a new image of given format/size. Unlike mp_image_pool_get(), this
+// returns NULL if there is no free image of this format/size.
+struct mp_image *mp_image_pool_get_no_alloc(struct mp_image_pool *pool, int fmt,
+ int w, int h)
{
struct mp_image *new = NULL;
-
pool_lock();
for (int n = 0; n < pool->num_images; n++) {
struct mp_image *img = pool->images[n];
- struct image_flags *it = img->priv;
- assert(it->pool_alive);
- if (!it->referenced) {
+ struct image_flags *img_it = img->priv;
+ assert(img_it->pool_alive);
+ if (!img_it->referenced) {
if (img->imgfmt == fmt && img->w == w && img->h == h) {
- new = img;
- break;
+ if (pool->use_lru) {
+ struct image_flags *new_it = new ? new->priv : NULL;
+ if (!new_it || new_it->order > img_it->order)
+ new = img;
+ } else {
+ new = img;
+ break;
+ }
}
}
}
pool_unlock();
+ if (!new)
+ return NULL;
+ struct image_flags *it = new->priv;
+ assert(!it->referenced && it->pool_alive);
+ it->referenced = true;
+ it->order = ++pool->lru_counter;
+ return mp_image_new_custom_ref(new, new, unref_image);
+}
+// Return a new image of given format/size. The only difference to
+// mp_image_alloc() is that there is a transparent mechanism to recycle image
+// data allocations through this pool.
+// The image can be free'd with talloc_free().
+struct mp_image *mp_image_pool_get(struct mp_image_pool *pool, int fmt,
+ int w, int h)
+{
+ struct mp_image *new = mp_image_pool_get_no_alloc(pool, fmt, w, h);
if (!new) {
if (pool->num_images >= pool->max_count)
mp_image_pool_clear(pool);
- new = mp_image_alloc(fmt, w, h);
+ if (pool->allocator) {
+ new = pool->allocator(pool->allocator_ctx, fmt, w, h);
+ } else {
+ new = mp_image_alloc(fmt, w, h);
+ }
+ if (!new)
+ return NULL;
struct image_flags *it = talloc_ptrtype(new, it);
*it = (struct image_flags) { .pool_alive = true };
new->priv = it;
MP_TARRAY_APPEND(pool, pool->images, pool->num_images, new);
+ new = mp_image_pool_get_no_alloc(pool, fmt, w, h);
}
-
- struct image_flags *it = new->priv;
- assert(!it->referenced && it->pool_alive);
- it->referenced = true;
- return mp_image_new_custom_ref(new, new, unref_image);
+ return new;
}
// Like mp_image_new_copy(), but allocate the image out of the pool.
@@ -161,3 +190,16 @@ void mp_image_pool_make_writeable(struct mp_image_pool *pool,
mp_image_steal_data(img, mp_image_pool_new_copy(pool, img));
assert(mp_image_is_writeable(img));
}
+
+void mp_image_pool_set_allocator(struct mp_image_pool *pool,
+ mp_image_allocator cb, void *cb_data)
+{
+ pool->allocator = cb;
+ pool->allocator_ctx = cb_data;
+}
+
+// Put into LRU mode. (Likely better for hwaccel surfaces, but worse for memory.)
+void mp_image_pool_set_lru(struct mp_image_pool *pool)
+{
+ pool->use_lru = true;
+}