summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--DOCS/man/options.rst15
-rw-r--r--demux/demux.c5
2 files changed, 18 insertions, 2 deletions
diff --git a/DOCS/man/options.rst b/DOCS/man/options.rst
index a81d3313ea..bcdad2ed41 100644
--- a/DOCS/man/options.rst
+++ b/DOCS/man/options.rst
@@ -3365,7 +3365,8 @@ Demuxer
position (as it removes past packets that are seek points).
If the end of the file is reached, the remaining unused forward buffer space
- is "donated" to the backbuffer (unless the backbuffer size is set to 0).
+ is "donated" to the backbuffer (unless the backbuffer size is set to 0, or
+ ``--demuxer-donate-buffer`` is set to ``no``).
This still limits the total cache usage to the sum of the forward and
backward cache, and effectively makes better use of the total allowed memory
budget. (The opposite does not happen: free backward buffer is never
@@ -3377,6 +3378,18 @@ Demuxer
See ``--list-options`` for defaults and value range.
+``--demuxer-donate-buffer=<yes|no>``
+ Whether to let the back buffer use part of the forward buffer (default: yes).
+ If set to ``yes``, the "donation" behavior described in the option
+ description for ``--demuxer-max-back-bytes`` is enabled. This means the
+ back buffer may use memory up to the sum of the forward and back buffer
+ options, minus the currently used size of the forward buffer. If set to
+ ``no``, the options strictly limit the forward and back buffer sizes
+ separately.
+
+ Note that if the end of the file is reached, the buffered data stays the
+ same, even if you seek back within the cache. This is because the back
+ buffer is only reduced when new data is read.
+
``--demuxer-seekable-cache=<yes|no|auto>``
This controls whether seeking can use the demuxer cache (default: auto). If
enabled, short seek offsets will not trigger a low level demuxer seek
diff --git a/demux/demux.c b/demux/demux.c
index a5a287e8bd..2ecfc96ba2 100644
--- a/demux/demux.c
+++ b/demux/demux.c
@@ -87,6 +87,7 @@ struct demux_opts {
int disk_cache;
int64_t max_bytes;
int64_t max_bytes_bw;
+ int donate_fw;
double min_secs;
int force_seekable;
double min_secs_cache;
@@ -117,6 +118,7 @@ const struct m_sub_options demux_conf = {
// of double type.)
OPT_BYTE_SIZE("demuxer-max-bytes", max_bytes, 0, 0, MAX_BYTES),
OPT_BYTE_SIZE("demuxer-max-back-bytes", max_bytes_bw, 0, 0, MAX_BYTES),
+ OPT_FLAG("demuxer-donate-buffer", donate_fw, 0),
OPT_FLAG("force-seekable", force_seekable, 0),
OPT_DOUBLE("cache-secs", min_secs_cache, M_OPT_MIN, .min = 0),
OPT_FLAG("access-references", access_references, 0),
@@ -140,6 +142,7 @@ const struct m_sub_options demux_conf = {
.enable_cache = -1, // auto
.max_bytes = 150 * 1024 * 1024,
.max_bytes_bw = 50 * 1024 * 1024,
+ .donate_fw = 1,
.min_secs = 1.0,
.min_secs_cache = 10.0 * 60 * 60,
.seekable_cache = -1,
@@ -2265,7 +2268,7 @@ static void prune_old_packets(struct demux_internal *in)
uint64_t max_avail = in->max_bytes_bw;
// Backward cache (if enabled at all) can use unused forward cache.
// Still leave 1 byte free, so the read_packet logic doesn't get stuck.
- if (max_avail && in->max_bytes > (fw_bytes + 1))
+ if (max_avail && in->max_bytes > (fw_bytes + 1) && in->opts->donate_fw)
max_avail += in->max_bytes - (fw_bytes + 1);
if (in->total_bytes - fw_bytes <= max_avail)
break;