Diffstat (limited to 'audio')
-rw-r--r--  audio/out/ao_audiotrack.c            6
-rw-r--r--  audio/out/ao_audiounit.m             6
-rw-r--r--  audio/out/ao_coreaudio.c            18
-rw-r--r--  audio/out/ao_coreaudio_exclusive.c  12
-rw-r--r--  audio/out/ao_coreaudio_utils.c      12
-rw-r--r--  audio/out/ao_coreaudio_utils.h       4
-rw-r--r--  audio/out/ao_jack.c                  4
-rw-r--r--  audio/out/ao_opensles.c              2
-rw-r--r--  audio/out/ao_pipewire.c              6
-rw-r--r--  audio/out/ao_sdl.c                   2
-rw-r--r--  audio/out/ao_wasapi.c               21
-rw-r--r--  audio/out/buffer.c                  24
-rw-r--r--  audio/out/internal.h                 4
13 files changed, 61 insertions(+), 60 deletions(-)
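
Note: the commit below switches mpv's pull-mode audio outputs from microsecond to nanosecond timestamps; every mp_time_us() reference becomes mp_time_ns(), and hand-rolled "* 1e6" conversions are replaced with the MP_TIME_S_TO_NS() helper. As a rough sketch of what those helpers amount to (assumed, simplified definitions for illustration only; the real ones live in mpv's osdep/timer.h and may differ):

#include <stdint.h>
#include <time.h>

/* Assumed stand-ins for the helpers used throughout this diff. */
#define MP_TIME_S_TO_NS(s)  ((int64_t)((s) * 1e9))   /* seconds -> nanoseconds */
#define MP_TIME_NS_TO_S(ns) ((ns) / 1e9)             /* nanoseconds -> seconds */

/* Hypothetical stand-in for mp_time_ns(): a monotonic clock in nanoseconds. */
static int64_t mp_time_ns(void)
{
    struct timespec tp;
    clock_gettime(CLOCK_MONOTONIC, &tp);
    return (int64_t)tp.tv_sec * INT64_C(1000000000) + tp.tv_nsec;
}
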
diff --git a/audio/out/ao_audiotrack.c b/audio/out/ao_audiotrack.c
index 3fd68e5783..9620e45bd6 100644
--- a/audio/out/ao_audiotrack.c
+++ b/audio/out/ao_audiotrack.c
@@ -563,9 +563,9 @@ static void *playthread(void *arg)
}
if (state == AudioTrack.PLAYSTATE_PLAYING) {
int read_samples = p->chunksize / ao->sstride;
- int64_t ts = mp_time_us();
- ts += (read_samples / (double)(ao->samplerate)) * 1e6;
- ts += AudioTrack_getLatency(ao) * 1e6;
+ int64_t ts = mp_time_ns();
+ ts += MP_TIME_S_TO_NS(read_samples / (double)(ao->samplerate));
+ ts += MP_TIME_S_TO_NS(AudioTrack_getLatency(ao));
int samples = ao_read_data(ao, &p->chunk, read_samples, ts);
int ret = AudioTrack_write(ao, samples * ao->sstride);
if (ret >= 0) {
diff --git a/audio/out/ao_audiounit.m b/audio/out/ao_audiounit.m
index 91f0dd168a..85b1226dc9 100644
--- a/audio/out/ao_audiounit.m
+++ b/audio/out/ao_audiounit.m
@@ -94,9 +94,9 @@ static OSStatus render_cb_lpcm(void *ctx, AudioUnitRenderActionFlags *aflags,
for (int n = 0; n < ao->num_planes; n++)
planes[n] = buffer_list->mBuffers[n].mData;
- int64_t end = mp_time_us();
- end += p->device_latency * 1e6;
- end += ca_get_latency(ts) + ca_frames_to_us(ao, frames);
+ int64_t end = mp_time_ns();
+ end += MP_TIME_S_TO_NS(p->device_latency);
+ end += ca_get_latency(ts) + ca_frames_to_ns(ao, frames);
ao_read_data(ao, planes, frames, end);
return noErr;
}
diff --git a/audio/out/ao_coreaudio.c b/audio/out/ao_coreaudio.c
index ee8d63a81c..d96b597f6e 100644
--- a/audio/out/ao_coreaudio.c
+++ b/audio/out/ao_coreaudio.c
@@ -31,7 +31,7 @@ struct priv {
AudioDeviceID device;
AudioUnit audio_unit;
- uint64_t hw_latency_us;
+ uint64_t hw_latency_ns;
AudioStreamBasicDescription original_asbd;
AudioStreamID original_asbd_stream;
@@ -53,13 +53,13 @@ static int64_t ca_get_hardware_latency(struct ao *ao) {
&size);
CHECK_CA_ERROR("cannot get audio unit latency");
- uint64_t audiounit_latency_us = audiounit_latency_sec * 1e6;
- uint64_t device_latency_us = ca_get_device_latency_us(ao, p->device);
+ uint64_t audiounit_latency_ns = MP_TIME_S_TO_NS(audiounit_latency_sec);
+ uint64_t device_latency_ns = ca_get_device_latency_ns(ao, p->device);
- MP_VERBOSE(ao, "audiounit latency [us]: %lld\n", audiounit_latency_us);
- MP_VERBOSE(ao, "device latency [us]: %lld\n", device_latency_us);
+ MP_VERBOSE(ao, "audiounit latency [ns]: %lld\n", audiounit_latency_ns);
+ MP_VERBOSE(ao, "device latency [ns]: %lld\n", device_latency_ns);
- return audiounit_latency_us + device_latency_us;
+ return audiounit_latency_ns + device_latency_ns;
coreaudio_error:
return 0;
@@ -76,8 +76,8 @@ static OSStatus render_cb_lpcm(void *ctx, AudioUnitRenderActionFlags *aflags,
for (int n = 0; n < ao->num_planes; n++)
planes[n] = buffer_list->mBuffers[n].mData;
- int64_t end = mp_time_us();
- end += p->hw_latency_us + ca_get_latency(ts) + ca_frames_to_us(ao, frames);
+ int64_t end = mp_time_ns();
+ end += p->hw_latency_ns + ca_get_latency(ts) + ca_frames_to_ns(ao, frames);
ao_read_data(ao, planes, frames, end);
return noErr;
}
@@ -288,7 +288,7 @@ static bool init_audiounit(struct ao *ao, AudioStreamBasicDescription asbd)
CHECK_CA_ERROR_L(coreaudio_error_audiounit,
"can't link audio unit to selected device");
- p->hw_latency_us = ca_get_hardware_latency(ao);
+ p->hw_latency_ns = ca_get_hardware_latency(ao);
AURenderCallbackStruct render_cb = (AURenderCallbackStruct) {
.inputProc = render_cb_lpcm,
diff --git a/audio/out/ao_coreaudio_exclusive.c b/audio/out/ao_coreaudio_exclusive.c
index a60fcc1a97..821e9547f5 100644
--- a/audio/out/ao_coreaudio_exclusive.c
+++ b/audio/out/ao_coreaudio_exclusive.c
@@ -78,7 +78,7 @@ struct priv {
atomic_bool reload_requested;
- uint32_t hw_latency_us;
+ uint64_t hw_latency_ns;
};
static OSStatus property_listener_cb(
@@ -176,9 +176,9 @@ static OSStatus render_cb_compressed(
return kAudioHardwareUnspecifiedError;
}
- int64_t end = mp_time_us();
- end += p->hw_latency_us + ca_get_latency(ts)
- + ca_frames_to_us(ao, pseudo_frames);
+ int64_t end = mp_time_ns();
+ end += p->hw_latency_ns + ca_get_latency(ts)
+ + ca_frames_to_ns(ao, pseudo_frames);
ao_read_data(ao, &buf.mData, pseudo_frames, end);
@@ -383,8 +383,8 @@ static int init(struct ao *ao)
MP_WARN(ao, "Using spdif passthrough hack. This could produce noise.\n");
}
- p->hw_latency_us = ca_get_device_latency_us(ao, p->device);
- MP_VERBOSE(ao, "base latency: %d microseconds\n", (int)p->hw_latency_us);
+ p->hw_latency_ns = ca_get_device_latency_ns(ao, p->device);
+ MP_VERBOSE(ao, "base latency: %lld nanoseconds\n", p->hw_latency_ns);
err = enable_property_listener(ao, true);
CHECK_CA_ERROR("cannot install format change listener during init");
diff --git a/audio/out/ao_coreaudio_utils.c b/audio/out/ao_coreaudio_utils.c
index 45c5eff9db..67ab98d34d 100644
--- a/audio/out/ao_coreaudio_utils.c
+++ b/audio/out/ao_coreaudio_utils.c
@@ -292,9 +292,9 @@ bool ca_asbd_is_better(AudioStreamBasicDescription *req,
return true;
}
-int64_t ca_frames_to_us(struct ao *ao, uint32_t frames)
+int64_t ca_frames_to_ns(struct ao *ao, uint32_t frames)
{
- return frames / (float) ao->samplerate * 1e6;
+ return MP_TIME_S_TO_NS(frames / (double)ao->samplerate);
}
int64_t ca_get_latency(const AudioTimeStamp *ts)
@@ -306,7 +306,7 @@ int64_t ca_get_latency(const AudioTimeStamp *ts)
if (now > out)
return 0;
- return (out - now) * 1e-3;
+ return out - now;
#else
static mach_timebase_info_data_t timebase;
if (timebase.denom == 0)
@@ -318,7 +318,7 @@ int64_t ca_get_latency(const AudioTimeStamp *ts)
if (now > out)
return 0;
- return (out - now) * timebase.numer / timebase.denom / 1e3;
+ return (out - now) * timebase.numer / timebase.denom;
#endif
}
@@ -422,7 +422,7 @@ OSStatus ca_enable_mixing(struct ao *ao, AudioDeviceID device, bool changed)
return noErr;
}
-int64_t ca_get_device_latency_us(struct ao *ao, AudioDeviceID device)
+int64_t ca_get_device_latency_ns(struct ao *ao, AudioDeviceID device)
{
uint32_t latency_frames = 0;
uint32_t latency_properties[] = {
@@ -449,7 +449,7 @@ int64_t ca_get_device_latency_us(struct ao *ao, AudioDeviceID device)
MP_VERBOSE(ao, "Device sample rate: %f\n", sample_rate);
}
- return latency_frames / sample_rate * 1e6;
+ return MP_TIME_S_TO_NS(latency_frames / sample_rate);
}
static OSStatus ca_change_format_listener(
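
Note: ca_get_latency() above works in mach host-time ticks; scaling by timebase.numer/denom already yields nanoseconds, so dropping the trailing "/ 1e3" is exactly what turns the old microsecond result into a nanosecond one. A standalone sketch of that conversion (simplified, macOS-only, not mpv's code):

#include <mach/mach_time.h>
#include <stdint.h>

/* Convert a mach_absolute_time() tick delta to nanoseconds. */
static int64_t host_ticks_to_ns(uint64_t ticks)
{
    static mach_timebase_info_data_t timebase;
    if (timebase.denom == 0)
        mach_timebase_info(&timebase);
    return (int64_t)(ticks * timebase.numer / timebase.denom);
}
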
diff --git a/audio/out/ao_coreaudio_utils.h b/audio/out/ao_coreaudio_utils.h
index b5492924b6..d7532e3051 100644
--- a/audio/out/ao_coreaudio_utils.h
+++ b/audio/out/ao_coreaudio_utils.h
@@ -60,7 +60,7 @@ bool ca_asbd_is_better(AudioStreamBasicDescription *req,
AudioStreamBasicDescription *old,
AudioStreamBasicDescription *new);
-int64_t ca_frames_to_us(struct ao *ao, uint32_t frames);
+int64_t ca_frames_to_ns(struct ao *ao, uint32_t frames);
int64_t ca_get_latency(const AudioTimeStamp *ts);
#if HAVE_COREAUDIO
@@ -69,7 +69,7 @@ OSStatus ca_lock_device(AudioDeviceID device, pid_t *pid);
OSStatus ca_unlock_device(AudioDeviceID device, pid_t *pid);
OSStatus ca_disable_mixing(struct ao *ao, AudioDeviceID device, bool *changed);
OSStatus ca_enable_mixing(struct ao *ao, AudioDeviceID device, bool changed);
-int64_t ca_get_device_latency_us(struct ao *ao, AudioDeviceID device);
+int64_t ca_get_device_latency_ns(struct ao *ao, AudioDeviceID device);
bool ca_change_physical_format_sync(struct ao *ao, AudioStreamID stream,
AudioStreamBasicDescription change_format);
#endif
diff --git a/audio/out/ao_jack.c b/audio/out/ao_jack.c
index f55974b1e8..0c9ab8e0a5 100644
--- a/audio/out/ao_jack.c
+++ b/audio/out/ao_jack.c
@@ -122,8 +122,8 @@ static int process(jack_nframes_t nframes, void *arg)
jack_nframes_t jack_latency =
atomic_load(&p->graph_latency_max) + atomic_load(&p->buffer_size);
- int64_t end_time = mp_time_us();
- end_time += (jack_latency + nframes) / (double)ao->samplerate * 1000000.0;
+ int64_t end_time = mp_time_ns();
+ end_time += MP_TIME_S_TO_NS((jack_latency + nframes) / (double)ao->samplerate);
ao_read_data(ao, buffers, nframes, end_time);
diff --git a/audio/out/ao_opensles.c b/audio/out/ao_opensles.c
index 1348cebffb..9ade73c857 100644
--- a/audio/out/ao_opensles.c
+++ b/audio/out/ao_opensles.c
@@ -82,7 +82,7 @@ static void buffer_callback(SLBufferQueueItf buffer_queue, void *context)
delay = p->frames_per_enqueue / (double)ao->samplerate;
delay += p->audio_latency;
ao_read_data(ao, &p->buf, p->frames_per_enqueue,
- mp_time_us() + 1000000LL * delay);
+ mp_time_ns() + MP_TIME_S_TO_NS(delay));
res = (*buffer_queue)->Enqueue(buffer_queue, p->buf, p->bytes_per_enqueue);
if (res != SL_RESULT_SUCCESS)
diff --git a/audio/out/ao_pipewire.c b/audio/out/ao_pipewire.c
index c545a571c4..657ea1b7ba 100644
--- a/audio/out/ao_pipewire.c
+++ b/audio/out/ao_pipewire.c
@@ -176,10 +176,10 @@ static void on_process(void *userdata)
if (time.rate.num == 0)
time.rate.num = 1;
- int64_t end_time = mp_time_us();
+ int64_t end_time = mp_time_ns();
/* time.queued is always going to be 0, so we don't need to care */
- end_time += (nframes * 1e6 / ao->samplerate) +
- ((float) time.delay * SPA_USEC_PER_SEC * time.rate.num / time.rate.denom);
+ end_time += (nframes * 1e9 / ao->samplerate) +
+ ((double) time.delay * SPA_NSEC_PER_SEC * time.rate.num / time.rate.denom);
int samples = ao_read_data(ao, data, nframes, end_time);
b->size = samples;
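
Note: in the PipeWire hunk above, time.delay is a tick count in the units given by the time.rate fraction (rate.num/rate.denom seconds per tick), so scaling by SPA_NSEC_PER_SEC instead of SPA_USEC_PER_SEC is the whole unit change. A worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Invented values purely to illustrate the unit change:
     * rate = 1/48000 (one tick per sample at 48 kHz), delay = 960 ticks = 20 ms. */
    int64_t delay = 960;
    int64_t num = 1, denom = 48000;
    double delay_us = (double)delay * 1e6 * num / denom;  /* 20000 us (old code) */
    double delay_ns = (double)delay * 1e9 * num / denom;  /* 20000000 ns (new code) */
    printf("%.0f us == %.0f ns\n", delay_us, delay_ns);
    return 0;
}
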
diff --git a/audio/out/ao_sdl.c b/audio/out/ao_sdl.c
index 032939925d..5a6a58b124 100644
--- a/audio/out/ao_sdl.c
+++ b/audio/out/ao_sdl.c
@@ -61,7 +61,7 @@ static void audio_callback(void *userdata, Uint8 *stream, int len)
// fixed latency.
double delay = 2 * len / (double)ao->bps;
- ao_read_data(ao, data, len / ao->sstride, mp_time_us() + 1000000LL * delay);
+ ao_read_data(ao, data, len / ao->sstride, mp_time_ns() + MP_TIME_S_TO_NS(delay));
}
static void uninit(struct ao *ao)
diff --git a/audio/out/ao_wasapi.c b/audio/out/ao_wasapi.c
index fcb9c96818..a2460ab513 100644
--- a/audio/out/ao_wasapi.c
+++ b/audio/out/ao_wasapi.c
@@ -36,7 +36,8 @@ static UINT64 uint64_scale(UINT64 x, UINT64 num, UINT64 den)
+ ((x % den) * (num % den)) / den;
}
-static HRESULT get_device_delay(struct wasapi_state *state, double *delay_us) {
+static HRESULT get_device_delay(struct wasapi_state *state, double *delay_ns)
+{
UINT64 sample_count = atomic_load(&state->sample_count);
UINT64 position, qpc_position;
HRESULT hr;
@@ -54,7 +55,7 @@ static HRESULT get_device_delay(struct wasapi_state *state, double *delay_us) {
state->format.Format.nSamplesPerSec,
state->clock_frequency);
INT64 diff = sample_count - sample_position;
- *delay_us = diff * 1e6 / state->format.Format.nSamplesPerSec;
+ *delay_ns = diff * 1e9 / state->format.Format.nSamplesPerSec;
// Correct for any delay in IAudioClock_GetPosition above.
// This should normally be very small (<1 us), but just in case. . .
@@ -65,16 +66,16 @@ static HRESULT get_device_delay(struct wasapi_state *state, double *delay_us) {
// ignore the above calculation if it yields more than 10 seconds (due to
// possible overflow inside IAudioClock_GetPosition)
if (qpc_diff < 10 * 10000000) {
- *delay_us -= qpc_diff / 10.0; // convert to us
+ *delay_ns -= qpc_diff * 100.0; // convert to ns
} else {
MP_VERBOSE(state, "Insane qpc delay correction of %g seconds. "
"Ignoring it.\n", qpc_diff / 10000000.0);
}
- if (sample_count > 0 && *delay_us <= 0) {
- MP_WARN(state, "Under-run: Device delay: %g us\n", *delay_us);
+ if (sample_count > 0 && *delay_ns <= 0) {
+ MP_WARN(state, "Under-run: Device delay: %g ns\n", *delay_ns);
} else {
- MP_TRACE(state, "Device delay: %g us\n", *delay_us);
+ MP_TRACE(state, "Device delay: %g ns\n", *delay_ns);
}
return S_OK;
@@ -116,11 +117,11 @@ static bool thread_feed(struct ao *ao)
MP_TRACE(ao, "Frame to fill: %"PRIu32". Padding: %"PRIu32"\n",
frame_count, padding);
- double delay_us;
- hr = get_device_delay(state, &delay_us);
+ double delay_ns;
+ hr = get_device_delay(state, &delay_ns);
EXIT_ON_ERROR(hr);
// add the buffer delay
- delay_us += frame_count * 1e6 / state->format.Format.nSamplesPerSec;
+ delay_ns += frame_count * 1e9 / state->format.Format.nSamplesPerSec;
BYTE *pData;
hr = IAudioRenderClient_GetBuffer(state->pRenderClient,
@@ -131,7 +132,7 @@ static bool thread_feed(struct ao *ao)
ao_read_data_converted(ao, &state->convert_format,
(void **)data, frame_count,
- mp_time_us() + (int64_t)llrint(delay_us));
+ mp_time_ns() + (int64_t)llrint(delay_ns));
// note, we can't use ao_read_data return value here since we already
// committed to frame_count above in the GetBuffer call
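
Note: the WASAPI correction above relies on IAudioClock reporting QPC positions in 100 ns units, so qpc_diff / 10.0 was microseconds and qpc_diff * 100.0 is nanoseconds, while the 10 * 10000000 bound is still 10 seconds. A tiny check with invented values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int64_t qpc_diff = 12345;            /* 12345 * 100 ns = 1.2345 ms */
    double as_us = qpc_diff / 10.0;      /* 1234.5 us (old unit) */
    double as_ns = qpc_diff * 100.0;     /* 1234500 ns (new unit) */
    assert(as_ns == as_us * 1000.0);     /* same duration, finer unit */
    return 0;
}
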
diff --git a/audio/out/buffer.c b/audio/out/buffer.c
index 77506d8363..c0457ba279 100644
--- a/audio/out/buffer.c
+++ b/audio/out/buffer.c
@@ -62,7 +62,7 @@ struct buffer_state {
bool playing; // logically playing audio from buffer
bool paused; // logically paused
- int64_t end_time_us; // absolute output time of last played sample
+ int64_t end_time_ns; // absolute output time of last played sample
bool initial_unblocked;
@@ -177,9 +177,9 @@ static int read_buffer(struct ao *ao, void **data, int samples, bool *eof)
// rest of the user-provided buffer with silence.
// This basically assumes that the audio device doesn't care about underruns.
// If this is called in paused mode, it will always return 0.
-// The caller should set out_time_us to the expected delay until the last sample
-// reaches the speakers, in microseconds, using mp_time_us() as reference.
-int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_us)
+// The caller should set out_time_ns to the expected delay until the last sample
+// reaches the speakers, in nanoseconds, using mp_time_ns() as reference.
+int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_ns)
{
struct buffer_state *p = ao->buffer_state;
assert(!ao->driver->write);
@@ -189,7 +189,7 @@ int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_us)
int pos = read_buffer(ao, data, samples, &(bool){0});
if (pos > 0)
- p->end_time_us = out_time_us;
+ p->end_time_ns = out_time_ns;
if (pos < samples && p->playing && !p->paused) {
p->playing = false;
@@ -206,13 +206,13 @@ int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_us)
// Same as ao_read_data(), but convert data according to *fmt.
// fmt->src_fmt and fmt->channels must be the same as the AO parameters.
int ao_read_data_converted(struct ao *ao, struct ao_convert_fmt *fmt,
- void **data, int samples, int64_t out_time_us)
+ void **data, int samples, int64_t out_time_ns)
{
struct buffer_state *p = ao->buffer_state;
void *ndata[MP_NUM_CHANNELS] = {0};
if (!ao_need_conversion(fmt))
- return ao_read_data(ao, data, samples, out_time_us);
+ return ao_read_data(ao, data, samples, out_time_ns);
assert(ao->format == fmt->src_fmt);
assert(ao->channels.num == fmt->channels);
@@ -232,7 +232,7 @@ int ao_read_data_converted(struct ao *ao, struct ao_convert_fmt *fmt,
for (int n = 0; n < planes; n++)
ndata[n] = p->convert_buffer + n * src_plane_size;
- int res = ao_read_data(ao, ndata, samples, out_time_us);
+ int res = ao_read_data(ao, ndata, samples, out_time_ns);
ao_convert_inplace(fmt, ndata, samples);
for (int n = 0; n < planes; n++)
@@ -270,9 +270,9 @@ double ao_get_delay(struct ao *ao)
get_dev_state(ao, &state);
driver_delay = state.delay;
} else {
- int64_t end = p->end_time_us;
- int64_t now = mp_time_us();
- driver_delay = MPMAX(0, (end - now) / (1000.0 * 1000.0));
+ int64_t end = p->end_time_ns;
+ int64_t now = mp_time_ns();
+ driver_delay = MPMAX(0, MP_TIME_NS_TO_S(end - now));
}
int pending = mp_async_queue_get_samples(p->queue);
@@ -311,7 +311,7 @@ void ao_reset(struct ao *ao)
p->playing = false;
p->recover_pause = false;
p->hw_paused = false;
- p->end_time_us = 0;
+ p->end_time_ns = 0;
pthread_mutex_unlock(&p->lock);
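
Note: the buffer.c changes above simply carry the new unit through; drivers pass an absolute nanosecond deadline into ao_read_data(), and ao_get_delay() converts the stored deadline back to seconds. A rough sketch of that round trip, using the same assumed helper as earlier (names other than those in the diff are hypothetical):

#include <stdint.h>

#define MP_TIME_NS_TO_S(ns) ((ns) / 1e9)   /* assumed helper, as sketched above */

/* Driver side: absolute deadline = "now" plus total output latency, in ns. */
static int64_t compute_out_time_ns(int64_t now_ns, int64_t latency_ns)
{
    return now_ns + latency_ns;
}

/* Player side: remaining driver delay in seconds, clamped at zero. */
static double remaining_delay_s(int64_t end_time_ns, int64_t now_ns)
{
    int64_t diff = end_time_ns - now_ns;
    return diff > 0 ? MP_TIME_NS_TO_S(diff) : 0.0;
}
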
diff --git a/audio/out/internal.h b/audio/out/internal.h
index c3ed9dca98..7736d3174b 100644
--- a/audio/out/internal.h
+++ b/audio/out/internal.h
@@ -200,7 +200,7 @@ struct ao_driver {
// These functions can be called by AOs.
-int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_us);
+int ao_read_data(struct ao *ao, void **data, int samples, int64_t out_time_ns);
bool ao_chmap_sel_adjust(struct ao *ao, const struct mp_chmap_sel *s,
struct mp_chmap *map);
@@ -231,6 +231,6 @@ void ao_convert_inplace(struct ao_convert_fmt *fmt, void **data, int num_samples
void ao_wakeup_playthread(struct ao *ao);
int ao_read_data_converted(struct ao *ao, struct ao_convert_fmt *fmt,
- void **data, int samples, int64_t out_time_us);
+ void **data, int samples, int64_t out_time_ns);
#endif