From d1d864546cf6f63b089cf248c04fa6b625f23d50 Mon Sep 17 00:00:00 2001 From: wm4 Date: Sat, 25 Jun 2016 12:39:08 +0200 Subject: DOCS: change version references from 0.17.1 to 0.18.0 0.17.1 was never released, so the actual 0.18.0 release takes its place. --- DOCS/client-api-changes.rst | 2 +- DOCS/interface-changes.rst | 4 ++-- DOCS/man/input.rst | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/DOCS/client-api-changes.rst b/DOCS/client-api-changes.rst index 8ef01a05e1..7bcbbad721 100644 --- a/DOCS/client-api-changes.rst +++ b/DOCS/client-api-changes.rst @@ -32,7 +32,7 @@ API changes :: - --- mpv 0.17.1 --- + --- mpv 0.18.0 --- 1.21 - mpv_set_property() changes behavior with MPV_FORMAT_NODE. Before this change it rejected mpv_nodes with format==MPV_FORMAT_STRING if the property was not a string or did not have special mechanisms in place diff --git a/DOCS/interface-changes.rst b/DOCS/interface-changes.rst index 7ffbbd60e0..f8584293d7 100644 --- a/DOCS/interface-changes.rst +++ b/DOCS/interface-changes.rst @@ -19,7 +19,7 @@ Interface changes :: - --- mpv 0.17.1 --- + --- mpv 0.18.0 --- - now ab-loops are active even if one of the "ab-loop-a"/"-b" properties is unset ("no"), in which case the start of the file is used if the A loop point is unset, and the end of the file for an unset B loop point @@ -27,7 +27,7 @@ Interface changes (also needs --embeddedfonts=no) - add "hwdec-interop" and "hwdec-current" properties - deprecated "hwdec-active" and "hwdec-detected" properties (to be removed - in mpv 0.19.0) + in mpv 0.20.0) - choice option/property values that are "yes" or "no" will now be returned as booleans when using the mpv_node functions in the client API, the "native" property accessors in Lua, and the JSON API. 
They can be set as diff --git a/DOCS/man/input.rst b/DOCS/man/input.rst index d58f2dc4d5..9c0cf922c4 100644 --- a/DOCS/man/input.rst +++ b/DOCS/man/input.rst @@ -1388,7 +1388,7 @@ Property list properties to see whether this was successful. Unlike in mpv 0.9.x and before, this does not return the currently active - hardware decoder. Since mpv 0.17.1, ``hwdec-current`` is available for + hardware decoder. Since mpv 0.18.0, ``hwdec-current`` is available for this purpose. ``hwdec-current`` @@ -1412,13 +1412,13 @@ Property list platform and VO. ``hwdec-active`` - Deprecated. To be removed in mpv 0.19.0. Use ``hwdec-current`` instead. + Deprecated. To be removed in mpv 0.20.0. Use ``hwdec-current`` instead. Return ``yes`` or ``no``, depending on whether any type of hardware decoding is actually in use. ``hwdec-detected`` - Deprecated. To be removed in mpv 0.19.0. + Deprecated. To be removed in mpv 0.20.0. If hardware decoding is active, this returns the hardware decoder in use. Otherwise, it returns either ``no``, or if applicable, the currently loaded -- cgit v1.2.3 From 22c76e85db88a772e3360892cd3a673a89c6fc7a Mon Sep 17 00:00:00 2001 From: wm4 Date: Sat, 25 Jun 2016 12:44:42 +0200 Subject: vo_xv: fix behavior with odd sizes The size check introduced in commit d941a57b did not consider that Xv can round up the image size to the next chroma boundary. Doing that makes sense, so it can't certainly be considered server misbehavior. Do 2 things against this: allow if the server returns a larger image (we just crop it then), and also allocate a properly aligned image in the first place. 
--- video/out/vo_xv.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/video/out/vo_xv.c b/video/out/vo_xv.c index 1e7ae7c4c5..a5a4728085 100644 --- a/video/out/vo_xv.c +++ b/video/out/vo_xv.c @@ -533,6 +533,8 @@ static bool allocate_xvimage(struct vo *vo, int foo) struct vo_x11_state *x11 = vo->x11; // align it for faster OSD rendering (draw_bmp.c swscale usage) int aligned_w = FFALIGN(ctx->image_width, 32); + // round up the height to next chroma boundary too + int aligned_h = FFALIGN(ctx->image_height, 2); #if HAVE_SHM && HAVE_XEXT if (x11->display_is_local && XShmQueryExtension(x11->display)) { ctx->Shmem_Flag = 1; @@ -546,7 +548,7 @@ static bool allocate_xvimage(struct vo *vo, int foo) ctx->xvimage[foo] = (XvImage *) XvShmCreateImage(x11->display, ctx->xv_port, ctx->xv_format, NULL, - aligned_w, ctx->image_height, + aligned_w, aligned_h, &ctx->Shminfo[foo]); if (!ctx->xvimage[foo]) return false; @@ -569,7 +571,7 @@ static bool allocate_xvimage(struct vo *vo, int foo) ctx->xvimage[foo] = (XvImage *) XvCreateImage(x11->display, ctx->xv_port, ctx->xv_format, NULL, aligned_w, - ctx->image_height); + aligned_h); if (!ctx->xvimage[foo]) return false; ctx->xvimage[foo]->data = av_malloc(ctx->xvimage[foo]->data_size); @@ -578,16 +580,16 @@ static bool allocate_xvimage(struct vo *vo, int foo) XSync(x11->display, False); } - if ((ctx->xvimage[foo]->width != aligned_w) || - (ctx->xvimage[foo]->height != ctx->image_height)) { - MP_ERR(vo, "Got XvImage with incorrect size: %ux%u (expected %ux%u)\n", + if ((ctx->xvimage[foo]->width < aligned_w) || + (ctx->xvimage[foo]->height < aligned_h)) { + MP_ERR(vo, "Got XvImage with too small size: %ux%u (expected %ux%u)\n", ctx->xvimage[foo]->width, ctx->xvimage[foo]->height, aligned_w, ctx->image_height); return false; } struct mp_image img = get_xv_buffer(vo, foo); - img.w = aligned_w; + mp_image_set_size(&img, aligned_w, aligned_h); mp_image_clear(&img, 0, 0, img.w, img.h); return true; } -- cgit 
v1.2.3 From e081e4695097ad2769510b639412adfcfe6896c5 Mon Sep 17 00:00:00 2001 From: wm4 Date: Sun, 26 Jun 2016 12:33:16 +0200 Subject: manpage: fix typo --- DOCS/man/options.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DOCS/man/options.rst b/DOCS/man/options.rst index a42d87912b..2651aa018d 100644 --- a/DOCS/man/options.rst +++ b/DOCS/man/options.rst @@ -638,7 +638,7 @@ Video In some cases, RGB conversion is forced, which means the RGB conversion is performed by the hardware decoding API, instead of the OpenGL code used by ``--vo=opengl``. This means certain obscure colorspaces may - not display correctly, and not certain filtering (such as debanding) + not display correctly, not certain filtering (such as debanding) can not be applied in an ideal way. ``vdpau`` is usually safe. If deinterlacing enabled (or the ``vdpaupp`` -- cgit v1.2.3 From 24478a8a72b17d54346fb8d648e53448c3819e9d Mon Sep 17 00:00:00 2001 From: quilloss Date: Sun, 26 Jun 2016 19:08:24 +0800 Subject: vo_opengl utils: use gl->main_fb when reading window content The main framebuffer is not the default framebuffer for the dxinterop backend. Bind the main framebuffer and use the appropriate attachment when reading the window content. Fix #3284 --- video/out/opengl/utils.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/video/out/opengl/utils.c b/video/out/opengl/utils.c index 73b411e66c..8ddae33d8f 100644 --- a/video/out/opengl/utils.c +++ b/video/out/opengl/utils.c @@ -109,8 +109,10 @@ mp_image_t *gl_read_window_contents(GL *gl) mp_image_t *image = mp_image_alloc(IMGFMT_RGB24, vp[2], vp[3]); if (!image) return NULL; + gl->BindFramebuffer(GL_FRAMEBUFFER, gl->main_fb); + GLenum obj = gl->main_fb ? 
GL_COLOR_ATTACHMENT0 : GL_FRONT; gl->PixelStorei(GL_PACK_ALIGNMENT, 1); - gl->ReadBuffer(GL_FRONT); + gl->ReadBuffer(obj); //flip image while reading (and also avoid stride-related trouble) for (int y = 0; y < vp[3]; y++) { gl->ReadPixels(vp[0], vp[1] + vp[3] - y - 1, vp[2], 1, @@ -118,6 +120,7 @@ mp_image_t *gl_read_window_contents(GL *gl) image->planes[0] + y * image->stride[0]); } gl->PixelStorei(GL_PACK_ALIGNMENT, 4); + gl->BindFramebuffer(GL_FRAMEBUFFER, 0); return image; } -- cgit v1.2.3 From c5094206ce6ff1a557540ed6e0d8505bc6db0031 Mon Sep 17 00:00:00 2001 From: stepshal Date: Sun, 26 Jun 2016 01:07:38 +0700 Subject: Fix misspellings --- DOCS/tech-overview.txt | 2 +- audio/filter/af.c | 2 +- audio/out/ao_wasapi.c | 2 +- audio/out/ao_wasapi_changenotify.c | 2 +- misc/bstr.h | 2 +- options/m_option.h | 2 +- osdep/ar/HIDRemote.h | 4 ++-- osdep/ar/HIDRemote.m | 6 +++--- stream/tvi_v4l2.c | 2 +- video/decode/d3d.c | 2 +- video/gpu_memcpy.c | 2 +- video/out/opengl/video.c | 2 +- video/out/vo_direct3d.c | 2 +- video/out/w32_common.c | 2 +- 14 files changed, 17 insertions(+), 17 deletions(-) diff --git a/DOCS/tech-overview.txt b/DOCS/tech-overview.txt index 914b2222b4..e53f00b7be 100644 --- a/DOCS/tech-overview.txt +++ b/DOCS/tech-overview.txt @@ -123,7 +123,7 @@ options/options.h, options/options.c parser-mpcmd.c, and uses the option table in options.c. input/input.c: - This translates keyboard input comming from VOs and other sources (such + This translates keyboard input coming from VOs and other sources (such as remote control devices like Apple IR or client API commands) to the key bindings listed in the user's (or the builtin) input.conf and turns them into items of type struct mp_cmd. These commands are queued, and read diff --git a/audio/filter/af.c b/audio/filter/af.c index ac1b4926d8..084589873f 100644 --- a/audio/filter/af.c +++ b/audio/filter/af.c @@ -557,7 +557,7 @@ void af_destroy(struct af_stream *s) format of the preferred output respectively. 
The function is reentrant i.e. if called with an already initialized stream the stream will be reinitialized. - If one of the prefered output parameters is 0 the one that needs + If one of the preferred output parameters is 0 the one that needs no conversion is used (i.e. the output format in the last filter). The return value is 0 if success and -1 if failure */ int af_init(struct af_stream *s) diff --git a/audio/out/ao_wasapi.c b/audio/out/ao_wasapi.c index ae6bd3d9dc..325a7cf72b 100644 --- a/audio/out/ao_wasapi.c +++ b/audio/out/ao_wasapi.c @@ -132,7 +132,7 @@ static bool thread_feed(struct ao *ao) mp_time_us() + (int64_t)llrint(delay_us)); // note, we can't use ao_read_data return value here since we already - // commited to frame_count above in the GetBuffer call + // committed to frame_count above in the GetBuffer call hr = IAudioRenderClient_ReleaseBuffer(state->pRenderClient, frame_count, 0); EXIT_ON_ERROR(hr); diff --git a/audio/out/ao_wasapi_changenotify.c b/audio/out/ao_wasapi_changenotify.c index e3ca4e4936..46843ec5e3 100644 --- a/audio/out/ao_wasapi_changenotify.c +++ b/audio/out/ao_wasapi_changenotify.c @@ -95,7 +95,7 @@ static HRESULT STDMETHODCALLTYPE sIMMNotificationClient_OnDeviceAdded( return S_OK; } -// maybe MPV can go over to the prefered device once it is plugged in? +// maybe MPV can go over to the preferred device once it is plugged in? static HRESULT STDMETHODCALLTYPE sIMMNotificationClient_OnDeviceRemoved( IMMNotificationClient *This, LPCWSTR pwstrDeviceId) diff --git a/misc/bstr.h b/misc/bstr.h index 2785520b87..4aba35e965 100644 --- a/misc/bstr.h +++ b/misc/bstr.h @@ -116,7 +116,7 @@ int bstr_validate_utf8(struct bstr s); // talloc, with talloc_ctx as parent. struct bstr bstr_sanitize_utf8_latin1(void *talloc_ctx, struct bstr s); -// Return the text before the occurance of a character, and return it. Change +// Return the text before the occurrence of a character, and return it. 
Change // *rest to point to the text following this character. (rest can be NULL.) struct bstr bstr_splitchar(struct bstr str, struct bstr *rest, const char c); diff --git a/options/m_option.h b/options/m_option.h index 7e6550691a..4066a36d81 100644 --- a/options/m_option.h +++ b/options/m_option.h @@ -394,7 +394,7 @@ struct m_option { // Dynamic data type. /** This flag indicates that the data is dynamically allocated (m_option::p - * points to a pointer). It enables a little hack in the \ref Config wich + * points to a pointer). It enables a little hack in the \ref Config which * replaces the initial value of such variables with a dynamic copy in case * the initial value is statically allocated (pretty common with strings). */ diff --git a/osdep/ar/HIDRemote.h b/osdep/ar/HIDRemote.h index 9ea01d1fc2..35db408b40 100644 --- a/osdep/ar/HIDRemote.h +++ b/osdep/ar/HIDRemote.h @@ -74,7 +74,7 @@ typedef enum { kHIDRemoteModeNone = 0L, - kHIDRemoteModeShared, // Share the remote with others - let's you listen to the remote control events as long as noone has an exclusive lock on it + kHIDRemoteModeShared, // Share the remote with others - let's you listen to the remote control events as long as no one has an exclusive lock on it // (RECOMMENDED ONLY FOR SPECIAL PURPOSES) kHIDRemoteModeExclusive, // Try to acquire an exclusive lock on the remote (NOT RECOMMENDED) @@ -182,7 +182,7 @@ typedef enum - (BOOL)hidRemote:(HIDRemote *)hidRemote // Invoked when new hardware is inspected inspectNewHardwareWithService:(io_service_t)service // prematchResult:(BOOL)prematchResult; // Return YES if HIDRemote should go on with this hardware and try - // to use it, or NO if it should not be persued further. + // to use it, or NO if it should not be pursued further. 
// Exlusive lock lending - (BOOL)hidRemote:(HIDRemote *)hidRemote diff --git a/osdep/ar/HIDRemote.m b/osdep/ar/HIDRemote.m index f62289e23c..47e35f46fd 100644 --- a/osdep/ar/HIDRemote.m +++ b/osdep/ar/HIDRemote.m @@ -293,7 +293,7 @@ static HIDRemote *sHIDRemote = nil; }while(0); - // An error occured. Do necessary clean up. + // An error occurred. Do necessary clean up. if (matchDict!=NULL) { CFRelease(matchDict); @@ -1422,7 +1422,7 @@ static HIDRemote *sHIDRemote = nil; [((NSObject *)[self delegate]) hidRemote:self failedNewHardwareWithError:error]; } - // An error occured or this device is not of interest .. cleanup .. + // An error occurred or this device is not of interest .. cleanup .. if (serviceNotification!=0) { IOObjectRelease(serviceNotification); @@ -1615,7 +1615,7 @@ static HIDRemote *sHIDRemote = nil; switch (buttonCode) { case kHIDRemoteButtonCodeIDChanged: - // Do nothing, this is handled seperately + // Do nothing, this is handled separately break; case kHIDRemoteButtonCodeUp: diff --git a/stream/tvi_v4l2.c b/stream/tvi_v4l2.c index f882818a59..91c810ad23 100644 --- a/stream/tvi_v4l2.c +++ b/stream/tvi_v4l2.c @@ -1352,7 +1352,7 @@ static int start(priv_t *priv) if (priv->map[i].buf.flags & V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC) MP_WARN(priv, "compiled without clock_gettime() that is needed to handle monotone video timestamps from the kernel. 
Expect desync.\n"); #endif - /* count up to make sure this is correct everytime */ + /* count up to make sure this is correct every time */ priv->mapcount++; if (v4l2_ioctl(priv->video_fd, VIDIOC_QBUF, &(priv->map[i].buf)) < 0) { diff --git a/video/decode/d3d.c b/video/decode/d3d.c index b978472723..59d2a81664 100644 --- a/video/decode/d3d.c +++ b/video/decode/d3d.c @@ -68,7 +68,7 @@ struct d3dva_mode { #define MODE2(id) &MP_CONCAT(DXVA2_Mode, id), # id #define MODE(id) &MP_CONCAT(DXVA_, id), # id -// Prefered modes must come first +// Preferred modes must come first static const struct d3dva_mode d3dva_modes[] = { // MPEG-1/2 {MODE2(MPEG2_VLD), AV_CODEC_ID_MPEG2VIDEO, PROF_MPEG2_MAIN}, diff --git a/video/gpu_memcpy.c b/video/gpu_memcpy.c index 355da0e2a2..542fbc8b50 100644 --- a/video/gpu_memcpy.c +++ b/video/gpu_memcpy.c @@ -83,7 +83,7 @@ void *gpu_memcpy(void *restrict d, const void *restrict s, size_t size) xmm15 = _mm_stream_load_si128(pSrc + 15); #endif pSrc += regsInLoop; - // _mm_store_si128 emit the SSE2 intruction MOVDQA (aligned store) + // _mm_store_si128 emit the SSE2 instruction MOVDQA (aligned store) _mm_store_si128(pTrg , xmm0); _mm_store_si128(pTrg + 1, xmm1); _mm_store_si128(pTrg + 2, xmm2); diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index f46fdc1c9f..4387208ead 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2564,7 +2564,7 @@ static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t, if (t->still) gl_video_reset_surfaces(p); - // First of all, figure out if we have a frame availble at all, and draw + // First of all, figure out if we have a frame available at all, and draw // it manually + reset the queue if not if (p->surfaces[p->surface_now].pts == MP_NOPTS_VALUE) { if (!gl_video_upload_image(p, t->current)) diff --git a/video/out/vo_direct3d.c b/video/out/vo_direct3d.c index 5190095621..ed7b1dd62a 100644 --- a/video/out/vo_direct3d.c +++ b/video/out/vo_direct3d.c @@ -822,7 
+822,7 @@ static bool resize_d3d(d3d_priv *priv) MP_VERBOSE(priv, "resize_d3d %dx%d called.\n", priv->vo->dwidth, priv->vo->dheight); - /* Make sure that backbuffer is large enough to accomodate the new + /* Make sure that backbuffer is large enough to accommodate the new viewport dimensions. Grow it if necessary. */ bool backbuf_resize = priv->vo->dwidth > priv->cur_backbuf_width || diff --git a/video/out/w32_common.c b/video/out/w32_common.c index f3b59f183f..c29fc51dc8 100644 --- a/video/out/w32_common.c +++ b/video/out/w32_common.c @@ -1182,7 +1182,7 @@ static void gui_thread_reconfig(void *ptr) } // Recenter window around old position on new video size - // excluding the case when initial positon handled by win_state. + // excluding the case when initial position handled by win_state. if (!pos_init) { w32->window_x += w32->dw / 2 - vo->dwidth / 2; w32->window_y += w32->dh / 2 - vo->dheight / 2; -- cgit v1.2.3 From acb74236ac9e48ccc653207a22428d3811b0a2cd Mon Sep 17 00:00:00 2001 From: Rudolf Polzer Date: Fri, 24 Jun 2016 14:20:32 -0400 Subject: ao_lavc, vo_lavc: Migrate to new encoding API. Also marked some places for possible later refactoring, as they became quite similar in this commit. 
--- audio/out/ao_lavc.c | 197 ++++++++++++++++++++++++++++++++-------------------- video/out/vo_lavc.c | 189 ++++++++++++++++++++++++++----------------------- 2 files changed, 223 insertions(+), 163 deletions(-) diff --git a/audio/out/ao_lavc.c b/audio/out/ao_lavc.c index 572874d27c..6b4279ca87 100644 --- a/audio/out/ao_lavc.c +++ b/audio/out/ao_lavc.c @@ -39,8 +39,6 @@ #include "common/encode_lavc.h" struct priv { - uint8_t *buffer; - size_t buffer_size; AVStream *stream; AVCodecContext *codec; int pcmhack; @@ -146,18 +144,10 @@ static int init(struct ao *ao) if (ac->codec->frame_size <= 1) ac->pcmhack = av_get_bits_per_sample(ac->codec->codec_id) / 8; - if (ac->pcmhack) { + if (ac->pcmhack) ac->aframesize = 16384; // "enough" - ac->buffer_size = - ac->aframesize * ac->pcmhack * ao->channels.num * 2 + 200; - } else { + else ac->aframesize = ac->codec->frame_size; - ac->buffer_size = - ac->aframesize * ac->sample_size * ao->channels.num * 2 + 200; - } - if (ac->buffer_size < FF_MIN_BUFFER_SIZE) - ac->buffer_size = FF_MIN_BUFFER_SIZE; - ac->buffer = talloc_size(ac, ac->buffer_size); // enough frames for at least 0.25 seconds ac->framecount = ceil(ao->samplerate * 0.25 / ac->aframesize); @@ -182,7 +172,7 @@ fail: } // close audio device -static int encode(struct ao *ao, double apts, void **data); +static void encode(struct ao *ao, double apts, void **data); static void uninit(struct ao *ao) { struct priv *ac = ao->priv; @@ -199,12 +189,12 @@ static void uninit(struct ao *ao) return; } - if (ac->buffer) { + if (ac->stream) { double outpts = ac->expected_next_pts; if (!ectx->options->rawts && ectx->options->copyts) outpts += ectx->discontinuity_pts_offset; outpts += encode_lavc_getoffset(ectx, ac->codec); - while (encode(ao, outpts, NULL) > 0) ; + encode(ao, outpts, NULL); } pthread_mutex_unlock(&ectx->lock); @@ -220,24 +210,130 @@ static int get_space(struct ao *ao) return ac->aframesize * ac->framecount; } +static void write_packet(struct ao *ao, AVPacket *packet) +{ 
+ // TODO: Can we unify this with the equivalent video code path? + struct priv *ac = ao->priv; + + packet->stream_index = ac->stream->index; + if (packet->pts != AV_NOPTS_VALUE) { + packet->pts = av_rescale_q(packet->pts, + ac->codec->time_base, + ac->stream->time_base); + } else { + // Do we need this at all? Better be safe than sorry... + MP_WARN(ao, "encoder lost pts, why?\n"); + if (ac->savepts != MP_NOPTS_VALUE) { + packet->pts = av_rescale_q(ac->savepts, + ac->codec->time_base, + ac->stream->time_base); + } + } + if (packet->dts != AV_NOPTS_VALUE) { + packet->dts = av_rescale_q(packet->dts, + ac->codec->time_base, + ac->stream->time_base); + } + if (packet->duration > 0) { + packet->duration = av_rescale_q(packet->duration, + ac->codec->time_base, + ac->stream->time_base); + } + + ac->savepts = AV_NOPTS_VALUE; + + if (encode_lavc_write_frame(ao->encode_lavc_ctx, + ac->stream, packet) < 0) { + MP_ERR(ao, "error writing at %d %d/%d\n", + (int) packet->pts, + ac->stream->time_base.num, + ac->stream->time_base.den); + return; + } +} + +static void encode_audio_and_write(struct ao *ao, AVFrame *frame) +{ + // TODO: Can we unify this with the equivalent video code path? + struct priv *ac = ao->priv; + AVPacket packet = {0}; + +#if HAVE_AVCODEC_NEW_CODEC_API + int status = avcodec_send_frame(ac->codec, frame); + if (status < 0) { + MP_ERR(ao, "error encoding at %d %d/%d\n", + frame ? (int) frame->pts : -1, + ac->codec->time_base.num, + ac->codec->time_base.den); + return; + } + for (;;) { + av_init_packet(&packet); + status = avcodec_receive_packet(ac->codec, &packet); + if (status == AVERROR(EAGAIN)) { // No more packets for now. + if (frame == NULL) { + MP_ERR(ao, "sent flush frame, got EAGAIN"); + } + break; + } + if (status == AVERROR_EOF) { // No more packets, ever. + if (frame != NULL) { + MP_ERR(ao, "sent audio frame, got EOF"); + } + break; + } + if (status < 0) { + MP_ERR(ao, "error encoding at %d %d/%d\n", + frame ? 
(int) frame->pts : -1, + ac->codec->time_base.num, + ac->codec->time_base.den); + break; + } + if (frame) { + if (ac->savepts == AV_NOPTS_VALUE) + ac->savepts = frame->pts; + } + encode_lavc_write_stats(ao->encode_lavc_ctx, ac->codec); + write_packet(ao, &packet); + av_packet_unref(&packet); + } +#else + av_init_packet(&packet); + int got_packet = 0; + int status = avcodec_encode_audio2(ac->codec, &packet, frame, &got_packet); + if (status < 0) { + MP_ERR(ao, "error encoding at %d %d/%d\n", + frame ? (int) frame->pts : -1, + ac->codec->time_base.num, + ac->codec->time_base.den); + return; + } + if (!got_packet) { + return; + } + if (frame) { + if (ac->savepts == AV_NOPTS_VALUE) + ac->savepts = frame->pts; + } + encode_lavc_write_stats(ao->encode_lavc_ctx, ac->codec); + write_packet(ao, &packet); + av_packet_unref(&packet); +#endif +} + // must get exactly ac->aframesize amount of data -static int encode(struct ao *ao, double apts, void **data) +static void encode(struct ao *ao, double apts, void **data) { - AVPacket packet; struct priv *ac = ao->priv; struct encode_lavc_context *ectx = ao->encode_lavc_ctx; double realapts = ac->aframecount * (double) ac->aframesize / ao->samplerate; - int status, gotpacket; ac->aframecount++; if (data) ectx->audio_pts_offset = realapts - apts; - av_init_packet(&packet); - packet.data = ac->buffer; - packet.size = ac->buffer_size; if(data) { AVFrame *frame = av_frame_alloc(); frame->format = af_to_avformat(ao->format); @@ -270,64 +366,11 @@ static int encode(struct ao *ao, double apts, void **data) ac->lastpts = frame_pts; frame->quality = ac->codec->global_quality; - status = avcodec_encode_audio2(ac->codec, &packet, frame, &gotpacket); - - if (!status) { - if (ac->savepts == AV_NOPTS_VALUE) - ac->savepts = frame->pts; - } - + encode_audio_and_write(ao, frame); av_frame_free(&frame); } else - { - status = avcodec_encode_audio2(ac->codec, &packet, NULL, &gotpacket); - } - - if(status) { - MP_ERR(ao, "error encoding\n"); - return -1; 
- } - - if(!gotpacket) - return 0; - - MP_DBG(ao, "got pts %f (playback time: %f); out size: %d\n", - apts, realapts, packet.size); - - encode_lavc_write_stats(ao->encode_lavc_ctx, ac->codec); - - packet.stream_index = ac->stream->index; - - // Do we need this at all? Better be safe than sorry... - if (packet.pts == AV_NOPTS_VALUE) { - MP_WARN(ao, "encoder lost pts, why?\n"); - if (ac->savepts != MP_NOPTS_VALUE) - packet.pts = ac->savepts; - } - - if (packet.pts != AV_NOPTS_VALUE) - packet.pts = av_rescale_q(packet.pts, ac->codec->time_base, - ac->stream->time_base); - - if (packet.dts != AV_NOPTS_VALUE) - packet.dts = av_rescale_q(packet.dts, ac->codec->time_base, - ac->stream->time_base); - - if(packet.duration > 0) - packet.duration = av_rescale_q(packet.duration, ac->codec->time_base, - ac->stream->time_base); - - ac->savepts = AV_NOPTS_VALUE; - - if (encode_lavc_write_frame(ao->encode_lavc_ctx, ac->stream, &packet) < 0) { - MP_ERR(ao, "error writing at %f %f/%f\n", - realapts, (double) ac->stream->time_base.num, - (double) ac->stream->time_base.den); - return -1; - } - - return packet.size; + encode_audio_and_write(ao, NULL); } // this should round samples down to frame sizes @@ -492,3 +535,5 @@ const struct ao_driver audio_out_lavc = { .play = play, .drain = drain, }; + +// vim: sw=4 ts=4 et tw=80 diff --git a/video/out/vo_lavc.c b/video/out/vo_lavc.c index 188a575f45..7003bd17bd 100644 --- a/video/out/vo_lavc.c +++ b/video/out/vo_lavc.c @@ -22,6 +22,8 @@ #include #include + +#include "config.h" #include "common/common.h" #include "options/options.h" #include "video/fmt-conversion.h" @@ -34,8 +36,6 @@ #include "sub/osd.h" struct priv { - uint8_t *buffer; - size_t buffer_size; AVStream *stream; AVCodecContext *codec; int have_first_packet; @@ -161,14 +161,6 @@ static int reconfig(struct vo *vo, struct mp_image_params *params) if (encode_lavc_open_codec(vo->encode_lavc_ctx, vc->codec) < 0) goto error; - vc->buffer_size = 6 * width * height + 200; - if 
(vc->buffer_size < FF_MIN_BUFFER_SIZE) - vc->buffer_size = FF_MIN_BUFFER_SIZE; - if (vc->buffer_size < sizeof(AVPicture)) - vc->buffer_size = sizeof(AVPicture); - - vc->buffer = talloc_size(vc, vc->buffer_size); - done: pthread_mutex_unlock(&vo->encode_lavc_ctx->lock); return 0; @@ -194,82 +186,120 @@ static int query_format(struct vo *vo, int format) return flags; } -static void write_packet(struct vo *vo, int size, AVPacket *packet) +static void write_packet(struct vo *vo, AVPacket *packet) { struct priv *vc = vo->priv; - if (size < 0) { - MP_ERR(vo, "error encoding\n"); - return; + packet->stream_index = vc->stream->index; + if (packet->pts != AV_NOPTS_VALUE) { + packet->pts = av_rescale_q(packet->pts, + vc->codec->time_base, + vc->stream->time_base); + } else { + MP_VERBOSE(vo, "codec did not provide pts\n"); + packet->pts = av_rescale_q(vc->lastipts, + vc->worst_time_base, + vc->stream->time_base); + } + if (packet->dts != AV_NOPTS_VALUE) { + packet->dts = av_rescale_q(packet->dts, + vc->codec->time_base, + vc->stream->time_base); + } + if (packet->duration > 0) { + packet->duration = av_rescale_q(packet->duration, + vc->codec->time_base, + vc->stream->time_base); + } else { + // HACK: libavformat calculates dts wrong if the initial packet + // duration is not set, but ONLY if the time base is "high" and if we + // have b-frames! 
+ if (!packet->duration) + if (!vc->have_first_packet) + if (vc->codec->has_b_frames + || vc->codec->max_b_frames) + if (vc->stream->time_base.num * 1000LL <= + vc->stream->time_base.den) + packet->duration = FFMAX(1, av_rescale_q(1, + vc->codec->time_base, vc->stream->time_base)); } - if (size > 0) { - packet->stream_index = vc->stream->index; - if (packet->pts != AV_NOPTS_VALUE) { - packet->pts = av_rescale_q(packet->pts, - vc->codec->time_base, - vc->stream->time_base); - } else { - MP_VERBOSE(vo, "codec did not provide pts\n"); - packet->pts = av_rescale_q(vc->lastipts, vc->worst_time_base, - vc->stream->time_base); - } - if (packet->dts != AV_NOPTS_VALUE) { - packet->dts = av_rescale_q(packet->dts, - vc->codec->time_base, - vc->stream->time_base); - } - if (packet->duration > 0) { - packet->duration = av_rescale_q(packet->duration, - vc->codec->time_base, - vc->stream->time_base); - } else { - // HACK: libavformat calculates dts wrong if the initial packet - // duration is not set, but ONLY if the time base is "high" and if we - // have b-frames! 
- if (!packet->duration) - if (!vc->have_first_packet) - if (vc->codec->has_b_frames - || vc->codec->max_b_frames) - if (vc->stream->time_base.num * 1000LL <= - vc->stream->time_base.den) - packet->duration = FFMAX(1, av_rescale_q(1, - vc->codec->time_base, vc->stream->time_base)); - } - - if (encode_lavc_write_frame(vo->encode_lavc_ctx, - vc->stream, packet) < 0) { - MP_ERR(vo, "error writing\n"); - return; - } - - vc->have_first_packet = 1; + if (encode_lavc_write_frame(vo->encode_lavc_ctx, + vc->stream, packet) < 0) { + MP_ERR(vo, "error writing at %d %d/%d\n", + (int) packet->pts, + vc->stream->time_base.num, + vc->stream->time_base.den); + return; } + + vc->have_first_packet = 1; } -static int encode_video(struct vo *vo, AVFrame *frame, AVPacket *packet) +static void encode_video_and_write(struct vo *vo, AVFrame *frame) { struct priv *vc = vo->priv; - int got_packet = 0; - int status = avcodec_encode_video2(vc->codec, packet, - frame, &got_packet); - int size = (status < 0) ? status : got_packet ? packet->size : 0; - - if (frame) - MP_DBG(vo, "got pts %f; out size: %d\n", - frame->pts * (double) vc->codec->time_base.num / - (double) vc->codec->time_base.den, size); - - if (got_packet) + AVPacket packet = {0}; + +#if HAVE_AVCODEC_NEW_CODEC_API + int status = avcodec_send_frame(vc->codec, frame); + if (status < 0) { + MP_ERR(vo, "error encoding at %d %d/%d\n", + frame ? (int) frame->pts : -1, + vc->codec->time_base.num, + vc->codec->time_base.den); + return; + } + for (;;) { + av_init_packet(&packet); + status = avcodec_receive_packet(vc->codec, &packet); + if (status == AVERROR(EAGAIN)) { // No more packets for now. + if (frame == NULL) { + MP_ERR(vo, "sent flush frame, got EAGAIN"); + } + break; + } + if (status == AVERROR_EOF) { // No more packets, ever. + if (frame != NULL) { + MP_ERR(vo, "sent image frame, got EOF"); + } + break; + } + if (status < 0) { + MP_ERR(vo, "error encoding at %d %d/%d\n", + frame ? 
(int) frame->pts : -1, + vc->codec->time_base.num, + vc->codec->time_base.den); + break; + } encode_lavc_write_stats(vo->encode_lavc_ctx, vc->codec); - return size; + write_packet(vo, &packet); + av_packet_unref(&packet); + } +#else + av_init_packet(&packet); + int got_packet = 0; + int status = avcodec_encode_video2(vc->codec, &packet, frame, &got_packet); + if (status < 0) { + MP_ERR(vo, "error encoding at %d %d/%d\n", + frame ? (int) frame->pts : -1, + vc->codec->time_base.num, + vc->codec->time_base.den); + return; + } + if (!got_packet) { + return; + } + encode_lavc_write_stats(vo->encode_lavc_ctx, vc->codec); + write_packet(vo, &packet); + av_packet_unref(&packet); +#endif } static void draw_image_unlocked(struct vo *vo, mp_image_t *mpi) { struct priv *vc = vo->priv; struct encode_lavc_context *ectx = vo->encode_lavc_ctx; - int size; AVCodecContext *avc; int64_t frameipts; double nextpts; @@ -398,7 +428,6 @@ static void draw_image_unlocked(struct vo *vo, mp_image_t *mpi) // we have a valid image in lastimg while (vc->lastimg && vc->lastipts < frameipts) { int64_t thisduration = vc->harddup ? 1 : (frameipts - vc->lastipts); - AVPacket packet; // we will ONLY encode this frame if it can be encoded at at least // vc->mindeltapts after the last encoded frame! 
@@ -417,20 +446,13 @@ static void draw_image_unlocked(struct vo *vo, mp_image_t *mpi) // this is a nop, unless the worst time base is the STREAM time base frame->pts = av_rescale_q(vc->lastipts + skipframes, vc->worst_time_base, avc->time_base); - frame->pict_type = 0; // keep this at unknown/undefined - frame->quality = avc->global_quality; + encode_video_and_write(vo, frame); + av_frame_free(&frame); - av_init_packet(&packet); - packet.data = vc->buffer; - packet.size = vc->buffer_size; - size = encode_video(vo, frame, &packet); - write_packet(vo, size, &packet); ++vc->lastdisplaycount; vc->lastencodedipts = vc->lastipts + skipframes; - - av_frame_free(&frame); } vc->lastipts += thisduration; @@ -439,14 +461,7 @@ static void draw_image_unlocked(struct vo *vo, mp_image_t *mpi) if (!mpi) { // finish encoding - do { - AVPacket packet; - av_init_packet(&packet); - packet.data = vc->buffer; - packet.size = vc->buffer_size; - size = encode_video(vo, NULL, &packet); - write_packet(vo, size, &packet); - } while (size > 0); + encode_video_and_write(vo, NULL); } else { if (frameipts >= vc->lastframeipts) { if (vc->lastframeipts != AV_NOPTS_VALUE && vc->lastdisplaycount != 1) -- cgit v1.2.3 From 3e58ce96acaec14adb840875c10b2b543be0b1e3 Mon Sep 17 00:00:00 2001 From: wm4 Date: Mon, 27 Jun 2016 15:00:20 +0200 Subject: dec_audio: fix segment boundary switching Some bugs in this code are exposed by e.g. playing lossless audio files with --ad-lavc-threads=16. (libavcodec doesn't really support threaded audio decoding, except for lossless files.) In these cases, a major amount of audio can be buffered, which makes incorrect handling of this buffering obvious. For one, draining the decoder can take a while, so if there's a new segment, we shouldn't read audio. The segment end check was completely wrong, and used the start value. 
--- audio/decode/dec_audio.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/audio/decode/dec_audio.c b/audio/decode/dec_audio.c index e60ebe370f..d455770a74 100644 --- a/audio/decode/dec_audio.c +++ b/audio/decode/dec_audio.c @@ -200,7 +200,9 @@ void audio_work(struct dec_audio *da) if (da->current_frame) return; - if (!da->packet && demux_read_packet_async(da->header, &da->packet) == 0) { + if (!da->packet && !da->new_segment && + demux_read_packet_async(da->header, &da->packet) == 0) + { da->current_state = DATA_WAIT; return; } @@ -211,6 +213,7 @@ void audio_work(struct dec_audio *da) da->packet = NULL; } + bool had_input_packet = !!da->packet; bool had_packet = da->packet || da->new_segment; int ret = da->ad_driver->decode_packet(da, da->packet, &da->current_frame); @@ -233,12 +236,12 @@ void audio_work(struct dec_audio *da) fix_audio_pts(da); - bool segment_end = true; + bool segment_end = !da->current_frame && !had_input_packet; if (da->current_frame) { mp_audio_clip_timestamps(da->current_frame, da->start, da->end); if (da->current_frame->pts != MP_NOPTS_VALUE && da->start != MP_NOPTS_VALUE) - segment_end = da->current_frame->pts >= da->start; + segment_end = da->current_frame->pts >= da->end; if (da->current_frame->samples == 0) { talloc_free(da->current_frame); da->current_frame = NULL; -- cgit v1.2.3 From 4ce53025cb475408bfb27a56a57322d9d0c48a4f Mon Sep 17 00:00:00 2001 From: wm4 Date: Mon, 27 Jun 2016 15:02:41 +0200 Subject: audio: add a helper for getting frame end PTS Although I don't see any use for it yet, why not. 
--- audio/audio.c | 12 ++++++++++-- audio/audio.h | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/audio/audio.c b/audio/audio.c index ae85a4bf08..306401b5a4 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -257,13 +257,21 @@ void mp_audio_skip_samples(struct mp_audio *data, int samples) data->pts += samples / (double)data->rate; } +// Return the timestamp of the sample just after the end of this frame. +double mp_audio_end_pts(struct mp_audio *f) +{ + if (f->pts == MP_NOPTS_VALUE || f->rate < 1) + return MP_NOPTS_VALUE; + return f->pts + f->samples / (double)f->rate; +} + // Clip the given frame to the given timestamp range. Adjusts the frame size // and timestamp. void mp_audio_clip_timestamps(struct mp_audio *f, double start, double end) { - if (f->pts == MP_NOPTS_VALUE || f->rate < 1) + double f_end = mp_audio_end_pts(f); + if (f_end == MP_NOPTS_VALUE) return; - double f_end = f->pts + f->samples / (double)f->rate; if (end != MP_NOPTS_VALUE) { if (f_end >= end) { if (f->pts >= end) { diff --git a/audio/audio.h b/audio/audio.h index c469f7a21e..e126e93b66 100644 --- a/audio/audio.h +++ b/audio/audio.h @@ -73,6 +73,7 @@ void mp_audio_copy(struct mp_audio *dst, int dst_offset, void mp_audio_copy_attributes(struct mp_audio *dst, struct mp_audio *src); void mp_audio_skip_samples(struct mp_audio *data, int samples); void mp_audio_clip_timestamps(struct mp_audio *f, double start, double end); +double mp_audio_end_pts(struct mp_audio *data); bool mp_audio_is_writeable(struct mp_audio *data); int mp_audio_make_writeable(struct mp_audio *data); -- cgit v1.2.3 From 9278ce98f7198e61116043fe69f885bfca54ec1c Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 26 Jun 2016 19:04:36 +0200 Subject: vo_opengl: implement ARIB STD-B68 (HLG) HDR TRC This HDR function is unique in that it's still display-referred, it just allows for values above the reference peak (super-highlights). 
The official standard doesn't actually document this very well, but the nominal peak turns out to be exactly 12.0 - so we normalize to this value internally in mpv. (This lets us preserve the property that the textures are encoded in the range [0,1], preventing clipping and making the best use of an integer texture's range) This was grouped together with SMPTE ST2084 when checking libavutil compatibility since they were added in the same release window, in a similar timeframe. --- DOCS/man/vf.rst | 1 + DOCS/man/vo.rst | 2 ++ video/csputils.c | 7 +++++-- video/csputils.h | 1 + video/out/opengl/video.c | 18 +++++++++++++----- video/out/opengl/video_shaders.c | 23 +++++++++++++++++++++++ wscript | 7 ++++--- 7 files changed, 49 insertions(+), 10 deletions(-) diff --git a/DOCS/man/vf.rst b/DOCS/man/vf.rst index b4e4438f78..7f60da9385 100644 --- a/DOCS/man/vf.rst +++ b/DOCS/man/vf.rst @@ -311,6 +311,7 @@ Available filters are: :gamma2.8: Pure power curve (gamma 2.8) :prophoto: ProPhoto RGB (ROMM) curve :st2084: SMPTE ST2084 (HDR) curve + :std-b67: ARIB STD-B67 (Hybrid Log-gamma) curve ```` Reference peak illumination for the video file. This is mostly diff --git a/DOCS/man/vo.rst b/DOCS/man/vo.rst index bdc317fc8f..a4d42e48c3 100644 --- a/DOCS/man/vo.rst +++ b/DOCS/man/vo.rst @@ -1003,6 +1003,8 @@ Available video output drivers are: ProPhoto RGB (ROMM) st2084 SMPTE ST2084 (HDR) curve, PQ OETF + std-b67 + ARIB STD-B67 (Hybrid Log-gamma) curve, also known as BBC/NHK HDR ``target-brightness=<1..100000>`` Specifies the display's approximate brightness in cd/m^2. 
When playing diff --git a/video/csputils.c b/video/csputils.c index ffa1f82a6d..e9e6772ac8 100644 --- a/video/csputils.c +++ b/video/csputils.c @@ -78,6 +78,7 @@ const struct m_opt_choice_alternatives mp_csp_trc_names[] = { {"gamma2.8", MP_CSP_TRC_GAMMA28}, {"prophoto", MP_CSP_TRC_PRO_PHOTO}, {"st2084", MP_CSP_TRC_SMPTE_ST2084}, + {"std-b67", MP_CSP_TRC_ARIB_STD_B67}, {0} }; @@ -171,8 +172,9 @@ enum mp_csp_trc avcol_trc_to_mp_csp_trc(int avtrc) case AVCOL_TRC_LINEAR: return MP_CSP_TRC_LINEAR; case AVCOL_TRC_GAMMA22: return MP_CSP_TRC_GAMMA22; case AVCOL_TRC_GAMMA28: return MP_CSP_TRC_GAMMA28; -#if HAVE_AVUTIL_ST2084 +#if HAVE_AVUTIL_HDR case AVCOL_TRC_SMPTEST2084: return MP_CSP_TRC_SMPTE_ST2084; + case AVCOL_TRC_ARIB_STD_B67: return MP_CSP_TRC_ARIB_STD_B67; #endif default: return MP_CSP_TRC_AUTO; } @@ -222,8 +224,9 @@ int mp_csp_trc_to_avcol_trc(enum mp_csp_trc trc) case MP_CSP_TRC_LINEAR: return AVCOL_TRC_LINEAR; case MP_CSP_TRC_GAMMA22: return AVCOL_TRC_GAMMA22; case MP_CSP_TRC_GAMMA28: return AVCOL_TRC_GAMMA28; -#if HAVE_AVUTIL_ST2084 +#if HAVE_AVUTIL_HDR case MP_CSP_TRC_SMPTE_ST2084: return AVCOL_TRC_SMPTEST2084; + case MP_CSP_TRC_ARIB_STD_B67: return AVCOL_TRC_ARIB_STD_B67; #endif default: return AVCOL_TRC_UNSPECIFIED; } diff --git a/video/csputils.h b/video/csputils.h index 19dd88f145..f64c42e6c8 100644 --- a/video/csputils.h +++ b/video/csputils.h @@ -79,6 +79,7 @@ enum mp_csp_trc { MP_CSP_TRC_GAMMA28, MP_CSP_TRC_PRO_PHOTO, MP_CSP_TRC_SMPTE_ST2084, + MP_CSP_TRC_ARIB_STD_B67, MP_CSP_TRC_COUNT }; diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 4387208ead..6fd92ddb29 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2172,12 +2172,15 @@ static void pass_colormanage(struct gl_video *p, float peak_src, enum mp_csp_prim prim_orig = p->image_params.primaries; enum mp_csp_trc trc_orig = p->image_params.gamma; - // One exception: SMPTE ST.2084 is not implemented by LittleCMS - // for technical limitation reasons, so we use 
a gamma 2.2 input curve - // here instead. We could pick any value we want here, the difference - // is just coding efficiency. - if (trc_orig == MP_CSP_TRC_SMPTE_ST2084) + // One exception: HDR is not implemented by LittleCMS for technical + // limitation reasons, so we use a gamma 2.2 input curve here instead. + // We could pick any value we want here, the difference is just coding + // efficiency. + if (trc_orig == MP_CSP_TRC_SMPTE_ST2084 || + trc_orig == MP_CSP_TRC_ARIB_STD_B67) + { trc_orig = MP_CSP_TRC_GAMMA22; + } if (gl_video_get_lut3d(p, prim_orig, trc_orig)) { prim_dst = prim_orig; @@ -2216,6 +2219,11 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // If the source has no information known, it's display-referred // (and should be treated relative to the specified desired peak_dst) peak_src = peak_dst; + + // Exception: ARIB STD-B67's nominal peak is exactly 12 times the + // target's reference peak + if (trc_src == MP_CSP_TRC_ARIB_STD_B67) + peak_src = 12 * peak_dst; } // All operations from here on require linear light as a starting point, diff --git a/video/out/opengl/video_shaders.c b/video/out/opengl/video_shaders.c index 1f37f4fed1..4a15b6ceed 100644 --- a/video/out/opengl/video_shaders.c +++ b/video/out/opengl/video_shaders.c @@ -227,6 +227,11 @@ static const float HDR_M1 = 2610./4096 * 1./4, HDR_C2 = 2413./4096 * 32, HDR_C3 = 2392./4096 * 32; +// Common constants for ARIB STD-B67 (Hybrid Log-gamma) +static const float B67_A = 0.17883277, + B67_B = 0.28466892, + B67_C = 0.55991073; + // Linearize (expand), given a TRC as input void pass_linearize(struct gl_shader_cache *sc, enum mp_csp_trc trc) { @@ -265,6 +270,17 @@ void pass_linearize(struct gl_shader_cache *sc, enum mp_csp_trc trc) HDR_C1, HDR_C2, HDR_C3); GLSLF("color.rgb = pow(color.rgb, vec3(1.0/%f));\n", HDR_M1); break; + case MP_CSP_TRC_ARIB_STD_B67: + GLSLF("color.rgb = mix(vec3(4.0) * color.rgb * color.rgb,\n" + " exp((color.rgb - vec3(%f)) / vec3(%f)) + vec3(%f),\n" + 
" lessThan(vec3(0.5), color.rgb));\n", + B67_C, B67_A, B67_B); + // Since the ARIB function's signal value of 1.0 corresponds to + // a peak of 12.0, we need to renormalize to prevent GL textures + // from clipping. (In general, mpv's internal conversions always + // assume 1.0 is the maximum brightness, not the reference peak) + GLSL(color.rgb /= vec3(12.0);) + break; default: abort(); } @@ -308,6 +324,13 @@ void pass_delinearize(struct gl_shader_cache *sc, enum mp_csp_trc trc) HDR_C1, HDR_C2, HDR_C3); GLSLF("color.rgb = pow(color.rgb, vec3(%f));\n", HDR_M2); break; + case MP_CSP_TRC_ARIB_STD_B67: + GLSL(color.rgb *= vec3(12.0);) + GLSLF("color.rgb = mix(vec3(0.5) * sqrt(color.rgb),\n" + " vec3(%f) * log(color.rgb - vec3(%f)) + vec3(%f),\n" + " lessThan(vec3(1.0), color.rgb));\n", + B67_A, B67_B, B67_C); + break; default: abort(); } diff --git a/wscript b/wscript index 1915f7cd67..e29d1ab403 100644 --- a/wscript +++ b/wscript @@ -498,10 +498,11 @@ FFmpeg/Libav libraries. You need at least {0}. Aborting.".format(libav_versions_ '(void)offsetof(AVFrame, hw_frames_ctx)', use='libav'), }, { - 'name': 'avutil-st2084', - 'desc': 'libavutil AVCOL_TRC_SMPTEST2084', + 'name': 'avutil-hdr', + 'desc': 'libavutil HDR TRCs', 'func': check_statement('libavutil/pixfmt.h', - 'AVCOL_TRC_SMPTEST2084', + 'AVCOL_TRC_SMPTEST2084,' + 'AVCOL_TRC_ARIB_STD_B67', use='libav'), } ] -- cgit v1.2.3 From 13a63331b67f489d547b566ea8b7b60de62192bd Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 26 Jun 2016 19:10:46 +0200 Subject: manpage: warn about the use of HDR functions for target-trc Most devices seems to require special signalling (e.g. via HDMI metadata) to actually decode HDR signals and treat them as such, so it's probably worth warning the potential user about the fact that mpv pretty definitely does *not* set any of this metadata signalling. 
--- DOCS/man/vo.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/DOCS/man/vo.rst b/DOCS/man/vo.rst index a4d42e48c3..306b027561 100644 --- a/DOCS/man/vo.rst +++ b/DOCS/man/vo.rst @@ -1006,6 +1006,12 @@ Available video output drivers are: std-b67 ARIB STD-B67 (Hybrid Log-gamma) curve, also known as BBC/NHK HDR + NOTE: When using HDR output formats, mpv will encode to the specified + curve but it will not set any HDMI flags or other signalling that + might be required for the target device to correctly display the + HDR signal. The user should independently guarantee this before + using these signal formats for display. + ``target-brightness=<1..100000>`` Specifies the display's approximate brightness in cd/m^2. When playing HDR content on a SDR display (or SDR content on an HDR display), video -- cgit v1.2.3 From 247ec0cb841c28b97a186675c0ef923a0ede2f40 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 26 Jun 2016 19:57:29 +0200 Subject: csputils: add Panasonic V-Gamut primaries This is actually not entirely trivial since it involves negative Yxy coordinates, so the CMM has to be capable of full floating point operation. Fortunately, LittleCMS is, so we can just blindly implement it. --- DOCS/man/vf.rst | 1 + DOCS/man/vo.rst | 2 ++ video/csputils.c | 9 +++++++++ video/csputils.h | 1 + 4 files changed, 13 insertions(+) diff --git a/DOCS/man/vf.rst b/DOCS/man/vf.rst index 7f60da9385..2ae10d068a 100644 --- a/DOCS/man/vf.rst +++ b/DOCS/man/vf.rst @@ -288,6 +288,7 @@ Available filters are: :adobe: Adobe RGB (1998) :prophoto: ProPhoto RGB (ROMM) :cie1931: CIE 1931 RGB + :v-gamut: Panasonic V-Gamut primaries ```` Gamma function the source file was encoded with. 
Normally this should be set diff --git a/DOCS/man/vo.rst b/DOCS/man/vo.rst index 306b027561..db6fa1d0fc 100644 --- a/DOCS/man/vo.rst +++ b/DOCS/man/vo.rst @@ -979,6 +979,8 @@ Available video output drivers are: CIE 1931 RGB (not to be confused with CIE XYZ) dci-p3 DCI-P3 (Digital Cinema Colorspace), SMPTE RP431-2 + v-gamut + Panasonic V-Gamut (VARICAM) primaries ``target-trc=`` Specifies the transfer characteristics (gamma) of the display. Video diff --git a/video/csputils.c b/video/csputils.c index e9e6772ac8..b85b1f28f2 100644 --- a/video/csputils.c +++ b/video/csputils.c @@ -65,6 +65,7 @@ const struct m_opt_choice_alternatives mp_csp_prim_names[] = { {"prophoto", MP_CSP_PRIM_PRO_PHOTO}, {"cie1931", MP_CSP_PRIM_CIE_1931}, {"dci-p3", MP_CSP_PRIM_DCI_P3}, + {"v-gamut", MP_CSP_PRIM_V_GAMUT}, {0} }; @@ -422,6 +423,14 @@ struct mp_csp_primaries mp_get_csp_primaries(enum mp_csp_prim spc) .blue = {0.150, 0.060}, .white = d65 }; + // From Panasonic VARICAM reference manual + case MP_CSP_PRIM_V_GAMUT: + return (struct mp_csp_primaries) { + .red = {0.730, 0.280}, + .green = {0.165, 0.840}, + .blue = {0.100, -0.03}, + .white = d65 + }; default: return (struct mp_csp_primaries) {{0}}; } diff --git a/video/csputils.h b/video/csputils.h index f64c42e6c8..100e3c13d9 100644 --- a/video/csputils.h +++ b/video/csputils.h @@ -64,6 +64,7 @@ enum mp_csp_prim { MP_CSP_PRIM_PRO_PHOTO, MP_CSP_PRIM_CIE_1931, MP_CSP_PRIM_DCI_P3, + MP_CSP_PRIM_V_GAMUT, MP_CSP_PRIM_COUNT }; -- cgit v1.2.3 From 740fdc139fae87144deb3cfe5897649ba2571b27 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 26 Jun 2016 20:00:22 +0200 Subject: manpage: add missing documentation for vf_format:gamma=dci-p3 --- DOCS/man/vf.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/DOCS/man/vf.rst b/DOCS/man/vf.rst index 2ae10d068a..f068f26dff 100644 --- a/DOCS/man/vf.rst +++ b/DOCS/man/vf.rst @@ -288,6 +288,7 @@ Available filters are: :adobe: Adobe RGB (1998) :prophoto: ProPhoto RGB (ROMM) :cie1931: CIE 1931 RGB + 
:dci-p3: DCI-P3 (Digital Cinema) :v-gamut: Panasonic V-Gamut primaries ```` -- cgit v1.2.3 From f3b6966d14e8cb34477474b85c83beb46e542e70 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Sun, 26 Jun 2016 19:28:06 +0200 Subject: vo_opengl: implement the Panasonic V-Log function User request and not that hard. Closes #3157. Note that FFmpeg doesn't support this and there's no signalling in HEVC etc., so the only way users can access it is by using vf_format manually. Mind: This encoding uses full range values, not TV range. --- DOCS/man/vf.rst | 1 + DOCS/man/vo.rst | 2 ++ video/csputils.c | 1 + video/csputils.h | 1 + video/mp_image.c | 9 +++++++-- video/out/opengl/video.c | 7 ++++++- video/out/opengl/video_shaders.c | 24 ++++++++++++++++++++++++ 7 files changed, 42 insertions(+), 3 deletions(-) diff --git a/DOCS/man/vf.rst b/DOCS/man/vf.rst index f068f26dff..e6438b984b 100644 --- a/DOCS/man/vf.rst +++ b/DOCS/man/vf.rst @@ -314,6 +314,7 @@ Available filters are: :prophoto: ProPhoto RGB (ROMM) curve :st2084: SMPTE ST2084 (HDR) curve :std-b67: ARIB STD-B67 (Hybrid Log-gamma) curve + :v-log: Panasonic V-Log transfer curve ```` Reference peak illumination for the video file. 
This is mostly diff --git a/DOCS/man/vo.rst b/DOCS/man/vo.rst index db6fa1d0fc..d9d4bb4d91 100644 --- a/DOCS/man/vo.rst +++ b/DOCS/man/vo.rst @@ -1007,6 +1007,8 @@ Available video output drivers are: SMPTE ST2084 (HDR) curve, PQ OETF std-b67 ARIB STD-B67 (Hybrid Log-gamma) curve, also known as BBC/NHK HDR + v-log + Panasonic V-Log (VARICAM) curve NOTE: When using HDR output formats, mpv will encode to the specified curve but it will not set any HDMI flags or other signalling that diff --git a/video/csputils.c b/video/csputils.c index b85b1f28f2..65b26acb3a 100644 --- a/video/csputils.c +++ b/video/csputils.c @@ -80,6 +80,7 @@ const struct m_opt_choice_alternatives mp_csp_trc_names[] = { {"prophoto", MP_CSP_TRC_PRO_PHOTO}, {"st2084", MP_CSP_TRC_SMPTE_ST2084}, {"std-b67", MP_CSP_TRC_ARIB_STD_B67}, + {"v-log", MP_CSP_TRC_V_LOG}, {0} }; diff --git a/video/csputils.h b/video/csputils.h index 100e3c13d9..90c5483a05 100644 --- a/video/csputils.h +++ b/video/csputils.h @@ -81,6 +81,7 @@ enum mp_csp_trc { MP_CSP_TRC_PRO_PHOTO, MP_CSP_TRC_SMPTE_ST2084, MP_CSP_TRC_ARIB_STD_B67, + MP_CSP_TRC_V_LOG, MP_CSP_TRC_COUNT }; diff --git a/video/mp_image.c b/video/mp_image.c index d5b97481e8..0b46f947bc 100644 --- a/video/mp_image.c +++ b/video/mp_image.c @@ -611,8 +611,13 @@ void mp_image_params_guess_csp(struct mp_image_params *params) } if (params->colorspace == MP_CSP_AUTO) params->colorspace = mp_csp_guess_colorspace(params->w, params->h); - if (params->colorlevels == MP_CSP_LEVELS_AUTO) - params->colorlevels = MP_CSP_LEVELS_TV; + if (params->colorlevels == MP_CSP_LEVELS_AUTO) { + if (params->gamma == MP_CSP_TRC_V_LOG) { + params->colorlevels = MP_CSP_LEVELS_PC; + } else { + params->colorlevels = MP_CSP_LEVELS_TV; + } + } if (params->primaries == MP_CSP_PRIM_AUTO) { // Guess based on the colormatrix as a first priority if (params->colorspace == MP_CSP_BT_2020_NC || diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index 6fd92ddb29..a870fe0e4f 100644 --- 
a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2177,7 +2177,8 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // We could pick any value we want here, the difference is just coding // efficiency. if (trc_orig == MP_CSP_TRC_SMPTE_ST2084 || - trc_orig == MP_CSP_TRC_ARIB_STD_B67) + trc_orig == MP_CSP_TRC_ARIB_STD_B67 || + trc_orig == MP_CSP_TRC_V_LOG) { trc_orig = MP_CSP_TRC_GAMMA22; } @@ -2224,6 +2225,10 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // target's reference peak if (trc_src == MP_CSP_TRC_ARIB_STD_B67) peak_src = 12 * peak_dst; + + // Similar deal for V-Log + if (trc_src == MP_CSP_TRC_V_LOG) + peak_src = 46.0855 * peak_dst; } // All operations from here on require linear light as a starting point, diff --git a/video/out/opengl/video_shaders.c b/video/out/opengl/video_shaders.c index 4a15b6ceed..7b736f1d5d 100644 --- a/video/out/opengl/video_shaders.c +++ b/video/out/opengl/video_shaders.c @@ -232,6 +232,12 @@ static const float B67_A = 0.17883277, B67_B = 0.28466892, B67_C = 0.55991073; +// Common constants for Panasonic V-Log +static const float VLOG_B = 0.00873, + VLOG_C = 0.241514, + VLOG_D = 0.598206, + VLOG_R = 46.085527; // nominal peak + // Linearize (expand), given a TRC as input void pass_linearize(struct gl_shader_cache *sc, enum mp_csp_trc trc) { @@ -281,6 +287,16 @@ void pass_linearize(struct gl_shader_cache *sc, enum mp_csp_trc trc) // assume 1.0 is the maximum brightness, not the reference peak) GLSL(color.rgb /= vec3(12.0);) break; + case MP_CSP_TRC_V_LOG: + GLSLF("color.rgb = mix((color.rgb - vec3(0.125)) / vec3(5.6), \n" + " pow(vec3(10.0), (color.rgb - vec3(%f)) / vec3(%f)) \n" + " - vec3(%f), \n" + " lessThanEqual(vec3(0.181), color.rgb)); \n", + VLOG_D, VLOG_C, VLOG_B); + // Same deal as with the B67 function, renormalize to texture range + GLSLF("color.rgb /= vec3(%f);\n", VLOG_R); + GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);) + break; default: abort(); } @@ -331,6 +347,14 @@ 
void pass_delinearize(struct gl_shader_cache *sc, enum mp_csp_trc trc) " lessThan(vec3(1.0), color.rgb));\n", B67_A, B67_B, B67_C); break; + case MP_CSP_TRC_V_LOG: + GLSLF("color.rgb *= vec3(%f);\n", VLOG_R); + GLSLF("color.rgb = mix(vec3(5.6) * color.rgb + vec3(0.125), \n" + " vec3(%f) * log(color.rgb + vec3(%f)) \n" + " + vec3(%f), \n" + " lessThanEqual(vec3(0.01), color.rgb)); \n", + VLOG_C / M_LN10, VLOG_B, VLOG_D); + break; default: abort(); } -- cgit v1.2.3 From f9fe5d06ad8ba5f8aa6068dd3c6a1a9fc6332707 Mon Sep 17 00:00:00 2001 From: Niklas Haas Date: Tue, 28 Jun 2016 01:18:55 +0200 Subject: vo_opengl: use image_params instead of *_src for autoconfig I'm not even sure why we ever consulted *_src to begin with, since that just describes the current image format - and not the original metadata. (And in fact, we specifically had logic to work around the impliciations this had on linear scaling) image_params is *the* authoritative source on the intended (i.e. reference) image metadata, whereas *_src may be changed by previous passes already. So only consult image_params for picking auto-generated values. Also, add some more missing "wide gamut" and "non-gamma" curves to the autoconfig blacklist. (Maybe it would make sense to move this list to csputils in the future? 
Or perhaps even auto-detect it based on the associated primaries) --- video/out/opengl/video.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/video/out/opengl/video.c b/video/out/opengl/video.c index a870fe0e4f..89030a8952 100644 --- a/video/out/opengl/video.c +++ b/video/out/opengl/video.c @@ -2194,26 +2194,29 @@ static void pass_colormanage(struct gl_video *p, float peak_src, // Some exceptions apply to source spaces that even hardcore technoluddites // would probably not enjoy viewing unaltered if (prim_dst == MP_CSP_PRIM_AUTO) { - prim_dst = prim_src; + prim_dst = p->image_params.primaries; // Avoid outputting very wide gamut content automatically, since the // majority target audience has standard gamut displays - if (prim_dst == MP_CSP_PRIM_BT_2020 || prim_dst == MP_CSP_PRIM_PRO_PHOTO) + if (prim_dst == MP_CSP_PRIM_BT_2020 || + prim_dst == MP_CSP_PRIM_PRO_PHOTO || + prim_dst == MP_CSP_PRIM_V_GAMUT) + { prim_dst = MP_CSP_PRIM_BT_709; + } } if (trc_dst == MP_CSP_TRC_AUTO) { - trc_dst = trc_src; - // Avoid outputting linear light at all costs. First try -