summaryrefslogtreecommitdiffstats
path: root/video/out/opengl/gl_utils.c
diff options
context:
space:
mode:
Diffstat (limited to 'video/out/opengl/gl_utils.c')
-rw-r--r--  video/out/opengl/gl_utils.c  66
1 file changed, 0 insertions(+), 66 deletions(-)
diff --git a/video/out/opengl/gl_utils.c b/video/out/opengl/gl_utils.c
index 6c0537febc..9ec9d5d37d 100644
--- a/video/out/opengl/gl_utils.c
+++ b/video/out/opengl/gl_utils.c
@@ -269,72 +269,6 @@ void gl_set_debug_logger(GL *gl, struct mp_log *log)
gl->DebugMessageCallback(log ? gl_debug_cb : NULL, log);
}
-// Upload a texture, going through a PBO. PBO supposedly can facilitate
-// asynchronous copy from CPU to GPU, so this is an optimization. Note that
-// changing format/type/tex_w/tex_h or reusing the PBO in the same frame can
-// ruin performance.
-// This call is like gl_upload_tex(), plus PBO management/use.
-// target, format, type, dataptr, stride, x, y, w, h: texture upload params
-// (see gl_upload_tex())
-// tex_w, tex_h: maximum size of the used texture
-// use_pbo: for convenience, if false redirects the call to gl_upload_tex
-void gl_pbo_upload_tex(struct gl_pbo_upload *pbo, GL *gl, bool use_pbo,
- GLenum target, GLenum format, GLenum type,
- int tex_w, int tex_h, const void *dataptr, int stride,
- int x, int y, int w, int h)
-{
- assert(x >= 0 && y >= 0 && w >= 0 && h >= 0);
- assert(x + w <= tex_w && y + h <= tex_h);
-
- if (!use_pbo) {
- gl_upload_tex(gl, target, format, type, dataptr, stride, x, y, w, h);
- return;
- }
-
- // We align the buffer size to 4096 to avoid possible subregion
- // dependencies. This is not a strict requirement (the spec requires no
- // alignment), but a good precaution for performance reasons
- size_t needed_size = stride * h;
- size_t buffer_size = MP_ALIGN_UP(needed_size, 4096);
-
- if (buffer_size != pbo->buffer_size)
- gl_pbo_upload_uninit(pbo);
-
- if (!pbo->buffer) {
- pbo->gl = gl;
- pbo->buffer_size = buffer_size;
- gl->GenBuffers(1, &pbo->buffer);
- gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo->buffer);
- // Magic time: Because we memcpy once from RAM to the buffer, and then
- // the GPU needs to read from this anyway, we actually *don't* want
- // this buffer to be allocated in RAM. If we allocate it in VRAM
- // instead, we can reduce this to a single copy: from RAM into VRAM.
- // Unfortunately, drivers e.g. nvidia will think GL_STREAM_DRAW is best
- // allocated on host memory instead of device memory, so we lie about
- // the usage to fool the driver into giving us a buffer in VRAM instead
- // of RAM, which can be significantly faster for our use case.
- // Seriously, fuck OpenGL.
- gl->BufferData(GL_PIXEL_UNPACK_BUFFER, NUM_PBO_BUFFERS * buffer_size,
- NULL, GL_STREAM_COPY);
- }
-
- uintptr_t offset = buffer_size * pbo->index;
- pbo->index = (pbo->index + 1) % NUM_PBO_BUFFERS;
-
- gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo->buffer);
- gl->BufferSubData(GL_PIXEL_UNPACK_BUFFER, offset, needed_size, dataptr);
- gl_upload_tex(gl, target, format, type, (void *)offset, stride, x, y, w, h);
- gl->BindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
-}
-
-void gl_pbo_upload_uninit(struct gl_pbo_upload *pbo)
-{
- if (pbo->gl)
- pbo->gl->DeleteBuffers(1, &pbo->buffer);
-
- *pbo = (struct gl_pbo_upload){0};
-}
-
int gl_get_fb_depth(GL *gl, int fbo)
{
if ((gl->es < 300 && !gl->version) || !(gl->mpgl_caps & MPGL_CAP_FB))