summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorwm4 <wm4@nowhere>2014-04-08 19:02:57 +0200
committerwm4 <wm4@nowhere>2014-04-17 22:58:47 +0200
commit8f5eb7b780cf7598157179d0679863e3b7a90084 (patch)
tree0e9600dc21cda406172f22c3664f5515b0555d29
parentb991c072f69e2fa4b842060ed5a7a8b1ece09f52 (diff)
downloadmpv-8f5eb7b780cf7598157179d0679863e3b7a90084.tar.bz2
mpv-8f5eb7b780cf7598157179d0679863e3b7a90084.tar.xz
vo_vdpau: simplify previous vsync timestamp calculation
The strange thing about this code was the shift parameter of the prev_vs2 function. The parameter is used to handle timestamps before the last vsync, since the % operator yields negative results for negative operands. Most callers set shift to 0, and _usually_ pass a timestamp after the last vsync. One caller sets it to 16, and can pass a timestamp before the last vsync. The mystery is why prev_vs2 doesn't just compensate for the % operator semantics in the simplest way: if the result of the operator is negative, add the divisor to it. Instead, it adds a huge value to it (how huge is influenced by shift). If shift is 0, the result of the function will not be aligned to vsyncs. I have no idea why it was written in this way. Were there concerns about certain numeric overflows that could happen in the calculations? But I can't think of any (the difference between ts and vc->recent_vsync_time is usually not that huge). Or is there something more clever about it, which is important for the timing code? I can't think of anything either. So scrap it and simplify it.
-rw-r--r--video/out/vo_vdpau.c17
1 files changed, 8 insertions, 9 deletions
diff --git a/video/out/vo_vdpau.c b/video/out/vo_vdpau.c
index 58ac6b971f..8c50e44d6b 100644
--- a/video/out/vo_vdpau.c
+++ b/video/out/vo_vdpau.c
@@ -994,12 +994,13 @@ static int update_presentation_queue_status(struct vo *vo)
return num_queued;
}
-static inline uint64_t prev_vs2(struct vdpctx *vc, uint64_t ts, int shift)
+// Return the timestamp of the vsync that must have happened before ts.
+static inline uint64_t prev_vsync(struct vdpctx *vc, uint64_t ts)
{
- uint64_t offset = ts - vc->recent_vsync_time;
- // Fix negative values for 1<<shift vsyncs before vc->recent_vsync_time
- offset += (uint64_t)vc->vsync_interval << shift;
- offset %= vc->vsync_interval;
+ int64_t diff = (int64_t)(ts - vc->recent_vsync_time);
+ int64_t offset = diff % vc->vsync_interval;
+ if (offset < 0)
+ offset += vc->vsync_interval;
return ts - offset;
}
@@ -1034,9 +1035,7 @@ static void flip_page_timed(struct vo *vo, int64_t pts_us, int duration)
uint64_t ideal_pts = pts;
uint64_t npts = duration >= 0 ? pts + duration : UINT64_MAX;
-#define PREV_VS2(ts, shift) prev_vs2(vc, ts, shift)
- // Only gives accurate results for ts >= vc->recent_vsync_time
-#define PREV_VSYNC(ts) PREV_VS2(ts, 0)
+#define PREV_VSYNC(ts) prev_vsync(vc, ts)
/* We hope to be here at least one vsync before the frame should be shown.
* If we are running late then don't drop the frame unless there is
@@ -1065,7 +1064,7 @@ static void flip_page_timed(struct vo *vo, int64_t pts_us, int duration)
*/
uint64_t vsync = PREV_VSYNC(pts);
if (pts < vsync + vsync_interval / 4
- && (vsync - PREV_VS2(vc->last_queue_time, 16)
+ && (vsync - PREV_VSYNC(vc->last_queue_time)
> pts - vc->last_ideal_time + vsync_interval / 2
|| vc->dropped_frame && vsync > vc->dropped_time))
pts -= vsync_interval / 2;