Move deferred vblank events to separate drm_vblank_deferred list

It was still possible for nested xorg_list_for_each_entry_safe loops
to occur over the drm_vblank_signalled list, which could corrupt that
list. Moving deferred events to a separate list allows processing the
drm_vblank_signalled list without xorg_list_for_each_entry_safe.

Bugzilla: https://bugs.freedesktop.org/108600
(Ported from amdgpu commit 51ba6dddee40c3688d4c7b12eabeab516ed153b7)
This commit is contained in:
Michel Dänzer
2018-12-20 19:00:08 +01:00
committed by Michel Dänzer
parent f450632077
commit 64942d2c49

View File

@@ -56,6 +56,7 @@ static int radeon_drm_queue_refcnt;
static struct xorg_list radeon_drm_queue;
static struct xorg_list radeon_drm_flip_signalled;
static struct xorg_list radeon_drm_vblank_signalled;
static struct xorg_list radeon_drm_vblank_deferred;
static uintptr_t radeon_drm_queue_seq;
@@ -111,6 +112,31 @@ radeon_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
}
}
/*
 * Handle signalled vblank events. If we're waiting for a flip event,
 * put events for that CRTC in the vblank_deferred list.
 *
 * Drains radeon_drm_vblank_signalled front-to-front instead of iterating
 * with xorg_list_for_each_entry_safe: delivering an event can re-enter
 * list processing, and a nested safe-iterator over the same list would
 * corrupt it (see https://bugs.freedesktop.org/108600).
 */
static void
radeon_drm_handle_vblank_signalled(void)
{
drmmode_crtc_private_ptr drmmode_crtc;
struct radeon_drm_queue_entry *e;
/* Loop terminates because every iteration removes e from the signalled
 * list: either radeon_drm_queue_handle_one() unlinks it (presumably —
 * the loop would never finish otherwise; confirm in its definition),
 * or we explicitly move it to the deferred list below.
 */
while (!xorg_list_is_empty(&radeon_drm_vblank_signalled)) {
e = xorg_list_first_entry(&radeon_drm_vblank_signalled,
struct radeon_drm_queue_entry, list);
drmmode_crtc = e->crtc->driver_private;
if (drmmode_crtc->wait_flip_nesting_level == 0) {
/* No page flip pending on this CRTC: deliver the event now */
radeon_drm_queue_handle_one(e);
continue;
}
/* Flip pending: park the event on the deferred list; it is moved
 * back to the signalled list once the flip completes
 */
xorg_list_del(&e->list);
xorg_list_append(&e->list, &radeon_drm_vblank_deferred);
}
}
/*
* Handle deferred DRM vblank events
*
@@ -127,12 +153,18 @@ radeon_drm_queue_handle_deferred(xf86CrtcPtr crtc)
--drmmode_crtc->wait_flip_nesting_level > 0)
return;
xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
/* Put previously deferred vblank events for this CRTC back in the
* signalled queue
*/
xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_deferred, list) {
if (e->crtc != crtc)
continue;
if (drmmode_crtc->wait_flip_nesting_level == 0)
radeon_drm_queue_handle_one(e);
xorg_list_del(&e->list);
xorg_list_append(&e->list, &radeon_drm_vblank_signalled);
}
radeon_drm_handle_vblank_signalled();
}
/*
@@ -205,6 +237,13 @@ radeon_drm_abort_entry(uintptr_t seq)
}
}
xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_deferred, list) {
if (e->seq == seq) {
radeon_drm_abort_one(e);
return;
}
}
xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
if (e->seq == seq) {
radeon_drm_abort_one(e);
@@ -235,7 +274,7 @@ radeon_drm_abort_id(uint64_t id)
int
radeon_drm_handle_event(int fd, drmEventContext *event_context)
{
struct radeon_drm_queue_entry *e, *tmp;
struct radeon_drm_queue_entry *e;
int r;
r = drmHandleEvent(fd, event_context);
@@ -246,12 +285,7 @@ radeon_drm_handle_event(int fd, drmEventContext *event_context)
radeon_drm_queue_handle_one(e);
}
xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
if (drmmode_crtc->wait_flip_nesting_level == 0)
radeon_drm_queue_handle_one(e);
}
radeon_drm_handle_vblank_signalled();
return r;
}
@@ -298,6 +332,7 @@ radeon_drm_queue_init(ScrnInfoPtr scrn)
xorg_list_init(&radeon_drm_queue);
xorg_list_init(&radeon_drm_flip_signalled);
xorg_list_init(&radeon_drm_vblank_signalled);
xorg_list_init(&radeon_drm_vblank_deferred);
}
/*