Move deferred vblank events to separate drm_vblank_deferred list

It was still possible for nested xorg_list_for_each_entry_safe loops
to occur over the drm_vblank_signalled list, which could mess up that
list. Moving deferred events to a separate list allows processing the
drm_vblank_signalled list without xorg_list_for_each_entry_safe.

v2:
* Refactor drm_handle_vblank_signalled helper function, less code
  duplication => better readability (Alex Deucher)

Bugzilla: https://bugs.freedesktop.org/108600
Acked-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Michel Dänzer
2018-11-09 11:00:04 +01:00
committed by Michel Dänzer
parent e2c7369cae
commit 51ba6dddee

View File

@@ -56,6 +56,7 @@ static int amdgpu_drm_queue_refcnt;
/* Entries queued and still waiting for their DRM event to arrive */
static struct xorg_list amdgpu_drm_queue;
/* Signalled page-flip events awaiting processing */
static struct xorg_list amdgpu_drm_flip_signalled;
/* Signalled vblank events awaiting processing */
static struct xorg_list amdgpu_drm_vblank_signalled;
/* Vblank events deferred because a page flip is pending on their CRTC */
static struct xorg_list amdgpu_drm_vblank_deferred;
/* Sequence number identifying queue entries (matched via e->seq in
 * amdgpu_drm_abort_entry) — NOTE(review): assumed monotonic allocator
 * counter; confirm against the entry-allocation code outside this view */
static uintptr_t amdgpu_drm_queue_seq;
@@ -111,6 +112,31 @@ amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
}
}
/*
 * Handle signalled vblank events. If we're waiting for a flip event,
 * put events for that CRTC in the vblank_deferred list.
 */
static void
amdgpu_drm_handle_vblank_signalled(void)
{
	struct amdgpu_drm_queue_entry *entry;
	drmmode_crtc_private_ptr crtc_priv;

	/* Pop entries from the head until the list is drained; each
	 * iteration either processes the entry or moves it to the
	 * deferred list, so no _safe iteration macro is needed.
	 */
	while (!xorg_list_is_empty(&amdgpu_drm_vblank_signalled)) {
		entry = xorg_list_first_entry(&amdgpu_drm_vblank_signalled,
					      struct amdgpu_drm_queue_entry, list);
		crtc_priv = entry->crtc->driver_private;

		if (crtc_priv->wait_flip_nesting_level) {
			/* A flip is pending on this CRTC: defer the event */
			xorg_list_del(&entry->list);
			xorg_list_append(&entry->list, &amdgpu_drm_vblank_deferred);
		} else {
			/* Handles the event; must also unlink the entry,
			 * otherwise this loop could not terminate */
			amdgpu_drm_queue_handle_one(entry);
		}
	}
}
/*
* Handle deferred DRM vblank events
*
@@ -127,12 +153,18 @@ amdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc)
--drmmode_crtc->wait_flip_nesting_level > 0)
return;
xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
/* Put previously deferred vblank events for this CRTC back in the
* signalled queue
*/
xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_deferred, list) {
if (e->crtc != crtc)
continue;
if (drmmode_crtc->wait_flip_nesting_level == 0)
amdgpu_drm_queue_handle_one(e);
xorg_list_del(&e->list);
xorg_list_append(&e->list, &amdgpu_drm_vblank_signalled);
}
amdgpu_drm_handle_vblank_signalled();
}
/*
@@ -205,6 +237,13 @@ amdgpu_drm_abort_entry(uintptr_t seq)
}
}
xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_deferred, list) {
if (e->seq == seq) {
amdgpu_drm_abort_one(e);
return;
}
}
xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
if (e->seq == seq) {
amdgpu_drm_abort_one(e);
@@ -235,7 +274,7 @@ amdgpu_drm_abort_id(uint64_t id)
int
amdgpu_drm_handle_event(int fd, drmEventContext *event_context)
{
struct amdgpu_drm_queue_entry *e, *tmp;
struct amdgpu_drm_queue_entry *e;
int r;
r = drmHandleEvent(fd, event_context);
@@ -246,12 +285,7 @@ amdgpu_drm_handle_event(int fd, drmEventContext *event_context)
amdgpu_drm_queue_handle_one(e);
}
xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_vblank_signalled, list) {
drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
if (drmmode_crtc->wait_flip_nesting_level == 0)
amdgpu_drm_queue_handle_one(e);
}
amdgpu_drm_handle_vblank_signalled();
return r;
}
@@ -298,6 +332,7 @@ amdgpu_drm_queue_init(ScrnInfoPtr scrn)
xorg_list_init(&amdgpu_drm_queue);
xorg_list_init(&amdgpu_drm_flip_signalled);
xorg_list_init(&amdgpu_drm_vblank_signalled);
xorg_list_init(&amdgpu_drm_vblank_deferred);
}
/*