/*
* Copyright 2020 Google LLC
* SPDX-License-Identifier: MIT
 */

#ifndef VKR_QUEUE_H
#define VKR_QUEUE_H

#include "vkr_common.h"

struct vkr_queue_sync {
   VkFence fence;

   bool device_lost;

   uint32_t flags;
   uint32_t ring_idx;
   uint64_t fence_id;

   struct list_head head;
};

struct vkr_queue {
   struct vkr_object base;

   struct vkr_context *context;
   struct vkr_device *device;

   VkDeviceQueueCreateFlags flags;
   uint32_t family;
   uint32_t index;

   /* only used when client driver uses multiple timelines */
   uint32_t ring_idx;

   /* Submitted fences are added to pending_syncs first. How submitted fences
    * are retired depends on VKR_RENDERER_THREAD_SYNC and
    * VKR_RENDERER_ASYNC_FENCE_CB.
    *
    * When VKR_RENDERER_THREAD_SYNC is not set, the main thread calls
    * vkGetFenceStatus and retires signaled fences in pending_syncs in order.
    *
    * When VKR_RENDERER_THREAD_SYNC is set but VKR_RENDERER_ASYNC_FENCE_CB is
    * not set, the sync thread calls vkWaitForFences and moves signaled fences
    * from pending_syncs to signaled_syncs in order. The main thread simply
    * retires all fences in signaled_syncs.
    *
    * When VKR_RENDERER_THREAD_SYNC and VKR_RENDERER_ASYNC_FENCE_CB are both
    * set, the sync thread calls vkWaitForFences and retires signaled fences
    * in pending_syncs in order.
    */
   int eventfd;
   thrd_t thread;
   mtx_t mutex;
   cnd_t cond;
   bool join;

   struct list_head pending_syncs;
   struct list_head signaled_syncs;

   struct list_head busy_head;
};

VKR_DEFINE_OBJECT_CAST(queue, VK_OBJECT_TYPE_QUEUE, VkQueue)
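
/* Illustrative sketch only (not part of this header): one way the main
 * thread could drain pending_syncs in submission order when
 * VKR_RENDERER_THREAD_SYNC is not set.  The list_for_each_entry_safe() and
 * list_del() helpers, the dev_handle parameter, and the retire_cb callback
 * are assumptions made for the example, not the actual virglrenderer API.
 *
 *    static void
 *    poll_pending_syncs(struct vkr_queue *queue,
 *                       VkDevice dev_handle,
 *                       void (*retire_cb)(uint64_t fence_id))
 *    {
 *       list_for_each_entry_safe(struct vkr_queue_sync, sync,
 *                                &queue->pending_syncs, head) {
 *          // fences retire in submission order: stop at the first one
 *          // that has not signaled yet
 *          if (vkGetFenceStatus(dev_handle, sync->fence) != VK_SUCCESS)
 *             break;
 *          list_del(&sync->head);
 *          retire_cb(sync->fence_id);
 *          vkr_device_free_queue_sync(queue->device, sync);
 *       }
 *    }
 */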

struct vkr_fence {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(fence, VK_OBJECT_TYPE_FENCE, VkFence)

struct vkr_semaphore {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(semaphore, VK_OBJECT_TYPE_SEMAPHORE, VkSemaphore)

struct vkr_event {
   struct vkr_object base;
};
VKR_DEFINE_OBJECT_CAST(event, VK_OBJECT_TYPE_EVENT, VkEvent)

void
vkr_context_init_queue_dispatch(struct vkr_context *ctx);

void
vkr_context_init_fence_dispatch(struct vkr_context *ctx);

void
vkr_context_init_semaphore_dispatch(struct vkr_context *ctx);

void
vkr_context_init_event_dispatch(struct vkr_context *ctx);

struct vkr_queue_sync *
vkr_device_alloc_queue_sync(struct vkr_device *dev,
                            uint32_t fence_flags,
                            uint32_t ring_idx,
                            uint64_t fence_id);

void
vkr_device_free_queue_sync(struct vkr_device *dev, struct vkr_queue_sync *sync);

void
vkr_queue_get_signaled_syncs(struct vkr_queue *queue,
                             struct list_head *retired_syncs,
                             bool *queue_empty);
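
/* Illustrative usage sketch (assumptions, not part of this header): a fence
 * submission would typically allocate one vkr_queue_sync and queue it on
 * pending_syncs under the queue mutex; the main thread later collects the
 * retired syncs with vkr_queue_get_signaled_syncs().  The locking,
 * list_addtail(), and the fence_flags/ring_idx/fence_id variables shown
 * here are assumptions for the example.
 *
 *    struct vkr_queue_sync *sync =
 *       vkr_device_alloc_queue_sync(queue->device, fence_flags, ring_idx,
 *                                   fence_id);
 *    if (sync) {
 *       mtx_lock(&queue->mutex);
 *       list_addtail(&sync->head, &queue->pending_syncs);
 *       cnd_signal(&queue->cond);
 *       mtx_unlock(&queue->mutex);
 *    }
 *
 *    // later, on the main thread
 *    struct list_head retired_syncs;
 *    bool queue_empty;
 *    vkr_queue_get_signaled_syncs(queue, &retired_syncs, &queue_empty);
 */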

struct vkr_queue *
vkr_queue_create(struct vkr_context *ctx,
                 struct vkr_device *dev,
                 VkDeviceQueueCreateFlags flags,
                 uint32_t family,
                 uint32_t index,
                 VkQueue handle);

void
vkr_queue_destroy(struct vkr_context *ctx, struct vkr_queue *queue);
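
/* Illustrative sketch (assumption, not part of this header): a queue object
 * would typically be created once per VkQueue retrieved from the driver,
 * e.g. via vkGetDeviceQueue2(), with the same flags/family/index used to
 * look the queue up; vkr_queue_destroy() then tears the tracking state down
 * with the rest of the device.  dev_handle, ctx, dev, flags, family, and
 * index are assumed to be in scope for the example.
 *
 *    const VkDeviceQueueInfo2 info = {
 *       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
 *       .flags = flags,
 *       .queueFamilyIndex = family,
 *       .queueIndex = index,
 *    };
 *    VkQueue handle = VK_NULL_HANDLE;
 *    vkGetDeviceQueue2(dev_handle, &info, &handle);
 *
 *    struct vkr_queue *queue =
 *       vkr_queue_create(ctx, dev, flags, family, index, handle);
 */
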
#endif /* VKR_QUEUE_H */