kiba-engine
queue.c
#include <kiba/gpu/vulkan/queue.h>

#include <kiba/gpu/vulkan/allocator.h>
#include <kiba/gpu/vulkan/util.h>

b8 vk_queue_create(VkDevice logical_device, struct vk_queue *queue) {
    if (!queue->available)
        return true;
    vkGetDeviceQueue(logical_device, queue->index, 0, &queue->queue);
    VkSemaphoreCreateInfo sp_create_info = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
    };
    VK_CALL_B8(vkCreateSemaphore(logical_device, &sp_create_info, &vk_alloc.vulkan_callbacks, &queue->sp_relays[0]));
    VK_CALL_B8(vkCreateSemaphore(logical_device, &sp_create_info, &vk_alloc.vulkan_callbacks, &queue->sp_relays[1]));
    queue->sp_index = -1;
    // TODO check if flags are ok here
    VkCommandPoolCreateInfo pool_create_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
        .queueFamilyIndex = queue->index,
        .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
    };
    VK_CALL_B8(
        vkCreateCommandPool(logical_device, &pool_create_info, &vk_alloc.vulkan_callbacks, &queue->command_pool));
    queue->encoders = array_create(struct gpu_backend_command_encoder, 5, &vk_alloc.kiba_alloc);
    return queue->encoders != KB_NULL;
}
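
/*
 * Note (a reading of the code above, not authoritative): the queue owns a
 * pair of "relay" semaphores, sp_relays[0] and sp_relays[1]. Successive
 * submits alternate between them to chain GPU work (see
 * vk_queue_submit_encoder below); sp_index records which semaphore the most
 * recent submit signaled, and -1 means no submit is pending, so the next
 * submit waits on nothing.
 */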

void vk_queue_destroy(VkDevice logical_device, struct vk_queue *queue) {
    if (queue->available) {
        array_for_each(struct gpu_backend_command_encoder, encoder, queue->encoders) {
            vkWaitForFences(logical_device, 1, &encoder->exec_done, VK_TRUE, UINT64_MAX);
            vkDestroyFence(logical_device, encoder->exec_done, &vk_alloc.vulkan_callbacks);
            vkFreeCommandBuffers(logical_device, queue->command_pool, 1, &encoder->buffer);
        }
        vkDestroyCommandPool(logical_device, queue->command_pool, &vk_alloc.vulkan_callbacks);
        vkDestroySemaphore(logical_device, queue->sp_relays[0], &vk_alloc.vulkan_callbacks);
        vkDestroySemaphore(logical_device, queue->sp_relays[1], &vk_alloc.vulkan_callbacks);
    }
}

b8 vk_queue_get_command_encoder(struct gpu_backend_device *device, struct gpu_backend_command_encoder *encoder) {
    // TODO don't restrict this to graphics queue, maybe via parameter
    const usize cache_size = array_size(device->graphics_queue.encoders);
    for (usize i = 0; i < cache_size; ++i) {
        const struct gpu_backend_command_encoder candidate = device->graphics_queue.encoders[i];
        if (vkGetFenceStatus(device->logical, candidate.exec_done) == VK_SUCCESS) {
            vkResetFences(device->logical, 1, &candidate.exec_done);
            *encoder = candidate;
            array_remove(&device->graphics_queue.encoders, i, KB_NULL);
            return true;
        }
    }

    VkCommandBufferAllocateInfo cb_allocate_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
        .commandPool = device->graphics_queue.command_pool,
        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        .commandBufferCount = 1,
    };
    VK_CALL_B8(vkAllocateCommandBuffers(device->logical, &cb_allocate_info, &encoder->buffer));
    VkFenceCreateInfo fence_info = {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
    };
    VK_CALL_B8(vkCreateFence(device->logical, &fence_info, &vk_alloc.vulkan_callbacks, &encoder->exec_done));
    return true;
}
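
/*
 * Illustrative sketch (not part of the engine): a typical acquire/record/
 * submit/return cycle built from the functions in this file. The
 * record_commands callback is hypothetical; everything else exists above.
 *
 *   static b8 example_record_and_submit(struct gpu_backend_device *device,
 *                                       void (*record_commands)(VkCommandBuffer)) {
 *       struct gpu_backend_command_encoder encoder;
 *       if (!vk_queue_get_command_encoder(device, &encoder))
 *           return false;
 *       VkCommandBufferBeginInfo begin_info = {
 *           .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
 *           .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
 *       };
 *       VK_CALL_B8(vkBeginCommandBuffer(encoder.buffer, &begin_info));
 *       record_commands(encoder.buffer);
 *       VK_CALL_B8(vkEndCommandBuffer(encoder.buffer));
 *       if (!vk_queue_submit_encoder(&encoder, device))
 *           return false;
 *       vk_queue_return_command_encoder(device, encoder);
 *       return true;
 *   }
 *
 * ONE_TIME_SUBMIT matches the TRANSIENT flag set on the pool in
 * vk_queue_create.
 */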
67 
68 void vk_queue_return_command_encoder(struct gpu_backend_device *device, struct gpu_backend_command_encoder encoder) {
69  // FIXME this is a workaround for lifetime issues
70  // the validation layers will complain if resources bound to this buffer are destroyed
71  // the buffer should be done because the last present call waits for all previous buffer
72  // submissions but oh well
73  // it appears i have to rethink how to manage the lifetime of the command buffer and by extension
74  // its associated objects
75  vkWaitForFences(device->logical, 1, &encoder.exec_done, VK_TRUE, UINT64_MAX);
76  if (!array_push_checked(&device->graphics_queue.encoders, &encoder)) {
77  // TODO should probably destroy the buffer instead
78  KB_WARN("could not return command buffer to queue");
79  }
80 }
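
/*
 * Note: the vkWaitForFences above blocks until the GPU has finished the
 * encoder's last submission, so returning an encoder currently serializes
 * the CPU against the GPU. The FIXME describes why; a non-blocking
 * alternative would be to push the encoder unconditionally and let reuse
 * wait until vkGetFenceStatus reports VK_SUCCESS, as
 * vk_queue_get_command_encoder already does when recycling cached encoders.
 */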

b8 vk_queue_submit_encoder(struct gpu_backend_command_encoder *encoder, struct gpu_backend_device *device) {
    VkPipelineStageFlags *wait_stages = KB_NULL;
    VkPipelineStageFlags sp_wait_stages[] = {VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT};
    VkSemaphore sp_wait[] = {VK_NULL_HANDLE};
    u32 sp_wait_count = 0;
    i32 old_index = device->graphics_queue.sp_index;
    i32 new_index = 0;
    if (old_index >= 0) {
        sp_wait[sp_wait_count++] = device->graphics_queue.sp_relays[old_index];
        wait_stages = sp_wait_stages;
        new_index = (old_index + 1) % 2;
    }
    VkSemaphore sp_signal[] = {device->graphics_queue.sp_relays[new_index]};
    device->graphics_queue.sp_index = new_index;

    VkSubmitInfo submit_info = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .waitSemaphoreCount = sp_wait_count,
        .pWaitSemaphores = sp_wait,
        .pWaitDstStageMask = wait_stages,
        .commandBufferCount = 1,
        .pCommandBuffers = &encoder->buffer,
        .signalSemaphoreCount = 1,
        .pSignalSemaphores = sp_signal,
    };
    KB_DEBUG("submitting");
    VK_CALL_B8(vkQueueSubmit(device->graphics_queue.queue, 1, &submit_info, encoder->exec_done));
    return true;
}
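
/*
 * How the relay chain plays out (derived from the code above): the first
 * submit after a present finds sp_index == -1, waits on nothing, and
 * signals sp_relays[0]. Each subsequent submit waits on the semaphore the
 * previous submit signaled (at TOP_OF_PIPE) and signals the other one of
 * the pair, so submissions within a frame are ordered on the GPU without
 * any CPU-side fence waits.
 */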

VkResult vk_queue_present_surface(struct gpu_backend_surface *surface, struct gpu_backend_device *device) {
    VkSemaphore sp_wait[] = {VK_NULL_HANDLE};
    u32 sp_wait_count = 0;
    i32 old_index = device->graphics_queue.sp_index;
    device->graphics_queue.sp_index = -1;
    if (old_index >= 0) {
        sp_wait[sp_wait_count++] = device->graphics_queue.sp_relays[old_index];
    }
    VkPresentInfoKHR present_info = {
        .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
        .waitSemaphoreCount = sp_wait_count,
        .pWaitSemaphores = sp_wait,
        .swapchainCount = 1,
        .pSwapchains = &surface->swap_chain,
        .pImageIndices = &surface->presentation_image_index,
        .pResults = NULL,
    };
    KB_DEBUG("presenting");
    return vkQueuePresentKHR(device->present_queue.queue, &present_info);
}
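
/*
 * Putting it together, a frame on this backend looks roughly like this
 * (illustrative, assuming the surface has already acquired an image):
 *
 *   vk_queue_submit_encoder(&enc_a, device);   // waits on nothing, signals relay 0
 *   vk_queue_submit_encoder(&enc_b, device);   // waits on relay 0, signals relay 1
 *   vk_queue_present_surface(surface, device); // waits on relay 1, resets sp_index to -1
 *
 * so the first submit of the next frame starts a fresh chain.
 */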