kiba-engine
surface.c
#include <kiba/gpu/vulkan/surface.h>

#include <kiba/containers/array.h>
#include <kiba/gpu/command.h>
#include <kiba/gpu/surface.h>
#include <kiba/gpu/vulkan/allocator.h>
#include <kiba/gpu/vulkan/conv.h>
#include <kiba/gpu/vulkan/device.h>
#include <kiba/gpu/vulkan/instance.h>
#include <kiba/gpu/vulkan/queue.h>
#include <kiba/gpu/vulkan/util.h>

void vk_surface_destroy(VkSurfaceKHR *surface) {
    if (*surface != VK_NULL_HANDLE) {
        vkDestroySurfaceKHR(vk_instance.raw, *surface, &vk_alloc.vulkan_callbacks);
        *surface = VK_NULL_HANDLE;
    } else {
        KB_WARN("tried to destroy uninitialized surface");
    }
}

b8 gpu_backend_surface_create(struct gpu_backend_surface *surface, void *window_data) {
    return vk_surface_create(&surface->raw, window_data);
}

void gpu_backend_surface_destroy(struct gpu_backend_surface *surface) { vk_surface_destroy(&surface->raw); }

static b8 vk_verify_swap_chain_format_availability(struct gpu_backend_surface *surface,
                                                   struct gpu_backend_device device,
                                                   enum gpu_texture_format format) {
    u32 available_format_count = 0;
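    // Standard Vulkan enumeration idiom: call once with a null output pointer
    // to learn the count, allocate storage, then call again to fill it. The
    // same pattern is used for present modes below.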
    VK_CALL_B8(vkGetPhysicalDeviceSurfaceFormatsKHR(device.physical, surface->raw, &available_format_count, 0));
    array_of(VkSurfaceFormatKHR) available_formats =
        array_create(VkSurfaceFormatKHR, available_format_count, &vk_alloc.kiba_alloc);
    // check the allocation before touching the array
    if (available_formats == KB_NULL) {
        KB_ERROR("could not create array to store available swap chain formats");
        return false;
    }
    array_resize(&available_formats, available_format_count);
    VK_CALL_B8(vkGetPhysicalDeviceSurfaceFormatsKHR(device.physical,
                                                    surface->raw,
                                                    &available_format_count,
                                                    available_formats));

    b8 found_format = false;
    array_for_each(VkSurfaceFormatKHR, surface_format, available_formats) {
        if (format == vk_convert_vk_surface_format(*surface_format)) {
            found_format = true;
            break;
        }
    }

    array_destroy(&available_formats);
    return found_format;
}

static b8 vk_verify_swap_chain_present_mode_availability(struct gpu_backend_surface *surface,
                                                         struct gpu_backend_device device,
                                                         enum gpu_present_mode present_mode) {
    u32 available_present_mode_count = 0;
    VK_CALL_B8(
        vkGetPhysicalDeviceSurfacePresentModesKHR(device.physical, surface->raw, &available_present_mode_count, 0));
    array_of(VkPresentModeKHR) available_present_modes =
        array_create(VkPresentModeKHR, available_present_mode_count, &vk_alloc.kiba_alloc);
    // check the allocation before touching the array
    if (available_present_modes == KB_NULL) {
        KB_ERROR("could not create array to store available swap chain present modes");
        return false;
    }
    array_resize(&available_present_modes, available_present_mode_count);
    VK_CALL_B8(vkGetPhysicalDeviceSurfacePresentModesKHR(device.physical,
                                                         surface->raw,
                                                         &available_present_mode_count,
                                                         available_present_modes));

    VkPresentModeKHR target = vk_convert_present_mode(present_mode);
    b8 found_present_mode = false;
    array_for_each(VkPresentModeKHR, available_present_mode, available_present_modes) {
        if (*available_present_mode == target) {
            found_present_mode = true;
            break;
        }
    }

    array_destroy(&available_present_modes);
    return found_present_mode;
}

static b8 vk_query_swap_chain_capabilities(struct gpu_backend_surface *surface,
                                           struct gpu_backend_device device,
                                           struct gpu_surface_configuration config) {
    VkSurfaceCapabilitiesKHR capabilities;
    VK_CALL_B8(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device.physical, surface->raw, &capabilities));
    // a currentExtent width of 0xFFFFFFFF means the surface size is determined
    // by the swap chain, so derive it from the requested configuration instead
    if (capabilities.currentExtent.width == UINT_MAX) {
        surface->extent.height =
            KB_CLAMP(config.height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height);
        surface->extent.width =
            KB_CLAMP(config.width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width);
    } else {
        surface->extent = capabilities.currentExtent;
    }
    // request one image more than the minimum where the limits allow it;
    // a maxImageCount of 0 means there is no upper limit
    u32 image_count = capabilities.minImageCount;
    if (capabilities.maxImageCount == 0 || capabilities.maxImageCount > capabilities.minImageCount) {
        ++image_count;
    }
    surface->image_count = image_count;
    return true;
}
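
// Note: requesting only the minimum number of swap chain images can make
// vkAcquireNextImageKHR block until the presentation engine releases an
// image, which is why vk_query_swap_chain_capabilities asks for one extra
// image when the device limits allow it.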

static void swap_chain_destroy(struct gpu_backend_surface *surface, struct gpu_backend_device device) {
    if (surface->swap_chain != VK_NULL_HANDLE) {
        vkDestroySwapchainKHR(device.logical, surface->swap_chain, &vk_alloc.vulkan_callbacks);
        surface->swap_chain = VK_NULL_HANDLE;
    } else {
        KB_WARN("tried to destroy uninitialized swap chain");
    }
}

static b8 swap_chain_create(struct gpu_backend_surface *surface,
                            struct gpu_backend_device device,
                            struct gpu_surface_configuration config) {
    // setup and verification
    if (!vk_verify_swap_chain_format_availability(surface, device, config.format)) {
        KB_INFO("did not find required swap chain format");
        return false;
    }
    if (!vk_verify_swap_chain_present_mode_availability(surface, device, config.present_mode)) {
        KB_INFO("did not find required swap chain present mode");
        return false;
    }
    if (!vk_query_swap_chain_capabilities(surface, device, config)) {
        KB_INFO("could not query swap chain capabilities");
        return false;
    }

    // create swap chain
    VkSwapchainCreateInfoKHR swap_chain_info = {
        .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
        .surface = surface->raw,
        .imageExtent = surface->extent,
        .minImageCount = surface->image_count,
        .imageFormat = vk_convert_texture_format(config.format),
        // RGBA16F targets are presented in extended linear sRGB (HDR); all
        // other formats use the standard non-linear sRGB color space
        .imageColorSpace = config.format == GPU_TEXTURE_FORMAT_RGBA16_FLOAT ? VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT
                                                                            : VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
        .presentMode = vk_convert_present_mode(config.present_mode),
        .imageArrayLayers = 1,
        .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
        .preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
        .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, // TODO composite alpha would be applied here
        .clipped = VK_TRUE,
        .oldSwapchain = surface->swap_chain,
    };
    u32 queue_family_indices[] = {device.graphics_queue.index, device.present_queue.index};
    // TODO is this still needed?
    if (device.graphics_queue.index != device.present_queue.index) {
        // images must be shared when graphics and present use different queue families
        swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swap_chain_info.queueFamilyIndexCount = 2;
        swap_chain_info.pQueueFamilyIndices = queue_family_indices;
    } else {
        swap_chain_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    }

    VkSwapchainKHR new_swapchain = VK_NULL_HANDLE;
    VK_CALL_B8(vkCreateSwapchainKHR(device.logical, &swap_chain_info, &vk_alloc.vulkan_callbacks, &new_swapchain));
    if (surface->swap_chain != VK_NULL_HANDLE) {
        // TODO should ideally also be done in the error case above
        swap_chain_destroy(surface, device);
    }
    surface->swap_chain = new_swapchain;

    // get images
    u32 image_count = 0;
    VK_CALL_B8(vkGetSwapchainImagesKHR(device.logical, surface->swap_chain, &image_count, 0));
    KB_ASSERT(image_count <= 3, "swap chain returned more images than the fixed-size image storage can hold"); // TODO
    surface->image_count = image_count;
    VK_CALL_B8(vkGetSwapchainImagesKHR(device.logical, surface->swap_chain, &image_count, surface->images));
    return true;
}
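
// Note on oldSwapchain: passing the retired swap chain into
// vkCreateSwapchainKHR lets the driver reuse its resources and keeps pending
// presents valid during recreation; the retired handle still has to be
// destroyed afterwards, which is what the block above does.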

b8 gpu_backend_surface_configure(struct gpu_backend_surface *surface,
                                 struct gpu_backend_device *device,
                                 struct gpu_surface_configuration config) {
    vkQueueWaitIdle(device->graphics_queue.queue);
    if (surface->fc_image_aquire == VK_NULL_HANDLE) {
        VkFenceCreateInfo fc_create_info = {
            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
            .flags = VK_FENCE_CREATE_SIGNALED_BIT,
        };
        VK_CALL_B8(
            vkCreateFence(device->logical, &fc_create_info, &vk_alloc.vulkan_callbacks, &surface->fc_image_aquire));
    }
    // TODO cleanup and recreate certain resources
    return swap_chain_create(surface, *device, config);
}
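
// The acquire fence is created pre-signaled, a common Vulkan convention so
// that waiting on it before its first real use cannot deadlock; the acquire
// path below also resets it explicitly before handing it to
// vkAcquireNextImageKHR.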

void gpu_backend_surface_unconfigure(struct gpu_backend_surface *surface, struct gpu_backend_device *device) {
    // TODO could probably inline this
    swap_chain_destroy(surface, *device);
    if (surface->fc_image_aquire != VK_NULL_HANDLE) {
        vkDestroyFence(device->logical, surface->fc_image_aquire, &vk_alloc.vulkan_callbacks);
        surface->fc_image_aquire = VK_NULL_HANDLE;
    }
}

b8 gpu_backend_surface_get_current_texture(struct gpu_backend_surface *surface,
                                           struct gpu_backend_device *device,
                                           struct gpu_backend_texture *texture,
                                           b8 *suboptimal) {
    u32 image_index = 0;
    VK_CALL_ASSERT(vkResetFences(device->logical, 1, &surface->fc_image_aquire));
    VkResult res = vkAcquireNextImageKHR(device->logical,
                                         surface->swap_chain,
                                         UINT64_MAX, // TODO could introduce timeout here
                                         VK_NULL_HANDLE,
                                         surface->fc_image_aquire,
                                         &image_index);
    VK_CALL_ASSERT(vkWaitForFences(device->logical, 1, &surface->fc_image_aquire, VK_TRUE, UINT64_MAX));
    VK_CALL_ASSERT(vkResetFences(device->logical, 1, &surface->fc_image_aquire));
    // VK_SUBOPTIMAL_KHR still delivers a usable image, but it is treated as a
    // miss here so the caller reconfigures the surface
    if (res == VK_NOT_READY || res == VK_SUBOPTIMAL_KHR || res == VK_ERROR_OUT_OF_DATE_KHR) {
        KB_WARN("swap chain is suboptimal or out of date");
        *suboptimal = true;
    }
    if (res == VK_SUCCESS) {
        texture->raw = surface->images[image_index];
        surface->presentation_image_index = image_index;
        KB_DEBUG("using tex {pointer} at index {u32}", texture->raw, surface->presentation_image_index);
        return true;
    }
    return false;
}
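
/* For reference: this engine blocks on a fence so the image is guaranteed
 * ready before the caller records commands. A common non-blocking
 * alternative (sketch only, not engine API; `image_available` is a
 * hypothetical per-frame semaphore) lets the GPU wait instead:
 *
 *     VkSemaphore image_available; // created via vkCreateSemaphore
 *     u32 image_index;
 *     vkAcquireNextImageKHR(device->logical, surface->swap_chain, UINT64_MAX,
 *                           image_available, VK_NULL_HANDLE, &image_index);
 *     // then submit with pWaitSemaphores = &image_available and
 *     // pWaitDstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
 */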

b8 gpu_backend_surface_present(struct gpu_backend_surface *surface,
                               struct gpu_backend_device *device,
                               struct gpu_backend_texture *texture,
                               b8 *suboptimal) {
    UNUSED(texture);
    VkResult res = vk_queue_present_surface(surface, device);
    if (res == VK_ERROR_OUT_OF_DATE_KHR || res == VK_SUBOPTIMAL_KHR) {
        KB_WARN("swap chain is suboptimal or out of date");
        *suboptimal = true;
    }
    return res == VK_SUCCESS;
}
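
/* Illustrative frame flow for this API (hypothetical caller code; `surface`,
 * `device` and `config` are assumed to be already created and configured):
 *
 *     struct gpu_backend_texture tex;
 *     b8 suboptimal = false;
 *     if (gpu_backend_surface_get_current_texture(&surface, &device, &tex, &suboptimal)) {
 *         // record and submit rendering work targeting tex.raw ...
 *         gpu_backend_surface_present(&surface, &device, &tex, &suboptimal);
 *     }
 *     if (suboptimal) {
 *         // recreate the swap chain, e.g. after a window resize
 *         gpu_backend_surface_configure(&surface, &device, config);
 *     }
 */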