kiba-engine
command.c
#include <kiba/gpu/command.h>
#include <kiba/gpu/vulkan/allocator.h>
#include <kiba/gpu/vulkan/conv.h>
#include <kiba/gpu/vulkan/device.h>
#include <kiba/gpu/vulkan/instance.h>
#include <kiba/gpu/vulkan/queue.h>
#include <kiba/gpu/vulkan/util.h>
#include "vulkan/vulkan_core.h"

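// Acquires a primary command buffer from the device's queue (see vk_queue_get_command_encoder)
// and labels it for debugging tools. The expected overall call order, sketched from the
// functions in this file rather than enforced anywhere: create -> cmd_begin -> (render pass,
// copies, barriers) -> cmd_end -> submit -> destroy.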
b8 gpu_backend_command_encoder_create(struct gpu_backend_command_encoder *encoder,
                                      struct gpu_backend_device *device,
                                      struct gpu_command_encoder_descriptor desc) {
    if (!vk_queue_get_command_encoder(device, encoder)) {
        KB_ERROR("failed to create command buffer for command encoder");
        return false;
    }
    // TODO for some reason this does not appear properly in renderdoc, perhaps a bug there?
    // the validation layers pick this name up, so it seems to be a renderdoc issue
    vk_device_set_object_name(device->logical, VK_OBJECT_TYPE_COMMAND_BUFFER, encoder->buffer, desc.label);
    return true;
}

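// Returns the command buffer to the queue and clears the handle so a stale encoder
// is not reused by accident.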
void gpu_backend_command_encoder_destroy(struct gpu_backend_command_encoder *encoder,
                                         struct gpu_backend_device *device) {
    vk_queue_return_command_encoder(device, *encoder);
    encoder->buffer = VK_NULL_HANDLE;
}

b8 gpu_backend_command_encoder_submit(struct gpu_backend_command_encoder *encoder, struct gpu_backend_device *device) {
    return vk_queue_submit_encoder(encoder, device);
}

b8 gpu_backend_cmd_begin(struct gpu_backend_command_encoder *encoder) {
    VkCommandBufferBeginInfo begin_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
    };
    VK_CALL_B8(vkBeginCommandBuffer(encoder->buffer, &begin_info));
    return true;
}

void gpu_backend_cmd_copy_buffer_to_buffer(struct gpu_backend_command_encoder *encoder,
                                           struct gpu_backend_buffer *src,
                                           struct gpu_backend_buffer *dst,
                                           usize src_offset,
                                           usize dst_offset,
                                           usize size) {
    VkBufferCopy copy_info = {
        .srcOffset = (VkDeviceSize) src_offset,
        .dstOffset = (VkDeviceSize) dst_offset,
        .size = (VkDeviceSize) size,
    };
    vkCmdCopyBuffer(encoder->buffer, src->raw, dst->raw, 1, &copy_info);
}

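// Builds hashable renderpass/framebuffer keys out of the attachment descriptions and asks the
// device for matching VkRenderPass/VkFramebuffer objects (presumably cached on the device side),
// then begins the pass with viewport and scissor set dynamically to the full render area.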
b8 gpu_backend_begin_render_pass(struct gpu_backend_command_encoder *encoder,
                                 struct gpu_backend_device *device,
                                 struct gpu_render_pass_descriptor desc) {
    u32 clear_value_count = 0;
    VkClearValue clear_values[KB_GPU_MAX_ATTACHMENTS] = {0};
    struct vk_renderpass_key rp_key = {0};
    // TODO extent should maybe come from the render attachments
    struct vk_framebuffer_key fb_key = {
        .extent = {
            .width = (u32) desc.extent.width,
            .height = (u32) desc.extent.height,
            .depth = (u32) desc.extent.layers,
        },
    };

    for (usize i = 0; i < desc.color_attachment_count; ++i) {
        struct gpu_color_attachment col_at = desc.color_attachments[i];
        // TODO respect format of attachment
        // (https://github.com/gfx-rs/wgpu/blob/5b8be97a887eb198f1e58963b4c94d46cf4a84db/wgpu-hal/src/vulkan/conv.rs#L196)
        clear_values[clear_value_count++] = (VkClearValue){
            .color = {
                .float32 = {
                    (f32) col_at.ops.clear_color.r,
                    (f32) col_at.ops.clear_color.g,
                    (f32) col_at.ops.clear_color.b,
                    (f32) col_at.ops.clear_color.a,
                },
            },
        };
        rp_key.colors[rp_key.color_count++] = (struct vk_color_attachment_key){
            .base = {
                .format = vk_convert_texture_format(col_at.view->desc.format),
                .layout = vk_optimal_image_layout(GPU_TEXTURE_USE_COLOR_TARGET, col_at.view->desc.format),
                .ops = gpu_operations_to_attachment_ops(col_at.ops),
            },
        };
        fb_key.attachments[fb_key.attachment_count++] = (struct vk_framebuffer_attachment){
            .view = col_at.view->btv.raw,
            .usage = GPU_TEXTURE_USE_COLOR_TARGET, // TODO should use texture's usage I guess
            .format = col_at.view->desc.format,
        };
    }
    if (desc.depth_stencil_attachment.view) {
        // TODO handle depth/stencil attachments
    }

    // TODO error handling
    VkRenderPass renderpass;
    VkFramebuffer framebuffer;
    if (!vk_device_create_renderpass(device, rp_key, &renderpass)
        || !vk_device_create_framebuffer(device, fb_key, renderpass, &framebuffer)) {
        KB_ERROR("failed to create renderpass or framebuffer for render pass");
        return false;
    }
    vk_device_set_object_name(device->logical, VK_OBJECT_TYPE_RENDER_PASS, renderpass, desc.label);

    VkRect2D render_area = {
        .offset = { .x = 0, .y = 0 },
        .extent = {
            .width = (u32) desc.extent.width,
            .height = (u32) desc.extent.height,
        },
    };
    VkViewport viewport = {
        .x = 0.0f,
        .y = 0.0f,
        .width = (f32) desc.extent.width,
        .height = (f32) desc.extent.height,
        .minDepth = 0.0f,
        .maxDepth = 1.0f,
    };

    VkRenderPassBeginInfo rp_begin_info = {
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
        .renderPass = renderpass,
        .framebuffer = framebuffer,
        .renderArea = render_area,
        .clearValueCount = clear_value_count,
        .pClearValues = clear_values,
    };

    vkCmdSetViewport(encoder->buffer, 0, 1, &viewport);
    vkCmdSetScissor(encoder->buffer, 0, 1, &render_area);
    vkCmdBeginRenderPass(encoder->buffer, &rp_begin_info, VK_SUBPASS_CONTENTS_INLINE);
    return true;
}

void gpu_cmd_bind_pipeline(gpu_command_encoder encoder, gpu_pipeline pipeline) {
    vkCmdBindPipeline(encoder->bce.buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->bp.raw);
}

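// Shared binding offset, always zero until per-binding offsets are stored (see the TODO in
// gpu_cmd_bind_vertex_buffer). Note this is global state used by both the vertex and index
// bind calls below.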
VkDeviceSize offset = 0;

void gpu_cmd_bind_vertex_buffer(gpu_command_encoder encoder, usize slot, gpu_buffer buffer) {
    // TODO store offset for bindings
    offset = 0;
    vkCmdBindVertexBuffers(encoder->bce.buffer, (u32) slot, 1u, &buffer->bb.raw, &offset);
}

void gpu_cmd_bind_index_buffer(gpu_command_encoder encoder, gpu_buffer buffer, enum gpu_index_format format) {
    vkCmdBindIndexBuffer(encoder->bce.buffer, buffer->bb.raw, offset, vk_convert_index_format(format));
}

void gpu_cmd_draw(gpu_command_encoder encoder,
                  usize vertex_count,
                  usize instance_count,
                  usize first_vertex,
                  usize first_instance) {
    vkCmdDraw(encoder->bce.buffer, (u32) vertex_count, (u32) instance_count, (u32) first_vertex, (u32) first_instance);
}

void gpu_cmd_draw_indexed(gpu_command_encoder encoder,
                          usize first_index,
                          usize index_count,
                          usize first_vertex,
                          usize first_instance,
                          usize instance_count) {
    vkCmdDrawIndexed(encoder->bce.buffer,
                     (u32) index_count,
                     (u32) instance_count,
                     (u32) first_index,
                     (i32) first_vertex,
                     (u32) first_instance);
}

b8 gpu_backend_end_render_pass(struct gpu_backend_command_encoder *encoder) {
    vkCmdEndRenderPass(encoder->buffer);
    return true;
}

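// Debug-utils labels are only recorded in debug builds; the function pointers live on the
// global vk_instance since they come from the VK_EXT_debug_utils extension.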
void gpu_cmd_debug_marker_begin(gpu_command_encoder encoder, const char *label) {
#ifdef KB_DEBUG_BUILD
    VkDebugUtilsLabelEXT info = {
        .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
        .pLabelName = label,
        .color = {
            (f32) encoder->desc.debug_color.r,
            (f32) encoder->desc.debug_color.g,
            (f32) encoder->desc.debug_color.b,
            (f32) encoder->desc.debug_color.a,
        },
    };
    vk_instance.cmdBeginDebugUtilsLabel(encoder->bce.buffer, &info);
#else
    UNUSED(encoder);
    UNUSED(label);
#endif
}

void gpu_cmd_debug_marker_insert(gpu_command_encoder encoder, const char *label) {
#ifdef KB_DEBUG_BUILD
    VkDebugUtilsLabelEXT info = {
        .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
        .pLabelName = label,
        .color = {
            (f32) encoder->desc.debug_color.r,
            (f32) encoder->desc.debug_color.g,
            (f32) encoder->desc.debug_color.b,
            (f32) encoder->desc.debug_color.a,
        },
    };
    vk_instance.cmdInsertDebugUtilsLabel(encoder->bce.buffer, &info);
#else
    UNUSED(encoder);
    UNUSED(label);
#endif
}

void gpu_cmd_debug_marker_end(gpu_command_encoder encoder) {
#ifdef KB_DEBUG_BUILD
    vk_instance.cmdEndDebugUtilsLabel(encoder->bce.buffer);
#else
    UNUSED(encoder);
#endif
}

b8 gpu_backend_cmd_end(struct gpu_backend_command_encoder *encoder) {
    VK_CALL_B8(vkEndCommandBuffer(encoder->buffer));
    return true;
}

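// Translates engine texture barriers into VkImageMemoryBarrier layout transitions. The stage
// masks gathered from all barriers feed a single vkCmdPipelineBarrier call, and the subresource
// range is currently hard-coded to one color mip level and array layer.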
b8 gpu_backend_insert_texture_barriers(struct gpu_backend_command_encoder *encoder,
                                       array_of(const struct gpu_texture_barrier) barriers) {
    VkImageMemoryBarrier vk_barriers[32] = { 0 }; // FIXME
    u32 vk_barrier_count = 0;
    VkPipelineStageFlags src_stage = 0;
    VkPipelineStageFlags dst_stage = 0;
    array_for_each(const struct gpu_texture_barrier, bar, barriers) {
        VkImageSubresourceRange subresource_range = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        };
        VkImageLayout src = vk_optimal_image_layout(bar->source_use, bar->format);
        VkImageLayout dst = vk_optimal_image_layout(bar->target_use, bar->format);
        VkAccessFlags src_access = 0;
        VkAccessFlags dst_access = 0;
        vk_map_texture_use_to_barrier_flags(bar->source_use, &src_stage, &src_access);
        vk_map_texture_use_to_barrier_flags(bar->target_use, &dst_stage, &dst_access);
        VkImageMemoryBarrier barrier = {
            .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
            .oldLayout = src,
            .newLayout = dst,
            .image = bar->tex->raw,
            .subresourceRange = subresource_range,
            .srcAccessMask = src_access,
            .dstAccessMask = dst_access,
        };
        vk_barriers[vk_barrier_count++] = barrier;
    }

    vkCmdPipelineBarrier(encoder->buffer,
                         src_stage,
                         dst_stage,
                         0,
                         0,
                         KB_NULL,
                         0,
                         KB_NULL,
                         vk_barrier_count,
                         vk_barriers);
    return true;
}

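// Same idea for buffers: each gpu_buffer_barrier becomes a VkBufferMemoryBarrier covering the
// whole buffer (VK_WHOLE_SIZE), batched into one vkCmdPipelineBarrier call.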
b8 gpu_backend_insert_buffer_barriers(struct gpu_backend_command_encoder *encoder,
                                      array_of(const struct gpu_buffer_barrier) barriers) {
    VkBufferMemoryBarrier vk_barriers[32] = { 0 }; // FIXME
    u32 vk_barrier_count = 0;
    VkPipelineStageFlags src_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    VkPipelineStageFlags dst_stage = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    array_for_each(const struct gpu_buffer_barrier, bar, barriers) {
        VkAccessFlags src_access = 0;
        VkAccessFlags dst_access = 0;
        vk_map_buffer_use_to_barrier_flags(bar->source_usage, &src_stage, &src_access);
        vk_map_buffer_use_to_barrier_flags(bar->target_usage, &dst_stage, &dst_access);
        VkBufferMemoryBarrier barrier = {
            .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
            .buffer = bar->buf->raw,
            .size = VK_WHOLE_SIZE,
            .srcAccessMask = src_access,
            .dstAccessMask = dst_access,
        };
        vk_barriers[vk_barrier_count++] = barrier;
    }

    vkCmdPipelineBarrier(encoder->buffer,
                         src_stage,
                         dst_stage,
                         0,
                         0,
                         KB_NULL,
                         vk_barrier_count,
                         vk_barriers,
                         0,
                         KB_NULL);
    return true;
}