kiba-engine
command.c
#include <kiba/gpu/command.h>

#include <kiba/core/memory.h>
#include <kiba/gpu/device.h>

// TODO validate resource usage transitions against usage declared in descriptor
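
// Creates a command encoder as a device resource and seeds it with one usage
// tracker per device texture and buffer, so that later commands can record
// usage transitions locally and turn them into barriers.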
b8 gpu_command_encoder_create(gpu_command_encoder *encoder,
                              gpu_device device,
                              struct gpu_command_encoder_descriptor desc) {
    gpu_command_encoder new_encoder;
    if (!gpu_device_resource_command_encoder_create(device, &new_encoder)) {
        KB_ERROR("failed to create command encoder as device resource");
        return false;
    }
    const usize tex_tracker_size = array_size(device->textures);
    // TODO handle allocation failures
    new_encoder->texture_trackers = array_create(struct gpu_texture_tracker, tex_tracker_size, &device->alloc);
    for (usize i = 0; i < tex_tracker_size; ++i) {
        const struct gpu_texture_tracker new_tracker = {
            .texture = device->textures[i].texture,
            .start_use = GPU_TEXTURE_USE_UNINITIALIZED,
            .cur_use = GPU_TEXTURE_USE_UNINITIALIZED,
        };
        array_push(new_encoder->texture_trackers, new_tracker); // safe due to capacity calc above
    }
    new_encoder->pending_barriers = array_create(struct gpu_texture_barrier, tex_tracker_size, &device->alloc);
    const usize buf_tracker_size = array_size(device->buffer_trackers);
    new_encoder->buffer_trackers = array_create(struct gpu_buffer_tracker, buf_tracker_size, &device->alloc);
    for (usize i = 0; i < buf_tracker_size; ++i) {
        const struct gpu_buffer_tracker new_tracker = {
            .buffer = device->buffer_trackers[i].buffer,
            .start_use = 0,
            .cur_use = 0,
        };
        array_push(new_encoder->buffer_trackers, new_tracker); // safe due to capacity calc above
    }
    new_encoder->pending_buffer_barriers = array_create(struct gpu_buffer_barrier, buf_tracker_size, &device->alloc);
    new_encoder->desc = desc;
    if (!gpu_backend_command_encoder_create(&new_encoder->bce, &device->bd, desc)) {
        memory_free(new_encoder, sizeof(struct gpu_command_encoder));
        return false;
    }
    *encoder = new_encoder;
    return true;
}
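
// Public destruction only enqueues the encoder for deferred destruction on the
// device; the backend encoder and the tracker arrays are released later by
// gpu_command_encoder_destroy_internal.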
void gpu_command_encoder_destroy(gpu_command_encoder encoder) {
    gpu_device_resource_command_encoder_enqueue_destroy(encoder);
}

void gpu_command_encoder_destroy_internal(gpu_command_encoder encoder) {
    gpu_backend_command_encoder_destroy(&encoder->bce, &encoder->device->bd);
    array_destroy(&encoder->buffer_trackers);
    array_destroy(&encoder->texture_trackers);
}
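
// Records a texture use on the encoder-local tracker. The first use only sets
// start_use; any later use that differs from the current one queues a pending
// barrier from the old use to the new one.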
static void gpu_command_encoder_set_texture_usage(gpu_command_encoder encoder,
                                                  gpu_texture texture,
                                                  enum gpu_texture_use new_use) {
    KB_ASSERT(array_size(encoder->texture_trackers) > texture->tracker_id,
              "texture must be created before command encoder was created");
    struct gpu_texture_tracker *tracker = encoder->texture_trackers + texture->tracker_id;
    if (tracker->start_use == GPU_TEXTURE_USE_UNINITIALIZED) {
        tracker->start_use = new_use;
    } else if (tracker->cur_use != GPU_TEXTURE_USE_UNINITIALIZED && tracker->cur_use != new_use) {
        struct gpu_texture_barrier barrier = {
            .tex = &texture->bt,
            .format = texture->desc.format,
            .source_use = tracker->cur_use,
            .target_use = new_use,
        };
        KB_ASSERT(array_push_checked(&encoder->pending_barriers, &barrier), "TODO propagate error");
    }
    tracker->cur_use = new_use;
}
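
// Buffer counterpart of the texture tracking above: usage 0 means "not used
// yet", the first real usage becomes start_use, and any differing subsequent
// usage queues a pending buffer barrier.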
static void gpu_command_encoder_set_buffer_usage(gpu_command_encoder encoder,
                                                 gpu_buffer buffer,
                                                 enum gpu_buffer_usage new_usage) {
    KB_ASSERT(array_size(encoder->buffer_trackers) > buffer->tracker_id,
              "buffer must be created before command encoder was created");
    struct gpu_buffer_tracker *tracker = encoder->buffer_trackers + buffer->tracker_id;
    if (!tracker->start_use) {
        tracker->start_use = new_usage;
    } else if (tracker->cur_use && tracker->cur_use != new_usage) {
        struct gpu_buffer_barrier barrier = {
            .buf = &buffer->bb,
            .source_usage = tracker->cur_use,
            .target_usage = new_usage,
        };
        KB_ASSERT(array_push_checked(&encoder->pending_buffer_barriers, &barrier), "TODO propagate error");
    }
    tracker->cur_use = new_usage;
}

static void gpu_command_encoder_insert_pending_texture_barriers(gpu_command_encoder encoder) {
    if (array_size(encoder->pending_barriers)) {
        gpu_backend_insert_texture_barriers(&encoder->bce, encoder->pending_barriers);
        array_resize(&encoder->pending_barriers, 0);
    }
}

static void gpu_command_encoder_insert_pending_buffer_barriers(gpu_command_encoder encoder) {
    if (array_size(encoder->pending_buffer_barriers)) {
        gpu_backend_insert_buffer_barriers(&encoder->bce, encoder->pending_buffer_barriers);
        array_resize(&encoder->pending_buffer_barriers, 0);
    }
}

b8 gpu_cmd_begin(gpu_command_encoder encoder) {
    if (gpu_backend_cmd_begin(&encoder->bce)) {
        // TODO: though this is convenient to have, it is mostly a hotfix for RenderDoc not showing
        // the debug name of the command buffer even though it is set properly
        gpu_cmd_debug_marker_begin(encoder, encoder->desc.label);
        return true;
    }
    return false;
}
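
// Validates that the source and destination buffers were created with the
// COPY_SRC/COPY_DST usages, flushes any resulting buffer barriers, and then
// records the backend copy.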
b8 gpu_cmd_copy_buffer_to_buffer(gpu_command_encoder encoder,
                                 gpu_buffer src,
                                 gpu_buffer dst,
                                 usize src_offset,
                                 usize dst_offset,
                                 usize size) {
    // TODO proper error messages
    if (!KB_FLAGS_ANY_SET(src->desc.usage, GPU_BUFFER_USAGE_COPY_SRC)) {
        return false;
    }
    if (!KB_FLAGS_ANY_SET(dst->desc.usage, GPU_BUFFER_USAGE_COPY_DST)) {
        return false;
    }
    gpu_command_encoder_set_buffer_usage(encoder, src, GPU_BUFFER_USAGE_COPY_SRC);
    gpu_command_encoder_set_buffer_usage(encoder, dst, GPU_BUFFER_USAGE_COPY_DST);
    gpu_command_encoder_insert_pending_buffer_barriers(encoder);
    gpu_backend_cmd_copy_buffer_to_buffer(&encoder->bce, &src->bb, &dst->bb, src_offset, dst_offset, size);
    return true;
}
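
// Transitions every color attachment to GPU_TEXTURE_USE_COLOR_TARGET and
// flushes the pending texture barriers before starting the backend pass.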
b8 gpu_begin_render_pass(gpu_command_encoder encoder, struct gpu_render_pass_descriptor desc) {
    // TODO handle depth attachments
    for (usize i = 0; i < desc.color_attachment_count; ++i) {
        gpu_command_encoder_set_texture_usage(encoder,
                                              desc.color_attachments[i].view->texture,
                                              GPU_TEXTURE_USE_COLOR_TARGET);
    }
    gpu_command_encoder_insert_pending_texture_barriers(encoder);
    return gpu_backend_begin_render_pass(&encoder->bce, &encoder->device->bd, desc);
}

b8 gpu_end_render_pass(gpu_command_encoder encoder) { return gpu_backend_end_render_pass(&encoder->bce); }

b8 gpu_cmd_end(gpu_command_encoder encoder) {
    // TODO could move encoder to set of completed encoders so this can be used to record again
    gpu_cmd_debug_marker_end(encoder);
    return true;
}
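
// Reconciles encoder-local texture state with the device-global trackers at
// submit time: a short-lived init command buffer transitions each used texture
// from its device-side use to the use this encoder first recorded, and the
// encoder itself then queues barriers back to each texture's device start_use
// before the backend command buffer is ended and submitted.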
b8 gpu_command_encoder_submit(gpu_command_encoder encoder) {
    const usize tracker_size = array_size(encoder->texture_trackers);
    KB_ASSERT(tracker_size <= array_size(encoder->device->textures),
              "at least all command buffer textures ({usize}) must still be available in the device ({usize})",
              tracker_size,
              array_size(encoder->device->textures));
    if (tracker_size > 0) {
        array_of(struct gpu_texture_barrier) init_barriers =
            array_create(struct gpu_texture_barrier, tracker_size, &encoder->device->alloc);
        for (usize i = 0; i < tracker_size; ++i) {
            const struct gpu_texture_tracker encoder_tracker = encoder->texture_trackers[i];
            const struct gpu_texture_tracker device_tracker = encoder->device->textures[i];
            if (device_tracker.texture && encoder_tracker.cur_use != GPU_TEXTURE_USE_UNINITIALIZED
                && encoder_tracker.start_use != device_tracker.cur_use) {
                struct gpu_texture_barrier barrier = {
                    .tex = &device_tracker.texture->bt,
                    .format = device_tracker.texture->desc.format,
                    .source_use = device_tracker.cur_use,
                    .target_use = encoder_tracker.start_use,
                };
                KB_ASSERT(array_push_checked(&init_barriers, &barrier), "TODO propagate error");
                encoder->device->textures[i].cur_use = encoder_tracker.cur_use;
            }
        }
        if (array_size(init_barriers) > 0) {
            struct gpu_backend_command_encoder init = { 0 };
            struct gpu_command_encoder_descriptor init_desc = {
                .label = "CE: texture init",
                .debug_color = {
                    .r = 0.3,
                    .g = 0.3,
                    .b = 0.3,
                    .a = 0.3,
                },
            };
            gpu_backend_command_encoder_create(&init, &encoder->device->bd, init_desc);
            gpu_backend_cmd_begin(&init);
            gpu_backend_insert_texture_barriers(&init, init_barriers);
            gpu_backend_cmd_end(&init);
            gpu_backend_command_encoder_submit(&init, &encoder->device->bd);
            gpu_backend_command_encoder_destroy(&init, &encoder->device->bd);
        }
        array_destroy(&init_barriers);

        for (usize i = 0; i < tracker_size; ++i) {
            const struct gpu_texture_tracker encoder_tracker = encoder->texture_trackers[i];
            const struct gpu_texture_tracker device_tracker = encoder->device->textures[i];
            if (device_tracker.texture && device_tracker.start_use != GPU_TEXTURE_USE_UNINITIALIZED
                && encoder_tracker.cur_use != device_tracker.start_use) {
                gpu_command_encoder_set_texture_usage(encoder, device_tracker.texture, device_tracker.start_use);
            }
        }
        gpu_command_encoder_insert_pending_texture_barriers(encoder);
    }
    gpu_backend_cmd_end(&encoder->bce);
    return gpu_backend_command_encoder_submit(&encoder->bce, &encoder->device->bd);
}