kiba-engine
device.c
#include <kiba/gpu/device.h>

#include <kiba/core/memory.h>
#include <kiba/gpu/buffer.h>
#include <kiba/gpu/command.h>
#include <kiba/gpu/pipeline.h>
#include <kiba/gpu/shader.h>
#include <kiba/gpu/texture.h>

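/* Dispatches destruction of a queued resource to the type-specific destroy
 * routine; the stored handle is cast back to its concrete type based on the
 * recorded resource type tag. */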
static inline void gpu_device_resource_destroy(struct gpu_device_resource resource) {
    switch (resource.type) {
    case GPU_DEVICE_RESOURCE_TEXTURE:
        gpu_device_resource_texture_destroy((gpu_texture) resource.handle);
        break;
    case GPU_DEVICE_RESOURCE_TEXTURE_VIEW:
        gpu_device_resource_texture_view_destroy((gpu_texture_view) resource.handle);
        break;
    case GPU_DEVICE_RESOURCE_PIPELINE:
        gpu_device_resource_pipeline_destroy((gpu_pipeline) resource.handle);
        break;
    case GPU_DEVICE_RESOURCE_PIPELINE_LAYOUT:
        gpu_device_resource_pipeline_layout_destroy((gpu_pipeline_layout) resource.handle);
        break;
    case GPU_DEVICE_RESOURCE_SHADER_MODULE:
        gpu_device_resource_shader_module_destroy((gpu_shader_module) resource.handle);
        break;
    case GPU_DEVICE_RESOURCE_BUFFER:
        gpu_device_resource_buffer_destroy((gpu_buffer) resource.handle);
        break;
    case GPU_DEVICE_RESOURCE_COMMAND_ENCODER:
        gpu_device_resource_command_encoder_destroy((gpu_command_encoder) resource.handle);
        break;
    }
}

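/* The device owns a dedicated free-list allocator; the device struct itself,
 * the tracker arrays, the destruction queues and all resource metadata below
 * are served from it, so the failure paths during creation can ultimately be
 * unwound by destroying that allocator. */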
b8 gpu_device_create(gpu_device *device) {
    // TODO error reporting
    allocator dev_alloc = {0};
    if (allocator_create(&dev_alloc, ALLOCATOR_FREE_LIST, KB_KILOBYTE(4))) {
        struct gpu_device *new_dev = allocator_allocate(&dev_alloc, sizeof(struct gpu_device));
        if (new_dev != KB_NULL) {
            new_dev->alloc = dev_alloc;
            if (id_generator_create(&new_dev->texture_id_gen, &new_dev->alloc)
                && id_generator_create(&new_dev->buffer_id_gen, &new_dev->alloc)) {
                new_dev->textures = array_create(struct gpu_texture_tracker, 8, &new_dev->alloc);
                new_dev->buffer_trackers = array_create(struct gpu_buffer_tracker, 8, &new_dev->alloc);
                if (new_dev->textures && new_dev->buffer_trackers) {
                    array_resize(&new_dev->textures, 8);        // is safe due to <= capacity
                    array_resize(&new_dev->buffer_trackers, 8); // is safe due to <= capacity
                    new_dev->destruction_queue_index = 0;
                    for (usize i = 0; i < 2; ++i) {
                        new_dev->destruction_queues[i].resources =
                            array_create(struct gpu_device_resource, 8, &new_dev->alloc);
                        KB_ASSERT(new_dev->destruction_queues[i].resources, "FIXME this is temp");
                    }
                    if (gpu_backend_device_create(&new_dev->bd)) {
                        *device = new_dev;
                        return true;
                    }
                    array_destroy(&new_dev->textures);
                }
                id_generator_destroy(new_dev->buffer_id_gen);
                id_generator_destroy(new_dev->texture_id_gen);
            }
            allocator_free(&dev_alloc, new_dev);
        }
        allocator_destroy(&dev_alloc);
    }
    *device = KB_NULL;
    return false;
}

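/* Double-buffered deferred destruction: flip to the other queue and drain it.
 * Resources enqueued via the *_enqueue_destroy helpers land in the queue that
 * is current at enqueue time, so they survive at least one further
 * gpu_device_update before being destroyed (presumably so that in-flight GPU
 * work referencing them can finish first). */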
void gpu_device_update(gpu_device device) {
    device->destruction_queue_index ^= 1;
    array_of(struct gpu_device_resource) resources =
        device->destruction_queues[device->destruction_queue_index].resources;
    array_for_each(struct gpu_device_resource, resource, resources) {
        KB_INFO("destroying {u32} resource {pointer}", resource->type, resource->handle);
        gpu_device_resource_destroy(*resource);
    }
    array_resize(&device->destruction_queues[device->destruction_queue_index].resources, 0);
}

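/* Minimal lifecycle sketch (illustrative only; the frame loop and the elided
 * resource work are assumptions, not part of this file):
 *
 *     gpu_device device = KB_NULL;
 *     if (gpu_device_create(&device)) {
 *         while (running) {              // hypothetical frame loop
 *             // ... create/use resources, enqueue destruction ...
 *             gpu_device_update(device); // drains one destruction queue
 *         }
 *         gpu_device_destroy(&device);   // flushes both queues on shutdown
 *     }
 */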
void gpu_device_destroy(gpu_device *device) {
    KB_ASSERT(device != KB_NULL, "device handle must not be NULL");
    struct gpu_device *dev = *device;
    if (dev) {
        gpu_backend_device_finish_running_tasks(&dev->bd);
        // TODO cleanup resource arrays
        for (usize i = 0; i < 2; ++i) {
            if (dev->destruction_queues[i].resources) {
                array_for_each(struct gpu_device_resource, resource, dev->destruction_queues[i].resources) {
                    KB_INFO("destroying {u32} resource {pointer}", resource->type, resource->handle);
                    gpu_device_resource_destroy(*resource);
                }
                array_destroy(&dev->destruction_queues[i].resources);
            }
        }
        gpu_backend_device_destroy(&dev->bd);
        array_destroy(&dev->buffer_trackers);
        array_destroy(&dev->textures);
        id_generator_destroy(dev->buffer_id_gen);
        id_generator_destroy(dev->texture_id_gen);
        allocator dev_alloc = dev->alloc;
        allocator_free(&dev_alloc, dev);
        allocator_destroy(&dev_alloc);
    }
    *device = KB_NULL;
}

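/* Textures (and buffers, below) are tracked in a flat array indexed by ids from
 * the device's id generator; the tracker array is doubled when an id falls
 * outside its current size, and slots are recycled via id_return on release. */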
b8 gpu_device_resource_texture_create(gpu_device device, gpu_texture *texture) {
    identifier track_id = id_request(device->texture_id_gen);
    const usize textures_size = array_size(device->textures);
    if (track_id >= textures_size && !array_resize(&device->textures, textures_size * 2)) {
        KB_ERROR("unable to allocate space for new textures");
        id_return(device->texture_id_gen, track_id); // give the unused id back
        return false;
    }

    // TODO get alignment of type
    struct gpu_texture *tex = allocator_allocate_aligned(&device->alloc, sizeof(struct gpu_texture), 16);
    if (!tex) {
        KB_ERROR("unable to allocate memory for texture metadata");
        id_return(device->texture_id_gen, track_id);
        return false;
    }
    tex->device = device;
    tex->tracker_id = track_id;
    device->textures[track_id].texture = tex;
    device->textures[track_id].start_use = GPU_TEXTURE_USE_UNINITIALIZED;
    device->textures[track_id].cur_use = GPU_TEXTURE_USE_UNINITIALIZED;
    *texture = tex;
    return true;
}

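/* Each resource type has up to three teardown entry points: *_enqueue_destroy
 * defers destruction to a later gpu_device_update, *_destroy tears the backend
 * object down immediately, and *_release (for tracked resources) only clears
 * the tracker slot and frees the metadata. */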
void gpu_device_resource_texture_enqueue_destroy(gpu_texture texture) {
    gpu_device device = texture->device;
    KB_ASSERT(device, "texture must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_TEXTURE,
        .handle = texture,
    };
    array_push_checked(&device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_texture_destroy(gpu_texture texture) {
    gpu_texture_destroy_internal(texture);
    gpu_device_resource_texture_release(texture);
}

void gpu_device_resource_texture_release(gpu_texture texture) {
    gpu_device device = texture->device;
    KB_ASSERT(device, "texture must have valid device handle");
    const usize textures_size = array_size(device->textures);
    if (texture->tracker_id < textures_size) {
        device->textures[texture->tracker_id].texture = KB_NULL;
        id_return(device->texture_id_gen, texture->tracker_id);
    }
    allocator_free(&device->alloc, texture);
}

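/* Texture views, pipeline layouts, pipelines, shader modules and command
 * encoders are not id-tracked; the device only allocates and frees their
 * metadata and defers backend destruction through the same queues. */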
b8 gpu_device_resource_texture_view_create(gpu_device device, gpu_texture_view *view) {
    // TODO get alignment of type
    struct gpu_texture_view *v = allocator_allocate_aligned(&device->alloc, sizeof(struct gpu_texture_view), 16);
    if (!v) {
        KB_ERROR("unable to allocate memory for texture view metadata");
        return false;
    }
    v->device = device;
    *view = v;
    return true;
}

void gpu_device_resource_texture_view_enqueue_destroy(gpu_texture_view view) {
    gpu_device device = view->device;
    KB_ASSERT(device, "texture view must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_TEXTURE_VIEW,
        .handle = view,
    };
    array_push_checked(&device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_texture_view_destroy(gpu_texture_view view) {
    gpu_texture_view_destroy_internal(view);
    allocator_free(&view->device->alloc, view);
}

b8 gpu_device_resource_pipeline_layout_create(gpu_device device, gpu_pipeline_layout *layout) {
    // TODO get alignment of type
    struct gpu_pipeline_layout *l = allocator_allocate_aligned(&device->alloc, sizeof(struct gpu_pipeline_layout), 16);
    if (!l) {
        KB_ERROR("unable to allocate memory for pipeline layout metadata");
        return false;
    }
    l->device = device;
    *layout = l;
    return true;
}

void gpu_device_resource_pipeline_layout_enqueue_destroy(gpu_pipeline_layout layout) {
    gpu_device device = layout->device;
    KB_ASSERT(device, "pipeline layout must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_PIPELINE_LAYOUT,
        .handle = layout,
    };
    array_push_checked(&device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_pipeline_layout_destroy(gpu_pipeline_layout layout) {
    gpu_pipeline_layout_destroy_internal(layout);
    allocator_free(&layout->device->alloc, layout);
}

b8 gpu_device_resource_pipeline_create(gpu_device device, gpu_pipeline *pipeline) {
    // TODO get alignment of type
    struct gpu_pipeline *p = allocator_allocate_aligned(&device->alloc, sizeof(struct gpu_pipeline), 16);
    if (!p) {
        KB_ERROR("unable to allocate memory for pipeline metadata");
        return false;
    }
    p->device = device;
    *pipeline = p;
    return true;
}

void gpu_device_resource_pipeline_enqueue_destroy(gpu_pipeline pipeline) {
    gpu_device device = pipeline->device;
    KB_ASSERT(device, "pipeline must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_PIPELINE,
        .handle = pipeline,
    };
    array_push_checked(&device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_pipeline_destroy(gpu_pipeline pipeline) {
    gpu_render_pipeline_destroy_internal(pipeline);
    allocator_free(&pipeline->device->alloc, pipeline);
}

b8 gpu_device_resource_shader_module_create(gpu_device device, gpu_shader_module *shader) {
    // TODO get alignment of type
    struct gpu_shader_module *s = allocator_allocate_aligned(&device->alloc, sizeof(struct gpu_shader_module), 16);
    if (!s) {
        KB_ERROR("unable to allocate memory for shader metadata");
        return false;
    }
    s->device = device;
    *shader = s;
    return true;
}

void gpu_device_resource_shader_module_enqueue_destroy(gpu_shader_module shader) {
    gpu_device device = shader->device;
    KB_ASSERT(device, "shader must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_SHADER_MODULE,
        .handle = shader,
    };
    array_push_checked(&device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_shader_module_destroy(gpu_shader_module shader) {
    gpu_shader_module_destroy_internal(shader);
    allocator_free(&shader->device->alloc, shader);
}

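/* Buffers mirror the texture tracker scheme above; their usage state starts at
 * zero instead of a dedicated *_UNINITIALIZED constant. */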
b8 gpu_device_resource_buffer_create(gpu_device device, gpu_buffer *buffer) {
    identifier track_id = id_request(device->buffer_id_gen);
    const usize buffers_size = array_size(device->buffer_trackers);
    if (track_id >= buffers_size && !array_resize(&device->buffer_trackers, buffers_size * 2)) {
        KB_ERROR("unable to allocate space for new buffers");
        id_return(device->buffer_id_gen, track_id); // give the unused id back
        return false;
    }

    // TODO get alignment of type
    struct gpu_buffer *buf = allocator_allocate_aligned(&device->alloc, sizeof(struct gpu_buffer), 16);
    if (!buf) {
        KB_ERROR("unable to allocate memory for buffer metadata");
        id_return(device->buffer_id_gen, track_id);
        return false;
    }
    buf->device = device;
    buf->tracker_id = track_id;
    device->buffer_trackers[track_id].buffer = buf;
    device->buffer_trackers[track_id].start_use = 0;
    device->buffer_trackers[track_id].cur_use = 0;
    *buffer = buf;
    return true;
}

void gpu_device_resource_buffer_enqueue_destroy(gpu_buffer buffer) {
    gpu_device device = buffer->device;
    KB_ASSERT(device, "buffer must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_BUFFER,
        .handle = buffer,
    };
    array_push_checked(&device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_buffer_destroy(gpu_buffer buffer) {
    gpu_buffer_destroy_internal(buffer);
    gpu_device_resource_buffer_release(buffer);
}

void gpu_device_resource_buffer_release(gpu_buffer buffer) {
    gpu_device device = buffer->device;
    KB_ASSERT(device, "buffer must have valid device handle");
    const usize buffers_size = array_size(device->buffer_trackers);
    if (buffer->tracker_id < buffers_size) {
        device->buffer_trackers[buffer->tracker_id].buffer = KB_NULL;
        id_return(device->buffer_id_gen, buffer->tracker_id);
    }
    allocator_free(&device->alloc, buffer);
}

b8 gpu_device_resource_command_encoder_create(gpu_device device, gpu_command_encoder *encoder) {
    // TODO get alignment of type
    struct gpu_command_encoder *e = allocator_allocate_aligned(&device->alloc, sizeof(struct gpu_command_encoder), 16);
    if (!e) {
        KB_ERROR("unable to allocate memory for encoder metadata");
        return false;
    }
    e->device = device;
    *encoder = e;
    return true;
}

void gpu_device_resource_command_encoder_enqueue_destroy(gpu_command_encoder encoder) {
    gpu_device device = encoder->device;
    KB_ASSERT(device, "encoder must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_COMMAND_ENCODER,
        .handle = encoder,
    };
    array_push_checked(&device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_command_encoder_destroy(gpu_command_encoder encoder) {
    gpu_command_encoder_destroy_internal(encoder);
    allocator_free(&encoder->device->alloc, encoder);
}