#include <kiba/gpu/device.h>

#include <kiba/gpu/buffer.h>
#include <kiba/gpu/command.h>
#include <kiba/gpu/pipeline.h>
#include <kiba/gpu/shader.h>
#include <kiba/gpu/texture.h>
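/*
 * Fragments of the device implementation: per-resource tracker tables backed
 * by id generators, plus two destruction queues that defer resource teardown
 * (presumably until the backend has finished any work that still references
 * the resource).
 */

/* The switch below is the body of gpu_device_resource_destroy (the name is
 * taken from the call sites further down): dispatch on the resource type and
 * hand the stored handle to the matching type-specific destroy routine. */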
switch (resource.type) {
case GPU_DEVICE_RESOURCE_TEXTURE:
    gpu_device_resource_texture_destroy((gpu_texture) resource.handle);
    break;
case GPU_DEVICE_RESOURCE_TEXTURE_VIEW:
    gpu_device_resource_texture_view_destroy((gpu_texture_view) resource.handle);
    break;
case GPU_DEVICE_RESOURCE_PIPELINE:
    gpu_device_resource_pipeline_destroy((gpu_pipeline) resource.handle);
    break;
case GPU_DEVICE_RESOURCE_PIPELINE_LAYOUT:
    /* ... */
case GPU_DEVICE_RESOURCE_SHADER_MODULE:
    /* ... */
case GPU_DEVICE_RESOURCE_BUFFER:
    gpu_device_resource_buffer_destroy((gpu_buffer) resource.handle);
    break;
case GPU_DEVICE_RESOURCE_COMMAND_ENCODER:
    /* ... */
}
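/* Device creation (fragment): wire up the device allocator, the texture and
 * buffer id generators, the tracker arrays, and both destruction queues, then
 * hand off to the backend.  The surrounding signature and error handling are
 * not part of this fragment. */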
new_dev->alloc = dev_alloc;
if (id_generator_create(&new_dev->texture_id_gen, &new_dev->alloc)
    && id_generator_create(&new_dev->buffer_id_gen, &new_dev->alloc)) {
    /* ... (the textures tracker array is created here) ... */
    new_dev->buffer_trackers =
        array_create(struct gpu_buffer_tracker, 8, &new_dev->alloc);
    if (new_dev->textures && new_dev->buffer_trackers) {
        array_resize(&new_dev->textures, 8);
        array_resize(&new_dev->buffer_trackers, 8);
        new_dev->destruction_queue_index = 0;
        for (usize i = 0; i < 2; ++i) {
            new_dev->destruction_queues[i].resources =
                array_create(struct gpu_device_resource, 8, &new_dev->alloc); /* capacity assumed */
            KB_ASSERT(new_dev->destruction_queues[i].resources,
                      "FIXME this is temp");
        }
        if (gpu_backend_device_create(&new_dev->bd)) {
            /* ... */
        }
        /* ... */
        array_destroy(&new_dev->textures);
        /* ... */
        id_generator_destroy(new_dev->buffer_id_gen);
        id_generator_destroy(new_dev->texture_id_gen);
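/* Deferred-destruction flush (fragment): the device keeps two destruction
 * queues and flips the active index on every flush, so resources enqueued
 * while one queue is being drained land in the other queue and are destroyed
 * on the following flush. */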
device->destruction_queue_index ^= 1;
array_for_each(struct gpu_device_resource,
               resource,
               device->destruction_queues[device->destruction_queue_index].resources) {
    KB_INFO("destroying {u32} resource {pointer}", resource->type, resource->handle);
    gpu_device_resource_destroy(*resource);
}
array_resize(&device->destruction_queues[device->destruction_queue_index].resources, 0);
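/* Device teardown (fragment): wait for the backend to finish outstanding
 * work, drain and free both destruction queues, then tear down the backend
 * device, the tracker arrays, and the id generators. */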
gpu_backend_device_finish_running_tasks(&dev->bd);
for (usize i = 0; i < 2; ++i) {
    if (dev->destruction_queues[i].resources) {
        array_for_each(struct gpu_device_resource,
                       resource,
                       dev->destruction_queues[i].resources) {
            KB_INFO("destroying {u32} resource {pointer}", resource->type, resource->handle);
            gpu_device_resource_destroy(*resource);
        }
        array_destroy(&dev->destruction_queues[i].resources);
    }
}
gpu_backend_device_destroy(&dev->bd);
array_destroy(&dev->buffer_trackers);
array_destroy(&dev->textures);
id_generator_destroy(dev->buffer_id_gen);
id_generator_destroy(dev->texture_id_gen);
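/* Texture registration (fragment): reserve a tracker slot from the texture id
 * generator, doubling the tracker array when the new id falls past its current
 * size; the id is returned to the generator if allocating the texture metadata
 * fails. */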
identifier track_id = id_request(device->texture_id_gen);
const usize textures_size = array_size(device->textures);
if (track_id >= textures_size
    && !array_resize(&device->textures, textures_size * 2)) {
    KB_ERROR("unable to allocate space for new textures");
    /* ... */
}
/* ... texture metadata allocation; on failure: ... */
    KB_ERROR("unable to allocate memory for texture metadata");
    id_return(device->texture_id_gen, track_id);
    /* ... */
tex->device = device;
tex->tracker_id = track_id;
device->textures[track_id].texture = tex;
device->textures[track_id].start_use = GPU_TEXTURE_USE_UNINITIALIZED;
device->textures[track_id].cur_use = GPU_TEXTURE_USE_UNINITIALIZED;
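/* Every resource type below follows the same pattern: *_enqueue_destroy
 * records the handle in the currently active destruction queue (to be
 * destroyed on a later flush), *_destroy tears the resource down immediately
 * via its *_destroy_internal helper, and, for tracked resources such as
 * textures and buffers, *_release clears the tracker slot and returns its id
 * to the generator. */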
void gpu_device_resource_texture_enqueue_destroy(gpu_texture texture) {
    /* ... */
    KB_ASSERT(device, "texture must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_TEXTURE,
        .handle = texture,
    };
    array_push_checked(
        &device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_texture_destroy(gpu_texture texture) {
    gpu_texture_destroy_internal(texture);
    gpu_device_resource_texture_release(texture);
}

void gpu_device_resource_texture_release(gpu_texture texture) {
    /* ... */
    KB_ASSERT(device, "texture must have valid device handle");
    const usize textures_size = array_size(device->textures);
    if (texture->tracker_id < textures_size) {
        device->textures[texture->tracker_id].texture = KB_NULL;
        id_return(device->texture_id_gen, texture->tracker_id);
    }
}
/* Texture view creation (fragment): */
    KB_ERROR("unable to allocate memory for texture view metadata");

void gpu_device_resource_texture_view_enqueue_destroy(gpu_texture_view view) {
    /* ... */
    KB_ASSERT(device, "texture view must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_TEXTURE_VIEW,
        .handle = view,
    };
    array_push_checked(
        &device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

/* gpu_device_resource_texture_view_destroy (fragment): */
    gpu_texture_view_destroy_internal(view);
/* Pipeline layout creation (fragment): */
    KB_ERROR("unable to allocate memory for pipeline layout metadata");

/* Pipeline layout enqueue-destroy (fragment): */
    KB_ASSERT(device, "pipeline layout must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_PIPELINE_LAYOUT,
        .handle = layout,
    };
    array_push_checked(
        &device->destruction_queues[device->destruction_queue_index].resources, &resource);

/* Pipeline layout destroy (fragment): */
    gpu_pipeline_layout_destroy_internal(layout);
/* Pipeline creation (fragment): */
    KB_ERROR("unable to allocate memory for pipeline metadata");

void gpu_device_resource_pipeline_enqueue_destroy(gpu_pipeline pipeline) {
    /* ... */
    KB_ASSERT(device, "pipeline must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_PIPELINE,
        .handle = pipeline,
    };
    array_push_checked(
        &device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_pipeline_destroy(gpu_pipeline pipeline) {
    gpu_render_pipeline_destroy_internal(pipeline);
}
/* Shader module creation (fragment): */
    KB_ERROR("unable to allocate memory for shader metadata");

void gpu_device_resource_shader_module_enqueue_destroy(gpu_shader_module shader) {
    /* ... */
    KB_ASSERT(device, "shader must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_SHADER_MODULE,
        .handle = shader,
    };
    array_push_checked(
        &device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

/* Shader module destroy (fragment): */
    gpu_shader_module_destroy_internal(shader);
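/* Buffer registration (fragment): mirrors texture registration, but buffer
 * trackers start with their use state zeroed rather than a dedicated
 * "uninitialized" value. */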
identifier track_id = id_request(device->buffer_id_gen);
const usize buffers_size = array_size(device->buffer_trackers);
if (track_id >= buffers_size
    && !array_resize(&device->buffer_trackers, buffers_size * 2)) {
    KB_ERROR("unable to allocate space for new buffers");
    /* ... */
}
/* ... buffer metadata allocation; on failure: ... */
    KB_ERROR("unable to allocate memory for buffer metadata");
    id_return(device->buffer_id_gen, track_id);
    /* ... */
buf->device = device;
buf->tracker_id = track_id;
device->buffer_trackers[track_id].buffer = buf;
device->buffer_trackers[track_id].start_use = 0;
device->buffer_trackers[track_id].cur_use = 0;
void gpu_device_resource_buffer_enqueue_destroy(gpu_buffer buffer) {
    /* ... */
    KB_ASSERT(device, "buffer must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_BUFFER,
        .handle = buffer,
    };
    array_push_checked(
        &device->destruction_queues[device->destruction_queue_index].resources, &resource);
}

void gpu_device_resource_buffer_destroy(gpu_buffer buffer) {
    gpu_buffer_destroy_internal(buffer);
    gpu_device_resource_buffer_release(buffer);
}

void gpu_device_resource_buffer_release(gpu_buffer buffer) {
    /* ... */
    KB_ASSERT(device, "buffer must have valid device handle");
    const usize buffers_size = array_size(device->buffer_trackers);
    if (buffer->tracker_id < buffers_size) {
        device->buffer_trackers[buffer->tracker_id].buffer = KB_NULL;
        id_return(device->buffer_id_gen, buffer->tracker_id);
    }
}
/* Command encoder creation (fragment): */
    KB_ERROR("unable to allocate memory for encoder metadata");

/* Command encoder enqueue-destroy (fragment): */
    KB_ASSERT(device, "encoder must have valid device handle");
    struct gpu_device_resource resource = {
        .type = GPU_DEVICE_RESOURCE_COMMAND_ENCODER,
        .handle = encoder,
    };
    array_push_checked(
        &device->destruction_queues[device->destruction_queue_index].resources, &resource);

/* Command encoder destroy (fragment): */
    gpu_command_encoder_destroy_internal(encoder);