#include <kiba/gpu/enums.h>
#include <vulkan/vulkan.h>
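/* Conversion helpers that translate the kiba GPU abstraction enums from
 * kiba/gpu/enums.h into their Vulkan counterparts. */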
static inline VkAttachmentLoadOp vk_convert_attachment_load_op(enum gpu_attachment_ops op) {
    return KB_FLAGS_ALL_SET(op, GPU_ATTACHMENT_OP_LOAD) ? VK_ATTACHMENT_LOAD_OP_LOAD
                                                        : VK_ATTACHMENT_LOAD_OP_CLEAR;
}
static inline VkAttachmentStoreOp vk_convert_attachment_store_op(enum gpu_attachment_ops op) {
    return KB_FLAGS_ALL_SET(op, GPU_ATTACHMENT_OP_STORE) ? VK_ATTACHMENT_STORE_OP_STORE
                                                         : VK_ATTACHMENT_STORE_OP_DONT_CARE;
}
static inline VkCompareOp vk_convert_compare_function(enum gpu_compare_func func) {
    switch (func) {
    case GPU_COMPARE_FUNC_NEVER:
        return VK_COMPARE_OP_NEVER;
    case GPU_COMPARE_FUNC_LESS:
        return VK_COMPARE_OP_LESS;
    case GPU_COMPARE_FUNC_EQUAL:
        return VK_COMPARE_OP_EQUAL;
    case GPU_COMPARE_FUNC_LESS_EQUAL:
        return VK_COMPARE_OP_LESS_OR_EQUAL;
    case GPU_COMPARE_FUNC_GREATER:
        return VK_COMPARE_OP_GREATER;
    case GPU_COMPARE_FUNC_NOT_EQUAL:
        return VK_COMPARE_OP_NOT_EQUAL;
    case GPU_COMPARE_FUNC_GREATER_EQUAL:
        return VK_COMPARE_OP_GREATER_OR_EQUAL;
    case GPU_COMPARE_FUNC_ALWAYS:
        return VK_COMPARE_OP_ALWAYS;
    default:
        return VK_COMPARE_OP_ALWAYS;
    }
}
static inline VkPrimitiveTopology vk_convert_primitive_topology(enum gpu_primitive_topology topology) {
    switch (topology) {
    case GPU_PIMITIVE_TOPOLOGY_POINT_LIST:
        return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    case GPU_PIMITIVE_TOPOLOGY_LINE_LIST:
        return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
    case GPU_PIMITIVE_TOPOLOGY_LINE_STRIP:
        return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
    case GPU_PIMITIVE_TOPOLOGY_TRIANGLE_LIST:
        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
    case GPU_PIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
    default:
        return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    }
}
static inline VkFrontFace vk_convert_front_face(enum gpu_front_face front_face) {
    switch (front_face) {
    case GPU_FRONT_FACE_CCW:
        return VK_FRONT_FACE_COUNTER_CLOCKWISE;
    case GPU_FRONT_FACE_CW:
        return VK_FRONT_FACE_CLOCKWISE;
    default:
        return VK_FRONT_FACE_COUNTER_CLOCKWISE;
    }
}
static inline VkCullModeFlags vk_convert_cull_mode(enum gpu_cull_mode cull_mode) {
    switch (cull_mode) {
    case GPU_CULL_MODE_NONE:
        return VK_CULL_MODE_NONE;
    case GPU_CULL_MODE_FRONT:
        return VK_CULL_MODE_FRONT_BIT;
    case GPU_CULL_MODE_BACK:
        return VK_CULL_MODE_BACK_BIT;
    default:
        return VK_CULL_MODE_NONE;
    }
}
static inline VkPolygonMode vk_convert_polygon_mode(enum gpu_polygon_mode polygon_mode) {
    switch (polygon_mode) {
    case GPU_POLYGON_MODE_FILL:
        return VK_POLYGON_MODE_FILL;
    case GPU_POLYGON_MODE_LINE:
        return VK_POLYGON_MODE_LINE;
    case GPU_POLYGON_MODE_POINT:
        return VK_POLYGON_MODE_POINT;
    default:
        return VK_POLYGON_MODE_FILL;
    }
}
static inline VkVertexInputRate vk_convert_step_mode(enum gpu_step_mode step_mode) {
    switch (step_mode) {
    case GPU_STEP_MODE_VERTEX:
        return VK_VERTEX_INPUT_RATE_VERTEX;
    case GPU_STEP_MODE_INSTANCE:
        return VK_VERTEX_INPUT_RATE_INSTANCE;
    default:
        return VK_VERTEX_INPUT_RATE_VERTEX;
    }
}
static inline VkFormat vk_convert_vertex_format(enum gpu_vertex_format format) {
    switch (format) {
    case GPU_VERTEX_FLOAT32x1:
        return VK_FORMAT_R32_SFLOAT;
    case GPU_VERTEX_FLOAT32x2:
        return VK_FORMAT_R32G32_SFLOAT;
    case GPU_VERTEX_FLOAT32x3:
        return VK_FORMAT_R32G32B32_SFLOAT;
    case GPU_VERTEX_FLOAT32x4:
        return VK_FORMAT_R32G32B32A32_SFLOAT;
    default:
        return VK_FORMAT_UNDEFINED;
    }
}
static inline VkIndexType vk_convert_index_format(enum gpu_index_format format) {
    /* NOTE: the gpu_index_format enumerator names are assumed to follow the
     * UINT16/UINT32 naming pattern used by the other kiba GPU enums. */
    switch (format) {
    case GPU_INDEX_FORMAT_UINT16:
        return VK_INDEX_TYPE_UINT16;
    case GPU_INDEX_FORMAT_UINT32:
        return VK_INDEX_TYPE_UINT32;
    default:
        return VK_INDEX_TYPE_UINT16;
    }
}
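/* Maps a Vulkan surface format (color space + pixel format) back onto the kiba
 * texture format enum; unsupported combinations fall back to RGBA8_UNORM. */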
static inline enum gpu_texture_format vk_convert_vk_surface_format(VkSurfaceFormatKHR format) {
    switch (format.colorSpace) {
    case VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT:
        switch (format.format) {
        case VK_FORMAT_R16G16B16A16_SFLOAT:
            return GPU_TEXTURE_FORMAT_RGBA16_FLOAT;
        case VK_FORMAT_R16G16B16A16_SNORM:
            return GPU_TEXTURE_FORMAT_RGBA16_SNORM;
        case VK_FORMAT_R16G16B16A16_UNORM:
            return GPU_TEXTURE_FORMAT_RGBA16_UNORM;
        case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
            return GPU_TEXTURE_FORMAT_RGB10A2_UNORM;
        default:
            break;
        }
        break;
    case VK_COLOR_SPACE_SRGB_NONLINEAR_KHR:
        switch (format.format) {
        case VK_FORMAT_B8G8R8A8_UNORM:
            return GPU_TEXTURE_FORMAT_BGRA8_UNORM;
        case VK_FORMAT_B8G8R8A8_SNORM:
            return GPU_TEXTURE_FORMAT_BGRA8_UNORM;
        case VK_FORMAT_R8G8B8A8_SRGB:
            return GPU_TEXTURE_FORMAT_RGBA8_UNORM_SRGB;
        case VK_FORMAT_R8G8B8A8_SNORM:
            return GPU_TEXTURE_FORMAT_RGBA8_SNORM;
        case VK_FORMAT_R8G8B8A8_UNORM:
            return GPU_TEXTURE_FORMAT_RGBA8_UNORM;
        default:
            break;
        }
        break;
    default:
        break;
    }
    KB_ERROR("unsupported format of color_space: {u32} and format: {u32}, returning GPU_TEXTURE_FORMAT_RGBA8_UNORM as "
             "fallback",
             format.colorSpace, format.format);
    return GPU_TEXTURE_FORMAT_RGBA8_UNORM;
}
static inline VkImageType vk_convert_texture_dimension(enum gpu_texture_dimension dim) {
    switch (dim) {
    case GPU_TEXTURE_DIMENSION_D1:
        return VK_IMAGE_TYPE_1D;
    case GPU_TEXTURE_DIMENSION_D2:
        return VK_IMAGE_TYPE_2D;
    case GPU_TEXTURE_DIMENSION_D3:
        return VK_IMAGE_TYPE_3D;
    default:
        return VK_IMAGE_TYPE_2D;
    }
}
static inline VkImageUsageFlagBits vk_convert_texture_use(enum gpu_texture_use usage) {
    VkImageUsageFlagBits ret = 0;
    if (KB_FLAGS_ANY_SET(usage, GPU_TEXTURE_USE_COPY_SRC)) {
        ret |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_TEXTURE_USE_COPY_DST)) {
        ret |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_TEXTURE_USE_RESOURCE)) {
        ret |= VK_IMAGE_USAGE_SAMPLED_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_TEXTURE_USE_COLOR_TARGET)) {
        ret |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_TEXTURE_USE_DEPTH_STENCIL_READ | GPU_TEXTURE_USE_DEPTH_STENCIL_WRITE)) {
        ret |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_TEXTURE_USE_STORAGE_READ | GPU_TEXTURE_USE_STORAGE_WRITE)) {
        ret |= VK_IMAGE_USAGE_STORAGE_BIT;
    }
    return ret;
}
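/* Picks a Vulkan image layout suited to the given texture use and format.
 * Exclusive uses map directly; combined uses are resolved via the format aspect. */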
static inline VkImageLayout vk_optimal_image_layout(enum gpu_texture_use use,
                                                    enum gpu_texture_format format) {
    switch (use) {
    case GPU_TEXTURE_USE_UNINITIALIZED:
        return VK_IMAGE_LAYOUT_UNDEFINED;
    case GPU_TEXTURE_USE_COPY_SRC:
        return VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    case GPU_TEXTURE_USE_COPY_DST:
        return VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    case GPU_TEXTURE_USE_COLOR_TARGET:
        return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    case GPU_TEXTURE_USE_DEPTH_STENCIL_WRITE:
        return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    case GPU_TEXTURE_USE_PRESENT:
        return VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    default:
        break;
    }
    const b8 is_color = gpu_format_aspect_from_format(format) == GPU_TEXTURE_FORMAT_ASPECT_COLOR;
    /* NOTE: the fallback branching below is an assumption: shader resources get the
     * read-only layout, other color-aspect uses the general layout, and the rest the
     * read-only depth/stencil layout. */
    if (use == GPU_TEXTURE_USE_RESOURCE) {
        return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    }
    if (is_color) {
        return VK_IMAGE_LAYOUT_GENERAL;
    }
    return VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
}
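/* Accumulates the pipeline stage and access masks a barrier needs for the given
 * texture use; the flags are OR-ed into *stages and *access. */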
static inline void vk_map_texture_use_to_barrier_flags(enum gpu_texture_use use,
                                                       VkPipelineStageFlags *stages,
                                                       VkAccessFlags *access) {
    KB_ASSERT(use != GPU_TEXTURE_USE_UNKNOWN, "texture use must be known");
    if (use == GPU_TEXTURE_USE_UNINITIALIZED || use == GPU_TEXTURE_USE_PRESENT) {
        *stages |= VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
        /* NOTE: assumed early return, these uses need no access flags. */
        return;
    }
    const VkPipelineStageFlags shader_stages = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
                                             | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
                                             | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_COPY_SRC)) {
        *stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
        *access |= VK_ACCESS_TRANSFER_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_COPY_DST)) {
        *stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
        *access |= VK_ACCESS_TRANSFER_WRITE_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_RESOURCE)) {
        *stages |= shader_stages;
        *access |= VK_ACCESS_SHADER_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_COLOR_TARGET)) {
        *stages |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
        *access |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_DEPTH_STENCIL_READ)) {
        *stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
        *access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_DEPTH_STENCIL_WRITE)) {
        *stages |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
        *access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_STORAGE_READ)) {
        *stages |= shader_stages;
        *access |= VK_ACCESS_SHADER_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_TEXTURE_USE_STORAGE_WRITE)) {
        *stages |= shader_stages;
        *access |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
    }
}
static inline VkSampleCountFlagBits vk_convert_sample_count(usize count) {
    switch (count) {
    case 1:
        return VK_SAMPLE_COUNT_1_BIT;
    case 2:
        return VK_SAMPLE_COUNT_2_BIT;
    case 4:
        return VK_SAMPLE_COUNT_4_BIT;
    case 8:
        return VK_SAMPLE_COUNT_8_BIT;
    case 16:
        return VK_SAMPLE_COUNT_16_BIT;
    case 32:
        return VK_SAMPLE_COUNT_32_BIT;
    case 64:
        return VK_SAMPLE_COUNT_64_BIT;
    }
    KB_WARN("received unsupported sample count of {usize}, assuming sample count of 1", count);
    return VK_SAMPLE_COUNT_1_BIT;
}
static inline VkImageAspectFlagBits vk_convert_texture_format_aspect(enum gpu_texture_format_aspect aspects) {
    VkImageAspectFlagBits ret = 0;
    /* NOTE: the STENCIL and DEPTH aspect enumerator names are assumed from the
     * GPU_TEXTURE_FORMAT_ASPECT_* naming pattern. */
    if (KB_FLAGS_ANY_SET(aspects, GPU_TEXTURE_FORMAT_ASPECT_COLOR)) {
        ret |= VK_IMAGE_ASPECT_COLOR_BIT;
    }
    if (KB_FLAGS_ANY_SET(aspects, GPU_TEXTURE_FORMAT_ASPECT_STENCIL)) {
        ret |= VK_IMAGE_ASPECT_STENCIL_BIT;
    }
    if (KB_FLAGS_ANY_SET(aspects, GPU_TEXTURE_FORMAT_ASPECT_DEPTH)) {
        ret |= VK_IMAGE_ASPECT_DEPTH_BIT;
    }
    return ret;
}
static inline VkImageViewType vk_convert_texture_view_dimension(enum gpu_texture_view_dimension dim) {
    switch (dim) {
    case GPU_TEXTURE_VIEW_DIMENSION_D1:
        return VK_IMAGE_VIEW_TYPE_1D;
    case GPU_TEXTURE_VIEW_DIMENSION_D2:
        return VK_IMAGE_VIEW_TYPE_2D;
    case GPU_TEXTURE_VIEW_DIMENSION_D2_ARRAY:
        return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
    case GPU_TEXTURE_VIEW_DIMENSION_CUBE:
        return VK_IMAGE_VIEW_TYPE_CUBE;
    case GPU_TEXTURE_VIEW_DIMENSION_CUBE_ARRAY:
        return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
    case GPU_TEXTURE_VIEW_DIMENSION_D3:
        return VK_IMAGE_VIEW_TYPE_3D;
    default:
        return VK_IMAGE_VIEW_TYPE_2D;
    }
}
static inline VkPresentModeKHR vk_convert_present_mode(enum gpu_present_mode present_mode) {
    switch (present_mode) {
    case GPU_PRESENT_MODE_FIFO:
        return VK_PRESENT_MODE_FIFO_KHR;
    case GPU_PRESENT_MODE_FIFO_RELAXED:
        return VK_PRESENT_MODE_FIFO_RELAXED_KHR;
    case GPU_PRESENT_MODE_IMMEDIATE:
        return VK_PRESENT_MODE_IMMEDIATE_KHR;
    case GPU_PRESENT_MODE_MAILBOX:
        return VK_PRESENT_MODE_MAILBOX_KHR;
    default:
        return VK_PRESENT_MODE_FIFO_KHR;
    }
}
static inline VkFormat vk_convert_texture_format(enum gpu_texture_format format) {
    switch (format) {
    case GPU_TEXTURE_FORMAT_R8_UNORM:
        return VK_FORMAT_R8_UNORM;
    case GPU_TEXTURE_FORMAT_R8_SNORM:
        return VK_FORMAT_R8_SNORM;
    case GPU_TEXTURE_FORMAT_R8_UINT:
        return VK_FORMAT_R8_UINT;
    case GPU_TEXTURE_FORMAT_R8_SINT:
        return VK_FORMAT_R8_SINT;

    case GPU_TEXTURE_FORMAT_R16_UINT:
        return VK_FORMAT_R16_UINT;
    case GPU_TEXTURE_FORMAT_R16_SINT:
        return VK_FORMAT_R16_SINT;
    case GPU_TEXTURE_FORMAT_R16_UNORM:
        return VK_FORMAT_R16_UNORM;
    case GPU_TEXTURE_FORMAT_R16_SNORM:
        return VK_FORMAT_R16_SNORM;
    case GPU_TEXTURE_FORMAT_R16_FLOAT:
        return VK_FORMAT_R16_SFLOAT;
    case GPU_TEXTURE_FORMAT_RG8_UNORM:
        return VK_FORMAT_R8G8_UNORM;
    case GPU_TEXTURE_FORMAT_RG8_SNORM:
        return VK_FORMAT_R8G8_SNORM;
    case GPU_TEXTURE_FORMAT_RG8_UINT:
        return VK_FORMAT_R8G8_UINT;
    case GPU_TEXTURE_FORMAT_RG8_SINT:
        return VK_FORMAT_R8G8_SINT;

    case GPU_TEXTURE_FORMAT_R32_UINT:
        return VK_FORMAT_R32_UINT;
    case GPU_TEXTURE_FORMAT_R32_SINT:
        return VK_FORMAT_R32_SINT;
    case GPU_TEXTURE_FORMAT_R32_FLOAT:
        return VK_FORMAT_R32_SFLOAT;
    case GPU_TEXTURE_FORMAT_RG16_UINT:
        return VK_FORMAT_R16G16_UINT;
    case GPU_TEXTURE_FORMAT_RG16_SINT:
        return VK_FORMAT_R16G16_SINT;
    case GPU_TEXTURE_FORMAT_RG16_UNORM:
        return VK_FORMAT_R16G16_UNORM;
    case GPU_TEXTURE_FORMAT_RG16_SNORM:
        return VK_FORMAT_R16G16_SNORM;
    case GPU_TEXTURE_FORMAT_RG16_FLOAT:
        return VK_FORMAT_R16G16_SFLOAT;
    case GPU_TEXTURE_FORMAT_RGBA8_UNORM:
        return VK_FORMAT_R8G8B8A8_UNORM;
    case GPU_TEXTURE_FORMAT_RGBA8_UNORM_SRGB:
        return VK_FORMAT_R8G8B8A8_SRGB;
    case GPU_TEXTURE_FORMAT_RGBA8_SNORM:
        return VK_FORMAT_R8G8B8A8_SNORM;
    case GPU_TEXTURE_FORMAT_RGBA8_UINT:
        return VK_FORMAT_R8G8B8A8_UINT;
    case GPU_TEXTURE_FORMAT_RGBA8_SINT:
        return VK_FORMAT_R8G8B8A8_SINT;
    case GPU_TEXTURE_FORMAT_BGRA8_UNORM:
        return VK_FORMAT_B8G8R8A8_UNORM;
    case GPU_TEXTURE_FORMAT_BGRA8_UNORM_SRGB:
        return VK_FORMAT_B8G8R8A8_SRGB;

    case GPU_TEXTURE_FORMAT_RGB9E5_UFLOAT:
        return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32;
    case GPU_TEXTURE_FORMAT_RGB10A2_UINT:
        return VK_FORMAT_A2B10G10R10_UINT_PACK32;
    case GPU_TEXTURE_FORMAT_RGB10A2_UNORM:
        return VK_FORMAT_A2B10G10R10_UNORM_PACK32;

    case GPU_TEXTURE_FORMAT_RG32_UINT:
        return VK_FORMAT_R32G32_UINT;
    case GPU_TEXTURE_FORMAT_RG32_SINT:
        return VK_FORMAT_R32G32_SINT;
    case GPU_TEXTURE_FORMAT_RG32_FLOAT:
        return VK_FORMAT_R32G32_SFLOAT;
    case GPU_TEXTURE_FORMAT_RGBA16_UINT:
        return VK_FORMAT_R16G16B16A16_UINT;
    case GPU_TEXTURE_FORMAT_RGBA16_SINT:
        return VK_FORMAT_R16G16B16A16_SINT;
    case GPU_TEXTURE_FORMAT_RGBA16_UNORM:
        return VK_FORMAT_R16G16B16A16_UNORM;
    case GPU_TEXTURE_FORMAT_RGBA16_SNORM:
        return VK_FORMAT_R16G16B16A16_SNORM;
    case GPU_TEXTURE_FORMAT_RGBA16_FLOAT:
        return VK_FORMAT_R16G16B16A16_SFLOAT;

    case GPU_TEXTURE_FORMAT_RGBA32_UINT:
        return VK_FORMAT_R32G32B32A32_UINT;
    case GPU_TEXTURE_FORMAT_RGBA32_SINT:
        return VK_FORMAT_R32G32B32A32_SINT;
    case GPU_TEXTURE_FORMAT_RGBA32_FLOAT:
        return VK_FORMAT_R32G32B32A32_SFLOAT;

    case GPU_TEXTURE_FORMAT_STENCIL8:
        return VK_FORMAT_D32_SFLOAT_S8_UINT;
    case GPU_TEXTURE_FORMAT_DEPTH16_UNORM:
        return VK_FORMAT_D16_UNORM;
    case GPU_TEXTURE_FORMAT_DEPTH24_PLUS:
        return VK_FORMAT_D32_SFLOAT;
    case GPU_TEXTURE_FORMAT_DEPTH24_PLUS_STENCIL8:
        return VK_FORMAT_D32_SFLOAT_S8_UINT;
    case GPU_TEXTURE_FORMAT_DEPTH32_FLOAT:
        return VK_FORMAT_D32_SFLOAT;
    case GPU_TEXTURE_FORMAT_DEPTH32_FLOAT_STENCIL8:
        return VK_FORMAT_D32_SFLOAT_S8_UINT;
    default:
        return VK_FORMAT_B8G8R8A8_UNORM;
    }
}
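/* Accumulates the pipeline stage and access masks a barrier needs for the given
 * buffer usage flags; the flags are OR-ed into *stages and *access. */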
static inline void vk_map_buffer_use_to_barrier_flags(enum gpu_buffer_usage use,
                                                      VkPipelineStageFlags *stages,
                                                      VkAccessFlags *access) {
    KB_ASSERT(use, "buffer must have dedicated usage");
    const VkPipelineStageFlags shader_stages = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
                                             | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
                                             | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
    /* NOTE: the gpu_buffer_usage flag names below are assumed to follow the
     * GPU_BUFFER_USAGE_* naming convention of the other kiba GPU enums. */
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_MAP_READ)) {
        *stages |= VK_PIPELINE_STAGE_HOST_BIT;
        *access |= VK_ACCESS_HOST_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_MAP_WRITE)) {
        *stages |= VK_PIPELINE_STAGE_HOST_BIT;
        *access |= VK_ACCESS_HOST_WRITE_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_COPY_SRC)) {
        *stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
        *access |= VK_ACCESS_TRANSFER_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_COPY_DST)) {
        *stages |= VK_PIPELINE_STAGE_TRANSFER_BIT;
        *access |= VK_ACCESS_TRANSFER_WRITE_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_UNIFORM)) {
        *stages |= shader_stages;
        *access |= VK_ACCESS_UNIFORM_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_STORAGE)) {
        *stages |= shader_stages;
        *access |= VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_INDEX)) {
        *stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
        *access |= VK_ACCESS_INDEX_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_VERTEX)) {
        *stages |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
        *access |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    }
    if (KB_FLAGS_ANY_SET(use, GPU_BUFFER_USAGE_INDIRECT)) {
        *stages |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
        *access |= VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
    }
}
static inline VkBufferUsageFlagBits vk_convert_buffer_usage(enum gpu_buffer_usage usage) {
    VkBufferUsageFlagBits ret = 0;
    /* NOTE: same assumption as above regarding the GPU_BUFFER_USAGE_* flag names. */
    if (KB_FLAGS_ANY_SET(usage, GPU_BUFFER_USAGE_COPY_SRC)) {
        ret |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_BUFFER_USAGE_COPY_DST)) {
        ret |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_BUFFER_USAGE_INDEX)) {
        ret |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_BUFFER_USAGE_VERTEX)) {
        ret |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_BUFFER_USAGE_UNIFORM)) {
        ret |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_BUFFER_USAGE_STORAGE)) {
        ret |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
    }
    if (KB_FLAGS_ANY_SET(usage, GPU_BUFFER_USAGE_INDIRECT)) {
        ret |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
    }
    return ret;
}