#include <libavutil/macros.h>
#include "video/out/gpu/spirv.h"
#include "utils.h"
#include "malloc.h"
const char* vk_err(VkResult res)
{
switch (res) {
// These are technically success codes, but include them nonetheless
case VK_SUCCESS: return "VK_SUCCESS";
case VK_NOT_READY: return "VK_NOT_READY";
case VK_TIMEOUT: return "VK_TIMEOUT";
case VK_EVENT_SET: return "VK_EVENT_SET";
case VK_EVENT_RESET: return "VK_EVENT_RESET";
case VK_INCOMPLETE: return "VK_INCOMPLETE";
case VK_SUBOPTIMAL_KHR: return "VK_SUBOPTIMAL_KHR";
// Actual error codes
case VK_ERROR_OUT_OF_HOST_MEMORY: return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VK_ERROR_INITIALIZATION_FAILED: return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_DEVICE_LOST: return "VK_ERROR_DEVICE_LOST";
case VK_ERROR_MEMORY_MAP_FAILED: return "VK_ERROR_MEMORY_MAP_FAILED";
case VK_ERROR_LAYER_NOT_PRESENT: return "VK_ERROR_LAYER_NOT_PRESENT";
case VK_ERROR_EXTENSION_NOT_PRESENT: return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VK_ERROR_FEATURE_NOT_PRESENT: return "VK_ERROR_FEATURE_NOT_PRESENT";
case VK_ERROR_INCOMPATIBLE_DRIVER: return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_TOO_MANY_OBJECTS: return "VK_ERROR_TOO_MANY_OBJECTS";
case VK_ERROR_FORMAT_NOT_SUPPORTED: return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_FRAGMENTED_POOL: return "VK_ERROR_FRAGMENTED_POOL";
case VK_ERROR_INVALID_SHADER_NV: return "VK_ERROR_INVALID_SHADER_NV";
case VK_ERROR_OUT_OF_DATE_KHR: return "VK_ERROR_OUT_OF_DATE_KHR";
case VK_ERROR_SURFACE_LOST_KHR: return "VK_ERROR_SURFACE_LOST_KHR";
}
return "Unknown error!";
}
static const char* vk_dbg_type(VkDebugReportObjectTypeEXT type)
{
switch (type) {
case VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT:
return "VkInstance";
case VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT:
return "VkPhysicalDevice";
case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT:
return "VkDevice";
case VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT:
return "VkQueue";
case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
return "VkSemaphore";
case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT:
return "VkCommandBuffer";
case VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT:
return "VkFence";
case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
return "VkDeviceMemory";
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
return "VkBuffer";
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
return "VkImage";
case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
return "VkEvent";
case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
return "VkQueryPool";
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
return "VkBufferView";
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
return "VkImageView";
case VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT:
return "VkShaderModule";
case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT:
return "VkPipelineCache";
case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT:
return "VkPipelineLayout";
case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
return "VkRenderPass";
case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
return "VkPipeline";
case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT:
return "VkDescriptorSetLayout";
case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
return "VkSampler";
case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
return "VkDescriptorPool";
case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
return "VkDescriptorSet";
case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
return "VkFramebuffer";
case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
return "VkCommandPool";
case VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT:
return "VkSurfaceKHR";
case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
return "VkSwapchainKHR";
case VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT:
return "VkDebugReportCallbackEXT";
case VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT:
default:
return "unknown object";
}
}
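
// Callback for VK_EXT_debug_report: map the report severity to an mpv log
// level and forward the message to our logger.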
static VkBool32 vk_dbg_callback(VkDebugReportFlagsEXT flags,
VkDebugReportObjectTypeEXT objType,
uint64_t obj, size_t loc, int32_t msgCode,
const char *layer, const char *msg, void *priv)
{
struct mpvk_ctx *vk = priv;
int lev = MSGL_V;
switch (flags) {
case VK_DEBUG_REPORT_ERROR_BIT_EXT: lev = MSGL_ERR; break;
case VK_DEBUG_REPORT_WARNING_BIT_EXT: lev = MSGL_WARN; break;
case VK_DEBUG_REPORT_INFORMATION_BIT_EXT: lev = MSGL_TRACE; break;
case VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT: lev = MSGL_WARN; break;
case VK_DEBUG_REPORT_DEBUG_BIT_EXT: lev = MSGL_DEBUG; break;
    }
MP_MSG(vk, lev, "vk [%s] %d: %s (obj 0x%llx (%s), loc 0x%zx)\n",
layer, (int)msgCode, msg, (unsigned long long)obj,
vk_dbg_type(objType), loc);
    // The return value of this callback controls whether the offending API
    // call is aborted (which can prevent GPU errors from cascading). We only
    // want that behavior for actual errors.
return (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT);
}
static void vk_cmdpool_uninit(struct mpvk_ctx *vk, struct vk_cmdpool *pool)
{
if (!pool)
return;
// also frees associated command buffers
vkDestroyCommandPool(vk->dev, pool->pool, MPVK_ALLOCATOR);
for (int n = 0; n < MPVK_MAX_CMDS; n++) {
vkDestroyFence(vk->dev, pool->cmds[n].fence, MPVK_ALLOCATOR);
vkDestroySemaphore(vk->dev, pool->cmds[n].done, MPVK_ALLOCATOR);
talloc_free(pool->cmds[n].callbacks);
}
talloc_free(pool);
}
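
// Tear down in reverse order of creation: device-level objects (command
// pool, allocator, device) first, then instance-level ones (debug callback,
// surface, instance). Safe to call on a partially initialized context.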
void mpvk_uninit(struct mpvk_ctx *vk)
{
if (!vk->inst)
return;
if (vk->dev) {
vk_cmdpool_uninit(vk, vk->pool);
vk_malloc_uninit(vk);
vkDestroyDevice(vk->dev, MPVK_ALLOCATOR);
}
if (vk->dbg) {
        // As when creating the debug callback, this function is not part of
        // the core spec and has to be loaded before we can call it.
VK_LOAD_PFN(vkDestroyDebugReportCallbackEXT)
pfn_vkDestroyDebugReportCallbackEXT(vk->inst, vk->dbg, MPVK_ALLOCATOR);
}
vkDestroySurfaceKHR(vk->inst, vk->surf, MPVK_ALLOCATOR);
vkDestroyInstance(vk->inst, MPVK_ALLOCATOR);
*vk = (struct mpvk_ctx){0};
}
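
// Create the VkInstance, enabling the given platform surface extension and,
// if 'debug' is set, the standard validation layer plus a debug report
// callback. Expected call sequence afterwards (a sketch; surface creation is
// backend-specific and the extension name here is illustrative):
//
//   mpvk_instance_init(vk, log, VK_KHR_XLIB_SURFACE_EXTENSION_NAME, debug);
//   // ... create vk->surf with the platform's vkCreate*SurfaceKHR ...
//   mpvk_find_phys_device(vk, NULL, false);
//   mpvk_pick_surface_format(vk);
//   // ... initialize vk->spirv (shader compiler) ...
//   mpvk_device_init(vk, opts);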
bool mpvk_instance_init(struct mpvk_ctx *vk, struct mp_log *log,
const char *surf_ext_name, bool debug)
{
*vk = (struct mpvk_ctx) {
.log = log,
};
VkInstanceCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
};
if (debug) {
// Enables the LunarG standard validation layer, which
// is a meta-layer that loads lots of other validators
static const char* layers[] = {
"VK_LAYER_LUNARG_standard_validation",
};
info.ppEnabledLayerNames = layers;
info.enabledLayerCount = MP_ARRAY_SIZE(layers);
}
// Enable whatever extensions were compiled in.
const char *extensions[] = {
VK_KHR_SURFACE_EXTENSION_NAME,
surf_ext_name,
// Extra extensions only used for debugging. These are toggled by
// decreasing the enabledExtensionCount, so the number needs to be
// synchronized with the code below.
VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
};
const int debugExtensionCount = 1;
info.ppEnabledExtensionNames = extensions;
info.enabledExtensionCount = MP_ARRAY_SIZE(extensions);
if (!debug)
info.enabledExtensionCount -= debugExtensionCount;
MP_VERBOSE(vk, "Creating instance with extensions:\n");
for (int i = 0; i < info.enabledExtensionCount; i++)
MP_VERBOSE(vk, " %s\n", info.ppEnabledExtensionNames[i]);
VkResult res = vkCreateInstance(&info, MPVK_ALLOCATOR, &vk->inst);
if (res != VK_SUCCESS) {
MP_VERBOSE(vk, "Failed creating instance: %s\n", vk_err(res));
return false;
}
if (debug) {
// Set up a debug callback to catch validation messages
VkDebugReportCallbackCreateInfoEXT dinfo = {
.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
.flags = VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
VK_DEBUG_REPORT_WARNING_BIT_EXT |
VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT |
VK_DEBUG_REPORT_ERROR_BIT_EXT |
VK_DEBUG_REPORT_DEBUG_BIT_EXT,
.pfnCallback = vk_dbg_callback,
.pUserData = vk,
};
// Since this is not part of the core spec, we need to load it. This
// can't fail because we've already successfully created an instance
// with this extension enabled.
VK_LOAD_PFN(vkCreateDebugReportCallbackEXT)
pfn_vkCreateDebugReportCallbackEXT(vk->inst, &dinfo, MPVK_ALLOCATOR,
&vk->dbg);
}
return true;
}
#define MPVK_MAX_DEVICES 16
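
// Check whether any queue family on this physical device can present to the
// surface we created earlier.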
static bool physd_supports_surface(struct mpvk_ctx *vk, VkPhysicalDevice physd)
{
uint32_t qfnum;
vkGetPhysicalDeviceQueueFamilyProperties(physd, &qfnum, NULL);
for (int i = 0; i < qfnum; i++) {
VkBool32 sup;
VK(vkGetPhysicalDeviceSurfaceSupportKHR(physd, i, vk->surf, &sup));
if (sup)
return true;
}
error:
return false;
}
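
// Pick a physical device: iterate over the device types in order of
// decreasing preference (discrete first), optionally filter by name, and
// require that the device can present to our surface.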
bool mpvk_find_phys_device(struct mpvk_ctx *vk, const char *name, bool sw)
{
assert(vk->surf);
MP_VERBOSE(vk, "Probing for vulkan devices:\n");
VkPhysicalDevice *devices = NULL;
uint32_t num = 0;
VK(vkEnumeratePhysicalDevices(vk->inst, &num, NULL));
devices = talloc_array(NULL, VkPhysicalDevice, num);
    VK(vkEnumeratePhysicalDevices(vk->inst, &num, devices));
    // Clamp to the fixed-size props array we fill in below
    num = MPMIN(num, MPVK_MAX_DEVICES);
// Sorted by "priority". Reuses some m_opt code for convenience
static const struct m_opt_choice_alternatives types[] = {
{"discrete", VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU},
{"integrated", VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU},
{"virtual", VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU},
{"software", VK_PHYSICAL_DEVICE_TYPE_CPU},
{"unknown", VK_PHYSICAL_DEVICE_TYPE_OTHER},
{0}
};
VkPhysicalDeviceProperties props[MPVK_MAX_DEVICES];
for (int i = 0; i < num; i++) {
vkGetPhysicalDeviceProperties(devices[i], &props[i]);
MP_VERBOSE(vk, " GPU %d: %s (%s)\n", i, props[i].deviceName,
m_opt_choice_str(types, props[i].deviceType));
}
// Iterate through each type in order of decreasing preference
for (int t = 0; types[t].name; t++) {
// Disallow SW rendering unless explicitly enabled
if (types[t].value == VK_PHYSICAL_DEVICE_TYPE_CPU && !sw)
continue;
for (int i = 0; i < num; i++) {
VkPhysicalDeviceProperties prop = props[i];
if (prop.deviceType != types[t].value)
continue;
if (name && strcmp(name, prop.deviceName) != 0)
continue;
if (!physd_supports_surface(vk, devices[i]))
continue;
MP_VERBOSE(vk, "Chose device:\n");
MP_VERBOSE(vk, " Device Name: %s\n", prop.deviceName);
MP_VERBOSE(vk, " Device ID: %x:%x\n",
(unsigned)prop.vendorID, (unsigned)prop.deviceID);
MP_VERBOSE(vk, " Driver version: %d\n", (int)prop.driverVersion);
MP_VERBOSE(vk, " API version: %d.%d.%d\n",
(int)VK_VERSION_MAJOR(prop.apiVersion),
(int)VK_VERSION_MINOR(prop.apiVersion),
(int)VK_VERSION_PATCH(prop.apiVersion));
vk->physd = devices[i];
vk->limits = prop.limits;
talloc_free(devices);
return true;
}
}
error:
MP_VERBOSE(vk, "Found no suitable device, giving up.\n");
talloc_free(devices);
return false;
}
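
// Pick the swapchain surface format: accept any >= 8 bit _UNORM format in
// the sRGB color space, or fall back to a 16 bit format if the surface lets
// us pick freely.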
bool mpvk_pick_surface_format(struct mpvk_ctx *vk)
{
assert(vk->physd);
VkSurfaceFormatKHR *formats = NULL;
    uint32_t num;
// Enumerate through the surface formats and find one that we can map to
// a ra_format
VK(vkGetPhysicalDeviceSurfaceFormatsKHR(vk->physd, vk->surf, &num, NULL));
formats = talloc_array(NULL, VkSurfaceFormatKHR, num);
VK(vkGetPhysicalDeviceSurfaceFormatsKHR(vk->physd, vk->surf, &num, formats));
for (int i = 0; i < num; i++) {
// A value of VK_FORMAT_UNDEFINED means we can pick anything we want
if (formats[i].format == VK_FORMAT_UNDEFINED) {
vk->surf_format = (VkSurfaceFormatKHR) {
.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
.format = VK_FORMAT_R16G16B16A16_UNORM,
};
break;
}
if (formats[i].colorSpace != VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)
continue;
// Format whitelist, since we want only >= 8 bit _UNORM formats
switch (formats[i].format) {
case VK_FORMAT_R8G8B8_UNORM:
case VK_FORMAT_B8G8R8_UNORM:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
case VK_FORMAT_R16G16B16_UNORM:
case VK_FORMAT_R16G16B16A16_UNORM:
break; // accept
default: continue;
}
vk->surf_format = formats[i];
break;
}
    if (!vk->surf_format.format)
        goto error;

    talloc_free(formats);
    return true;

error:
    MP_ERR(vk, "Failed picking surface format!\n");
    talloc_free(formats);
    return false;
}
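
// Create a command pool for the given queue family, along with a fixed ring
// of MPVK_MAX_CMDS command buffers, each paired with a fence (to track
// completion) and a semaphore (to signal downstream consumers).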
static bool vk_cmdpool_init(struct mpvk_ctx *vk, VkDeviceQueueCreateInfo qinfo,
VkQueueFamilyProperties props,
struct vk_cmdpool **out)
{
struct vk_cmdpool *pool = *out = talloc_ptrtype(NULL, pool);
*pool = (struct vk_cmdpool) {
.qf = qinfo.queueFamilyIndex,
.props = props,
.qcount = qinfo.queueCount,
};
for (int n = 0; n < pool->qcount; n++)
vkGetDeviceQueue(vk->dev, pool->qf, n, &pool->queues[n]);
VkCommandPoolCreateInfo cinfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
.queueFamilyIndex = pool->qf,
};
VK(vkCreateCommandPool(vk->dev, &cinfo, MPVK_ALLOCATOR, &pool->pool));
VkCommandBufferAllocateInfo ainfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.commandPool = pool->pool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = MPVK_MAX_CMDS,
};
VkCommandBuffer cmdbufs[MPVK_MAX_CMDS];
VK(vkAllocateCommandBuffers(vk->dev, &ainfo, cmdbufs));
for (int n = 0; n < MPVK_MAX_CMDS; n++) {
struct vk_cmd *cmd = &pool->cmds[n];
cmd->pool = pool;
cmd->buf = cmdbufs[n];
VkFenceCreateInfo finfo = {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
.flags = VK_FENCE_CREATE_SIGNALED_BIT,
};
VK(vkCreateFence(vk->dev, &finfo, MPVK_ALLOCATOR, &cmd->fence));
VkSemaphoreCreateInfo sinfo = {
.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
};
VK(vkCreateSemaphore(vk->dev, &sinfo, MPVK_ALLOCATOR, &cmd->done));
}
return true;
error:
return false;
}
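
// Create the logical device: pick the most capable queue family with
// GRAPHICS support, check that it can present to the surface, enable the
// swapchain (and any SPIR-V compiler) extensions, and set up the primary
// command pool.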
bool mpvk_device_init(struct mpvk_ctx *vk, struct mpvk_device_opts opts)
{
assert(vk->physd);
void *tmp = talloc_new(NULL);
// Enumerate the queue families and find suitable families for each task
    uint32_t qfnum;
vkGetPhysicalDeviceQueueFamilyProperties(vk->physd, &qfnum, NULL);
VkQueueFamilyProperties *qfs = talloc_array(tmp, VkQueueFamilyProperties, qfnum);
vkGetPhysicalDeviceQueueFamilyProperties(vk->physd, &qfnum, qfs);
MP_VERBOSE(vk, "Queue families supported by device:\n");
for (int i = 0; i < qfnum; i++) {
MP_VERBOSE(vk, "QF %d: flags 0x%x num %d\n", i,
(unsigned)qfs[i].queueFlags, (int)qfs[i].queueCount);
}
// For most of our rendering operations, we want to use one "primary" pool,
// so just pick the queue family with the most features.
int idx = -1;
for (int i = 0; i < qfnum; i++) {
if (!(qfs[i].queueFlags & VK_QUEUE_GRAPHICS_BIT))
continue;
// QF supports more features
if (idx < 0 || qfs[i].queueFlags > qfs[idx].queueFlags)
idx = i;
// QF supports more queues (at the same specialization level)
if (qfs[i].queueFlags == qfs[idx].queueFlags &&
qfs[i].queueCount > qfs[idx].queueCount)
{
idx = i;
}
}
// Vulkan requires at least one GRAPHICS queue, so if this fails something
// is horribly wrong.
assert(idx >= 0);
// Ensure we can actually present to the surface using this queue
VkBool32 sup;
VK(vkGetPhysicalDeviceSurfaceSupportKHR(vk->physd, idx, vk->surf, &sup));
if (!sup) {
MP_ERR(vk, "Queue family does not support surface presentation!\n");
goto error;
}
// Now that we know which queue families we want, we can create the logical
// device
assert(opts.queue_count <= MPVK_MAX_QUEUES);
static const float priorities[MPVK_MAX_QUEUES] = {0};
VkDeviceQueueCreateInfo qinfo = {
.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
.queueFamilyIndex = idx,
.queueCount = MPMIN(qfs[idx].queueCount, opts.queue_count),
.pQueuePriorities = priorities,
};
const char **exts = NULL;
int num_exts = 0;
MP_TARRAY_APPEND(tmp, exts, num_exts, VK_KHR_SWAPCHAIN_EXTENSION_NAME);
if (vk->spirv->required_ext)
MP_TARRAY_APPEND(tmp, exts, num_exts, vk->spirv->required_ext);
VkDeviceCreateInfo dinfo = {
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
.queueCreateInfoCount = 1,
.pQueueCreateInfos = &qinfo,
.ppEnabledExtensionNames = exts,
.enabledExtensionCount = num_exts,
};
MP_VERBOSE(vk, "Creating vulkan device with extensions:\n");
for (int i = 0; i < num_exts; i++)
MP_VERBOSE(vk, " %s\n", exts[i]);
VK(vkCreateDevice(vk->physd, &dinfo, MPVK_ALLOCATOR, &vk->dev));
vk_malloc_init(vk);
// Create the vk_cmdpools and all required queues / synchronization objects
if (!vk_cmdpool_init(vk, qinfo, qfs[idx], &vk->pool))
goto error;
talloc_free(tmp);
return true;
error:
MP_ERR(vk, "Failed creating logical device!\n");
talloc_free(tmp);
return false;
}
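
// Fire and clear all completion callbacks registered on this command.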
static void run_callbacks(struct mpvk_ctx *vk, struct vk_cmd *cmd)
{
for (int i = 0; i < cmd->num_callbacks; i++) {
struct vk_callback *cb = &cmd->callbacks[i];
cb->run(cb->priv, cb->arg);
*cb = (struct vk_callback){0};
}
cmd->num_callbacks = 0;
// Also reset vk->last_cmd in case this was the last command to run
if (vk->last_cmd == cmd)
vk->last_cmd = NULL;
}
static void wait_for_cmds(struct mpvk_ctx *vk, struct vk_cmd cmds[], int num)
{
if (!num)
return;
VkFence fences[MPVK_MAX_CMDS];
for (int i = 0; i < num; i++)
fences[i] = cmds[i].fence;
vkWaitForFences(vk->dev, num, fences, true, UINT64_MAX);
for (int i = 0; i < num; i++)
run_callbacks(vk, &cmds[i]);
}
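
// The commands in a pool form a ring buffer: cindex is the slot the next
// command will be recorded into, and cindex_pending is the oldest command
// still in flight. Waiting on the (possibly wrapping) range between them
// drains the pool completely.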
void mpvk_pool_wait_idle(struct mpvk_ctx *vk, struct vk_cmdpool *pool)
{
if (!pool)
return;
int idx = pool->cindex, pidx = pool->cindex_pending;
if (pidx < idx) { // range doesn't wrap
wait_for_cmds(vk, &pool->cmds[pidx], idx - pidx);
} else if (pidx > idx) { // range wraps
wait_for_cmds(vk, &pool->cmds[pidx], MPVK_MAX_CMDS - pidx);
wait_for_cmds(vk, &pool->cmds[0], idx);
}
pool->cindex_pending = pool->cindex;
}
void mpvk_dev_wait_idle(struct mpvk_ctx *vk)
{
mpvk_pool_wait_idle(vk, vk->pool);
}
void mpvk_pool_poll_cmds(struct mpvk_ctx *vk, struct vk_cmdpool *pool,
uint64_t timeout)
{
if (!pool)
return;
// If requested, hard block until at least one command completes
if (timeout > 0 && pool->cindex_pending != pool->cindex) {
vkWaitForFences(vk->dev, 1, &pool->cmds[pool->cindex_pending].fence,
true, timeout);
}
// Lazily garbage collect the commands based on their status
while (pool->cindex_pending != pool->cindex) {
struct vk_cmd *cmd = &pool->cmds[pool->cindex_pending];
VkResult res = vkGetFenceStatus(vk->dev, cmd->fence);
if (res != VK_SUCCESS)
break;
run_callbacks(vk, cmd);
pool->cindex_pending++;
pool->cindex_pending %= MPVK_MAX_CMDS;
}
}
void mpvk_dev_poll_cmds(struct mpvk_ctx *vk, uint64_t timeout)
{
mpvk_pool_poll_cmds(vk, vk->pool, timeout);
}
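
// Register a callback against the device as a whole: it is attached to the
// most recently submitted command, or runs immediately if nothing has been
// submitted yet.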
void vk_dev_callback(struct mpvk_ctx *vk, vk_cb callback, void *p, void *arg)
{
if (vk->last_cmd) {
vk_cmd_callback(vk->last_cmd, callback, p, arg);
} else {
// The device was already idle, so we can just immediately call it
callback(p, arg);
}
}
void vk_cmd_callback(struct vk_cmd *cmd, vk_cb callback, void *p, void *arg)
{
MP_TARRAY_GROW(NULL, cmd->callbacks, cmd->num_callbacks);
cmd->callbacks[cmd->num_callbacks++] = (struct vk_callback) {
.run = callback,
.priv = p,
.arg = arg,
};
}
void vk_cmd_dep(struct vk_cmd *cmd, VkSemaphore dep,
VkPipelineStageFlagBits depstage)
{
assert(cmd->num_deps < MPVK_MAX_CMD_DEPS);
cmd->deps[cmd->num_deps] = dep;
cmd->depstages[cmd->num_deps++] = depstage;
}
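
// Grab the next free command buffer from the pool's ring and begin recording.
// Typical usage (a sketch; the recorded command and callback are illustrative):
//
//   struct vk_cmd *cmd = vk_cmd_begin(vk, vk->pool);
//   if (cmd) {
//       vkCmdFillBuffer(cmd->buf, buf, 0, VK_WHOLE_SIZE, 0); // record work
//       vk_cmd_callback(cmd, cleanup_cb, priv, arg); // runs on completion
//       VkSemaphore done;
//       vk_cmd_submit(vk, cmd, &done); // 'done' signals when the work ends
//   }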
struct vk_cmd *vk_cmd_begin(struct mpvk_ctx *vk, struct vk_cmdpool *pool)
{
// Garbage collect the cmdpool first
mpvk_pool_poll_cmds(vk, pool, 0);
int next = (pool->cindex + 1) % MPVK_MAX_CMDS;
if (next == pool->cindex_pending) {
MP_ERR(vk, "No free command buffers!\n");
goto error;
}
struct vk_cmd *cmd = &pool->cmds[pool->cindex];
pool->cindex = next;
VK(vkResetCommandBuffer(cmd->buf, 0));
VkCommandBufferBeginInfo binfo = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
};
VK(vkBeginCommandBuffer(cmd->buf, &binfo));
return cmd;
error:
return NULL;
}
bool vk_cmd_submit(struct mpvk_ctx *vk, struct vk_cmd *cmd, VkSemaphore *done)
{
VK(vkEndCommandBuffer(cmd->buf));
struct vk_cmdpool *pool = cmd->pool;
VkQueue queue = pool->queues[pool->qindex];
VkSubmitInfo sinfo = {
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.commandBufferCount = 1,
.pCommandBuffers = &cmd->buf,
.waitSemaphoreCount = cmd->num_deps,
.pWaitSemaphores = cmd->deps,
.pWaitDstStageMask = cmd->depstages,
};
if (done) {
sinfo.signalSemaphoreCount = 1;
sinfo.pSignalSemaphores = &cmd->done;
*done = cmd->done;
}
VK(vkResetFences(vk->dev, 1, &cmd->fence));
VK(vkQueueSubmit(queue, 1, &sinfo, cmd->fence));
MP_TRACE(vk, "Submitted command on queue %p (QF %d)\n", (void *)queue,
pool->qf);
for (int i = 0; i < cmd->num_deps; i++)
cmd->deps[i] = NULL;
cmd->num_deps = 0;
vk->last_cmd = cmd;
return true;
error:
return false;
}
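
// Rotate to the next queue in the family, so that consecutive submissions
// can run on different hardware queues where the device provides more than
// one.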
void vk_cmd_cycle_queues(struct mpvk_ctx *vk)
{
struct vk_cmdpool *pool = vk->pool;
pool->qindex = (pool->qindex + 1) % pool->qcount;
}
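
// Convenience constants describing a single-mip, single-layer color image,
// the common case for video surfaces.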
const VkImageSubresourceRange vk_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.levelCount = 1,
.layerCount = 1,
};
const VkImageSubresourceLayers vk_layers = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.layerCount = 1,
};