vulkan_device.h
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

#pragma once

#include "gpu_device.h"
#include "gpu_framebuffer_manager.h"
#include "gpu_texture.h"
#include "vulkan_loader.h"
#include "vulkan_stream_buffer.h"

#include "common/dimensional_array.h"

#include <array>
#include <atomic>
#include <condition_variable>
#include <deque>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

class VulkanPipeline;
class VulkanSwapChain;
class VulkanTexture;
class VulkanTextureBuffer;
class VulkanDownloadTexture;

struct VK_PIPELINE_CACHE_HEADER;

class VulkanDevice final : public GPUDevice
{
public:
  friend VulkanTexture;
  friend VulkanDownloadTexture;

  enum : u32
  {
    NUM_COMMAND_BUFFERS = 3,
  };

  struct OptionalExtensions
  {
    bool vk_ext_external_memory_host : 1;
    bool vk_ext_fragment_shader_interlock : 1;
    bool vk_ext_full_screen_exclusive : 1;
    bool vk_ext_memory_budget : 1;
    bool vk_ext_rasterization_order_attachment_access : 1;
    bool vk_ext_swapchain_maintenance1 : 1;
    bool vk_khr_get_memory_requirements2 : 1;
    bool vk_khr_bind_memory2 : 1;
    bool vk_khr_get_physical_device_properties2 : 1;
    bool vk_khr_dedicated_allocation : 1;
    bool vk_khr_driver_properties : 1;
    bool vk_khr_dynamic_rendering : 1;
    bool vk_khr_dynamic_rendering_local_read : 1;
    bool vk_khr_maintenance4 : 1;
    bool vk_khr_maintenance5 : 1;
    bool vk_khr_push_descriptor : 1;
    bool vk_khr_shader_non_semantic_info : 1;
  };

  static GPUTexture::Format GetFormatForVkFormat(VkFormat format);

  static const std::array<VkFormat, static_cast<u32>(GPUTexture::Format::MaxCount)> TEXTURE_FORMAT_MAPPING;

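  // Illustrative use of the mapping (a sketch, not part of the class; 'fmt' is a hypothetical
  // variable): TEXTURE_FORMAT_MAPPING converts a GPUTexture::Format to its VkFormat, and
  // GetFormatForVkFormat() performs the reverse lookup.
  //
  //   const VkFormat vkfmt = VulkanDevice::TEXTURE_FORMAT_MAPPING[static_cast<u32>(fmt)];
  //   const GPUTexture::Format back = VulkanDevice::GetFormatForVkFormat(vkfmt);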

public:
  VulkanDevice();
  ~VulkanDevice() override;

  // Returns a list of Vulkan-compatible GPUs.
  using GPUList = std::vector<std::pair<VkPhysicalDevice, AdapterInfo>>;
  static GPUList EnumerateGPUs(VkInstance instance);
  static GPUList EnumerateGPUs();
  static AdapterInfoList GetAdapterList();

  RenderAPI GetRenderAPI() const override;

  bool HasSurface() const override;

  bool UpdateWindow() override;
  void ResizeWindow(s32 new_window_width, s32 new_window_height, float new_window_scale) override;
  void DestroySurface() override;

  std::string GetDriverInfo() const override;

  void ExecuteAndWaitForGPUIdle() override;

  std::unique_ptr<GPUTexture> CreateTexture(u32 width, u32 height, u32 layers, u32 levels, u32 samples,
                                            GPUTexture::Type type, GPUTexture::Format format,
                                            const void* data = nullptr, u32 data_stride = 0) override;
  std::unique_ptr<GPUSampler> CreateSampler(const GPUSampler::Config& config) override;
  std::unique_ptr<GPUTextureBuffer> CreateTextureBuffer(GPUTextureBuffer::Format format, u32 size_in_elements) override;

  std::unique_ptr<GPUDownloadTexture> CreateDownloadTexture(u32 width, u32 height, GPUTexture::Format format) override;
  std::unique_ptr<GPUDownloadTexture> CreateDownloadTexture(u32 width, u32 height, GPUTexture::Format format,
                                                            void* memory, size_t memory_size,
                                                            u32 memory_stride) override;

  bool SupportsTextureFormat(GPUTexture::Format format) const override;
  void CopyTextureRegion(GPUTexture* dst, u32 dst_x, u32 dst_y, u32 dst_layer, u32 dst_level, GPUTexture* src,
                         u32 src_x, u32 src_y, u32 src_layer, u32 src_level, u32 width, u32 height) override;
  void ResolveTextureRegion(GPUTexture* dst, u32 dst_x, u32 dst_y, u32 dst_layer, u32 dst_level, GPUTexture* src,
                            u32 src_x, u32 src_y, u32 width, u32 height) override;
  void ClearRenderTarget(GPUTexture* t, u32 c) override;
  void ClearDepth(GPUTexture* t, float d) override;
  void InvalidateRenderTarget(GPUTexture* t) override;

  std::unique_ptr<GPUShader> CreateShaderFromBinary(GPUShaderStage stage, std::span<const u8> data,
                                                    Error* error) override;
  std::unique_ptr<GPUShader> CreateShaderFromSource(GPUShaderStage stage, GPUShaderLanguage language,
                                                    std::string_view source, const char* entry_point,
                                                    DynamicHeapArray<u8>* out_binary, Error* error) override;
  std::unique_ptr<GPUPipeline> CreatePipeline(const GPUPipeline::GraphicsConfig& config, Error* error) override;

  void PushDebugGroup(const char* name) override;
  void PopDebugGroup() override;
  void InsertDebugMessage(const char* msg) override;

  void MapVertexBuffer(u32 vertex_size, u32 vertex_count, void** map_ptr, u32* map_space,
                       u32* map_base_vertex) override;
  void UnmapVertexBuffer(u32 vertex_size, u32 vertex_count) override;
  void MapIndexBuffer(u32 index_count, DrawIndex** map_ptr, u32* map_space, u32* map_base_index) override;
  void UnmapIndexBuffer(u32 used_index_count) override;
  void PushUniformBuffer(const void* data, u32 data_size) override;
  void* MapUniformBuffer(u32 size) override;
  void UnmapUniformBuffer(u32 size) override;

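  // Illustrative dynamic-upload pattern (a sketch; the locals and the Vertex type are
  // hypothetical): map, fill, unmap, then draw from the returned base vertex.
  //
  //   void* map;
  //   u32 space, base_vertex;
  //   MapVertexBuffer(sizeof(Vertex), count, &map, &space, &base_vertex);
  //   std::memcpy(map, vertices, sizeof(Vertex) * count);
  //   UnmapVertexBuffer(sizeof(Vertex), count);
  //   Draw(count, base_vertex);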
  void SetRenderTargets(GPUTexture* const* rts, u32 num_rts, GPUTexture* ds,
                        GPUPipeline::RenderPassFlag flags = GPUPipeline::NoRenderPassFlags) override;
  void SetPipeline(GPUPipeline* pipeline) override;
  void SetTextureSampler(u32 slot, GPUTexture* texture, GPUSampler* sampler) override;
  void SetTextureBuffer(u32 slot, GPUTextureBuffer* buffer) override;
  void SetViewport(const GSVector4i rc) override;
  void SetScissor(const GSVector4i rc) override;
  void Draw(u32 vertex_count, u32 base_vertex) override;
  void DrawIndexed(u32 index_count, u32 base_index, u32 base_vertex) override;
  void DrawIndexedWithBarrier(u32 index_count, u32 base_index, u32 base_vertex, DrawBarrier type) override;

  bool SetGPUTimingEnabled(bool enabled) override;
  float GetAndResetAccumulatedGPUTime() override;

  void SetVSyncMode(GPUVSyncMode mode, bool allow_present_throttle) override;

  bool BeginPresent(bool skip_present, u32 clear_color) override;
  void EndPresent(bool explicit_present) override;
  void SubmitPresent() override;

  // Global state accessors
  ALWAYS_INLINE static VulkanDevice& GetInstance() { return *static_cast<VulkanDevice*>(g_gpu_device.get()); }
  ALWAYS_INLINE VkInstance GetVulkanInstance() const { return m_instance; }
  ALWAYS_INLINE VkDevice GetVulkanDevice() const { return m_device; }
  ALWAYS_INLINE VmaAllocator GetAllocator() const { return m_allocator; }
  ALWAYS_INLINE VkPhysicalDevice GetVulkanPhysicalDevice() const { return m_physical_device; }
  ALWAYS_INLINE u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
  ALWAYS_INLINE u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; }
  ALWAYS_INLINE const OptionalExtensions& GetOptionalExtensions() const { return m_optional_extensions; }

  /// Returns true if Vulkan is suitable as a default renderer for the devices in the system.
  static bool IsSuitableDefaultRenderer();

  // Helpers for getting constants
  ALWAYS_INLINE u32 GetBufferCopyOffsetAlignment() const
  {
    return static_cast<u32>(m_device_properties.limits.optimalBufferCopyOffsetAlignment);
  }
  ALWAYS_INLINE u32 GetBufferCopyRowPitchAlignment() const
  {
    return static_cast<u32>(m_device_properties.limits.optimalBufferCopyRowPitchAlignment);
  }

  void WaitForGPUIdle();

  // Creates a simple render pass.
  VkRenderPass GetRenderPass(const GPUPipeline::GraphicsConfig& config);
  VkRenderPass GetRenderPass(VulkanTexture* const* rts, u32 num_rts, VulkanTexture* ds,
                             GPUPipeline::RenderPassFlag render_pass_flags);
  VkRenderPass GetSwapChainRenderPass(GPUTexture::Format format, VkAttachmentLoadOp load_op);

  // Gets a non-clearing version of the specified render pass. Slow, don't call in a hot path.
  VkRenderPass GetRenderPassForRestarting(VkRenderPass pass);

  // These command buffers are allocated per-frame. They are valid until the command buffer
  // is submitted; after that, call these functions again to get the current ones.
  ALWAYS_INLINE VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; }
  ALWAYS_INLINE VulkanStreamBuffer& GetTextureUploadBuffer() { return m_texture_upload_buffer; }
  VkCommandBuffer GetCurrentInitCommandBuffer();

  /// Allocates a descriptor set from the pool reserved for the current frame.
  VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);

  /// Allocates a descriptor set from the global pool, which persists across frames.
  VkDescriptorSet AllocatePersistentDescriptorSet(VkDescriptorSetLayout set_layout);

  /// Frees a descriptor set allocated from the global pool.
  void FreePersistentDescriptorSet(VkDescriptorSet set);

  // Fence "counters" are used to track which commands have been completed by the GPU.
  // If the last completed fence counter is greater than or equal to N, it means that the work
  // associated with counter N has been completed by the GPU. The value of N to associate with
  // commands can be retrieved by calling GetCurrentFenceCounter().
  u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; }

  // Gets the fence that will be signaled when the currently executing command buffer is
  // queued and executed. Do not wait for this fence before the buffer is executed.
  // TODO: move out of struct
  u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }

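  // Illustrative lifetime-tracking pattern (a sketch; 'res' is a hypothetical resource that
  // remembers the counter of the command buffer that last used it):
  //
  //   res.fence_counter = GetCurrentFenceCounter(); // tag work recorded from this point
  //   ... record commands that touch 'res' ...
  //   // later, before reusing or destroying 'res':
  //   if (GetCompletedFenceCounter() < res.fence_counter)
  //     WaitForFenceCounter(res.fence_counter); // declared below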

  // Schedule a Vulkan resource for destruction later on. This will occur when the command buffer
  // is next re-used, and the GPU has finished working with the specified resource.
  void DeferBufferDestruction(VkBuffer object, VmaAllocation allocation);
  void DeferBufferDestruction(VkBuffer object, VkDeviceMemory memory);
  void DeferFramebufferDestruction(VkFramebuffer object);
  void DeferImageDestruction(VkImage object, VmaAllocation allocation);
  void DeferImageViewDestruction(VkImageView object);
  void DeferPipelineDestruction(VkPipeline object);
  void DeferBufferViewDestruction(VkBufferView object);
  void DeferPersistentDescriptorSetDestruction(VkDescriptorSet object);

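  // Illustrative use (a sketch): instead of calling vkDestroyImageView() on a view the GPU
  // may still be reading,
  //
  //   DeferImageViewDestruction(view); // actually destroyed once its fence counter completes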

  // Wait for a fence to be completed.
  // Also invokes callbacks for completion.
  void WaitForFenceCounter(u64 fence_counter);

  /// Ends any render pass, executes the command buffer, and invalidates cached state.
  void SubmitCommandBuffer(bool wait_for_completion);
  void SubmitCommandBuffer(bool wait_for_completion, const std::string_view reason);
  void SubmitCommandBufferAndRestartRenderPass(const std::string_view reason);

  void UnbindFramebuffer(VulkanTexture* tex);
  void UnbindPipeline(VulkanPipeline* pl);
  void UnbindTexture(VulkanTexture* tex);
  void UnbindTextureBuffer(VulkanTextureBuffer* buf);

protected:
  bool CreateDevice(std::string_view adapter, bool threaded_presentation,
                    std::optional<bool> exclusive_fullscreen_control, FeatureMask disabled_features,
                    Error* error) override;
  void DestroyDevice() override;

  bool ReadPipelineCache(std::optional<DynamicHeapArray<u8>> data) override;
  bool GetPipelineCacheData(DynamicHeapArray<u8>* data) override;

private:
  enum DIRTY_FLAG : u32
  {
    DIRTY_FLAG_INITIAL = (1 << 0),
    DIRTY_FLAG_PIPELINE_LAYOUT = (1 << 1),
    DIRTY_FLAG_DYNAMIC_OFFSETS = (1 << 2),
    DIRTY_FLAG_TEXTURES_OR_SAMPLERS = (1 << 3),
    DIRTY_FLAG_INPUT_ATTACHMENT = (1 << 4),

    ALL_DIRTY_STATE = DIRTY_FLAG_INITIAL | DIRTY_FLAG_PIPELINE_LAYOUT | DIRTY_FLAG_DYNAMIC_OFFSETS |
                      DIRTY_FLAG_TEXTURES_OR_SAMPLERS | DIRTY_FLAG_INPUT_ATTACHMENT,
  };

  enum class PipelineLayoutType : u8
  {
    Normal,
    ColorFeedbackLoop,
    BindRenderTargetsAsImages,
    MaxCount,
  };

  struct RenderPassCacheKey
  {
    struct RenderTarget
    {
      u8 format : 5;
      u8 load_op : 2;
      u8 store_op : 1;
    };
    RenderTarget color[MAX_RENDER_TARGETS];

    u8 depth_format : 5;
    u8 depth_load_op : 2;
    u8 depth_store_op : 1;
    u8 stencil_load_op : 2;
    u8 stencil_store_op : 1;
    u8 feedback_loop : 2;
    u8 samples;

    bool operator==(const RenderPassCacheKey& rhs) const;
    bool operator!=(const RenderPassCacheKey& rhs) const;
  };

  struct RenderPassCacheKeyHash
  {
    size_t operator()(const RenderPassCacheKey& rhs) const;
  };

  struct CommandBuffer
  {
    // [0] - Init (upload) command buffer, [1] - draw command buffer
    VkCommandPool command_pool = VK_NULL_HANDLE;
    std::array<VkCommandBuffer, 2> command_buffers{VK_NULL_HANDLE, VK_NULL_HANDLE};
    VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
    VkFence fence = VK_NULL_HANDLE;
    u64 fence_counter = 0;
    bool init_buffer_used = false;
    bool needs_fence_wait = false;
    bool timestamp_written = false;
  };

  using CleanupObjectFunction = void (*)(VulkanDevice& dev, void* obj);
  using SamplerMap = std::unordered_map<u64, VkSampler>;

  // Helper method to create a Vulkan instance.
  static VkInstance CreateVulkanInstance(const WindowInfo& wi, OptionalExtensions* oe, bool enable_debug_utils,
                                         bool enable_validation_layer);

  bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header);
  void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header);

  // Enables/disables debug messages at runtime.
  bool EnableDebugUtils();
  void DisableDebugUtils();

  /// Returns true if running on an NVIDIA GPU.
  bool IsDeviceNVIDIA() const;

  /// Returns true if running on an AMD GPU.
  bool IsDeviceAMD() const;

  // Vendor queries.
  bool IsDeviceAdreno() const;
  bool IsDeviceMali() const;
  bool IsDeviceImgTec() const;
  bool IsBrokenMobileDriver() const;

  void EndAndSubmitCommandBuffer(VulkanSwapChain* present_swap_chain, bool explicit_present, bool submit_on_thread);
  void MoveToNextCommandBuffer();
  void WaitForPresentComplete();
  bool CheckLastSubmitFail();

  using ExtensionList = std::vector<const char*>;
  static bool SelectInstanceExtensions(ExtensionList* extension_list, const WindowInfo& wi, OptionalExtensions* oe,
                                       bool enable_debug_utils);
  bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface, Error* error);
  bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer, FeatureMask disabled_features, Error* error);
  void ProcessDeviceExtensions();
  void SetFeatures(FeatureMask disabled_features, const VkPhysicalDeviceFeatures& vk_features);

  static u32 GetMaxMultisamples(VkPhysicalDevice physical_device, const VkPhysicalDeviceProperties& properties);

  bool CreateAllocator();
  void DestroyAllocator();
  bool CreateCommandBuffers();
  void DestroyCommandBuffers();
  bool CreatePersistentDescriptorPool();
  void DestroyPersistentDescriptorPool();
  bool CreateNullTexture();
  bool CreateBuffers();
  void DestroyBuffers();
  bool CreatePipelineLayouts();
  void DestroyPipelineLayouts();
  bool CreatePersistentDescriptorSets();
  void DestroyPersistentDescriptorSets();
  VkSampler GetSampler(const GPUSampler::Config& config);
  void DestroySamplers();

  void RenderBlankFrame();

  bool TryImportHostMemory(void* data, size_t data_size, VkBufferUsageFlags buffer_usage, VkDeviceMemory* out_memory,
                           VkBuffer* out_buffer, VkDeviceSize* out_offset);

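  // Illustrative use (a sketch; 'ptr' and 'size' are hypothetical locals): when
  // vk_ext_external_memory_host is available, a CPU allocation can be wrapped as a buffer
  // rather than going through a staging copy.
  //
  //   VkDeviceMemory memory;
  //   VkBuffer buffer;
  //   VkDeviceSize offset;
  //   if (TryImportHostMemory(ptr, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &memory, &buffer, &offset))
  //     ... copy from 'buffer' at 'offset' instead of staging ...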

  /// Set dirty flags on everything to force re-bind at next draw time.
  void InvalidateCachedState();

  s32 IsRenderTargetBoundIndex(const GPUTexture* tex) const;

  /// Applies any changed state.
  static PipelineLayoutType GetPipelineLayoutType(GPUPipeline::RenderPassFlag flags);
  VkPipelineLayout GetCurrentVkPipelineLayout() const;
  void SetInitialPipelineState();
  void PreDrawCheck();

  template<GPUPipeline::Layout layout>
  bool UpdateDescriptorSetsForLayout(u32 dirty);
  bool UpdateDescriptorSets(u32 dirty);

  // Ends a render pass if we're currently in one.
  // When Bind() is next called, the pass will be restarted.
  void BeginRenderPass();
  void BeginSwapChainRenderPass(u32 clear_color);
  void EndRenderPass();
  bool InRenderPass();

  VkRenderPass CreateCachedRenderPass(RenderPassCacheKey key);
  static VkFramebuffer CreateFramebuffer(GPUTexture* const* rts, u32 num_rts, GPUTexture* ds, u32 flags);
  static void DestroyFramebuffer(VkFramebuffer fbo);

  VkImageMemoryBarrier GetColorBufferBarrier(const VulkanTexture* rt) const;

  void BeginCommandBuffer(u32 index);
  void WaitForCommandBufferCompletion(u32 index);

  void DoSubmitCommandBuffer(u32 index, VulkanSwapChain* present_swap_chain);
  void DoPresent(VulkanSwapChain* present_swap_chain);
  void WaitForPresentComplete(std::unique_lock<std::mutex>& lock);
  void PresentThread();
  void StartPresentThread();
  void StopPresentThread();

  VkInstance m_instance = VK_NULL_HANDLE;
  VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
  VkDevice m_device = VK_NULL_HANDLE;
  VmaAllocator m_allocator = VK_NULL_HANDLE;

  VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE;

  VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE;

  VkQueue m_graphics_queue = VK_NULL_HANDLE;
  VkQueue m_present_queue = VK_NULL_HANDLE;
  u32 m_graphics_queue_family_index = 0;
  u32 m_present_queue_family_index = 0;

  VkQueryPool m_timestamp_query_pool = VK_NULL_HANDLE;
  float m_accumulated_gpu_time = 0.0f;

  std::array<CommandBuffer, NUM_COMMAND_BUFFERS> m_frame_resources;
  std::deque<std::pair<u64, std::function<void()>>> m_cleanup_objects; // [fence_counter, callback]
  u64 m_next_fence_counter = 1;
  u64 m_completed_fence_counter = 0;
  u32 m_current_frame = 0;

  std::atomic_bool m_last_submit_failed{false};
  std::atomic_bool m_present_done{true};
  std::mutex m_present_mutex;
  std::condition_variable m_present_queued_cv;
  std::condition_variable m_present_done_cv;
  std::thread m_present_thread;
  std::atomic_bool m_present_thread_done{false};

  struct QueuedPresent
  {
    VulkanSwapChain* swap_chain;
    u32 command_buffer_index;
  };

  QueuedPresent m_queued_present = {nullptr, 0xFFFFFFFFu};

  std::unordered_map<RenderPassCacheKey, VkRenderPass, RenderPassCacheKeyHash> m_render_pass_cache;
  GPUFramebufferManager<VkFramebuffer, CreateFramebuffer, DestroyFramebuffer> m_framebuffer_manager;
  VkPipelineCache m_pipeline_cache = VK_NULL_HANDLE;

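  // Note (inferred from the declarations above): m_render_pass_cache maps packed
  // RenderPassCacheKey values to created render passes, populated via CreateCachedRenderPass(),
  // so repeated GetRenderPass() requests with the same formats/ops/sample counts can reuse an
  // existing VkRenderPass instead of creating a new one.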
  // TODO: Move to static?
  VkDebugUtilsMessengerEXT m_debug_messenger_callback = VK_NULL_HANDLE;

  VkPhysicalDeviceProperties m_device_properties = {};
  VkPhysicalDeviceDriverPropertiesKHR m_device_driver_properties = {};
  OptionalExtensions m_optional_extensions = {};
  std::optional<bool> m_exclusive_fullscreen_control;

  std::unique_ptr<VulkanSwapChain> m_swap_chain;
  std::unique_ptr<VulkanTexture> m_null_texture;

  VkDescriptorSetLayout m_ubo_ds_layout = VK_NULL_HANDLE;
  VkDescriptorSetLayout m_single_texture_ds_layout = VK_NULL_HANDLE;
  VkDescriptorSetLayout m_single_texture_buffer_ds_layout = VK_NULL_HANDLE;
  VkDescriptorSetLayout m_multi_texture_ds_layout = VK_NULL_HANDLE;
  VkDescriptorSetLayout m_feedback_loop_ds_layout = VK_NULL_HANDLE;
  VkDescriptorSetLayout m_rov_ds_layout = VK_NULL_HANDLE;
  DimensionalArray<VkPipelineLayout, static_cast<size_t>(GPUPipeline::Layout::MaxCount),
                   static_cast<size_t>(PipelineLayoutType::MaxCount)>
    m_pipeline_layouts = {};

  VulkanStreamBuffer m_vertex_buffer;
  VulkanStreamBuffer m_index_buffer;
  VulkanStreamBuffer m_uniform_buffer;
  VulkanStreamBuffer m_texture_upload_buffer;

  VkDescriptorSet m_ubo_descriptor_set = VK_NULL_HANDLE;
  u32 m_uniform_buffer_position = 0;

  SamplerMap m_sampler_map;

  // Which bindings/state have to be updated before the next draw.
  u32 m_dirty_flags = ALL_DIRTY_STATE;

  u32 m_num_current_render_targets = 0;
  GPUPipeline::RenderPassFlag m_current_render_pass_flags = GPUPipeline::NoRenderPassFlags;
  std::array<VulkanTexture*, MAX_RENDER_TARGETS> m_current_render_targets = {};
  VulkanTexture* m_current_depth_target = nullptr;
  VkFramebuffer m_current_framebuffer = VK_NULL_HANDLE;
  VkRenderPass m_current_render_pass = VK_NULL_HANDLE;

  VulkanPipeline* m_current_pipeline = nullptr;
  GPUPipeline::Layout m_current_pipeline_layout = GPUPipeline::Layout::SingleTextureAndPushConstants;

  std::array<VulkanTexture*, MAX_TEXTURE_SAMPLERS> m_current_textures = {};
  std::array<VkSampler, MAX_TEXTURE_SAMPLERS> m_current_samplers = {};
  VulkanTextureBuffer* m_current_texture_buffer = nullptr;
  GSVector4i m_current_viewport = GSVector4i::cxpr(0, 0, 1, 1);
  GSVector4i m_current_scissor = GSVector4i::cxpr(0, 0, 1, 1);
};
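
// Illustrative access pattern from rendering code elsewhere in the codebase (a sketch; the call
// site and the reason string are hypothetical, and it assumes g_gpu_device currently holds a
// VulkanDevice, as GetInstance() requires):
//
//   VulkanDevice& dev = VulkanDevice::GetInstance();
//   VkCommandBuffer cmdbuf = dev.GetCurrentCommandBuffer();
//   ... record Vulkan commands ...
//   dev.SubmitCommandBuffer(false, "example reason");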