From 398edf268b41a607e2467a185f7952f8ceb32e07 Mon Sep 17 00:00:00 2001
From: Elabajaba
Date: Thu, 30 Jan 2025 21:55:24 -0500
Subject: [PATCH] Replace gpu-alloc with gpu-allocator for vulkan.

---
 Cargo.lock                     |  21 +--
 Cargo.toml                     |   5 +-
 wgpu-hal/Cargo.toml            |   6 +-
 wgpu-hal/src/vulkan/adapter.rs |  92 ++++------
 wgpu-hal/src/vulkan/device.rs  | 271 +++++++++++----------------------
 wgpu-hal/src/vulkan/mod.rs     |  13 +-
 6 files changed, 131 insertions(+), 277 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index e84e45acf2..14b1badd0d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1600,31 +1600,13 @@ dependencies = [
  "gl_generator",
 ]
 
-[[package]]
-name = "gpu-alloc"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbcd2dba93594b227a1f57ee09b8b9da8892c34d55aa332e034a228d0fe6a171"
-dependencies = [
- "bitflags 2.8.0",
- "gpu-alloc-types",
-]
-
-[[package]]
-name = "gpu-alloc-types"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98ff03b468aa837d70984d55f5d3f846f6ec31fe34bbb97c4f85219caeee1ca4"
-dependencies = [
- "bitflags 2.8.0",
-]
-
 [[package]]
 name = "gpu-allocator"
 version = "0.27.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c151a2a5ef800297b4e79efa4f4bec035c5f51d5ae587287c9b952bdf734cacd"
 dependencies = [
+ "ash",
  "log",
  "presser",
  "thiserror 1.0.69",
@@ -4527,7 +4509,6 @@ dependencies = [
 "glutin",
 "glutin-winit",
 "glutin_wgl_sys 0.6.1",
- "gpu-alloc",
 "gpu-allocator",
 "gpu-descriptor",
 "hashbrown",
diff --git a/Cargo.toml b/Cargo.toml
index 381cbb4a3b..cd27b4bc6f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -151,11 +151,12 @@ objc = "0.2.5"
 # Vulkan dependencies
 android_system_properties = "0.1.1"
 ash = "0.38.0"
-gpu-alloc = "0.6"
 gpu-descriptor = "0.3"
 
-# DX12 dependencies
+# DX12 and Vulkan shared dependencies
 gpu-allocator = { version = "0.27", default-features = false }
+
+# DX12 dependencies
 range-alloc = "0.1"
 mach-dxcompiler-rs = { version = "0.1.4", default-features = false }
 windows-core = { version = "0.58", default-features = false }
diff --git a/wgpu-hal/Cargo.toml b/wgpu-hal/Cargo.toml
index e3056e2c77..a9e02237de 100644
--- a/wgpu-hal/Cargo.toml
+++ b/wgpu-hal/Cargo.toml
@@ -51,7 +51,7 @@ metal = [
 vulkan = [
     "naga/spv-out",
     "dep:ash",
-    "dep:gpu-alloc",
+    "gpu-allocator/vulkan",
     "dep:gpu-descriptor",
     "dep:libloading",
     "dep:smallvec",
@@ -155,9 +155,10 @@ glow = { workspace = true, optional = true }
 [target.'cfg(not(target_arch = "wasm32"))'.dependencies]
 # Backend: Vulkan
 ash = { workspace = true, optional = true }
-gpu-alloc = { workspace = true, optional = true }
 gpu-descriptor = { workspace = true, optional = true }
 smallvec = { workspace = true, optional = true, features = ["union"] }
+# Backend: DX12 and Vulkan
+gpu-allocator = { workspace = true, optional = true }
 # Backend: GLES
 khronos-egl = { workspace = true, features = ["dynamic"], optional = true }
 libloading = { workspace = true, optional = true }
@@ -182,7 +183,6 @@ windows-core = { workspace = true, optional = true }
 # Backend: Dx12
 bit-set = { workspace = true, optional = true }
 range-alloc = { workspace = true, optional = true }
-gpu-allocator = { workspace = true, optional = true }
 
 # backend: GLES
 glutin_wgl_sys = { workspace = true, optional = true }
diff --git a/wgpu-hal/src/vulkan/adapter.rs b/wgpu-hal/src/vulkan/adapter.rs
index 61020c2a34..a20fe80aab 100644
--- a/wgpu-hal/src/vulkan/adapter.rs
+++ b/wgpu-hal/src/vulkan/adapter.rs
@@ -1986,8 +1986,6 @@ impl super::Adapter {
         };
 
         let mem_allocator = {
-            let limits = self.phd_capabilities.properties.limits;
-
             // Note: the parameters here are not set in stone nor were they picked with
             // strong confidence.
             // `final_free_list_chunk` should be bigger than starting_free_list_chunk if
@@ -1999,72 +1997,42 @@ impl super::Adapter {
             // (then VMA doubles the block size each time it needs a new block).
             // At some point it would be good to experiment with real workloads
             //
-            // TODO(#5925): The plan is to switch the Vulkan backend from `gpu_alloc` to
-            // `gpu_allocator` which has a different (simpler) set of configuration options.
-            //
             // TODO: These parameters should take hardware capabilities into account.
             let mb = 1024 * 1024;
-            let perf_cfg = gpu_alloc::Config {
-                starting_free_list_chunk: 128 * mb,
-                final_free_list_chunk: 512 * mb,
-                minimal_buddy_size: 1,
-                initial_buddy_dedicated_size: 8 * mb,
-                dedicated_threshold: 32 * mb,
-                preferred_dedicated_threshold: mb,
-                transient_dedicated_threshold: 128 * mb,
-            };
-            let mem_usage_cfg = gpu_alloc::Config {
-                starting_free_list_chunk: 8 * mb,
-                final_free_list_chunk: 64 * mb,
-                minimal_buddy_size: 1,
-                initial_buddy_dedicated_size: 8 * mb,
-                dedicated_threshold: 8 * mb,
-                preferred_dedicated_threshold: mb,
-                transient_dedicated_threshold: 16 * mb,
-            };
-            let config = match memory_hints {
-                wgt::MemoryHints::Performance => perf_cfg,
-                wgt::MemoryHints::MemoryUsage => mem_usage_cfg,
+            let allocation_sizes = match memory_hints {
+                wgt::MemoryHints::Performance => gpu_allocator::AllocationSizes::default(),
+                wgt::MemoryHints::MemoryUsage => {
+                    gpu_allocator::AllocationSizes::new(8 * mb, 4 * mb)
+                }
                 wgt::MemoryHints::Manual {
                     suballocated_device_memory_block_size,
-                } => gpu_alloc::Config {
-                    starting_free_list_chunk: suballocated_device_memory_block_size.start,
-                    final_free_list_chunk: suballocated_device_memory_block_size.end,
-                    initial_buddy_dedicated_size: suballocated_device_memory_block_size.start,
-                    ..perf_cfg
-                },
+                } => {
+                    // TODO: Would it be useful to expose the host size in memory hints
+                    // instead of always using half of the device size?
+                    let device_size = suballocated_device_memory_block_size.start;
+                    let host_size = device_size / 2;
+                    gpu_allocator::AllocationSizes::new(device_size, host_size)
+                }
             };
-            let max_memory_allocation_size =
-                if let Some(maintenance_3) = self.phd_capabilities.maintenance_3 {
-                    maintenance_3.max_memory_allocation_size
-                } else {
-                    u64::MAX
-                };
-            let properties = gpu_alloc::DeviceProperties {
-                max_memory_allocation_count: limits.max_memory_allocation_count,
-                max_memory_allocation_size,
-                non_coherent_atom_size: limits.non_coherent_atom_size,
-                memory_types: memory_types
-                    .iter()
-                    .map(|memory_type| gpu_alloc::MemoryType {
-                        props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
-                            memory_type.property_flags.as_raw() as u8,
-                        ),
-                        heap: memory_type.heap_index,
-                    })
-                    .collect(),
-                memory_heaps: mem_properties
-                    .memory_heaps_as_slice()
-                    .iter()
-                    .map(|&memory_heap| gpu_alloc::MemoryHeap {
-                        size: memory_heap.size,
-                    })
-                    .collect(),
-                buffer_device_address: enabled_extensions
-                    .contains(&khr::buffer_device_address::NAME),
-            };
-            gpu_alloc::GpuAllocator::new(config, properties)
+            match gpu_allocator::vulkan::Allocator::new(
+                &gpu_allocator::vulkan::AllocatorCreateDesc {
+                    // TODO: Can we clone the arc for these instead somehow?
+                    instance: shared.instance.raw.clone(),
+                    device: shared.raw.clone(),
+                    physical_device: self.raw_physical_device(),
+                    debug_settings: Default::default(),
+                    // TODO: How do we decide if this should be enabled?
+                    buffer_device_address: false,
+                    allocation_sizes,
+                },
+            ) {
+                Ok(allocator) => allocator,
+                Err(e) => {
+                    log::error!("Failed to create vulkan allocator, error: {}", e);
+                    Err(e)?
+                }
+            }
         };
 
         let desc_allocator = gpu_descriptor::DescriptorAllocator::new(
             if let Some(di) = self.phd_capabilities.descriptor_indexing {
diff --git a/wgpu-hal/src/vulkan/device.rs b/wgpu-hal/src/vulkan/device.rs
index bba28939f7..b4fff49c2b 100644
--- a/wgpu-hal/src/vulkan/device.rs
+++ b/wgpu-hal/src/vulkan/device.rs
@@ -282,105 +282,17 @@ impl super::DeviceShared {
         buffer: &'a super::Buffer,
         ranges: I,
     ) -> Option<impl 'a + Iterator<Item = vk::MappedMemoryRange<'a>>> {
-        let block = buffer.block.as_ref()?.lock();
+        let block = buffer.allocation.as_ref()?;
         let mask = self.private_caps.non_coherent_map_mask;
         Some(ranges.map(move |range| {
             vk::MappedMemoryRange::default()
-                .memory(*block.memory())
+                .memory(unsafe { block.memory() })
                 .offset((block.offset() + range.start) & !mask)
                 .size((range.end - range.start + mask) & !mask)
         }))
     }
 }
 
-impl gpu_alloc::MemoryDevice<vk::DeviceMemory> for super::DeviceShared {
-    unsafe fn allocate_memory(
-        &self,
-        size: u64,
-        memory_type: u32,
-        flags: gpu_alloc::AllocationFlags,
-    ) -> Result<vk::DeviceMemory, gpu_alloc::OutOfMemory> {
-        let mut info = vk::MemoryAllocateInfo::default()
-            .allocation_size(size)
-            .memory_type_index(memory_type);
-
-        let mut info_flags;
-
-        if flags.contains(gpu_alloc::AllocationFlags::DEVICE_ADDRESS) {
-            info_flags = vk::MemoryAllocateFlagsInfo::default()
-                .flags(vk::MemoryAllocateFlags::DEVICE_ADDRESS);
-            info = info.push_next(&mut info_flags);
-        }
-
-        match unsafe { self.raw.allocate_memory(&info, None) } {
-            Ok(memory) => {
-                self.memory_allocations_counter.add(1);
-                Ok(memory)
-            }
-            Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
-                Err(gpu_alloc::OutOfMemory::OutOfDeviceMemory)
-            }
-            Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
-                Err(gpu_alloc::OutOfMemory::OutOfHostMemory)
-            }
-            // We don't use VK_KHR_external_memory
-            // VK_ERROR_INVALID_EXTERNAL_HANDLE
-            // We don't use VK_KHR_buffer_device_address
-            // VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR
-            Err(err) => handle_unexpected(err),
-        }
-    }
-
-    unsafe fn deallocate_memory(&self, memory: vk::DeviceMemory) {
-        self.memory_allocations_counter.sub(1);
-
-        unsafe { self.raw.free_memory(memory, None) };
-    }
-
-    unsafe fn map_memory(
-        &self,
-        memory: &mut vk::DeviceMemory,
-        offset: u64,
-        size: u64,
-    ) -> Result<ptr::NonNull<u8>, gpu_alloc::DeviceMapError> {
-        match unsafe {
-            self.raw
-                .map_memory(*memory, offset, size, vk::MemoryMapFlags::empty())
-        } {
-            Ok(ptr) => Ok(ptr::NonNull::new(ptr.cast::<u8>())
-                .expect("Pointer to memory mapping must not be null")),
-            Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
-                Err(gpu_alloc::DeviceMapError::OutOfDeviceMemory)
-            }
-            Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
-                Err(gpu_alloc::DeviceMapError::OutOfHostMemory)
-            }
-            Err(vk::Result::ERROR_MEMORY_MAP_FAILED) => Err(gpu_alloc::DeviceMapError::MapFailed),
-            Err(err) => handle_unexpected(err),
-        }
-    }
-
-    unsafe fn unmap_memory(&self, memory: &mut vk::DeviceMemory) {
-        unsafe { self.raw.unmap_memory(*memory) };
-    }
-
-    unsafe fn invalidate_memory_ranges(
-        &self,
-        _ranges: &[gpu_alloc::MappedMemoryRange<'_, vk::DeviceMemory>],
-    ) -> Result<(), gpu_alloc::OutOfMemory> {
-        // should never be called
-        unimplemented!()
-    }
-
-    unsafe fn flush_memory_ranges(
-        &self,
-        _ranges: &[gpu_alloc::MappedMemoryRange<'_, vk::DeviceMemory>],
-    ) -> Result<(), gpu_alloc::OutOfMemory> {
-        // should never be called
-        unimplemented!()
-    }
-}
-
 impl
     gpu_descriptor::DescriptorDevice<vk::DescriptorSetLayout, vk::DescriptorPool, vk::DescriptorSet>
     for super::DeviceShared
@@ -862,7 +774,7 @@ impl super::Device {
     pub unsafe fn buffer_from_raw(vk_buffer: vk::Buffer) -> super::Buffer {
         super::Buffer {
             raw: vk_buffer,
-            block: None,
+            allocation: None,
         }
     }
 
@@ -1038,47 +950,35 @@ impl crate::Device for super::Device {
         };
         let req = unsafe { self.shared.raw.get_buffer_memory_requirements(raw) };
 
-        let mut alloc_usage = if desc
-            .usage
-            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE)
-        {
-            let mut flags = gpu_alloc::UsageFlags::HOST_ACCESS;
-            //TODO: find a way to use `crate::MemoryFlags::PREFER_COHERENT`
-            flags.set(
-                gpu_alloc::UsageFlags::DOWNLOAD,
-                desc.usage.contains(wgt::BufferUses::MAP_READ),
-            );
-            flags.set(
-                gpu_alloc::UsageFlags::UPLOAD,
-                desc.usage.contains(wgt::BufferUses::MAP_WRITE),
-            );
-            flags
-        } else {
-            gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS
-        };
-        alloc_usage.set(
-            gpu_alloc::UsageFlags::TRANSIENT,
-            desc.memory_flags.contains(crate::MemoryFlags::TRANSIENT),
-        );
+        let is_cpu_read = desc.usage.contains(wgt::BufferUses::MAP_READ);
+        let is_cpu_write = desc.usage.contains(wgt::BufferUses::MAP_WRITE);
 
-        let alignment_mask = req.alignment - 1;
+        let location = match (is_cpu_read, is_cpu_write) {
+            (true, true) => gpu_allocator::MemoryLocation::CpuToGpu,
+            (true, false) => gpu_allocator::MemoryLocation::GpuToCpu,
+            (false, true) => gpu_allocator::MemoryLocation::CpuToGpu,
+            (false, false) => gpu_allocator::MemoryLocation::GpuOnly,
+        };
 
-        let block = unsafe {
-            self.mem_allocator.lock().alloc(
-                &*self.shared,
-                gpu_alloc::Request {
-                    size: req.size,
-                    align_mask: alignment_mask,
-                    usage: alloc_usage,
-                    memory_types: req.memory_type_bits & self.valid_ash_memory_types,
-                },
-            )?
+        let alloc_desc = gpu_allocator::vulkan::AllocationCreateDesc {
+            name: desc.label.unwrap_or("Unlabeled buffer"),
+            // TODO: Previously used `req.memory_type_bits & self.valid_ash_memory_types` to set `memory_type_bits`,
+            // but according to the spec "Bit i is set if and only if the memory type i in the
+            // VkPhysicalDeviceMemoryProperties structure for the physical device is supported for the resource."
+            // https://registry.khronos.org/vulkan/specs/latest/man/html/VkMemoryRequirements.html
+            requirements: req,
+            location,
+            // It's a buffer, use linear
+            linear: true,
+            allocation_scheme: gpu_allocator::vulkan::AllocationScheme::GpuAllocatorManaged,
        };
 
+        let allocation = self.mem_allocator.lock().allocate(&alloc_desc)?;
+
         unsafe {
             self.shared
                 .raw
-                .bind_buffer_memory(raw, *block.memory(), block.offset())
+                .bind_buffer_memory(raw, allocation.memory(), allocation.offset())
                 .map_err(super::map_host_device_oom_and_ioca_err)?
         };
 
@@ -1086,20 +986,23 @@ impl crate::Device for super::Device {
             unsafe { self.shared.set_object_name(raw, label) };
         }
-        self.counters.buffer_memory.add(block.size() as isize);
+        self.counters.buffer_memory.add(allocation.size() as isize);
         self.counters.buffers.add(1);
 
         Ok(super::Buffer {
             raw,
-            block: Some(Mutex::new(block)),
+            allocation: Some(allocation),
         })
     }
 
     unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
         unsafe { self.shared.raw.destroy_buffer(buffer.raw, None) };
-        if let Some(block) = buffer.block {
-            let block = block.into_inner();
-            self.counters.buffer_memory.sub(block.size() as isize);
-            unsafe { self.mem_allocator.lock().dealloc(&*self.shared, block) };
+        if let Some(allocation) = buffer.allocation {
+            self.counters.buffer_memory.sub(allocation.size() as isize);
+            // TODO: Don't panic here
+            match self.mem_allocator.lock().free(allocation) {
+                Ok(()) => (),
+                Err(e) => panic!("Failed to destroy vulkan buffer, {e}"),
+            }
         }
 
         self.counters.buffers.sub(1);
@@ -1114,24 +1017,27 @@ impl crate::Device for super::Device {
         buffer: &super::Buffer,
         range: crate::MemoryRange,
     ) -> Result<crate::BufferMapping, crate::DeviceError> {
-        if let Some(ref block) = buffer.block {
-            let size = range.end - range.start;
-            let mut block = block.lock();
-            let ptr = unsafe { block.map(&*self.shared, range.start, size as usize)? };
-            let is_coherent = block
-                .props()
-                .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT);
+        if let Some(ref allocation) = buffer.allocation {
+            // TODO: Is this correct?
+            let ptr = allocation
+                .mapped_ptr()
+                .ok_or(crate::DeviceError::ResourceCreationFailed)?
+                .cast();
+            let is_coherent = allocation
+                .memory_properties()
+                .contains(vk::MemoryPropertyFlags::HOST_COHERENT);
             Ok(crate::BufferMapping { ptr, is_coherent })
         } else {
             crate::hal_usage_error("tried to map external buffer")
         }
     }
 
     unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
-        if let Some(ref block) = buffer.block {
-            unsafe { block.lock().unmap(&*self.shared) };
-        } else {
-            crate::hal_usage_error("tried to unmap external buffer")
-        }
+        // TODO: map and unmap_buffer are probably wrong
+        // if let Some(ref block) = buffer.allocation {
+        //     unsafe { block.lock().unmap(&*self.shared) };
+        // } else {
+        //     crate::hal_usage_error("tried to unmap external buffer")
+        // }
     }
 
     unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
@@ -1171,24 +1077,23 @@ impl crate::Device for super::Device {
     ) -> Result<super::Texture, crate::DeviceError> {
         let image = self.create_image_without_memory(desc, None)?;
 
-        let block = unsafe {
-            self.mem_allocator.lock().alloc(
-                &*self.shared,
-                gpu_alloc::Request {
-                    size: image.requirements.size,
-                    align_mask: image.requirements.alignment - 1,
-                    usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS,
-                    memory_types: image.requirements.memory_type_bits & self.valid_ash_memory_types,
-                },
-            )?
+        let alloc_desc = gpu_allocator::vulkan::AllocationCreateDesc {
+            name: desc.label.unwrap_or("Unlabeled texture"),
+            requirements: image.requirements,
+            location: gpu_allocator::MemoryLocation::GpuOnly,
+            // TODO: Maybe wrong
+            linear: false,
+            allocation_scheme: gpu_allocator::vulkan::AllocationScheme::GpuAllocatorManaged,
         };
 
+        let block = self.mem_allocator.lock().allocate(&alloc_desc)?;
+
         self.counters.texture_memory.add(block.size() as isize);
 
         unsafe {
             self.shared
                 .raw
-                .bind_image_memory(image.raw, *block.memory(), block.offset())
+                .bind_image_memory(image.raw, block.memory(), block.offset())
                 .map_err(super::map_host_device_oom_err)?
         };
 
@@ -1220,7 +1125,10 @@ impl crate::Device for super::Device {
         if let Some(block) = texture.block {
             self.counters.texture_memory.sub(block.size() as isize);
-            unsafe { self.mem_allocator.lock().dealloc(&*self.shared, block) };
+            match self.mem_allocator.lock().free(block) {
+                Ok(()) => (),
+                Err(e) => panic!("Failed to destroy vulkan texture: {}", e),
+            };
         }
 
         self.counters.textures.sub(1);
@@ -2490,19 +2398,30 @@ impl crate::Device for super::Device {
                 .map_err(super::map_host_device_oom_and_ioca_err)?;
             let req = self.shared.raw.get_buffer_memory_requirements(raw_buffer);
 
-            let block = self.mem_allocator.lock().alloc(
-                &*self.shared,
-                gpu_alloc::Request {
-                    size: req.size,
-                    align_mask: req.alignment - 1,
-                    usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS,
-                    memory_types: req.memory_type_bits & self.valid_ash_memory_types,
-                },
-            )?;
+            let allocation_desc = gpu_allocator::vulkan::AllocationCreateDesc {
+                name: desc.label.unwrap_or("rt_acceleration_structure"),
+                requirements: req,
+                location: gpu_allocator::MemoryLocation::GpuOnly,
+                // Backed by a plain buffer, so it is a linear resource
+                linear: true,
+                allocation_scheme: gpu_allocator::vulkan::AllocationScheme::GpuAllocatorManaged,
+            };
+
+            let allocation = self.mem_allocator.lock().allocate(&allocation_desc)?;
+
+            // let block = self.mem_allocator.lock().alloc(
+            //     &*self.shared,
+            //     gpu_alloc::Request {
+            //         size: req.size,
+            //         align_mask: req.alignment - 1,
+            //         usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS,
+            //         memory_types: req.memory_type_bits & self.valid_ash_memory_types,
+            //     },
+            // )?;
 
             self.shared
                 .raw
-                .bind_buffer_memory(raw_buffer, *block.memory(), block.offset())
+                .bind_buffer_memory(raw_buffer, allocation.memory(), allocation.offset())
                 .map_err(super::map_host_device_oom_and_ioca_err)?;
 
             if let Some(label) = desc.label {
@@ -2528,7 +2447,7 @@ impl crate::Device for super::Device {
             Ok(super::AccelerationStructure {
                 raw: raw_acceleration_structure,
                 buffer: raw_buffer,
-                block: Mutex::new(block),
+                allocation,
             })
         }
     }
@@ -2553,7 +2472,7 @@ impl crate::Device for super::Device {
                 .destroy_buffer(acceleration_structure.buffer, None);
             self.mem_allocator
                 .lock()
-                .dealloc(&*self.shared, acceleration_structure.block.into_inner());
+                .free(acceleration_structure.allocation);
         }
     }
 
@@ -2647,24 +2566,6 @@ impl super::DeviceShared {
     }
 }
 
-impl From<gpu_alloc::AllocationError> for crate::DeviceError {
-    fn from(error: gpu_alloc::AllocationError) -> Self {
-        use gpu_alloc::AllocationError as Ae;
-        match error {
-            Ae::OutOfDeviceMemory | Ae::OutOfHostMemory | Ae::TooManyObjects => Self::OutOfMemory,
-            Ae::NoCompatibleMemoryTypes => crate::hal_usage_error(error),
-        }
-    }
-}
-impl From<gpu_alloc::MapError> for crate::DeviceError {
-    fn from(error: gpu_alloc::MapError) -> Self {
-        use gpu_alloc::MapError as Me;
-        match error {
-            Me::OutOfDeviceMemory | Me::OutOfHostMemory | Me::MapFailed => Self::OutOfMemory,
-            Me::NonHostVisible | Me::AlreadyMapped => crate::hal_usage_error(error),
-        }
-    }
-}
 impl From<gpu_descriptor::AllocationError> for crate::DeviceError {
     fn from(error: gpu_descriptor::AllocationError) -> Self {
         use gpu_descriptor::AllocationError as Ae;
diff --git a/wgpu-hal/src/vulkan/mod.rs b/wgpu-hal/src/vulkan/mod.rs
index 4421d0a5ef..21d79538dd 100644
--- a/wgpu-hal/src/vulkan/mod.rs
+++ b/wgpu-hal/src/vulkan/mod.rs
@@ -666,7 +666,7 @@ impl Drop for DeviceShared {
 
 pub struct Device {
     shared: Arc<DeviceShared>,
-    mem_allocator: Mutex<gpu_alloc::GpuAllocator<vk::DeviceMemory>>,
+    mem_allocator: Mutex<gpu_allocator::vulkan::Allocator>,
     desc_allocator:
         Mutex<gpu_descriptor::DescriptorAllocator<vk::DescriptorPool, vk::DescriptorSet>>,
     valid_ash_memory_types: u32,
@@ -678,7 +678,8 @@ pub struct Device {
 
 impl Drop for Device {
     fn drop(&mut self) {
-        unsafe { self.mem_allocator.lock().cleanup(&*self.shared) };
+        // TODO: how to cleanup gpu-allocator's vulkan memory allocator?
+        // unsafe { self.mem_allocator.lock().cleanup(&*self.shared) };
         unsafe { self.desc_allocator.lock().cleanup(&*self.shared) };
     }
 }
@@ -776,7 +777,8 @@ impl Drop for Queue {
 #[derive(Debug)]
 pub struct Buffer {
     raw: vk::Buffer,
-    block: Option<Mutex<gpu_alloc::MemoryBlock<vk::DeviceMemory>>>,
+    // TODO: Do we need to wrap this in a mutex?
+    allocation: Option<gpu_allocator::vulkan::Allocation>,
 }
 
 impl crate::DynBuffer for Buffer {}
@@ -785,7 +787,8 @@ impl crate::DynBuffer for Buffer {}
 pub struct AccelerationStructure {
     raw: vk::AccelerationStructureKHR,
     buffer: vk::Buffer,
-    block: Mutex<gpu_alloc::MemoryBlock<vk::DeviceMemory>>,
+    // TODO: Do we need to wrap this in a mutex?
+    allocation: gpu_allocator::vulkan::Allocation,
 }
 
 impl crate::DynAccelerationStructure for AccelerationStructure {}
@@ -795,7 +798,7 @@ pub struct Texture {
     raw: vk::Image,
     drop_guard: Option<crate::DropGuard>,
     external_memory: Option<vk::DeviceMemory>,
-    block: Option<gpu_alloc::MemoryBlock<vk::DeviceMemory>>,
+    block: Option<gpu_allocator::vulkan::Allocation>,
     usage: wgt::TextureUses,
     format: wgt::TextureFormat,
     raw_flags: vk::ImageCreateFlags,
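
Notes (reviewer sketches, not part of the diff; all helper names below are made up for illustration).

The MemoryHints handling in adapter.rs reduces to the mapping below. The 8 MiB / 4 MiB block sizes and the halved host size are values this patch picks, not gpu-allocator defaults, and `wgt` is the usual wgpu_types alias used inside wgpu-hal:

    use gpu_allocator::AllocationSizes;

    // Hypothetical free function mirroring the match in adapter.rs above.
    fn allocation_sizes_for_hints(memory_hints: &wgt::MemoryHints) -> AllocationSizes {
        let mb = 1024 * 1024;
        match memory_hints {
            wgt::MemoryHints::Performance => AllocationSizes::default(),
            wgt::MemoryHints::MemoryUsage => AllocationSizes::new(8 * mb, 4 * mb),
            wgt::MemoryHints::Manual {
                suballocated_device_memory_block_size,
            } => {
                // The device block size comes straight from the hint; the host
                // block size is simply half of it in this patch.
                let device_size = suballocated_device_memory_block_size.start;
                AllocationSizes::new(device_size, device_size / 2)
            }
        }
    }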
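Creating the allocator itself is a single call to gpu_allocator::vulkan::Allocator::new with owned ash handles. A minimal sketch, assuming the handles are already in hand (in the patch they come from DeviceShared):

    use ash::vk;
    use gpu_allocator::vulkan::{Allocator, AllocatorCreateDesc};
    use gpu_allocator::{AllocationSizes, AllocatorDebugSettings};

    // Hypothetical helper; `instance`/`device` are the ash handles wgpu-hal
    // clones out of DeviceShared, `physical_device` is the adapter's raw handle.
    fn create_allocator(
        instance: &ash::Instance,
        device: &ash::Device,
        physical_device: vk::PhysicalDevice,
        allocation_sizes: AllocationSizes,
    ) -> Result<Allocator, gpu_allocator::AllocationError> {
        Allocator::new(&AllocatorCreateDesc {
            // gpu-allocator takes owned handles, hence the clones
            // (`shared.instance.raw.clone()` / `shared.raw.clone()` in the patch).
            instance: instance.clone(),
            device: device.clone(),
            physical_device,
            debug_settings: AllocatorDebugSettings::default(),
            // Left off in this patch; enabling it should follow whether
            // VK_KHR_buffer_device_address was actually turned on.
            buffer_device_address: false,
            allocation_sizes,
        })
    }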
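The buffer path in device.rs then becomes: query requirements, pick a MemoryLocation from the MAP_READ/MAP_WRITE usage, allocate, and bind at the returned offset. A sketch of that flow:

    use ash::vk;
    use gpu_allocator::vulkan::{Allocation, AllocationCreateDesc, AllocationScheme, Allocator};
    use gpu_allocator::MemoryLocation;

    // Hypothetical standalone version of the create_buffer path in device.rs.
    fn allocate_and_bind_buffer(
        device: &ash::Device,
        allocator: &mut Allocator,
        raw_buffer: vk::Buffer,
        map_read: bool,
        map_write: bool,
    ) -> Result<Allocation, Box<dyn std::error::Error>> {
        let requirements = unsafe { device.get_buffer_memory_requirements(raw_buffer) };

        // Same MAP_READ/MAP_WRITE -> MemoryLocation mapping as the patch:
        // read-back buffers go GpuToCpu, anything host-writable goes CpuToGpu,
        // everything else stays GpuOnly.
        let location = match (map_read, map_write) {
            (true, false) => MemoryLocation::GpuToCpu,
            (false, false) => MemoryLocation::GpuOnly,
            _ => MemoryLocation::CpuToGpu,
        };

        let allocation = allocator.allocate(&AllocationCreateDesc {
            name: "example buffer",
            requirements,
            location,
            // Buffers are linear resources.
            linear: true,
            allocation_scheme: AllocationScheme::GpuAllocatorManaged,
        })?;

        // Bind the sub-allocation at its offset, exactly like the patch does.
        unsafe {
            device.bind_buffer_memory(raw_buffer, allocation.memory(), allocation.offset())?
        };

        // Host-visible locations come back persistently mapped; this is what the
        // patched map_buffer leans on via mapped_ptr() instead of vkMapMemory.
        if let Some(ptr) = allocation.mapped_ptr() {
            let _ = ptr;
        }

        Ok(allocation)
    }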
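Freeing goes back through the allocator rather than vkFreeMemory. A sketch of the destroy path; error handling here is the open TODO about not panicking:

    use gpu_allocator::vulkan::{Allocation, Allocator};

    // Hypothetical mirror of the destroy_buffer / destroy_texture paths: the
    // Allocation is handed back to the allocator instead of calling vkFreeMemory.
    fn free_allocation(allocator: &mut Allocator, allocation: Allocation) {
        // The patch currently panics on failure (see the "Don't panic here" TODO);
        // logging is one non-fatal alternative.
        if let Err(e) = allocator.free(allocation) {
            log::error!("failed to free vulkan allocation: {e}");
        }
    }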