diff --git a/examples/src/bin/gl-interop.rs b/examples/src/bin/gl-interop.rs index 362901b1dc..a58a54fdb4 100644 --- a/examples/src/bin/gl-interop.rs +++ b/examples/src/bin/gl-interop.rs @@ -159,7 +159,7 @@ mod linux { let image = Arc::new( raw_image - .bind_memory([MemoryAlloc::new(image_memory).unwrap()]) + .bind_memory([MemoryAlloc::new(image_memory)]) .map_err(|(err, _, _)| err) .unwrap(), ); diff --git a/vulkano-macros/src/derive_buffer_contents.rs b/vulkano-macros/src/derive_buffer_contents.rs index 2910e27e57..9ab823f704 100644 --- a/vulkano-macros/src/derive_buffer_contents.rs +++ b/vulkano-macros/src/derive_buffer_contents.rs @@ -73,7 +73,7 @@ pub fn derive_buffer_contents(mut ast: DeriveInput) -> Result { const LAYOUT: ::#crate_ident::buffer::BufferContentsLayout = #layout; #[inline(always)] - unsafe fn from_ffi(data: *mut ::std::ffi::c_void, range: usize) -> *mut Self { + unsafe fn ptr_from_slice(slice: ::std::ptr::NonNull<[u8]>) -> *mut Self { #[repr(C)] union PtrRepr { components: PtrComponents, @@ -83,14 +83,11 @@ pub fn derive_buffer_contents(mut ast: DeriveInput) -> Result { #[derive(Clone, Copy)] #[repr(C)] struct PtrComponents { - data: *mut ::std::ffi::c_void, + data: *mut u8, len: usize, } - let alignment = ::LAYOUT - .alignment() - .as_devicesize() as usize; - ::std::debug_assert!(data as usize % alignment == 0); + let data = <*mut [u8]>::cast::(slice.as_ptr()); let head_size = ::LAYOUT .head_size() as usize; @@ -98,8 +95,8 @@ pub fn derive_buffer_contents(mut ast: DeriveInput) -> Result { .element_size() .unwrap_or(1) as usize; - ::std::debug_assert!(range >= head_size); - let tail_size = range - head_size; + ::std::debug_assert!(slice.len() >= head_size); + let tail_size = slice.len() - head_size; ::std::debug_assert!(tail_size % element_size == 0); let len = tail_size / element_size; diff --git a/vulkano/src/buffer/subbuffer.rs b/vulkano/src/buffer/subbuffer.rs index 06c0bc88d9..d157aeb4ad 100644 --- a/vulkano/src/buffer/subbuffer.rs +++ b/vulkano/src/buffer/subbuffer.rs @@ -16,7 +16,7 @@ use crate::{ memory::{ self, allocator::{align_down, align_up, DeviceLayout}, - is_aligned, DeviceAlignment, + is_aligned, DeviceAlignment, MappedMemoryRange, }, sync::HostAccessError, DeviceSize, NonZeroDeviceSize, ValidationError, @@ -25,7 +25,6 @@ use bytemuck::AnyBitPattern; use std::{ alloc::Layout, cmp, - ffi::c_void, hash::{Hash, Hasher}, marker::PhantomData, mem::{self, align_of, size_of}, @@ -119,16 +118,23 @@ impl Subbuffer { } } - /// Returns the mapped pointer to the start of the subbuffer if the memory is host-visible, - /// otherwise returns [`None`]. - pub fn mapped_ptr(&self) -> Option> { + /// Returns the mapped pointer to the range of memory of `self`. + /// + /// The subbuffer must fall within the range of the memory mapping given to + /// [`DeviceMemory::map`]. + /// + /// See [`MappingState::slice`] for the safety invariants of the returned pointer. + /// + /// [`DeviceMemory::map`]: crate::memory::DeviceMemory::map + /// [`MappingState::slice`]: crate::memory::MappingState::slice + pub fn mapped_slice(&self) -> Result, HostAccessError> { match self.buffer().memory() { - BufferMemory::Normal(a) => a.mapped_ptr().map(|ptr| { - // SAFETY: The original address came from the Vulkan implementation, and allocation - // sizes are guaranteed to not exceed `isize::MAX` when there's a mapped pointer, - // so the offset better be in range. 
- unsafe { NonNull::new_unchecked(ptr.as_ptr().add(self.offset as usize)) } - }), + BufferMemory::Normal(a) => { + let opt = a.mapped_slice(self.range()); + + // SAFETY: `self.range()` is in bounds of the allocation. + unsafe { opt.unwrap_unchecked() } + } BufferMemory::Sparse => unreachable!(), } } @@ -327,27 +333,33 @@ where .map_err(HostAccessError::AccessConflict)?; unsafe { state.cpu_read_lock(range.clone()) }; + let mapped_slice = self.mapped_slice()?; + if allocation.atom_size().is_some() { + let memory_range = MappedMemoryRange { + offset: range.start, + size: range.end - range.start, + _ne: crate::NonExhaustive(()), + }; + // If there are other read locks being held at this point, they also called - // `invalidate_range` when locking. The GPU can't write data while the CPU holds a read - // lock, so there will be no new data and this call will do nothing. + // `invalidate_range_unchecked` when locking. The device can't write data while the + // host holds a read lock, so there will be no new data and this call will do nothing. // TODO: probably still more efficient to call it only if we're the first to acquire a - // read lock, but the number of CPU locks isn't currently tracked anywhere. - unsafe { - allocation - .invalidate_range(range.clone()) - .map_err(HostAccessError::Invalidate)? - }; + // read lock, but the number of host locks isn't currently tracked anywhere. + // + // SAFETY: + // - `self.mapped_slice()` didn't return an error, which means that the subbuffer falls + // within the mapped range of the memory. + // - We ensure that memory mappings are always aligned to the non-coherent atom size + // for non-host-coherent memory, therefore the subbuffer's range aligned to the + // non-coherent atom size must fall within the mapped range of the memory. + unsafe { allocation.invalidate_range_unchecked(memory_range) } + .map_err(HostAccessError::Invalidate)?; } - let mapped_ptr = self.mapped_ptr().ok_or_else(|| { - HostAccessError::ValidationError(Box::new(ValidationError { - problem: "the memory of this subbuffer is not host-visible".into(), - ..Default::default() - })) - })?; // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`. - let data = unsafe { &*T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) }; + let data = unsafe { &*T::ptr_from_slice(mapped_slice) }; Ok(BufferReadGuard { subbuffer: self, @@ -407,22 +419,27 @@ where .map_err(HostAccessError::AccessConflict)?; unsafe { state.cpu_write_lock(range.clone()) }; + let mapped_slice = self.mapped_slice()?; + if allocation.atom_size().is_some() { - unsafe { - allocation - .invalidate_range(range.clone()) - .map_err(HostAccessError::Invalidate)? + let memory_range = MappedMemoryRange { + offset: range.start, + size: range.end - range.start, + _ne: crate::NonExhaustive(()), }; + + // SAFETY: + // - `self.mapped_slice()` didn't return an error, which means that the subbuffer falls + // within the mapped range of the memory. + // - We ensure that memory mappings are always aligned to the non-coherent atom size + // for non-host-coherent memory, therefore the subbuffer's range aligned to the + // non-coherent atom size must fall within the mapped range of the memory. 
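For orientation, a minimal sketch of how the reworked host-access path is exercised from user code; the `Subbuffer<[u32]>` is assumed to live in host-visible memory that the allocator has already mapped, and error handling is simplified:

```rust
use vulkano::{buffer::Subbuffer, sync::HostAccessError};

// Doubles every element of a host-visible subbuffer.
fn double_in_place(subbuffer: &Subbuffer<[u32]>) -> Result<(), HostAccessError> {
    {
        // `read` acquires the host read lock and, for non-coherent memory,
        // invalidates the mapped range before handing out `&[u32]`.
        let guard = subbuffer.read()?;
        println!("before: {:?}", &*guard);
    } // the read lock is released here

    // `write` acquires the host write lock; for non-coherent memory the flush
    // happens when the guard is dropped.
    let mut guard = subbuffer.write()?;
    for value in guard.iter_mut() {
        *value *= 2;
    }

    Ok(())
}
```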
+ unsafe { allocation.invalidate_range_unchecked(memory_range) } + .map_err(HostAccessError::Invalidate)?; } - let mapped_ptr = self.mapped_ptr().ok_or_else(|| { - HostAccessError::ValidationError(Box::new(ValidationError { - problem: "the memory of this subbuffer is not host-visible".into(), - ..Default::default() - })) - })?; // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`. - let data = unsafe { &mut *T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) }; + let data = unsafe { &mut *T::ptr_from_slice(mapped_slice) }; Ok(BufferWriteGuard { subbuffer: self, @@ -661,7 +678,13 @@ impl Drop for BufferWriteGuard<'_, T> { }; if allocation.atom_size().is_some() && !thread::panicking() { - unsafe { allocation.flush_range(self.range.clone()).unwrap() }; + let memory_range = MappedMemoryRange { + offset: self.range.start, + size: self.range.end - self.range.start, + _ne: crate::NonExhaustive(()), + }; + + unsafe { allocation.flush_range_unchecked(memory_range).unwrap() }; } let mut state = self.subbuffer.buffer().state(); @@ -777,24 +800,24 @@ impl DerefMut for BufferWriteGuard<'_, T> { // - `LAYOUT` must be the correct layout for the type, which also means the type must either be // sized or if it's unsized then its metadata must be the same as that of a slice. Implementing // `BufferContents` for any other kind of DST is instantaneous horrifically undefined behavior. -// - `from_ffi` must create a pointer with the same address as the `data` parameter that is passed -// in. The pointer is expected to be aligned properly already. -// - `from_ffi` must create a pointer that is expected to be valid for reads (and potentially -// writes) for exactly `range` bytes. The `data` and `range` are expected to be valid for the +// - `ptr_from_slice` must create a pointer with the same address as the `slice` parameter that is +// passed in. The pointer is expected to be aligned properly already. +// - `ptr_from_slice` must create a pointer that is expected to be valid for reads (and potentially +// writes) for exactly `slice.len()` bytes. The `slice.len()` is expected to be valid for the // `LAYOUT`. pub unsafe trait BufferContents: Send + Sync + 'static { /// The layout of the contents. const LAYOUT: BufferContentsLayout; - /// Creates a pointer to `Self` from a pointer to the start of the data and a range in bytes. + /// Creates a pointer to `Self` from a pointer to a range of mapped memory. /// /// # Safety /// - /// - If `Self` is sized, then `range` must match the size exactly. - /// - If `Self` is unsized, then the `range` minus the size of the head (sized part) of the DST - /// must be evenly divisible by the size of the element type. + /// - If `Self` is sized, then `slice.len()` must match the size exactly. + /// - If `Self` is unsized, then `slice.len()` minus the size of the head (sized part) of the + /// DST must be evenly divisible by the size of the element type. 
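Since the generated `ptr_from_slice` rebuilds a fat pointer from its raw components, here is a self-contained sketch of the same technique for a hypothetical DST (`Demo` is not a vulkano type); it mirrors the union-based layout shown above and assumes, as the derive does, that the metadata of the DST is the element count of its slice tail:

```rust
use std::ptr::NonNull;

// A hypothetical DST with a sized head and an unsized tail, analogous to what
// `#[derive(BufferContents)]` supports.
#[repr(C)]
struct Demo {
    head: u64,
    tail: [u32],
}

/// Rebuilds a `*mut Demo` fat pointer from a pointer to raw mapped bytes.
unsafe fn demo_ptr_from_slice(slice: NonNull<[u8]>) -> *mut Demo {
    #[repr(C)]
    union PtrRepr {
        components: PtrComponents,
        ptr: *mut Demo,
    }

    #[derive(Clone, Copy)]
    #[repr(C)]
    struct PtrComponents {
        data: *mut u8,
        len: usize,
    }

    let data = slice.as_ptr().cast::<u8>();

    let head_size = std::mem::size_of::<u64>();
    let element_size = std::mem::size_of::<u32>();
    debug_assert!(slice.len() >= head_size);
    let tail_size = slice.len() - head_size;
    debug_assert!(tail_size % element_size == 0);
    let len = tail_size / element_size;

    // SAFETY: assembles a slice-like fat pointer from its (data, len) parts; this
    // assumes the pointer metadata of `Demo` is the element count of `tail`.
    unsafe {
        PtrRepr {
            components: PtrComponents { data, len },
        }
        .ptr
    }
}
```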
#[doc(hidden)] - unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self; + unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self; } unsafe impl BufferContents for T @@ -809,11 +832,10 @@ where }; #[inline(always)] - unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self { - debug_assert!(range == size_of::()); - debug_assert!(data as usize % align_of::() == 0); + unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self { + debug_assert!(slice.len() == size_of::()); - data.cast() + <*mut [u8]>::cast::(slice.as_ptr()) } } @@ -827,12 +849,12 @@ where }); #[inline(always)] - unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self { - debug_assert!(range % size_of::() == 0); - debug_assert!(data as usize % align_of::() == 0); - let len = range / size_of::(); + unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self { + let data = <*mut [u8]>::cast::(slice.as_ptr()); + let len = slice.len() / size_of::(); + debug_assert!(slice.len() % size_of::() == 0); - ptr::slice_from_raw_parts_mut(data.cast(), len) + ptr::slice_from_raw_parts_mut(data, len) } } diff --git a/vulkano/src/memory/allocator/mod.rs b/vulkano/src/memory/allocator/mod.rs index 3e071915c7..cbead90f7c 100644 --- a/vulkano/src/memory/allocator/mod.rs +++ b/vulkano/src/memory/allocator/mod.rs @@ -229,7 +229,7 @@ pub use self::{ }; use super::{ DedicatedAllocation, DeviceAlignment, DeviceMemory, ExternalMemoryHandleTypes, - MemoryAllocateFlags, MemoryAllocateInfo, MemoryProperties, MemoryPropertyFlags, + MemoryAllocateFlags, MemoryAllocateInfo, MemoryMapInfo, MemoryProperties, MemoryPropertyFlags, MemoryRequirements, MemoryType, }; use crate::{ @@ -381,7 +381,7 @@ pub unsafe trait MemoryAllocator: DeviceOwned { allocation_size: DeviceSize, dedicated_allocation: Option>, export_handle_types: ExternalMemoryHandleTypes, - ) -> Result; + ) -> Result; } /// Describes what memory property flags are required, preferred and not preferred when picking a @@ -812,6 +812,13 @@ impl StandardMemoryAllocator { /// /// See also [the `MemoryAllocator` implementation]. /// +/// # Mapping behavior +/// +/// Every time a new `DeviceMemory` block is allocated, it is mapped in full automatically as long +/// as it resides in host-visible memory. It remains mapped until it is dropped, which only happens +/// if the allocator is dropped. In other words, all eligible blocks are persistently mapped, so +/// you don't need to worry about whether or not your host-visible allocations are host-accessible. 
+/// /// # `DeviceMemory` allocation /// /// If an allocation is created with the [`MemoryAllocatePreference::Unknown`] option, and the @@ -1176,7 +1183,7 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { .property_flags .contains(ash::vk::MemoryPropertyFlags::LAZILY_ALLOCATED) { - return unsafe { + let allocation = unsafe { self.allocate_dedicated_unchecked( memory_type_index, create_info.layout.size(), @@ -1187,7 +1194,9 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { ExternalMemoryHandleTypes::empty() }, ) - }; + }?; + + return Ok(allocation); } unsafe { self.allocate_from_type_unchecked(memory_type_index, create_info, false) } @@ -1305,17 +1314,16 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { let mut i = 0; loop { - let allocate_info = MemoryAllocateInfo { - allocation_size: block_size >> i, + let allocation_size = block_size >> i; + + match self.allocate_dedicated_unchecked( memory_type_index, + allocation_size, + None, export_handle_types, - dedicated_allocation: None, - flags: self.flags, - ..Default::default() - }; - match DeviceMemory::allocate_unchecked(self.device.clone(), allocate_info, None) { - Ok(device_memory) => { - break S::new(MemoryAlloc::new(device_memory)?); + ) { + Ok(allocation) => { + break S::new(allocation); } // Retry up to 3 times, halving the allocation size each time. Err(VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory) if i < 3 => { @@ -1459,6 +1467,7 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { dedicated_allocation, export_handle_types, ) + .map_err(MemoryAllocatorError::VulkanError) } else { if size > block_size / 2 { prefers_dedicated_allocation = true; @@ -1476,6 +1485,7 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { dedicated_allocation, export_handle_types, ) + .map_err(MemoryAllocatorError::VulkanError) // Fall back to suballocation. .or_else(|err| { if size <= block_size { @@ -1504,6 +1514,7 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { dedicated_allocation, export_handle_types, ) + .map_err(MemoryAllocatorError::VulkanError) }) } } @@ -1515,12 +1526,14 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { self.allocate_from_type_unchecked(memory_type_index, create_info.clone(), true) } - MemoryAllocatePreference::AlwaysAllocate => self.allocate_dedicated_unchecked( - memory_type_index, - size, - dedicated_allocation, - export_handle_types, - ), + MemoryAllocatePreference::AlwaysAllocate => self + .allocate_dedicated_unchecked( + memory_type_index, + size, + dedicated_allocation, + export_handle_types, + ) + .map_err(MemoryAllocatorError::VulkanError), }; match res { @@ -1546,20 +1559,40 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { allocation_size: DeviceSize, dedicated_allocation: Option>, export_handle_types: ExternalMemoryHandleTypes, - ) -> Result { - let allocate_info = MemoryAllocateInfo { - allocation_size, - memory_type_index, - dedicated_allocation, - export_handle_types, - flags: self.flags, - ..Default::default() - }; - let mut allocation = MemoryAlloc::new(DeviceMemory::allocate_unchecked( + ) -> Result { + // SAFETY: The caller must uphold the safety contract. 
+ let mut memory = DeviceMemory::allocate_unchecked( self.device.clone(), - allocate_info, + MemoryAllocateInfo { + allocation_size, + memory_type_index, + dedicated_allocation, + export_handle_types, + flags: self.flags, + ..Default::default() + }, None, - )?)?; + )?; + + if self.pools[memory_type_index as usize] + .memory_type + .property_flags + .intersects(ash::vk::MemoryPropertyFlags::HOST_VISIBLE) + { + // SAFETY: + // - We checked that the memory is host-visible. + // - The memory can't be mapped already, because we just allocated it. + // - Mapping the whole range is always valid. + memory.map_unchecked(MemoryMapInfo { + offset: 0, + size: memory.allocation_size(), + _ne: crate::NonExhaustive(()), + })?; + } + + let mut allocation = MemoryAlloc::new(memory); + + // SAFETY: The memory is freshly allocated. allocation.set_allocation_type(self.allocation_type); Ok(allocation) @@ -1628,7 +1661,7 @@ unsafe impl MemoryAllocator for Arc { allocation_size: DeviceSize, dedicated_allocation: Option>, export_handle_types: ExternalMemoryHandleTypes, - ) -> Result { + ) -> Result { (**self).allocate_dedicated_unchecked( memory_type_index, allocation_size, diff --git a/vulkano/src/memory/allocator/suballocator.rs b/vulkano/src/memory/allocator/suballocator.rs index 98437b8c5c..73906ba2f1 100644 --- a/vulkano/src/memory/allocator/suballocator.rs +++ b/vulkano/src/memory/allocator/suballocator.rs @@ -14,14 +14,13 @@ //! [the parent module]: super use self::host::SlotId; -use super::{ - align_down, align_up, array_vec::ArrayVec, DeviceAlignment, DeviceLayout, MemoryAllocatorError, -}; +use super::{align_down, align_up, array_vec::ArrayVec, DeviceAlignment, DeviceLayout}; use crate::{ device::{Device, DeviceOwned}, image::ImageTiling, - memory::{is_aligned, DeviceMemory, MemoryPropertyFlags}, - DeviceSize, NonZeroDeviceSize, VulkanError, VulkanObject, + memory::{self, is_aligned, DeviceMemory, MappedMemoryRange, MemoryPropertyFlags}, + sync::HostAccessError, + DeviceSize, NonZeroDeviceSize, Validated, ValidationError, VulkanError, }; use crossbeam_queue::ArrayQueue; use parking_lot::Mutex; @@ -29,12 +28,10 @@ use std::{ cell::Cell, cmp, error::Error, - ffi::c_void, fmt::{self, Display}, - mem::{self, ManuallyDrop, MaybeUninit}, - ops::Range, + mem::{self, ManuallyDrop}, + ops::RangeBounds, ptr::{self, NonNull}, - slice, sync::{ atomic::{AtomicU64, Ordering}, Arc, @@ -57,8 +54,6 @@ pub struct MemoryAlloc { size: DeviceSize, // Needed when binding resources to the allocation in order to avoid aliasing memory. allocation_type: AllocationType, - // Mapped pointer to the start of the allocation or `None` is the memory is not host-visible. - mapped_ptr: Option>, // Used by the suballocators to align allocations to the non-coherent atom size when the memory // type is host-visible but not host-coherent. This will be `None` for any other memory type. atom_size: Option, @@ -86,17 +81,10 @@ enum AllocParent { Dedicated(DeviceMemory), } -// It is safe to share `mapped_ptr` between threads because the user would have to use unsafe code -// themself to get UB in the first place. -unsafe impl Send for MemoryAlloc {} -unsafe impl Sync for MemoryAlloc {} - impl MemoryAlloc { /// Creates a new `MemoryAlloc`. - /// - /// The memory is mapped automatically if it's host-visible. #[inline] - pub fn new(device_memory: DeviceMemory) -> Result { + pub fn new(device_memory: DeviceMemory) -> Self { // Sanity check: this would lead to UB when suballocating. 
assert!(device_memory.allocation_size() <= DeviceLayout::MAX_SIZE); @@ -107,47 +95,21 @@ impl MemoryAlloc { [memory_type_index as usize] .property_flags; - let mapped_ptr = if property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE) { - // Sanity check: this would lead to UB when calculating pointer offsets. - assert!(device_memory.allocation_size() <= isize::MAX.try_into().unwrap()); - - let fns = device.fns(); - let mut output = MaybeUninit::uninit(); - // This is always valid because we are mapping the whole range. - unsafe { - (fns.v1_0.map_memory)( - device.handle(), - device_memory.handle(), - 0, - ash::vk::WHOLE_SIZE, - ash::vk::MemoryMapFlags::empty(), - output.as_mut_ptr(), - ) - .result() - .map_err(VulkanError::from)?; - - Some(NonNull::new(output.assume_init()).unwrap()) - } - } else { - None - }; - let atom_size = (property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE) && !property_flags.intersects(MemoryPropertyFlags::HOST_COHERENT)) .then_some(physical_device.properties().non_coherent_atom_size); - Ok(MemoryAlloc { + MemoryAlloc { offset: 0, size: device_memory.allocation_size(), allocation_type: AllocationType::Unknown, - mapped_ptr, atom_size, parent: if device_memory.is_dedicated() { AllocParent::Dedicated(device_memory) } else { AllocParent::Root(Arc::new(device_memory)) }, - }) + } } /// Returns the offset of the allocation within the [`DeviceMemory`] block. @@ -168,37 +130,31 @@ impl MemoryAlloc { self.allocation_type } - /// Returns the mapped pointer to the start of the allocation if the memory is host-visible, - /// otherwise returns [`None`]. - #[inline] - pub fn mapped_ptr(&self) -> Option> { - self.mapped_ptr - } - - /// Returns a mapped slice to the data within the allocation if the memory is host-visible, - /// otherwise returns [`None`]. + /// Returns the mapped pointer to a range of the allocation, or returns [`None`] if out of + /// bounds. /// - /// # Safety + /// `range` is specified in bytes relative to the beginning of `self` and must fall within the + /// range of the memory mapping given to [`DeviceMemory::map`]. /// - /// - While the returned slice exists, there must be no operations pending or executing in a - /// GPU queue that write to the same memory. - #[inline] - pub unsafe fn mapped_slice(&self) -> Option<&[u8]> { - self.mapped_ptr - .map(|ptr| slice::from_raw_parts(ptr.as_ptr().cast(), self.size as usize)) - } - - /// Returns a mapped mutable slice to the data within the allocation if the memory is - /// host-visible, otherwise returns [`None`]. + /// See [`MappingState::slice`] for the safety invariants of the returned pointer. /// - /// # Safety - /// - /// - While the returned slice exists, there must be no operations pending or executing in a - /// GPU queue that access the same memory.
+ /// [`MappingState::slice`]: crate::memory::MappingState::slice #[inline] - pub unsafe fn mapped_slice_mut(&mut self) -> Option<&mut [u8]> { - self.mapped_ptr - .map(|ptr| slice::from_raw_parts_mut(ptr.as_ptr().cast(), self.size as usize)) + pub fn mapped_slice( + &self, + range: impl RangeBounds, + ) -> Option, HostAccessError>> { + let mut range = memory::range(range, ..self.size())?; + range.start += self.offset(); + range.end += self.offset(); + + let res = if let Some(state) = self.device_memory().mapping_state() { + state.slice(range).ok_or(HostAccessError::OutOfMappedRange) + } else { + Err(HostAccessError::NotHostMapped) + }; + + Some(res) } pub(crate) fn atom_size(&self) -> Option { @@ -207,144 +163,124 @@ impl MemoryAlloc { /// Invalidates the host (CPU) cache for a range of the allocation. /// - /// You must call this method before the memory is read by the host, if the device previously - /// wrote to the memory. It has no effect if the memory is not mapped or if the memory is - /// [host-coherent]. - /// - /// `range` is specified in bytes relative to the start of the allocation. The start and end of - /// `range` must be a multiple of the [`non_coherent_atom_size`] device property, but - /// `range.end` can also equal to `self.size()`. + /// If the device memory is not [host-coherent], you must call this function before the memory + /// is read by the host, if the device previously wrote to the memory. It has no effect if the + /// memory is host-coherent. /// /// # Safety /// - /// - If there are memory writes by the GPU that have not been propagated into the CPU cache, - /// then there must not be any references in Rust code to the specified `range` of the memory. - /// - /// # Panics - /// - /// - Panics if `range` is empty. - /// - Panics if `range.end` exceeds `self.size`. - /// - Panics if `range.start` or `range.end` are not a multiple of the `non_coherent_atom_size`. + /// - If there are memory writes by the device that have not been propagated into the host + /// cache, then there must not be any references in Rust code to any portion of the specified + /// `memory_range`. /// /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size #[inline] - pub unsafe fn invalidate_range(&self, range: Range) -> Result<(), VulkanError> { - // VUID-VkMappedMemoryRange-memory-00684 - if let Some(atom_size) = self.atom_size { - let range = self.create_memory_range(range, atom_size); - let device = self.device(); - let fns = device.fns(); - (fns.v1_0.invalidate_mapped_memory_ranges)(device.handle(), 1, &range) - .result() - .map_err(VulkanError::from)?; - } else { - self.debug_validate_memory_range(&range); - } + pub unsafe fn invalidate_range( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), Validated> { + self.validate_memory_range(&memory_range)?; - Ok(()) + self.device_memory() + .invalidate_range(self.create_memory_range(memory_range)) } - /// Flushes the host (CPU) cache for a range of the allocation. - /// - /// You must call this method after writing to the memory from the host, if the device is going - /// to read the memory. It has no effect if the memory is not mapped or if the memory is - /// [host-coherent]. 
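To illustrate the new accessor, a sketch of reading an allocation back on the host; it assumes the `MemoryAlloc` comes from host-visible memory that the allocator mapped in full, and reads the whole allocation so the range stays aligned to the non-coherent atom size:

```rust
use vulkano::{
    memory::{allocator::MemoryAlloc, MappedMemoryRange},
    sync::HostAccessError,
};

/// Copies the allocation's bytes to the host after the device has finished writing them.
///
/// # Safety
///
/// No device work may still be accessing the allocation, and no other Rust
/// references to its memory may exist.
unsafe fn read_back(alloc: &MemoryAlloc) -> Result<Vec<u8>, HostAccessError> {
    // `None` would mean the requested range is out of bounds of the allocation.
    let ptr = alloc.mapped_slice(..).expect("range is in bounds")?;

    // No-op for host-coherent memory; otherwise the host caches must be
    // invalidated before reading data the device wrote.
    unsafe {
        alloc
            .invalidate_range_unchecked(MappedMemoryRange {
                offset: 0,
                size: alloc.size(),
                ..Default::default()
            })
            .map_err(HostAccessError::Invalidate)?;
    }

    // SAFETY: upheld by the caller, see `MappingState::slice`.
    Ok(unsafe { ptr.as_ref() }.to_vec())
}
```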
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] + #[inline] + pub unsafe fn invalidate_range_unchecked( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), VulkanError> { + self.device_memory() + .invalidate_range_unchecked(self.create_memory_range(memory_range)) + } + + /// Flushes the host cache for a range of the allocation. /// - /// `range` is specified in bytes relative to the start of the allocation. The start and end of - /// `range` must be a multiple of the [`non_coherent_atom_size`] device property, but - /// `range.end` can also equal to `self.size()`. + /// If the device memory is not [host-coherent], you must call this function after writing to + /// the memory, if the device is going to read the memory. It has no effect if the memory is + /// host-coherent. /// /// # Safety /// - /// - There must be no operations pending or executing in a GPU queue that access the specified - /// `range` of the memory. - /// - /// # Panics - /// - /// - Panics if `range` is empty. - /// - Panics if `range.end` exceeds `self.size`. - /// - Panics if `range.start` or `range.end` are not a multiple of the `non_coherent_atom_size`. + /// - There must be no operations pending or executing in a device queue, that access the + /// specified `memory_range`. /// /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size #[inline] - pub unsafe fn flush_range(&self, range: Range) -> Result<(), VulkanError> { - // VUID-VkMappedMemoryRange-memory-00684 - if let Some(atom_size) = self.atom_size { - let range = self.create_memory_range(range, atom_size); - let device = self.device(); - let fns = device.fns(); - (fns.v1_0.flush_mapped_memory_ranges)(device.handle(), 1, &range) - .result() - .map_err(VulkanError::from)?; - } else { - self.debug_validate_memory_range(&range); - } + pub unsafe fn flush_range( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), Validated> { + self.validate_memory_range(&memory_range)?; - Ok(()) + self.device_memory() + .flush_range(self.create_memory_range(memory_range)) } - fn create_memory_range( + #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] + #[inline] + pub unsafe fn flush_range_unchecked( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), VulkanError> { + self.device_memory() + .flush_range_unchecked(self.create_memory_range(memory_range)) + } + + fn validate_memory_range( &self, - range: Range, - atom_size: DeviceAlignment, - ) -> ash::vk::MappedMemoryRange { - assert!(!range.is_empty() && range.end <= self.size); + memory_range: &MappedMemoryRange, + ) -> Result<(), Box> { + let &MappedMemoryRange { + offset, + size, + _ne: _, + } = memory_range; - // VUID-VkMappedMemoryRange-size-00685 - // Guaranteed because we always map the entire `DeviceMemory`. + if !(offset <= self.size() && size <= self.size() - offset) { + return Err(Box::new(ValidationError { + context: "memory_range".into(), + problem: "is not contained within the allocation".into(), + ..Default::default() + })); + } - // VUID-VkMappedMemoryRange-offset-00687 - // VUID-VkMappedMemoryRange-size-01390 - assert!( - is_aligned(range.start, atom_size) - && (is_aligned(range.end, atom_size) || range.end == self.size) - ); + Ok(()) + } - // VUID-VkMappedMemoryRange-offset-00687 - // Guaranteed as long as `range.start` is aligned because the suballocators always align - // `self.offset` to the non-coherent atom size for non-coherent host-visible memory. 
- let offset = self.offset + range.start; + fn create_memory_range(&self, memory_range: MappedMemoryRange) -> MappedMemoryRange { + let MappedMemoryRange { + mut offset, + mut size, + _ne: _, + } = memory_range; - let mut size = range.end - range.start; - let device_memory = self.device_memory(); + let memory = self.device_memory(); + + offset += self.offset(); // VUID-VkMappedMemoryRange-size-01390 - if offset + size < device_memory.allocation_size() { - // We align the size in case `range.end == self.size`. We can do this without aliasing - // other allocations because the suballocators ensure that all allocations are aligned - // to the atom size for non-coherent host-visible memory. - size = align_up(size, atom_size); + if memory_range.offset + size == self.size() { + // We can align the end of the range like this without aliasing other allocations, + // because the suballocators ensure that all allocations are aligned to the atom size + // for non-host-coherent host-visible memory. + let end = cmp::min( + align_up(offset + size, memory.atom_size()), + memory.allocation_size(), + ); + size = end - offset; } - ash::vk::MappedMemoryRange { - memory: device_memory.handle(), + MappedMemoryRange { offset, size, - ..Default::default() + _ne: crate::NonExhaustive(()), } } - /// This exists because even if no cache control is required, the parameters should still be - /// valid, otherwise you might have bugs in your code forever just because your memory happens - /// to be host-coherent. - fn debug_validate_memory_range(&self, range: &Range) { - debug_assert!(!range.is_empty() && range.end <= self.size); - - let atom_size = self - .device() - .physical_device() - .properties() - .non_coherent_atom_size; - debug_assert!( - is_aligned(range.start, atom_size) - && (is_aligned(range.end, atom_size) || range.end == self.size), - "attempted to invalidate or flush a memory range that is not aligned to the \ - non-coherent atom size", - ); - } - /// Returns the underlying block of [`DeviceMemory`]. #[inline] pub fn device_memory(&self) -> &DeviceMemory { @@ -490,12 +426,6 @@ impl MemoryAlloc { /// [`shift`]: Self::shift #[inline] pub unsafe fn set_offset(&mut self, new_offset: DeviceSize) { - if let Some(ptr) = self.mapped_ptr.as_mut() { - *ptr = NonNull::new_unchecked( - ptr.as_ptr() - .offset(new_offset as isize - self.offset as isize), - ); - } self.offset = new_offset; } @@ -642,8 +572,7 @@ unsafe impl DeviceOwned for MemoryAlloc { /// }, /// ) /// .unwrap(), -/// ) -/// .unwrap(); +/// ); /// /// // You can now feed `region` into any suballocator. /// ``` @@ -1159,26 +1088,10 @@ unsafe impl Suballocator for Arc { // constrained by the remaining size of the region. self.free_size.fetch_sub(size, Ordering::Release); - let mapped_ptr = self.region.mapped_ptr.map(|ptr| { - // This can't overflow because offsets in the free-list are confined - // to the range [region.offset, region.offset + region.size). - let relative_offset = offset - self.region.offset; - - // SAFETY: Allocation sizes are guaranteed to not exceed - // `isize::MAX` when they have a mapped pointer, and the original - // pointer was handed to us from the Vulkan implementation, - // so the offset better be in range. - let ptr = ptr.as_ptr().offset(relative_offset as isize); - - // SAFETY: Same as the previous. 
- NonNull::new_unchecked(ptr) - }); - return Ok(MemoryAlloc { offset, size, allocation_type, - mapped_ptr, atom_size: self.region.atom_size, parent: AllocParent::FreeList { allocator: self.clone(), @@ -1773,25 +1686,10 @@ unsafe impl Suballocator for Arc { // constrained by the remaining size of the region. self.free_size.fetch_sub(size, Ordering::Release); - let mapped_ptr = self.region.mapped_ptr.map(|ptr| { - // This can't overflow because offsets in the free-list are confined to the - // range [region.offset, region.offset + region.size). - let relative_offset = offset - self.region.offset; - - // SAFETY: Allocation sizes are guaranteed to not exceed `isize::MAX` when - // they have a mapped pointer, and the original pointer was handed to us - // from the Vulkan implementation, so the offset better be in range. - let ptr = unsafe { ptr.as_ptr().offset(relative_offset as isize) }; - - // SAFETY: Same as the previous. - unsafe { NonNull::new_unchecked(ptr) } - }); - return Ok(MemoryAlloc { offset, size: layout.size(), allocation_type, - mapped_ptr, atom_size: self.region.atom_size, parent: AllocParent::Buddy { allocator: self.clone(), @@ -2176,21 +2074,10 @@ impl PoolAllocatorInner { }; } - let mapped_ptr = self.region.mapped_ptr.map(|ptr| { - // SAFETY: Allocation sizes are guaranteed to not exceed `isize::MAX` when they have a - // mapped pointer, and the original pointer was handed to us from the Vulkan - // implementation, so the offset better be in range. - let ptr = unsafe { ptr.as_ptr().offset(relative_offset as isize) }; - - // SAFETY: Same as the previous. - unsafe { NonNull::new_unchecked(ptr) } - }); - Ok(MemoryAlloc { offset, size, allocation_type: self.region.allocation_type, - mapped_ptr, atom_size: self.region.atom_size, parent: AllocParent::Pool { allocator: self, @@ -2453,21 +2340,10 @@ unsafe impl Suballocator for Arc { Ordering::Relaxed, ) { Ok(_) => { - let mapped_ptr = self.region.mapped_ptr.map(|ptr| { - // SAFETY: Allocation sizes are guaranteed to not exceed `isize::MAX` when - // they have a mapped pointer, and the original pointer was handed to us - // from the Vulkan implementation, so the offset better be in range. - let ptr = unsafe { ptr.as_ptr().offset(relative_offset as isize) }; - - // SAFETY: Same as the previous. 
- unsafe { NonNull::new_unchecked(ptr) } - }); - return Ok(MemoryAlloc { offset, size, allocation_type, - mapped_ptr, atom_size: self.region.atom_size, parent: AllocParent::Bump(self.clone()), }); @@ -2644,44 +2520,6 @@ mod tests { use crate::memory::MemoryAllocateInfo; use std::thread; - #[test] - fn memory_alloc_set_offset() { - let (device, _) = gfx_dev_and_queue!(); - let memory_type_index = device - .physical_device() - .memory_properties() - .memory_types - .iter() - .position(|memory_type| { - memory_type - .property_flags - .contains(MemoryPropertyFlags::HOST_VISIBLE) - }) - .unwrap() as u32; - let mut alloc = MemoryAlloc::new( - DeviceMemory::allocate( - device, - MemoryAllocateInfo { - memory_type_index, - allocation_size: 1024, - ..Default::default() - }, - ) - .unwrap(), - ) - .unwrap(); - let ptr = alloc.mapped_ptr().unwrap().as_ptr(); - - unsafe { - alloc.set_offset(16); - assert_eq!(alloc.mapped_ptr().unwrap().as_ptr(), ptr.offset(16)); - alloc.set_offset(0); - assert_eq!(alloc.mapped_ptr().unwrap().as_ptr(), ptr.offset(0)); - alloc.set_offset(32); - assert_eq!(alloc.mapped_ptr().unwrap().as_ptr(), ptr.offset(32)); - } - } - #[test] fn free_list_allocator_capacity() { const THREADS: DeviceSize = 12; @@ -2782,7 +2620,7 @@ mod tests { .unwrap(); PoolAllocator::new( - MemoryAlloc::new(device_memory).unwrap(), + MemoryAlloc::new(device_memory), DeviceAlignment::new(1).unwrap(), ) } @@ -2837,7 +2675,7 @@ mod tests { .unwrap(); PoolAllocator::::new( - MemoryAlloc::new(device_memory).unwrap(), + MemoryAlloc::new(device_memory), DeviceAlignment::new(1).unwrap(), ) }; @@ -2872,7 +2710,7 @@ mod tests { }, ) .unwrap(); - let mut region = MemoryAlloc::new(device_memory).unwrap(); + let mut region = MemoryAlloc::new(device_memory); unsafe { region.set_allocation_type(allocation_type) }; PoolAllocator::new(region, DeviceAlignment::new(256).unwrap()) @@ -3108,7 +2946,7 @@ mod tests { }, ) .unwrap(); - let mut allocator = <$type>::new(MemoryAlloc::new(device_memory).unwrap()); + let mut allocator = <$type>::new(MemoryAlloc::new(device_memory)); Arc::get_mut(&mut allocator) .unwrap() .buffer_image_granularity = DeviceAlignment::new($granularity).unwrap(); diff --git a/vulkano/src/memory/device_memory.rs b/vulkano/src/memory/device_memory.rs index 70e27850fb..5afa1b3696 100644 --- a/vulkano/src/memory/device_memory.rs +++ b/vulkano/src/memory/device_memory.rs @@ -22,7 +22,8 @@ use std::{ mem::MaybeUninit, num::NonZeroU64, ops::Range, - ptr, slice, + ptr::{self, NonNull}, + slice, sync::{atomic::Ordering, Arc}, }; @@ -61,6 +62,10 @@ pub struct DeviceMemory { export_handle_types: ExternalMemoryHandleTypes, imported_handle_type: Option, flags: MemoryAllocateFlags, + + mapping_state: Option, + atom_size: DeviceAlignment, + is_coherent: bool, } impl DeviceMemory { @@ -71,7 +76,6 @@ impl DeviceMemory { /// /// # Panics /// - /// - Panics if `allocate_info.allocation_size` is 0. /// - Panics if `allocate_info.dedicated_allocation` is `Some` and the contained buffer or /// image does not belong to `device`. #[inline] @@ -82,7 +86,7 @@ impl DeviceMemory { if !(device.api_version() >= Version::V1_1 || device.enabled_extensions().khr_dedicated_allocation) { - // Fall back instead of erroring out + // Fall back instead of erroring out. allocate_info.dedicated_allocation = None; } @@ -99,7 +103,6 @@ impl DeviceMemory { /// /// # Panics /// - /// - Panics if `allocate_info.allocation_size` is 0. 
/// - Panics if `allocate_info.dedicated_allocation` is `Some` and the contained buffer or /// image does not belong to `device`. #[inline] @@ -111,7 +114,7 @@ impl DeviceMemory { if !(device.api_version() >= Version::V1_1 || device.enabled_extensions().khr_dedicated_allocation) { - // Fall back instead of erroring out + // Fall back instead of erroring out. allocate_info.dedicated_allocation = None; } @@ -281,16 +284,28 @@ impl DeviceMemory { output.assume_init() }; + let atom_size = device.physical_device().properties().non_coherent_atom_size; + + let is_coherent = device.physical_device().memory_properties().memory_types + [memory_type_index as usize] + .property_flags + .intersects(MemoryPropertyFlags::HOST_COHERENT); + Ok(DeviceMemory { handle, device: InstanceOwnedDebugWrapper(device), id: Self::next_id(), + allocation_size, memory_type_index, dedicated_to: dedicated_allocation.map(Into::into), export_handle_types, imported_handle_type, flags, + + mapping_state: None, + atom_size, + is_coherent, }) } @@ -315,16 +330,28 @@ impl DeviceMemory { _ne: _, } = allocate_info; + let atom_size = device.physical_device().properties().non_coherent_atom_size; + + let is_coherent = device.physical_device().memory_properties().memory_types + [memory_type_index as usize] + .property_flags + .intersects(MemoryPropertyFlags::HOST_COHERENT); + DeviceMemory { handle, device: InstanceOwnedDebugWrapper(device), id: Self::next_id(), + allocation_size, memory_type_index, dedicated_to: dedicated_allocation.map(Into::into), export_handle_types, imported_handle_type: None, flags, + + mapping_state: None, + atom_size, + is_coherent, } } @@ -370,6 +397,305 @@ impl DeviceMemory { self.flags } + /// Returns the current mapping state, or [`None`] if the memory is not currently host-mapped. + #[inline] + pub fn mapping_state(&self) -> Option<&MappingState> { + self.mapping_state.as_ref() + } + + pub(crate) fn atom_size(&self) -> DeviceAlignment { + self.atom_size + } + + /// Maps a range of memory to be accessed by the host. + /// + /// `self` must not be host-mapped already and must be allocated from host-visible memory. + #[inline] + pub fn map(&mut self, map_info: MemoryMapInfo) -> Result<(), Validated> { + self.validate_map(&map_info)?; + + unsafe { Ok(self.map_unchecked(map_info)?) } + } + + fn validate_map(&self, map_info: &MemoryMapInfo) -> Result<(), Box> { + if self.mapping_state.is_some() { + return Err(Box::new(ValidationError { + problem: "this device memory is already host-mapped".into(), + vuids: &["VUID-vkMapMemory-memory-00678"], + ..Default::default() + })); + } + + map_info + .validate(self) + .map_err(|err| err.add_context("map_info"))?; + + let memory_type = &self + .device() + .physical_device() + .memory_properties() + .memory_types[self.memory_type_index() as usize]; + + if !memory_type + .property_flags + .intersects(MemoryPropertyFlags::HOST_VISIBLE) + { + return Err(Box::new(ValidationError { + problem: "`self.memory_type_index()` refers to a memory type whose \ + `property_flags` does not contain `MemoryPropertyFlags::HOST_VISIBLE`" + .into(), + vuids: &["VUID-vkMapMemory-memory-00682"], + ..Default::default() + })); + } + + Ok(()) + } + + #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] + pub unsafe fn map_unchecked(&mut self, map_info: MemoryMapInfo) -> Result<(), VulkanError> { + let MemoryMapInfo { + offset, + size, + _ne: _, + } = map_info; + + // Sanity check: this would lead to UB when calculating pointer offsets. 
+ assert!(size <= isize::MAX.try_into().unwrap()); + + let device = self.device(); + + let ptr = { + let fns = device.fns(); + let mut output = MaybeUninit::uninit(); + + if device.enabled_extensions().khr_map_memory2 { + let map_info_vk = ash::vk::MemoryMapInfoKHR { + flags: ash::vk::MemoryMapFlags::empty(), + memory: self.handle(), + offset, + size, + ..Default::default() + }; + + (fns.khr_map_memory2.map_memory2_khr)( + device.handle(), + &map_info_vk, + output.as_mut_ptr(), + ) + .result() + .map_err(VulkanError::from)?; + } else { + (fns.v1_0.map_memory)( + device.handle(), + self.handle, + offset, + size, + ash::vk::MemoryMapFlags::empty(), + output.as_mut_ptr(), + ) + .result() + .map_err(VulkanError::from)?; + } + + output.assume_init() + }; + + let ptr = NonNull::new(ptr).unwrap(); + let range = offset..offset + size; + self.mapping_state = Some(MappingState { ptr, range }); + + Ok(()) + } + + /// Unmaps the memory. It will no longer be accessible from the host. + /// + /// `self` must be currently host-mapped. + // + // NOTE(Marc): The `&mut` here is more than just because we need to mutate the struct. + // `vkMapMemory` and `vkUnmapMemory` must be externally synchronized, but more importantly, if + // we allowed unmapping through a shared reference, it would be possible to unmap a resource + // that's currently being read or written by the host elsewhere, requiring even more locking on + // each host access. + #[inline] + pub fn unmap(&mut self, unmap_info: MemoryUnmapInfo) -> Result<(), Validated> { + self.validate_unmap(&unmap_info)?; + + unsafe { self.unmap_unchecked(unmap_info) }?; + + Ok(()) + } + + fn validate_unmap(&self, unmap_info: &MemoryUnmapInfo) -> Result<(), Box> { + if self.mapping_state.is_none() { + return Err(Box::new(ValidationError { + problem: "this device memory is not currently host-mapped".into(), + vuids: &["VUID-vkUnmapMemory-memory-00689"], + ..Default::default() + })); + } + + unmap_info + .validate(self) + .map_err(|err| err.add_context("unmap_info"))?; + + Ok(()) + } + + #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] + pub unsafe fn unmap_unchecked( + &mut self, + unmap_info: MemoryUnmapInfo, + ) -> Result<(), VulkanError> { + let MemoryUnmapInfo { _ne: _ } = unmap_info; + + let device = self.device(); + let fns = device.fns(); + + if device.enabled_extensions().khr_map_memory2 { + let unmap_info_vk = ash::vk::MemoryUnmapInfoKHR { + flags: ash::vk::MemoryUnmapFlagsKHR::empty(), + memory: self.handle(), + ..Default::default() + }; + + (fns.khr_map_memory2.unmap_memory2_khr)(device.handle(), &unmap_info_vk) + .result() + .map_err(VulkanError::from)?; + } else { + (fns.v1_0.unmap_memory)(device.handle(), self.handle); + } + + self.mapping_state = None; + + Ok(()) + } + + /// Invalidates the host cache for a range of mapped memory. + /// + /// If the device memory is not [host-coherent], you must call this function before the memory + /// is read by the host, if the device previously wrote to the memory. It has no effect if the + /// memory is host-coherent. + /// + /// # Safety + /// + /// - If there are memory writes by the device that have not been propagated into the host + /// cache, then there must not be any references in Rust code to any portion of the specified + /// `memory_range`. 
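Putting the new pieces together, a sketch of explicitly mapping and unmapping a `DeviceMemory` block; `memory_type_index` is assumed to refer to a host-visible memory type, and the unwraps stand in for real error handling:

```rust
use std::sync::Arc;
use vulkano::{
    device::Device,
    memory::{DeviceMemory, MemoryAllocateInfo, MemoryMapInfo, MemoryUnmapInfo},
};

fn map_write_unmap(device: Arc<Device>, memory_type_index: u32) {
    let mut memory = DeviceMemory::allocate(
        device,
        MemoryAllocateInfo {
            allocation_size: 1024,
            memory_type_index,
            ..Default::default()
        },
    )
    .unwrap();

    // Map the whole allocation; whole-range mappings always satisfy the
    // non-coherent-atom-size alignment rules.
    memory
        .map(MemoryMapInfo {
            offset: 0,
            size: memory.allocation_size(),
            ..Default::default()
        })
        .unwrap();

    // The mapping state hands out bounds-checked pointers into the mapped range.
    let state = memory.mapping_state().unwrap();
    let ptr = state.slice(0..1024).unwrap();

    // SAFETY: we own the memory exclusively and no device work is using it.
    unsafe { ptr.as_ptr().cast::<u8>().write_bytes(0, ptr.len()) };

    // `unmap` takes `&mut self`, so the borrow through `mapping_state` has ended.
    memory.unmap(MemoryUnmapInfo::default()).unwrap();
}
```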
+ /// + /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT + /// [`map`]: Self::map + /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size + #[inline] + pub unsafe fn invalidate_range( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), Validated> { + self.validate_memory_range(&memory_range)?; + + Ok(self.invalidate_range_unchecked(memory_range)?) + } + + #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] + #[inline] + pub unsafe fn invalidate_range_unchecked( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), VulkanError> { + if self.is_coherent { + return Ok(()); + } + + let MappedMemoryRange { + offset, + size, + _ne: _, + } = memory_range; + + let memory_range_vk = ash::vk::MappedMemoryRange { + memory: self.handle(), + offset, + size, + ..Default::default() + }; + + let fns = self.device().fns(); + (fns.v1_0.invalidate_mapped_memory_ranges)(self.device().handle(), 1, &memory_range_vk) + .result() + .map_err(VulkanError::from)?; + + Ok(()) + } + + /// Flushes the host cache for a range of mapped memory. + /// + /// If the device memory is not [host-coherent], you must call this function after writing to + /// the memory, if the device is going to read the memory. It has no effect if the memory is + /// host-coherent. + /// + /// # Safety + /// + /// - There must be no operations pending or executing in a device queue, that access the + /// specified `memory_range`. + /// + /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT + /// [`map`]: Self::map + /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size + #[inline] + pub unsafe fn flush_range( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), Validated> { + self.validate_memory_range(&memory_range)?; + + Ok(self.flush_range_unchecked(memory_range)?) + } + + #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] + #[inline] + pub unsafe fn flush_range_unchecked( + &self, + memory_range: MappedMemoryRange, + ) -> Result<(), VulkanError> { + if self.is_coherent { + return Ok(()); + } + + let MappedMemoryRange { + offset, + size, + _ne: _, + } = memory_range; + + let memory_range_vk = ash::vk::MappedMemoryRange { + memory: self.handle(), + offset, + size, + ..Default::default() + }; + + let fns = self.device().fns(); + (fns.v1_0.flush_mapped_memory_ranges)(self.device().handle(), 1, &memory_range_vk) + .result() + .map_err(VulkanError::from)?; + + Ok(()) + } + + // NOTE(Marc): We are validating the parameters regardless of whether the memory is + // non-coherent on purpose, to catch potential bugs arising because the code isn't tested on + // such hardware. + fn validate_memory_range( + &self, + memory_range: &MappedMemoryRange, + ) -> Result<(), Box> { + memory_range + .validate(self) + .map_err(|err| err.add_context("memory_range"))?; + + Ok(()) + } + /// Retrieves the amount of lazily-allocated memory that is currently commited to this /// memory object. /// @@ -1034,7 +1360,7 @@ vulkan_bitflags_enum! { vulkan_bitflags! { #[non_exhaustive] - /// A mask specifying flags for device memory allocation. + /// Flags specifying additional properties of a device memory allocation. MemoryAllocateFlags = MemoryAllocateFlags(u32); /* TODO: enable @@ -1054,6 +1380,306 @@ vulkan_bitflags! { DEVICE_ADDRESS_CAPTURE_REPLAY = DEVICE_ADDRESS_CAPTURE_REPLAY,*/ } +/// Parameters of a memory map operation. 
+#[derive(Debug)] +pub struct MemoryMapInfo { + /// The offset (in bytes) from the beginning of the `DeviceMemory`, where the mapping starts. + /// + /// Must be less than the [`allocation_size`] of the device memory. If the memory was not + /// allocated from [host-coherent] memory, then this must be a multiple of the + /// [`non_coherent_atom_size`] device property. + /// + /// The default value is `0`. + /// + /// [`allocation_size`]: DeviceMemory::allocation_size + /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size + pub offset: DeviceSize, + + /// The size (in bytes) of the mapping. + /// + /// Must be less than or equal to the [`allocation_size`] of the device memory minus `offset`. + /// If the memory was not allocated from [host-coherent] memory, then this must be a + /// multiple of the [`non_coherent_atom_size`] device property, or be equal to the allocation + /// size minus `offset`. + /// + /// The default value is `0`, which must be overridden. + /// + /// [`allocation_size`]: DeviceMemory::allocation_size + /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size + pub size: DeviceSize, + + pub _ne: crate::NonExhaustive, +} + +impl MemoryMapInfo { + pub(crate) fn validate(&self, memory: &DeviceMemory) -> Result<(), Box> { + let &Self { + offset, + size, + _ne: _, + } = self; + + if !(offset < memory.allocation_size()) { + return Err(Box::new(ValidationError { + context: "offset".into(), + problem: "is not less than `self.allocation_size()`".into(), + vuids: &["VUID-vkMapMemory-offset-00679"], + ..Default::default() + })); + } + + if size == 0 { + return Err(Box::new(ValidationError { + context: "size".into(), + problem: "is zero".into(), + vuids: &["VUID-vkMapMemory-size-00680"], + ..Default::default() + })); + } + + if !(size <= memory.allocation_size() - offset) { + return Err(Box::new(ValidationError { + context: "size".into(), + problem: "is not less than or equal to `self.allocation_size()` minus `offset`" + .into(), + vuids: &["VUID-vkMapMemory-size-00681"], + ..Default::default() + })); + } + + let atom_size = memory.atom_size(); + + // Not required for merely mapping, but without this check the user can end up with + // parts of the mapped memory at the start and end that they're not able to + // invalidate/flush, which is probably unintended. + // + // NOTE(Marc): We also rely on this for soundness, because it is easier and more optimal to + // not have to worry about whether a range of mapped memory is still in bounds of the + // mapped memory after being aligned to the non-coherent atom size. + if !memory.is_coherent + && (!is_aligned(offset, atom_size) + || (!is_aligned(size, atom_size) && offset + size != memory.allocation_size())) + { + return Err(Box::new(ValidationError { + problem: "`self.memory_type_index()` refers to a memory type whose \ + `property_flags` does not contain `MemoryPropertyFlags::HOST_COHERENT`, and \ + `offset` and/or `size` are not aligned to the `non_coherent_atom_size` device \ + property" + .into(), + ..Default::default() + })); + } + + Ok(()) + } +} + +impl Default for MemoryMapInfo { + #[inline] + fn default() -> Self { + MemoryMapInfo { + offset: 0, + size: 0, + _ne: crate::NonExhaustive(()), + } + } +} + +/// Parameters of a memory unmap operation.
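As a concrete illustration of these rules, a sketch of mapping only part of an allocation, widening the requested window to the non-coherent atom size; `map_subrange` is a hypothetical helper, not part of vulkano, and assumes the requested window is non-empty and lies within the allocation:

```rust
use vulkano::{
    device::DeviceOwned,
    memory::{DeviceMemory, MemoryMapInfo},
    DeviceSize,
};

/// Maps `wanted_offset..wanted_offset + wanted_size`, widened so both ends satisfy
/// the alignment rules above.
fn map_subrange(memory: &mut DeviceMemory, wanted_offset: DeviceSize, wanted_size: DeviceSize) {
    let atom = memory
        .device()
        .physical_device()
        .properties()
        .non_coherent_atom_size
        .as_devicesize();

    // Align the start down and the end up to the non-coherent atom size, clamping
    // the end to the allocation size (the "equal to allocation size minus offset" case).
    let offset = (wanted_offset / atom) * atom;
    let end = ((wanted_offset + wanted_size + atom - 1) / atom) * atom;
    let size = end.min(memory.allocation_size()) - offset;

    memory
        .map(MemoryMapInfo {
            offset,
            size,
            ..Default::default()
        })
        .expect("offset and size satisfy the mapping rules");
}
```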
+#[derive(Debug)] +pub struct MemoryUnmapInfo { + pub _ne: crate::NonExhaustive, +} + +impl MemoryUnmapInfo { + pub(crate) fn validate(&self, _memory: &DeviceMemory) -> Result<(), Box> { + let &Self { _ne: _ } = self; + + Ok(()) + } +} + +impl Default for MemoryUnmapInfo { + #[inline] + fn default() -> Self { + MemoryUnmapInfo { + _ne: crate::NonExhaustive(()), + } + } +} + +/// Represents the currently host-mapped region of a [`DeviceMemory`] block. +#[derive(Debug)] +pub struct MappingState { + ptr: NonNull, + range: Range, +} + +// It is safe to share `ptr` between threads because the user would have to use unsafe code +// themself to get UB in the first place. +unsafe impl Send for MappingState {} +unsafe impl Sync for MappingState {} + +impl MappingState { + /// Returns the pointer to the start of the mapped memory. Meaning that the pointer is already + /// offset by the [`offset`]. + /// + /// [`offset`]: Self::offset + #[inline] + pub fn ptr(&self) -> NonNull { + self.ptr + } + + /// Returns the offset given to [`DeviceMemory::map`]. + #[inline] + pub fn offset(&self) -> DeviceSize { + self.range.start + } + + /// Returns the size given to [`DeviceMemory::map`]. + #[inline] + pub fn size(&self) -> DeviceSize { + self.range.end - self.range.start + } + + /// Returns a pointer to a slice of the mapped memory. Returns `None` if out of bounds. + /// + /// `range` is specified in bytes relative to the start of the memory allocation, and must fall + /// within the range of the memory mapping given to [`DeviceMemory::map`]. + /// + /// This function is safe in the sense that the returned pointer is guaranteed to be within + /// bounds of the mapped memory, however dereferencing the pointer isn't: + /// + /// - Normal Rust aliasing rules apply: if you create a mutable reference out of the pointer, + /// you must ensure that no other references exist in Rust to any portion of the same memory. + /// - While a reference created from the pointer exists, there must be no operations pending or + /// executing in any queue on the device, that write to any portion of the same memory. + /// - While a mutable reference created from the pointer exists, there must be no operations + /// pending or executing in any queue on the device, that read from any portion of the same + /// memory. + #[inline] + pub fn slice(&self, range: Range) -> Option> { + if self.range.start <= range.start + && range.start <= range.end + && range.end <= self.range.end + { + // SAFETY: We checked that the range is within the currently mapped range. + Some(unsafe { self.slice_unchecked(range) }) + } else { + None + } + } + + /// # Safety + /// + /// - `range` must be within the currently mapped range. + #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] + #[inline] + pub unsafe fn slice_unchecked(&self, range: Range) -> NonNull<[u8]> { + let ptr = self.ptr.as_ptr(); + + // SAFETY: The caller must guarantee that `range` is within the currently mapped range, + // which means that the offset pointer and length must denote a slice that's contained + // within the allocated (mapped) object. + let ptr = ptr.add((range.start - self.range.start) as usize); + let len = (range.end - range.start) as usize; + + let ptr = ptr::slice_from_raw_parts_mut(<*mut c_void>::cast::(ptr), len); + + // SAFETY: The original pointer was non-null, and the caller must guarantee that `range` + // is within the currently mapped range, which means that the offset couldn't have wrapped + // around the address space. 
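And the corresponding write path through `MappingState::slice`, assuming the memory is already host-mapped and that `data.len()` respects the non-coherent atom size (or the memory is host-coherent):

```rust
use vulkano::memory::{DeviceMemory, MappedMemoryRange};

/// Writes `data` at the start of the mapped range and flushes it for the device.
///
/// # Safety
///
/// No device work may currently access this range, and no other Rust references
/// to it may exist.
unsafe fn upload(memory: &DeviceMemory, data: &[u8]) {
    let state = memory.mapping_state().expect("memory is host-mapped");
    let offset = state.offset();
    let ptr = state
        .slice(offset..offset + data.len() as u64)
        .expect("range is within the mapping");

    // SAFETY: upheld by the caller, see the aliasing rules on `MappingState::slice`.
    unsafe {
        ptr.as_ptr()
            .cast::<u8>()
            .copy_from_nonoverlapping(data.as_ptr(), data.len());
    }

    // No-op for host-coherent memory; otherwise required before the device reads the data.
    unsafe {
        memory
            .flush_range(MappedMemoryRange {
                offset,
                size: data.len() as u64,
                ..Default::default()
            })
            .unwrap();
    }
}
```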
+ NonNull::new_unchecked(ptr) + } +} + +/// Represents a range of host-mapped [`DeviceMemory`] to be invalidated or flushed. +/// +/// Must be contained within the currently mapped range of the device memory. +#[derive(Debug)] +pub struct MappedMemoryRange { + /// The offset (in bytes) from the beginning of the allocation, where the range starts. + /// + /// Must be a multiple of the [`non_coherent_atom_size`] device property. + /// + /// The default value is `0`. + /// + /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size + pub offset: DeviceSize, + + /// The size (in bytes) of the range. + /// + /// Must be a multiple of the [`non_coherent_atom_size`] device property, or be equal to the + /// allocation size minus `offset`. + /// + /// The default value is `0`. + /// + /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size + pub size: DeviceSize, + + pub _ne: crate::NonExhaustive, +} + +impl MappedMemoryRange { + pub(crate) fn validate(&self, memory: &DeviceMemory) -> Result<(), Box> { + let &Self { + offset, + size, + _ne: _, + } = self; + + if let Some(state) = &memory.mapping_state { + if !(state.range.start <= offset && size <= state.range.end - offset) { + return Err(Box::new(ValidationError { + problem: "is not contained within the mapped range of this device memory" + .into(), + vuids: &["VUID-VkMappedMemoryRange-size-00685"], + ..Default::default() + })); + } + } else { + return Err(Box::new(ValidationError { + problem: "this device memory is not currently host-mapped".into(), + vuids: &["VUID-VkMappedMemoryRange-memory-00684"], + ..Default::default() + })); + } + + if !is_aligned(offset, memory.atom_size()) { + return Err(Box::new(ValidationError { + context: "offset".into(), + problem: "is not aligned to the `non_coherent_atom_size` device property".into(), + vuids: &["VUID-VkMappedMemoryRange-offset-00687"], + ..Default::default() + })); + } + + if !(is_aligned(size, memory.atom_size()) || size == memory.allocation_size() - offset) { + return Err(Box::new(ValidationError { + context: "size".into(), + problem: "is not aligned to the `non_coherent_atom_size` device property nor \ + equal to `self.allocation_size()` minus `offset`" + .into(), + vuids: &["VUID-VkMappedMemoryRange-size-01390"], + ..Default::default() + })); + } + + Ok(()) + } +} + +impl Default for MappedMemoryRange { + #[inline] + fn default() -> Self { + MappedMemoryRange { + offset: 0, + size: 0, + _ne: crate::NonExhaustive(()), + } + } +} + /// Represents device memory that has been mapped in a CPU-accessible space. /// /// In order to access the contents of the allocated memory, you can use the `read` and `write` @@ -1095,6 +1721,10 @@ vulkan_bitflags! { /// } /// ``` #[derive(Debug)] +#[deprecated( + since = "0.34.0", + note = "use the methods provided directly on `DeviceMemory` instead" +)] pub struct MappedDeviceMemory { memory: DeviceMemory, pointer: *mut c_void, // points to `range.start` @@ -1110,6 +1740,7 @@ pub struct MappedDeviceMemory { // Vulkan specs, documentation of `vkFreeMemory`: // > If a memory object is mapped at the time it is freed, it is implicitly unmapped. +#[allow(deprecated)] impl MappedDeviceMemory { /// Maps a range of memory to be accessed by the CPU. /// @@ -1165,8 +1796,14 @@ impl MappedDeviceMemory { })); } - // VUID-vkMapMemory-memory-00678 - // Guaranteed because we take ownership of `memory`, no other mapping can exist. 
+ if memory.mapping_state().is_some() { + return Err(Box::new(ValidationError { + context: "memory".into(), + problem: "is already host-mapped".into(), + vuids: &["VUID-vkMapMemory-memory-00678"], + ..Default::default() + })); + } if range.end > memory.allocation_size { return Err(Box::new(ValidationError { @@ -1208,6 +1845,9 @@ impl MappedDeviceMemory { memory: DeviceMemory, range: Range, ) -> Result { + // Sanity check: this would lead to UB when calculating pointer offsets. + assert!(range.end - range.start <= isize::MAX.try_into().unwrap()); + let device = memory.device(); let pointer = unsafe { @@ -1480,6 +2120,7 @@ impl MappedDeviceMemory { } } +#[allow(deprecated)] impl AsRef for MappedDeviceMemory { #[inline] fn as_ref(&self) -> &DeviceMemory { @@ -1487,6 +2128,7 @@ impl AsRef for MappedDeviceMemory { } } +#[allow(deprecated)] impl AsMut for MappedDeviceMemory { #[inline] fn as_mut(&mut self) -> &mut DeviceMemory { @@ -1494,6 +2136,7 @@ impl AsMut for MappedDeviceMemory { } } +#[allow(deprecated)] unsafe impl DeviceOwned for MappedDeviceMemory { #[inline] fn device(&self) -> &Arc { @@ -1501,7 +2144,9 @@ unsafe impl DeviceOwned for MappedDeviceMemory { } } +#[allow(deprecated)] unsafe impl Send for MappedDeviceMemory {} +#[allow(deprecated)] unsafe impl Sync for MappedDeviceMemory {} #[cfg(test)] diff --git a/vulkano/src/padded.rs b/vulkano/src/padded.rs index dbf18f1bce..5d78b48f38 100644 --- a/vulkano/src/padded.rs +++ b/vulkano/src/padded.rs @@ -15,11 +15,11 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::{ alloc::Layout, cmp::Ordering, - ffi::c_void, fmt::{Debug, Display, Formatter, Result as FmtResult}, hash::{Hash, Hasher}, - mem::{align_of, size_of, MaybeUninit}, + mem::{size_of, MaybeUninit}, ops::{Deref, DerefMut}, + ptr::NonNull, }; /// A newtype wrapper around `T`, with `N` bytes of trailing padding. 
@@ -303,11 +303,10 @@ where panic!("zero-sized types are not valid buffer contents"); }; - unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self { - debug_assert!(range == size_of::()); - debug_assert!(data as usize % align_of::() == 0); + unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self { + debug_assert!(slice.len() == size_of::>()); - data.cast() + <*mut [u8]>::cast::>(slice.as_ptr()) } } diff --git a/vulkano/src/sync/mod.rs b/vulkano/src/sync/mod.rs index 57fb6494f2..ef30950657 100644 --- a/vulkano/src/sync/mod.rs +++ b/vulkano/src/sync/mod.rs @@ -25,7 +25,7 @@ pub use self::{ MemoryBarrier, PipelineStage, PipelineStages, QueueFamilyOwnershipTransfer, }, }; -use crate::{device::Queue, ValidationError, VulkanError}; +use crate::{device::Queue, VulkanError}; use std::{ error::Error, fmt::{Display, Formatter}, @@ -104,7 +104,8 @@ pub(crate) enum CurrentAccess { pub enum HostAccessError { AccessConflict(AccessConflict), Invalidate(VulkanError), - ValidationError(Box), + NotHostMapped, + OutOfMappedRange, } impl Error for HostAccessError { @@ -112,7 +113,7 @@ impl Error for HostAccessError { match self { Self::AccessConflict(err) => Some(err), Self::Invalidate(err) => Some(err), - Self::ValidationError(err) => Some(err), + _ => None, } } } @@ -124,7 +125,13 @@ impl Display for HostAccessError { write!(f, "the resource is already in use in a conflicting way") } HostAccessError::Invalidate(_) => write!(f, "invalidating the device memory failed"), - HostAccessError::ValidationError(_) => write!(f, "validation error"), + HostAccessError::NotHostMapped => { + write!(f, "the device memory is not currently host-mapped") + } + HostAccessError::OutOfMappedRange => write!( + f, + "the requested range is not within the currently mapped range of device memory", + ), } } }
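Finally, a sketch of handling the reworked error type at a call site; the wildcard arm keeps it robust should further variants be added:

```rust
use vulkano::{buffer::Subbuffer, sync::HostAccessError};

fn print_contents(subbuffer: &Subbuffer<[u32]>) {
    match subbuffer.read() {
        Ok(guard) => println!("{:?}", &*guard),
        Err(HostAccessError::NotHostMapped) => {
            eprintln!("the buffer's memory is not host-mapped")
        }
        Err(HostAccessError::OutOfMappedRange) => {
            eprintln!("the buffer lies outside the mapped range")
        }
        Err(err) => eprintln!("host access failed: {err}"),
    }
}
```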