use std::error;
use std::fmt;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
use std::os::raw::c_void;
use std::ptr;
use std::sync::Arc;
use Error;
use OomError;
use VulkanObject;
use check_errors;
use device::Device;
use device::DeviceOwned;
use instance::MemoryType;
use memory::Content;
use memory::DedicatedAlloc;
use vk;
/// Represents a raw chunk of memory allocated from a Vulkan device.
///
/// The memory is freed (`vkFreeMemory`) and the device's allocation counter decremented
/// when this object is dropped.
pub struct DeviceMemory {
// Raw Vulkan handle of the allocation.
memory: vk::DeviceMemory,
// Device the memory was allocated from; kept alive for the lifetime of the allocation.
device: Arc<Device>,
// Size in bytes that was requested at allocation time.
size: usize,
// Index of the memory type this allocation was made from.
memory_type_index: u32,
}
impl DeviceMemory {
/// Allocates `size` bytes from `memory_type` on `device`.
///
/// # Panics
///
/// - Panics if `size` is 0.
/// - Panics if `memory_type` doesn't belong to the same physical device as `device`.
#[inline]
pub fn alloc(device: Arc<Device>, memory_type: MemoryType, size: usize)
-> Result<DeviceMemory, DeviceMemoryAllocError> {
DeviceMemory::dedicated_alloc(device, memory_type, size, DedicatedAlloc::None)
}
/// Same as `alloc`, but additionally requests that the allocation be dedicated to the
/// buffer or image in `resource` when the `VK_KHR_dedicated_allocation` extension is
/// loaded. Without the extension, `resource` is ignored and a plain allocation is made.
///
/// # Panics
///
/// - Panics if `size` is 0.
/// - Panics if `memory_type` doesn't belong to the same physical device as `device`.
#[inline]
pub fn dedicated_alloc(device: Arc<Device>, memory_type: MemoryType, size: usize,
resource: DedicatedAlloc)
-> Result<DeviceMemory, DeviceMemoryAllocError> {
assert!(size >= 1);
assert_eq!(device.physical_device().internal_object(),
memory_type.physical_device().internal_object());
let memory = unsafe {
let physical_device = device.physical_device();
// Hold the shared allocation counter's lock across the whole vkAllocateMemory call so
// that the limit check below and the increment after success are atomic with respect
// to concurrent allocations on the same device.
let mut allocation_count = device.allocation_count().lock().expect("Poisoned mutex");
if *allocation_count >= physical_device.limits().max_memory_allocation_count() {
return Err(DeviceMemoryAllocError::TooManyObjects);
}
let vk = device.pointers();
// Optional pNext extension struct that ties the allocation to a single buffer or
// image. A handle value of 0 means "none" for the unused field.
let dedicated_alloc_info = if device.loaded_extensions().khr_dedicated_allocation {
match resource {
DedicatedAlloc::Buffer(buffer) => {
Some(vk::MemoryDedicatedAllocateInfoKHR {
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
pNext: ptr::null(),
image: 0,
buffer: buffer.internal_object(),
})
},
DedicatedAlloc::Image(image) => {
Some(vk::MemoryDedicatedAllocateInfoKHR {
sType: vk::STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
pNext: ptr::null(),
image: image.internal_object(),
buffer: 0,
})
},
DedicatedAlloc::None => {
None
},
}
} else {
None
};
let infos = vk::MemoryAllocateInfo {
sType: vk::STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
// Chain the dedicated-allocation struct if present, otherwise null.
pNext: dedicated_alloc_info
.as_ref()
.map(|i| i as *const vk::MemoryDedicatedAllocateInfoKHR)
.unwrap_or(ptr::null()) as *const _,
allocationSize: size as u64,
memoryTypeIndex: memory_type.id(),
};
let mut output = MaybeUninit::uninit();
check_errors(vk.AllocateMemory(device.internal_object(),
&infos,
ptr::null(),
output.as_mut_ptr()))?;
// Only count the allocation once vkAllocateMemory has actually succeeded.
*allocation_count += 1;
output.assume_init()
};
Ok(DeviceMemory {
memory: memory,
device: device,
size: size,
memory_type_index: memory_type.id(),
})
}
/// Allocates `size` bytes from `memory_type` and maps the whole allocation into host
/// address space.
///
/// # Panics
///
/// - Panics if `memory_type` is not host-visible.
/// - Same panics as `alloc`.
#[inline]
pub fn alloc_and_map(device: Arc<Device>, memory_type: MemoryType, size: usize)
-> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
DeviceMemory::dedicated_alloc_and_map(device, memory_type, size, DedicatedAlloc::None)
}
/// Same as `dedicated_alloc`, but also maps the whole allocation (offset 0, full size)
/// into host address space.
///
/// # Panics
///
/// - Panics if `memory_type` is not host-visible.
/// - Same panics as `dedicated_alloc`.
pub fn dedicated_alloc_and_map(device: Arc<Device>, memory_type: MemoryType, size: usize,
resource: DedicatedAlloc)
-> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
let vk = device.pointers();
assert!(memory_type.is_host_visible());
let mem = DeviceMemory::dedicated_alloc(device.clone(), memory_type, size, resource)?;
// Remember coherency so that flush/invalidate can be skipped when not needed.
let coherent = memory_type.is_host_coherent();
let ptr = unsafe {
let mut output = MaybeUninit::uninit();
// If vkMapMemory fails, `mem` is dropped and the allocation freed.
check_errors(vk.MapMemory(device.internal_object(),
mem.memory,
0,
mem.size as vk::DeviceSize,
0,
output.as_mut_ptr()))?;
output.assume_init()
};
Ok(MappedDeviceMemory {
memory: mem,
pointer: ptr,
coherent: coherent,
})
}
/// Returns the memory type this chunk was allocated from.
#[inline]
pub fn memory_type(&self) -> MemoryType {
self.device
.physical_device()
// The index was obtained from a valid `MemoryType` at allocation time, so the
// lookup can't fail.
.memory_type_by_id(self.memory_type_index)
.unwrap()
}
/// Returns the size in bytes that was requested for this allocation.
#[inline]
pub fn size(&self) -> usize {
self.size
}
}
unsafe impl DeviceOwned for DeviceMemory {
#[inline]
fn device(&self) -> &Arc<Device> {
&self.device
}
}
impl fmt::Debug for DeviceMemory {
    /// Formats the allocation, showing its device, memory type and size.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let mut output = fmt.debug_struct("DeviceMemory");
        output.field("device", &*self.device);
        output.field("memory_type", &self.memory_type());
        output.field("size", &self.size);
        output.finish()
    }
}
unsafe impl VulkanObject for DeviceMemory {
    type Object = vk::DeviceMemory;

    const TYPE: vk::ObjectType = vk::OBJECT_TYPE_DEVICE_MEMORY;

    /// Returns the raw `VkDeviceMemory` handle.
    #[inline]
    fn internal_object(&self) -> vk::DeviceMemory {
        self.memory
    }
}
impl Drop for DeviceMemory {
#[inline]
fn drop(&mut self) {
unsafe {
let vk = self.device.pointers();
// Free the device memory first, then decrement the shared allocation counter that
// `dedicated_alloc` incremented on success.
vk.FreeMemory(self.device.internal_object(), self.memory, ptr::null());
let mut allocation_count = self.device
.allocation_count()
.lock()
.expect("Poisoned mutex");
*allocation_count -= 1;
}
}
}
/// A `DeviceMemory` whose content is mapped into host address space.
///
/// The mapping covers the whole allocation (see `dedicated_alloc_and_map`).
pub struct MappedDeviceMemory {
memory: DeviceMemory,
// Host pointer returned by vkMapMemory; points at offset 0 of the allocation.
pointer: *mut c_void,
// True if the memory type is host-coherent, in which case no explicit
// flush/invalidate is needed around host accesses.
coherent: bool,
}
impl MappedDeviceMemory {
/// Unmaps the memory (`vkUnmapMemory`) and returns the underlying `DeviceMemory`.
pub fn unmap(self) -> DeviceMemory {
unsafe {
let device = self.memory.device();
let vk = device.pointers();
vk.UnmapMemory(device.internal_object(), self.memory.memory);
}
self.memory
}
/// Returns a typed accessor to the bytes in `range` (byte offsets relative to the start
/// of the allocation). For non-host-coherent memory, the range is invalidated first so
/// that device writes become visible to the host.
///
/// # Safety
///
/// The caller must guarantee that `range` lies within the allocation and that the
/// memory isn't accessed by the device at the same time — neither is checked here.
#[inline]
pub unsafe fn read_write<T: ?Sized>(&self, range: Range<usize>) -> CpuAccess<T>
where T: Content
{
let vk = self.memory.device().pointers();
// Reinterpret the mapped bytes starting at `range.start` as a `T`.
let pointer = T::ref_from_ptr((self.pointer as usize + range.start) as *mut _,
range.end - range.start)
.unwrap();
if !self.coherent {
// NOTE(review): for non-coherent memory the spec requires offset/size to respect
// `nonCoherentAtomSize` alignment — confirm callers guarantee this.
// (This `range` shadows the parameter only inside this scope.)
let range = vk::MappedMemoryRange {
sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
pNext: ptr::null(),
memory: self.memory.internal_object(),
offset: range.start as u64,
size: (range.end - range.start) as u64,
};
// Make device writes visible to the host before the caller reads.
vk.InvalidateMappedMemoryRanges(self.memory.device().internal_object(), 1, &range);
}
CpuAccess {
pointer: pointer,
mem: self,
coherent: self.coherent,
range: range,
}
}
}
impl AsRef<DeviceMemory> for MappedDeviceMemory {
    /// Borrows the underlying (still mapped) `DeviceMemory`.
    #[inline]
    fn as_ref(&self) -> &DeviceMemory {
        &self.memory
    }
}
impl AsMut<DeviceMemory> for MappedDeviceMemory {
    /// Mutably borrows the underlying (still mapped) `DeviceMemory`.
    #[inline]
    fn as_mut(&mut self) -> &mut DeviceMemory {
        &mut self.memory
    }
}
unsafe impl DeviceOwned for MappedDeviceMemory {
    /// Returns the owning device by delegating to the wrapped `DeviceMemory`.
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.memory.device()
    }
}
// SAFETY: the raw mapping pointer is the only field that prevents auto-derived
// Send/Sync; reading or writing through it requires the `unsafe` `read_write` method,
// whose contract the caller must uphold. NOTE(review): soundness here relies entirely
// on that contract — confirm no safe path dereferences `pointer`.
unsafe impl Send for MappedDeviceMemory {
}
unsafe impl Sync for MappedDeviceMemory {
}
impl fmt::Debug for MappedDeviceMemory {
    /// Formats as a one-field tuple wrapping the inner `DeviceMemory`.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let mut output = fmt.debug_tuple("MappedDeviceMemory");
        output.field(&self.memory);
        output.finish()
    }
}
/// Typed accessor into the content of a `MappedDeviceMemory`.
///
/// For non-host-coherent memory, dropping this accessor flushes the accessed range so
/// that host writes become visible to the device.
pub struct CpuAccess<'a, T: ?Sized + 'a> {
// Typed pointer into the host mapping.
pointer: *mut T,
// The mapping this access borrows from.
mem: &'a MappedDeviceMemory,
// Whether the memory type is host-coherent; if false, `drop` flushes `range`.
coherent: bool,
// Byte range of the access, relative to the start of the allocation.
range: Range<usize>,
}
impl<'a, T: ?Sized + 'a> CpuAccess<'a, T> {
    /// Builds a new accessor pointing to a value of type `U` derived from the current
    /// pointer, covering the same byte range of the same mapping.
    ///
    /// Note that `self` is consumed here; all copied fields are `Copy` or borrowed, so
    /// no move out of the `Drop` type occurs.
    #[doc(hidden)]
    #[inline]
    pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U>
        where F: FnOnce(*mut T) -> *mut U
    {
        let new_pointer = f(self.pointer);
        CpuAccess {
            pointer: new_pointer,
            range: self.range.clone(),
            coherent: self.coherent,
            mem: self.mem,
        }
    }
}
// SAFETY: `CpuAccess` is a typed view into host-mapped memory obtained through the
// unsafe `read_write`. NOTE(review): these impls are unconditional — there is no
// `T: Send` / `T: Sync` bound, so they forward thread-safety regardless of `T`;
// confirm that the `Content` bound at construction time makes this sound.
unsafe impl<'a, T: ?Sized + 'a> Send for CpuAccess<'a, T> {
}
unsafe impl<'a, T: ?Sized + 'a> Sync for CpuAccess<'a, T> {
}
impl<'a, T: ?Sized + 'a> Deref for CpuAccess<'a, T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
// The pointer was produced by `Content::ref_from_ptr` in `read_write`, which is the
// only constructor of this type, and stays valid while `mem` is borrowed.
unsafe { &*self.pointer }
}
}
impl<'a, T: ?Sized + 'a> DerefMut for CpuAccess<'a, T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
// Same justification as `deref`; exclusive access is guaranteed by `&mut self`.
unsafe { &mut *self.pointer }
}
}
impl<'a, T: ?Sized + 'a> Drop for CpuAccess<'a, T> {
#[inline]
fn drop(&mut self) {
// For non-coherent memory, flush the accessed range so host writes become visible
// to the device. Coherent memory needs no explicit flush.
if !self.coherent {
let vk = self.mem.as_ref().device().pointers();
let range = vk::MappedMemoryRange {
sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
pNext: ptr::null(),
memory: self.mem.as_ref().internal_object(),
offset: self.range.start as u64,
size: (self.range.end - self.range.start) as u64,
};
// Any error returned by the flush is deliberately ignored: drop can't fail.
unsafe {
vk.FlushMappedMemoryRanges(self.mem.as_ref().device().internal_object(), 1, &range);
}
}
}
}
/// Error that can happen when allocating (and optionally mapping) device memory.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum DeviceMemoryAllocError {
/// Not enough host or device memory was available.
OomError(OomError),
/// The device-wide maximum number of allocations has been reached.
TooManyObjects,
/// Mapping the memory into host address space failed.
MemoryMapFailed,
}
impl error::Error for DeviceMemoryAllocError {
    /// Short static description of the error.
    ///
    /// Kept (although deprecated in newer Rust) because the `Display` impl below
    /// delegates to it.
    #[inline]
    fn description(&self) -> &str {
        match *self {
            DeviceMemoryAllocError::OomError(_) => "not enough memory available",
            DeviceMemoryAllocError::TooManyObjects =>
                "the maximum number of allocations has been exceeded",
            DeviceMemoryAllocError::MemoryMapFailed => "memory map failed",
        }
    }

    /// Returns the underlying `OomError`, if any.
    ///
    /// Implemented as `source` — the non-deprecated replacement for `cause`. The default
    /// `cause` implementation delegates to `source`, so callers of either method observe
    /// the same result as before.
    #[inline]
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match *self {
            DeviceMemoryAllocError::OomError(ref err) => Some(err),
            _ => None,
        }
    }
}
impl fmt::Display for DeviceMemoryAllocError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}
impl From<Error> for DeviceMemoryAllocError {
#[inline]
fn from(err: Error) -> DeviceMemoryAllocError {
match err {
e @ Error::OutOfHostMemory |
e @ Error::OutOfDeviceMemory => DeviceMemoryAllocError::OomError(e.into()),
Error::TooManyObjects => DeviceMemoryAllocError::TooManyObjects,
Error::MemoryMapFailed => DeviceMemoryAllocError::MemoryMapFailed,
_ => panic!("unexpected error: {:?}", err),
}
}
}
impl From<OomError> for DeviceMemoryAllocError {
    /// Wraps an out-of-memory error into the allocation error type.
    #[inline]
    fn from(err: OomError) -> DeviceMemoryAllocError {
        DeviceMemoryAllocError::OomError(err)
    }
}
#[cfg(test)]
mod tests {
    use OomError;
    use memory::DeviceMemory;
    use memory::DeviceMemoryAllocError;

    #[test]
    fn create() {
        // Smoke test: a small allocation from the first memory type must succeed.
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        let _ = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
    }

    #[test]
    fn zero_size() {
        // Allocating zero bytes must panic (the `assert!(size >= 1)` in the allocator).
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_should_panic!({
            let _ = DeviceMemory::alloc(device.clone(), mem_ty, 0);
        });
    }

    #[test]
    #[cfg(target_pointer_width = "64")]
    fn oom_single() {
        // A single absurdly large allocation must report an out-of-device-memory error.
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device
            .physical_device()
            .memory_types()
            .filter(|m| !m.is_lazily_allocated())
            .next()
            .unwrap();
        match DeviceMemory::alloc(device.clone(), mem_ty, 0xffffffffffffffff) {
            Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)) => (),
            _ => panic!(),
        }
    }

    #[test]
    #[ignore]
    fn oom_multi() {
        // Repeatedly allocating a third of the heap must eventually report OOM.
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device
            .physical_device()
            .memory_types()
            .filter(|m| !m.is_lazily_allocated())
            .next()
            .unwrap();
        let heap_size = mem_ty.heap().size();

        // Successful allocations are kept alive here so the heap actually fills up.
        let mut allocs = Vec::new();

        for _ in 0 .. 4 {
            match DeviceMemory::alloc(device.clone(), mem_ty, heap_size / 3) {
                // Test passed: the heap was exhausted as expected.
                Err(DeviceMemoryAllocError::OomError(OomError::OutOfDeviceMemory)) => return,
                Ok(a) => allocs.push(a),
                // Other errors (e.g. host OOM) neither pass nor fail the attempt.
                _ => (),
            }
        }

        panic!("no allocation ran out of device memory even though the heap should be full")
    }

    #[test]
    fn allocation_count() {
        let (device, _) = gfx_dev_and_queue!();
        let mem_ty = device.physical_device().memory_types().next().unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 0);
        // Underscore-prefixed binding (not `let _ = …`, which would drop immediately):
        // keeps the allocation alive until the end of the test without an
        // unused-variable warning.
        let _mem1 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);
        {
            let _mem2 = DeviceMemory::alloc(device.clone(), mem_ty, 256).unwrap();
            assert_eq!(*device.allocation_count().lock().unwrap(), 2);
        }
        // `_mem2` was dropped at the end of the inner scope, freeing its allocation.
        assert_eq!(*device.allocation_count().lock().unwrap(), 1);
    }
}