MemoryAllocator.h#
↰ Parent directory (Vulkan)
Includes#
PVRCore/Log.h
PVRVk/ApiObjectsVk.h
PVRVk/pvrvk_vulkan_wrapper.h
algorithm
atomic
cassert
cstdint
cstdlib
cstring
list
mutex
unordered_map
vector
Included By#
Namespaces#
Classes#
Enums#
Functions#
Defines#
Typedefs#
Source Code#
#pragma once
#include "PVRVk/ApiObjectsVk.h"
#include "PVRCore/Log.h"
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#include "PVRVk/pvrvk_vulkan_wrapper.h"
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <vector>
#include <unordered_map>
#include <list>
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex> // for std::mutex
#include <atomic> // for std::atomic
#include <cstdlib>
#if (_ANDROID)
// memalign is declared in <malloc.h> on Android (bionic); include it explicitly so this
// shim does not rely on <cstdlib> pulling it in transitively.
#include <malloc.h>
/// <summary>Android shim providing aligned_alloc in terms of memalign.</summary>
/// <param name="size">Number of bytes to allocate.</param>
/// <param name="alignment">Required alignment of the returned pointer, in bytes.</param>
/// <returns>Pointer to the allocated block, or null on failure (memalign's result is
/// returned unchanged).</returns>
/// NOTE(review): the parameter order here is (size, alignment) — the REVERSE of
/// C11/C++17 aligned_alloc(alignment, size). Callers in this SDK rely on this order,
/// so it is deliberately preserved; confirm before "normalizing" it.
inline void* aligned_alloc(size_t size, size_t alignment)
{
	void* ret = memalign(alignment, size);
	return ret;
}
#endif
namespace pvr {
namespace utils {
namespace vma {
namespace impl {
class Pool_;
class Allocation_;
class Allocator_;
class DeviceMemoryWrapper_;
class DeviceMemoryCallbackDispatcher_;
#ifdef DEBUG
// Enable the following defines for improved validation of memory usage in debug builds
// VMA_DEBUG_INITIALIZE_ALLOCATIONS:
// Makes memory of all new allocations initialized to bit pattern `0xDCDCDCDC`.
// Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
// If you find these values while debugging your program, good chances are that you incorrectly
// read Vulkan memory that is allocated but not initialized, or already freed, respectively.
#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
// VMA_DEBUG_MARGIN:
// Enforces a specified number of bytes as a margin before and after every allocation.
// If your bug goes away after enabling margins, it means it may be caused by memory
being overwritten outside of allocation boundaries. It is not 100% certain though. Change in application behavior may also be caused by different order and
// distribution of allocations across memory blocks after margins are applied.
#define VMA_DEBUG_MARGIN 4
// VMA_DEBUG_DETECT_CORRUPTION:
// If this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`
// (it must be a multiple of 4) before and after every allocation is filled with a magic number. This idea is also known as a "canary".
// Memory is automatically mapped and unmapped if necessary. This number is validated automatically when the allocation is destroyed.
// If it's not equal to the expected value, `VMA_ASSERT()` is executed. It clearly means that either CPU or GPU overwritten the
// memory outside of boundaries of the allocation, which indicates a serious bug.
#define VMA_DEBUG_DETECT_CORRUPTION 1
#endif
// The v2.2.0 release of VulkanMemoryAllocator uses an incorrect ifdef guard - this issue has already been fixed on master.
// See here for more details: https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/issues/52
#define VMA_USE_STL_SHARED_MUTEX 0
#include "vk_mem_alloc.h"
} // namespace impl
typedef std::shared_ptr<impl::Pool_> Pool;
typedef std::shared_ptr<impl::Allocation_> Allocation;
typedef std::shared_ptr<impl::Allocator_> Allocator;
typedef std::weak_ptr<impl::Allocator_> AllocatorWeakPtr;
typedef std::shared_ptr<impl::DeviceMemoryWrapper_> DeviceMemoryWrapper;
typedef void(VKAPI_PTR* PFN_AllocateDeviceMemoryFunction)(Allocator allocator, uint32_t memoryType, pvrvk::DeviceMemory memory, VkDeviceSize size);
typedef void(VKAPI_PTR* PFN_FreeDeviceMemoryFunction)(Allocator allocator, uint32_t memoryType, pvrvk::DeviceMemory memory, VkDeviceSize size);
/// <summary>Optional pair of user callbacks invoked by the allocator whenever a
/// VkDeviceMemory block is allocated or freed. Either pointer may be left null to
/// disable that notification.</summary>
struct DeviceMemoryCallbacks
{
	PFN_AllocateDeviceMemoryFunction pfnAllocate = nullptr; // fired after a device-memory allocation
	PFN_FreeDeviceMemoryFunction pfnFree = nullptr; // fired when a device-memory block is freed
	DeviceMemoryCallbacks() {}
};
/// <summary>Type-safe wrapper over VMA's VmaAllocationCreateFlagBits; each enumerator
/// forwards the corresponding VMA flag value unchanged.</summary>
enum class AllocationCreateFlags
{
e_NONE = 0, ///< No allocation-creation flags
e_DEDICATED_MEMORY_BIT = impl::VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, ///< Mirrors VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
e_NEVER_ALLOCATE_BIT = impl::VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT, ///< Mirrors VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
e_MAPPED_BIT = impl::VMA_ALLOCATION_CREATE_MAPPED_BIT, ///< Mirrors VMA_ALLOCATION_CREATE_MAPPED_BIT
e_CAN_BECOME_LOST_BIT = impl::VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, ///< Mirrors VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
e_CAN_MAKE_OTHER_LOST_BIT = impl::VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT, ///< Mirrors VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
e_USER_DATA_COPY_STRING_BIT = impl::VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, ///< Mirrors VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
e_FLAG_BITS_MAX_ENUM = impl::VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM ///< Sentinel / max-enum value
};
DEFINE_ENUM_BITWISE_OPERATORS(AllocationCreateFlags)
/// <summary>Type-safe wrapper over VMA's VmaAllocatorCreateFlagBits; values forward the
/// corresponding VMA flags unchanged.</summary>
enum class AllocatorCreateFlags
{
e_NONE = 0x00000000, ///< No allocator-creation flags
e_EXTERNALLY_SYNCHRONIZED_BIT = impl::VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT, ///< Mirrors VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
e_KHR_DEDICATED_ALLOCATION_BIT = impl::VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT, ///< Mirrors VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
e_FLAG_BITS_MAX_ENUM = impl::VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM ///< Sentinel / max-enum value
};
DEFINE_ENUM_BITWISE_OPERATORS(AllocatorCreateFlags)
/// <summary>Type-safe wrapper over VMA's VmaMemoryUsage; each enumerator forwards the
/// corresponding VMA value unchanged.</summary>
enum class MemoryUsage
{
e_UNKNOWN = impl::VMA_MEMORY_USAGE_UNKNOWN, ///< Mirrors VMA_MEMORY_USAGE_UNKNOWN
e_GPU_ONLY = impl::VMA_MEMORY_USAGE_GPU_ONLY, ///< Mirrors VMA_MEMORY_USAGE_GPU_ONLY
e_CPU_ONLY = impl::VMA_MEMORY_USAGE_CPU_ONLY, ///< Mirrors VMA_MEMORY_USAGE_CPU_ONLY
e_CPU_TO_GPU = impl::VMA_MEMORY_USAGE_CPU_TO_GPU, ///< Mirrors VMA_MEMORY_USAGE_CPU_TO_GPU
e_GPU_TO_CPU = impl::VMA_MEMORY_USAGE_GPU_TO_CPU, ///< Mirrors VMA_MEMORY_USAGE_GPU_TO_CPU
e_MAX_ENUM = impl::VMA_MEMORY_USAGE_MAX_ENUM ///< Sentinel / max-enum value
};
/// <summary>Type-safe wrapper over VMA's VmaPoolCreateFlagBits.</summary>
enum class PoolCreateFlags
{
e_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = impl::VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT, ///< Mirrors VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
e_FLAG_BITS_MAX_ENUM = impl::VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM ///< Sentinel / max-enum value
};
DEFINE_ENUM_BITWISE_OPERATORS(PoolCreateFlags)
/// <summary>Selects which allocator events are logged at Debug level (see the
/// _reportFlags checks in Allocator_).</summary>
enum class DebugReportFlags
{
None = 0, ///< No debug reporting
DeviceMemory = 0x1, ///< Log VkDeviceMemory allocations/frees
Allocation = 0x2, ///< Log individual (sub-)allocations
Defragments = 0x4, ///< Log defragmentation activity
All = DeviceMemory | Allocation | Defragments, ///< All of the above
};
DEFINE_ENUM_BITWISE_OPERATORS(DebugReportFlags)
/// <summary>Parameters for creating a single VMA allocation. Defaults describe an
/// unconstrained allocation: no flags, unknown usage, no required/preferred memory
/// properties, any memory type, default pool, no user data.</summary>
struct AllocationCreateInfo
{
	AllocationCreateFlags flags = AllocationCreateFlags::e_NONE; ///< Allocation-creation flags
	MemoryUsage usage = MemoryUsage::e_UNKNOWN; ///< Intended CPU/GPU usage pattern
	pvrvk::MemoryPropertyFlags requiredFlags = pvrvk::MemoryPropertyFlags::e_NONE; ///< Memory properties the allocation must have
	pvrvk::MemoryPropertyFlags preferredFlags = pvrvk::MemoryPropertyFlags::e_NONE; ///< Memory properties that are preferred but not required
	uint32_t memoryTypeBits = 0; ///< Bitmask restricting acceptable memory type indices (0 = no restriction)
	Pool pool; ///< Custom pool to allocate from; null shared_ptr selects the default pools
	void* pUserData = nullptr; ///< Opaque user pointer attached to the allocation
	AllocationCreateInfo() {}
};
/// <summary>Parameters for creating a vma::Allocator. The convenience constructor
/// automatically enables the KHR dedicated-allocation path when both required device
/// extensions are enabled.</summary>
struct AllocatorCreateInfo
{
	AllocatorCreateFlags flags; ///< Allocator-creation flags
	pvrvk::Device device; ///< Device the allocator serves
	pvrvk::DeviceSize preferredLargeHeapBlockSize; ///< Preferred block size for large heaps (0 = VMA default)
	const pvrvk::AllocationCallbacks* pAllocationCallbacks; ///< Optional host allocation callbacks
	const DeviceMemoryCallbacks* pDeviceMemoryCallbacks; ///< Optional device-memory allocate/free notifications
	uint32_t frameInUseCount; ///< Frames a lost-capable allocation stays "in use"
	const pvrvk::DeviceSize* pHeapSizeLimit; ///< Optional per-heap size limits
	DebugReportFlags reportFlags; ///< Which allocator events to log
	AllocatorCreateInfo()
		: flags(AllocatorCreateFlags(0)), preferredLargeHeapBlockSize(0), pAllocationCallbacks(nullptr), pDeviceMemoryCallbacks(nullptr), frameInUseCount(0),
		  pHeapSizeLimit(nullptr), reportFlags(DebugReportFlags(0))
	{}
	AllocatorCreateInfo(pvrvk::Device& device, pvrvk::DeviceSize preferredLargeHeapBlockSize = 0, AllocatorCreateFlags flags = AllocatorCreateFlags::e_NONE,
		DebugReportFlags debugReportFlags = DebugReportFlags::None, uint32_t frameInUseCount = 0, const pvrvk::DeviceSize* pHeapSizeLimit = nullptr,
		const pvrvk::AllocationCallbacks* pAllocationCallbacks = nullptr, const DeviceMemoryCallbacks* pDeviceMemoryCallbacks = nullptr)
		: flags(flags), device(device), preferredLargeHeapBlockSize(preferredLargeHeapBlockSize), pAllocationCallbacks(pAllocationCallbacks),
		  pDeviceMemoryCallbacks(pDeviceMemoryCallbacks), frameInUseCount(frameInUseCount), pHeapSizeLimit(pHeapSizeLimit), reportFlags(debugReportFlags)
	{
		if (device->getEnabledExtensionTable().khrDedicatedAllocationEnabled && device->getEnabledExtensionTable().khrGetMemoryRequirements2Enabled)
		{
			// Bug fix: the constructor parameter 'flags' shadows the member inside this
			// body, so the previous unqualified "flags |= ..." OR-ed the bit into the
			// discarded parameter and the member was never updated. Qualify with this->.
			this->flags |= AllocatorCreateFlags::e_KHR_DEDICATED_ALLOCATION_BIT;
		}
	}
};
struct DefragmentationInfo : private impl::VmaDefragmentationInfo
{
pvrvk::DeviceSize getMaxBytesToMove() const { return maxBytesToMove; }
uint32_t getMaxAllocationsToMove() const { return maxAllocationsToMove; }
DefragmentationInfo& setMaxBytesToMove(pvrvk::DeviceSize bytesToMove)
{
maxBytesToMove = bytesToMove;
return *this;
}
DefragmentationInfo& setMaxAllocationsToMove(uint32_t allocationToMove)
{
maxAllocationsToMove = allocationToMove;
return *this;
}
};
/// <summary>Read-only facade over VmaStatInfo (private inheritance hides the raw fields;
/// no data members are added, so the layout matches VmaStatInfo exactly — Stats relies
/// on this when memcpy-ing).</summary>
struct StatInfo : private impl::VmaStatInfo
{
public:
uint32_t getBlockCount() const { return blockCount; } ///< Number of VkDeviceMemory blocks
uint32_t getAllocationCount() const { return allocationCount; } ///< Number of live allocations
uint32_t getUnusedRangeCount() const { return unusedRangeCount; } ///< Number of free ranges between allocations
VkDeviceSize getUsedBytes() const { return usedBytes; } ///< Total bytes occupied by allocations
VkDeviceSize getUnusedBytes() const { return unusedBytes; } ///< Total free bytes
VkDeviceSize getAllocationSizeMin() const { return allocationSizeMin; } ///< Smallest allocation size
VkDeviceSize getAllocationSizeAvg() const { return allocationSizeAvg; } ///< Average allocation size
VkDeviceSize getAllocationSizeMax() const { return allocationSizeMax; } ///< Largest allocation size
VkDeviceSize getUnusedRangeSizeMin() const { return unusedRangeSizeMin; } ///< Smallest free range
VkDeviceSize getUnusedRangeSizeAvg() const { return unusedRangeSizeAvg; } ///< Average free range
VkDeviceSize getUnusedRangeSizeMax() const { return unusedRangeSizeMax; } ///< Largest free range
};
/// <summary>Aggregate memory statistics per memory type, per heap, and in total.
/// Constructible only by Allocator_ (see calculateStats()).</summary>
struct Stats
{
friend class impl::Allocator_;
StatInfo memoryType[VK_MAX_MEMORY_TYPES]; ///< Per-memory-type statistics
StatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]; ///< Per-heap statistics
StatInfo total; ///< Totals across all types/heaps
private:
// Copies the raw VmaStats arrays byte-wise. This assumes StatInfo is layout-identical
// to VmaStatInfo (it privately inherits it and adds no members) — confirm if StatInfo
// ever gains data members or virtuals.
Stats(const impl::VmaStats& vmaStats)
{
memcpy(memoryType, vmaStats.memoryType, sizeof(vmaStats.memoryType));
memcpy(memoryHeap, vmaStats.memoryHeap, sizeof(vmaStats.memoryHeap));
memcpy(&total, &vmaStats.total, sizeof(vmaStats.total));
}
};
/// <summary>Results of a defragmentation pass (thin accessor layer over
/// VmaDefragmentationStats; getters defined inline further below).</summary>
struct DefragmentationStats : impl::VmaDefragmentationStats
{
pvrvk::DeviceSize getBytesMoved() const; ///< Bytes moved during the pass
pvrvk::DeviceSize getBytesFreed() const; ///< Bytes released back to the driver
uint32_t getAllocationsMoved() const; ///< Number of allocations relocated
uint32_t getDeviceMemoryBlocksFreed() const; ///< Number of VkDeviceMemory blocks freed
};
/// <summary>Statistics for a single custom memory pool (read-only facade over
/// VmaPoolStats; getters defined inline further below).</summary>
struct PoolStats : private impl::VmaPoolStats
{
public:
pvrvk::DeviceSize getUnusedSize() const; ///< Free bytes in the pool
size_t getAllocationCount() const; ///< Live allocations in the pool
pvrvk::DeviceSize getUnusedRangeSizeMax() const; ///< Largest contiguous free range
size_t getUnusedRangeCount() const; ///< Number of free ranges
pvrvk::DeviceSize getSize() const; ///< Total pool size in bytes
};
/// <summary>Parameters for creating a custom VMA memory pool. Default state selects an
/// invalid memory type (uint32_t(-1)) and VMA defaults for everything else.</summary>
struct PoolCreateInfo
{
	uint32_t memoryTypeIndex = static_cast<uint32_t>(-1); ///< Memory type the pool allocates from; -1 = unset
	PoolCreateFlags flags = PoolCreateFlags(0); ///< Pool-creation flags
	pvrvk::DeviceSize blockSize = 0; ///< Fixed block size (0 = VMA default sizing)
	size_t minBlockCount = 0; ///< Minimum number of blocks kept allocated
	size_t maxBlockCount = 0; ///< Maximum number of blocks (0 = unlimited)
	uint32_t frameInUseCount = 0; ///< Frames a lost-capable allocation stays "in use"
	PoolCreateInfo() {}
	/// Convenience constructor setting every field explicitly.
	PoolCreateInfo(uint32_t memoryTypeIndex, PoolCreateFlags flags, pvrvk::DeviceSize blockSize = 0, size_t minBlockCount = 0, size_t maxBlockCount = 0, uint32_t frameInUseCount = 0)
		: memoryTypeIndex(memoryTypeIndex), flags(flags), blockSize(blockSize), minBlockCount(minBlockCount), maxBlockCount(maxBlockCount), frameInUseCount(frameInUseCount)
	{}
};
namespace impl {
/// <summary>Wraps a VmaPool (custom memory pool). Instances are created only through
/// Allocator_::createPool via the pass-key constructShared factory.</summary>
class Pool_
{
private:
friend class pvr::utils::vma::impl::Allocator_;
// Pass-key type: only code that can name it (friends) can invoke the public ctor
// through std::make_shared.
class make_shared_enabler
{
public:
make_shared_enabler() {}
friend class Pool_;
};
// Factory used by Allocator_::createPool. NOTE(review): only the create info is
// passed — _allocator is presumably wired up inside the Pool_ constructor (defined
// elsewhere); confirm.
static Pool constructShared(const PoolCreateInfo& poolCreateInfo) { return std::make_shared<Pool_>(make_shared_enabler{}, poolCreateInfo); }
Allocator _allocator; // keeps the owning allocator alive while the pool exists
VmaPool _vmaPool; // underlying VMA pool handle
public:
DECLARE_NO_COPY_SEMANTICS(Pool_)
Pool_(make_shared_enabler, const PoolCreateInfo& createInfo);
~Pool_();
/// <summary>Returns statistics for this pool (defined out of line).</summary>
PoolStats getStats() const;
/// <summary>Marks lost-capable allocations as lost; returns how many were lost
/// (defined out of line).</summary>
size_t makeAllocationsLost();
};
/// <summary>Non-owning pvrvk::DeviceMemory_ view over a VkDeviceMemory block that VMA
/// owns. The destructor nulls the handle so the base class never frees memory that
/// belongs to VMA, and map/unmap are disabled in favor of Allocation's map/unmap.</summary>
class DeviceMemoryWrapper_ : public pvrvk::impl::DeviceMemory_
{
private:
friend class Allocator_;
// Pass-key type chained to the base class's enabler so only friends can construct.
class make_shared_enabler : public DeviceMemory_::make_shared_enabler
{
protected:
make_shared_enabler() : DeviceMemory_::make_shared_enabler() {}
friend class DeviceMemoryWrapper_;
};
// Factory used by Allocator_ when VMA reports a new VkDeviceMemory block.
static DeviceMemoryWrapper constructShared(const pvrvk::DeviceWeakPtr& device, const pvrvk::MemoryAllocationInfo& allocationInfo, pvrvk::MemoryPropertyFlags memPropFlags,
VkDeviceMemory vkMemoryHandle = VK_NULL_HANDLE)
{
return std::make_shared<DeviceMemoryWrapper_>(make_shared_enabler{}, device, allocationInfo, memPropFlags, vkMemoryHandle);
}
public:
DeviceMemoryWrapper_(make_shared_enabler, const pvrvk::DeviceWeakPtr& device, const pvrvk::MemoryAllocationInfo& allocationInfo, pvrvk::MemoryPropertyFlags memPropFlags,
VkDeviceMemory vkMemoryHandle)
: pvrvk::impl::DeviceMemory_(make_shared_enabler{}, device, allocationInfo, memPropFlags, vkMemoryHandle)
{}
virtual ~DeviceMemoryWrapper_()
{
_vkHandle = VK_NULL_HANDLE; // avoid pvrvk::impl::DeviceMemory_ from calling vkFreeMemory
}
/// Always throws: VMA-managed memory must be mapped through Allocation_::map.
virtual void* map(VkDeviceSize /*offset*/, VkDeviceSize /*size*/, pvrvk::MemoryMapFlags /*memoryMapFlags*/)
{
throw std::runtime_error("VMA DeviceMemory cannot be mapped, Use Allocation map");
}
/// Always throws: VMA-managed memory must be unmapped through Allocation_::unmap.
virtual void unmap() { throw std::runtime_error("VMA DeviceMemory cannot be unmapped, Use Allocation unmap"); }
};
/// <summary>A single VMA allocation (a sub-range of some VkDeviceMemory block), exposed
/// through the pvrvk IDeviceMemory_ interface. Created only by Allocator_ via the
/// pass-key constructShared factory.</summary>
class Allocation_ : public pvrvk::impl::IDeviceMemory_
{
private:
friend class pvr::utils::vma::impl::Allocator_;
// Pass-key type: restricts construction to friends of Allocation_.
class make_shared_enabler
{
private:
make_shared_enabler() {}
friend class Allocation_;
};
// Factory used by Allocator_::createMemoryAllocation.
static Allocation constructShared(Allocator& memAllocator, const AllocationCreateInfo& allocCreateInfo, VmaAllocation vmaAllocation, const VmaAllocationInfo& allocInfo)
{
return std::make_shared<Allocation_>(make_shared_enabler{}, memAllocator, allocCreateInfo, vmaAllocation, allocInfo);
}
// Translates an allocation-relative (offset, size) pair to device-memory coordinates
// (defined inline below).
void recalculateOffsetAndSize(VkDeviceSize& offset, VkDeviceSize& size) const;
// Refreshes the cached _allocInfo from VMA (defined elsewhere).
void updateAllocationInfo() const;
Pool _pool; // pool the allocation came from (may be null for default pools)
Allocator _memAllocator; // keeps the owning allocator alive
VmaAllocation _vmaAllocation; // underlying VMA allocation handle
mutable VmaAllocationInfo _allocInfo; // cached allocation info; refreshed by updateAllocationInfo()
AllocationCreateFlags _createFlags; // flags this allocation was created with
pvrvk::MemoryPropertyFlags _flags; // memory properties of the backing memory type
pvrvk::DeviceSize _mappedSize; // size of the currently mapped range (0 = not mapped)
pvrvk::DeviceSize _mappedOffset; // offset of the currently mapped range
public:
~Allocation_();
Allocation_(make_shared_enabler, Allocator& memAllocator, const AllocationCreateInfo& allocCreateInfo, VmaAllocation vmaAllocation, const VmaAllocationInfo& allocInfo);
bool isMappable() const;
pvrvk::MemoryPropertyFlags getMemoryFlags() const;
uint32_t getMemoryType() const;
void* map(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE, pvrvk::MemoryMapFlags memoryMapFlags = pvrvk::MemoryMapFlags::e_NONE);
void unmap();
bool isMapped() const;
pvrvk::DeviceSize getOffset() const;
void flushRange(pvrvk::DeviceSize offset = 0, pvrvk::DeviceSize size = VK_WHOLE_SIZE);
void* getMappedData();
void* getUserData();
void setUserData(void* userData);
bool isAllocationLost() const;
AllocationCreateFlags getCreateFlags() const;
bool canBecomeLost() const;
VkDeviceSize getMappedOffset() const;
VkDeviceSize getMappedSize() const;
VkDeviceSize getSize() const;
Pool getMemoryPool();
void invalidateRange(VkDeviceSize offset = 0, VkDeviceSize size = VK_WHOLE_SIZE);
};
/// <summary>Owns a VmaAllocator and serves all allocation/pool/defragmentation requests.
/// Created through createAllocator(); enable_shared_from_this lets internal callbacks
/// hand out owning references.</summary>
class Allocator_ : public std::enable_shared_from_this<Allocator_>
{
private:
friend class AllocatorCreateFactory;
friend class Pool_;
friend class Allocation_;
friend class DeviceMemoryCallbackDispatcher_;
// Pass-key type: restricts construction to friends of Allocator_.
class make_shared_enabler
{
public:
make_shared_enabler() {}
friend class Allocator_;
};
static Allocator constructShared(const AllocatorCreateInfo& createInfo) { return std::make_shared<Allocator_>(make_shared_enabler{}, createInfo); }
pvrvk::DeviceWeakPtr _device; // device this allocator serves (weak to avoid a cycle)
VmaAllocator _vmaAllocator; // underlying VMA allocator handle
mutable std::vector<pvrvk::DeviceMemory> _deviceMemory; // wrappers for every live VkDeviceMemory block VMA allocated
DebugReportFlags _reportFlags; // which events get logged at Debug level
// Wraps a raw VmaAllocation into an Allocation object (defined inline below).
Allocation createMemoryAllocation(const AllocationCreateInfo& allocCreateInfo, const VmaAllocationInfo& allocInfo, VmaAllocation vmaAllocation);
// Internal VMA notification hooks (defined inline below).
void onAllocateDeviceMemoryFunction(uint32_t memoryType, VkDeviceMemory memory, pvrvk::DeviceSize size);
void onFreeDeviceMemoryFunction(uint32_t memoryType, VkDeviceMemory memory, pvrvk::DeviceSize size);
DeviceMemoryCallbacks _deviceMemCallbacks; // user callbacks forwarded from the hooks above
public:
DECLARE_NO_COPY_SEMANTICS(Allocator_)
~Allocator_();
Allocator_(make_shared_enabler, const AllocatorCreateInfo& createInfo);
void addCallbackDispatcherContext();
Allocation allocateMemoryForImage(pvrvk::Image& image, const AllocationCreateInfo& allocCreateInfo);
Allocation allocateMemoryForBuffer(pvrvk::Buffer& buffer, const AllocationCreateInfo& createInfo);
Allocation allocateMemory(const pvrvk::MemoryRequirements* vkMemoryRequirements, const AllocationCreateInfo& createInfo);
Pool createPool(const PoolCreateInfo& poolCreateInfo);
void findMemoryTypeIndex(uint32_t memoryTypeBits, const AllocationCreateInfo& allocationCreateInfo, uint32_t& outMemoryTypeIndex);
pvrvk::Buffer createBuffer(const pvrvk::BufferCreateInfo& createInfo, const AllocationCreateInfo& allocationCreateInfo);
pvrvk::Image createImage(const pvrvk::ImageCreateInfo& createInfo, const AllocationCreateInfo& allocationCreateInfo);
void defragment(
Allocation* memAllocations, uint32_t numAllocations, const VmaDefragmentationInfo* defragInfo, pvrvk::Bool32* outAllocationsChanged, DefragmentationStats* outDefragStatus);
pvrvk::DeviceWeakPtr getDevice();
/// <summary>Returns VMA's textual statistics dump. The VMA-owned buffer is copied
/// into a std::string and then released with vmaFreeStatsString (no leak).</summary>
std::string buildStatsString(bool detailedMap)
{
char* _statsString;
vmaBuildStatsString(_vmaAllocator, &_statsString, detailedMap);
const std::string statStr(_statsString);
vmaFreeStatsString(_vmaAllocator, _statsString);
return statStr;
}
/// <summary>Computes aggregate statistics across all memory types and heaps.</summary>
Stats calculateStats() const
{
impl::VmaStats vmaStats;
vmaCalculateStats(_vmaAllocator, &vmaStats);
return Stats(vmaStats);
}
};
// Creates a custom VMA memory pool from the given create info. NOTE(review): only the
// create info is forwarded — the pool's _allocator member is presumably assigned inside
// the Pool_ constructor (defined elsewhere); confirm.
inline Pool Allocator_::createPool(const PoolCreateInfo& poolCreateInfo) { return Pool_::constructShared(poolCreateInfo); }
// Returns a weak handle to the device this allocator was created for.
inline pvrvk::DeviceWeakPtr Allocator_::getDevice() { return _device; }
/// <summary>Wraps a raw VmaAllocation in an Allocation object that shares ownership of
/// this allocator, optionally logging the event when Allocation reporting is on.</summary>
/// @param allocCreateInfo The creation parameters used for the allocation
/// @param allocInfo VMA's description of the resulting allocation
/// @param vmaAllocation The raw VMA allocation handle
/// @return The wrapped Allocation
inline Allocation Allocator_::createMemoryAllocation(const AllocationCreateInfo& allocCreateInfo, const VmaAllocationInfo& allocInfo, VmaAllocation vmaAllocation)
{
	Allocator allocator = shared_from_this();
	Allocation allocation = Allocation_::constructShared(allocator, allocCreateInfo, vmaAllocation, allocInfo);
	if (uint32_t(_reportFlags & DebugReportFlags::Allocation) != 0)
	{
		// Fix: offset/size are VkDeviceSize (always 64-bit). The old "%lu" read the
		// wrong vararg width on 32-bit and LLP64 (Windows) targets; use %llu with an
		// explicit cast so the format and argument always agree.
		Log(LogLevel::Debug, "VMA: New Allocation 0x%llx: DeviceMemory 0x%llx, MemoryType %d, Offset %llu bytes, Size %llu bytes", allocation->_vmaAllocation,
			allocInfo.deviceMemory, allocInfo.memoryType, static_cast<unsigned long long>(allocInfo.offset), static_cast<unsigned long long>(allocInfo.size));
	}
	return allocation;
}
/// <summary>Internal hook fired when VMA allocates a new VkDeviceMemory block: wraps it
/// in a non-owning DeviceMemoryWrapper_, records it in _deviceMemory, optionally logs
/// it, and forwards to the user's allocate callback.</summary>
/// @param memoryType Index of the memory type the block was allocated from
/// @param memory The new VkDeviceMemory handle (owned by VMA)
/// @param size Block size in bytes
inline void Allocator_::onAllocateDeviceMemoryFunction(uint32_t memoryType, VkDeviceMemory memory, pvrvk::DeviceSize size)
{
	VkMemoryPropertyFlags memProp;
	vmaGetMemoryTypeProperties(_vmaAllocator, memoryType, &memProp);
	DeviceMemoryWrapper deviceMemory =
		DeviceMemoryWrapper_::constructShared(getDevice(), pvrvk::MemoryAllocationInfo(size, memoryType), static_cast<pvrvk::MemoryPropertyFlags>(memProp), memory);
	_deviceMemory.emplace_back(deviceMemory);
	if (uint32_t(_reportFlags & DebugReportFlags::DeviceMemory) != 0)
	{
		// Fix: size is VkDeviceSize (always 64-bit); "%lu" read the wrong vararg width
		// on 32-bit and LLP64 targets — use %llu with an explicit cast.
		Log(LogLevel::Debug, "VMA: New DeviceMemory 0x%llx, MemoryType %d, Size %llu bytes", memory, memoryType, static_cast<unsigned long long>(size));
	}
	if (_deviceMemCallbacks.pfnAllocate) { _deviceMemCallbacks.pfnAllocate(shared_from_this(), memoryType, _deviceMemory.back(), size); }
}
/// <summary>Internal hook fired when VMA frees a VkDeviceMemory block: optionally logs
/// the event, forwards to the user's free callback, and removes the block's wrapper
/// from _deviceMemory.</summary>
/// @param memoryType Index of the memory type the block belonged to
/// @param memory The VkDeviceMemory handle being freed by VMA
/// @param size Block size in bytes
inline void Allocator_::onFreeDeviceMemoryFunction(uint32_t memoryType, VkDeviceMemory memory, pvrvk::DeviceSize size)
{
	if (uint32_t(_reportFlags & DebugReportFlags::DeviceMemory) != 0)
	{
		// Fix: size is VkDeviceSize (always 64-bit); use %llu with a cast instead of %lu.
		Log(LogLevel::Debug, "VMA: Freed DeviceMemory 0x%llx: MemoryType %d, Size %llu bytes", memory, memoryType, static_cast<unsigned long long>(size));
	}
	// Locate the wrapper tracked for this VkDeviceMemory regardless of whether a user
	// callback is installed, so the stale entry can be removed either way.
	auto it = std::find_if(_deviceMemory.begin(), _deviceMemory.end(), [&](const pvrvk::DeviceMemory& deviceMemory) { return deviceMemory->getVkHandle() == memory; });
	if (it != _deviceMemory.end())
	{
		// Bug fix: the free path previously invoked pfnAllocate; notify pfnFree instead.
		if (_deviceMemCallbacks.pfnFree) { _deviceMemCallbacks.pfnFree(shared_from_this(), memoryType, *it, size); }
		// Bug fix: erase the freed block's wrapper so _deviceMemory does not grow
		// without bound (the wrapper's destructor nulls its handle, so no double free).
		_deviceMemory.erase(it);
	}
}
// Index of the Vulkan memory type backing this allocation.
inline uint32_t Allocation_::getMemoryType() const { return _allocInfo.memoryType; }
// The allocation counts as mapped while a non-zero range is mapped (see map/unmap).
inline bool Allocation_::isMapped() const { return _mappedSize > 0; }
// Mappable iff the backing memory type has the HOST_VISIBLE property.
inline bool Allocation_::isMappable() const { return static_cast<uint32_t>(getMemoryFlags() & pvrvk::MemoryPropertyFlags::e_HOST_VISIBLE_BIT) != 0; }
// Memory property flags of the backing memory type.
inline pvrvk::MemoryPropertyFlags Allocation_::getMemoryFlags() const { return _flags; }
// Offset of this allocation within its VkDeviceMemory block.
inline pvrvk::DeviceSize Allocation_::getOffset() const { return _allocInfo.offset; }
/// <summary>Flushes a host-written range of this allocation so it becomes visible to the
/// device. The (offset, size) pair is allocation-relative and is translated to
/// device-memory coordinates; a warning is logged (but the flush still performed) if the
/// backing memory is HOST_COHERENT, where flushing is unnecessary.</summary>
/// @param offset Allocation-relative start of the range
/// @param size Size of the range; VK_WHOLE_SIZE means "to the end of the allocation"
inline void Allocation_::flushRange(pvrvk::DeviceSize offset, pvrvk::DeviceSize size)
{
	recalculateOffsetAndSize(offset, size);
	if (static_cast<uint32_t>(_flags & pvrvk::MemoryPropertyFlags::e_HOST_COHERENT_BIT) != 0)
	{
		Log(LogLevel::Warning,
			"Flushing allocation 0x%llx from memory block 0x%llx"
			" created using HOST_COHERENT_BIT memory flags - this is unnecessary.",
			_vmaAllocation, getVkHandle());
	}
	VkMappedMemoryRange mappedRange = {};
	mappedRange.sType = static_cast<VkStructureType>(pvrvk::StructureType::e_MAPPED_MEMORY_RANGE);
	mappedRange.memory = getVkHandle();
	mappedRange.offset = offset;
	mappedRange.size = size;
	// Lock the device once and reuse it for both the bindings lookup and the handle.
	auto device = _device.lock();
	pvrvk::impl::vkThrowIfFailed(device->getVkBindings().vkFlushMappedMemoryRanges(device->getVkHandle(), 1, &mappedRange), "Failed to flush range of memory block");
}
/// <summary>Invalidates a range of this allocation so device writes become visible to
/// the host. The (offset, size) pair is allocation-relative and is translated to
/// device-memory coordinates; a warning is logged (but the invalidate still performed)
/// if the backing memory is HOST_COHERENT, where it is unnecessary.</summary>
/// @param offset Allocation-relative start of the range
/// @param size Size of the range; VK_WHOLE_SIZE means "to the end of the allocation"
inline void Allocation_::invalidateRange(VkDeviceSize offset, VkDeviceSize size)
{
	recalculateOffsetAndSize(offset, size);
	if (static_cast<uint32_t>(_flags & pvrvk::MemoryPropertyFlags::e_HOST_COHERENT_BIT) != 0)
	{
		Log(LogLevel::Warning,
			"Invalidating range of an allocation 0x%llx from memory block 0x%llx"
			" created using HOST_COHERENT_BIT memory flags - this is unnecessary.",
			_vmaAllocation, getVkHandle());
	}
	VkMappedMemoryRange mappedRange = {};
	mappedRange.sType = static_cast<VkStructureType>(pvrvk::StructureType::e_MAPPED_MEMORY_RANGE);
	mappedRange.memory = getVkHandle();
	mappedRange.offset = offset;
	mappedRange.size = size;
	// Lock the device once and reuse it for both the bindings lookup and the handle.
	auto device = _device.lock();
	pvrvk::impl::vkThrowIfFailed(device->getVkBindings().vkInvalidateMappedMemoryRanges(device->getVkHandle(), 1, &mappedRange), "Failed to invalidate range of memory block");
}
// Converts an allocation-relative (offset, size) pair to device-memory coordinates:
// adds this allocation's own offset within its VkDeviceMemory block, and resolves
// VK_WHOLE_SIZE to "the rest of the allocation starting at 'offset'".
inline void Allocation_::recalculateOffsetAndSize(VkDeviceSize& offset, VkDeviceSize& size) const
{
offset += getOffset();
if (size == VK_WHOLE_SIZE) { size = getOffset() + getSize() - offset; }
// Debug-only check: the resolved range must not extend past this allocation.
assert(size <= _allocInfo.size);
}
// Returns the user-data pointer stored on the allocation, refreshing the cached
// VmaAllocationInfo first so the value is current.
inline void* Allocation_::getUserData()
{
updateAllocationInfo();
return _allocInfo.pUserData;
}
// Flags this allocation was created with.
inline AllocationCreateFlags Allocation_::getCreateFlags() const { return _createFlags; }
// True if the allocation was created with e_CAN_BECOME_LOST_BIT.
inline bool Allocation_::canBecomeLost() const { return static_cast<uint32_t>(_createFlags & AllocationCreateFlags::e_CAN_BECOME_LOST_BIT) != 0; }
// Allocation-relative offset of the currently mapped range (see isMapped()).
inline VkDeviceSize Allocation_::getMappedOffset() const { return _mappedOffset; }
// Size of the currently mapped range (see isMapped()).
inline VkDeviceSize Allocation_::getMappedSize() const { return _mappedSize; }
// Total size of this allocation in bytes.
inline VkDeviceSize Allocation_::getSize() const { return _allocInfo.size; }
// Pool this allocation was served from (may be a null shared_ptr for default pools).
inline Pool Allocation_::getMemoryPool() { return _pool; }
} // namespace impl
// PoolStats accessors: expose the privately-inherited VmaPoolStats fields.
inline pvrvk::DeviceSize PoolStats::getUnusedSize() const { return unusedSize; }
inline size_t PoolStats::getAllocationCount() const { return allocationCount; }
inline pvrvk::DeviceSize PoolStats::getUnusedRangeSizeMax() const { return unusedRangeSizeMax; }
inline size_t PoolStats::getUnusedRangeCount() const { return unusedRangeCount; }
inline pvrvk::DeviceSize PoolStats::getSize() const { return size; }
// DefragmentationStats accessors: expose the inherited VmaDefragmentationStats fields.
inline pvrvk::DeviceSize DefragmentationStats::getBytesMoved() const { return bytesMoved; }
inline pvrvk::DeviceSize DefragmentationStats::getBytesFreed() const { return bytesFreed; }
inline uint32_t DefragmentationStats::getAllocationsMoved() const { return allocationsMoved; }
inline uint32_t DefragmentationStats::getDeviceMemoryBlocksFreed() const { return deviceMemoryBlocksFreed; }
Allocator createAllocator(const AllocatorCreateInfo& createInfo);
} // namespace vma
} // namespace utils
} // namespace pvr