mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2025-01-15 14:50:51 +01:00
Don't try an allocation on a heap that is smaller than the size we require.
This commit is contained in:
parent
cbc0d1af79
commit
8400015337
@@ -364,6 +364,12 @@ vk::DeviceMemory *ggml_vk_allocate(size_t size, vk::MemoryPropertyFlags flags, v
     bool memoryTypeIndexFound = false;
     vk::PhysicalDeviceMemoryProperties memoryProperties = komputeManager()->physicalDevice()->getMemoryProperties();
     for (uint32_t i = 0; i < memoryProperties.memoryTypeCount; i++) {
+        const vk::MemoryType &memoryType = memoryProperties.memoryTypes[i];
+        const vk::MemoryHeap &memoryHeap = memoryProperties.memoryHeaps[memoryType.heapIndex];
+        if (memoryHeap.size < size) {
+            continue;
+        }
+
         if (requirements.memoryTypeBits & (1 << i)) {
             if (((memoryProperties.memoryTypes[i]).propertyFlags &
                  flags) == flags) {
Loading…
Reference in New Issue
Block a user