mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-05 18:44:51 +01:00)
allocator cleanup
commit e2b9575951 (parent: 7de7882537)
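Summary, as read from the hunks below: the `int node_id` graph-building marker on `struct ggml_tensor` is replaced by a `bool visited` flag, the debug-only `freed` field and the allocator's use-after-free bookkeeping are removed, and `name` is moved after `extra` at the end of the struct, with the explicit `padding` grown from 8 to 12 bytes.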
@@ -264,7 +264,6 @@ void ggml_allocator_default_free_tensor(struct ggml_backend_buffer * alloc, stru
 size_t size = ggml_backend_buffer_get_alloc_size(alloc, tensor);
 size = aligned_offset(NULL, size, allocator_ctx->alignment);
 AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, allocator_ctx->n_free_blocks);
-tensor->freed = true;

 #ifdef GGML_ALLOCATOR_DEBUG
 remove_allocated_tensor(allocator_ctx, tensor);
@@ -858,13 +857,12 @@ static void ggml_graph_allocate_tensors_n(
 struct ggml_tensor * node = gf->nodes[i];
 node->n_children = 0;
 node->n_views = 0;
-//node->freed = false;
 }

 for (int i = 0; i < gf->n_leafs; i++) {
 struct ggml_tensor * leaf = gf->leafs[i];
 leaf->n_children = 0;
 leaf->n_views = 0;
-//leaf->freed = false;
 }
 }

@@ -912,7 +910,6 @@ static void ggml_graph_allocate_tensors_n(
 if (parent == NULL) {
 break;
 }
-GGML_ASSERT(!parent->freed && "tensor used after free");
 allocate_node(buffer, parent);
 }

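Note: together with the `tensor->freed = true;` line dropped in the free path above and the removal of the `freed` field from `struct ggml_tensor` in ggml.h below, this deletes the allocator's debug-only use-after-free assertion.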
ggml.c (14 lines changed)
@@ -4530,16 +4530,15 @@ struct ggml_tensor * ggml_new_tensor_impl(
 /*.is_param =*/ false,
 /*.grad =*/ NULL,
 /*.src =*/ { NULL },
-/*.node_id =*/ -1,
+/*.visited =*/ false,
 /*.n_children =*/ 0,
 /*.n_views =*/ 0,
-/*.freed =*/ false,
 /*.perf_runs =*/ 0,
 /*.perf_cycles =*/ 0,
 /*.perf_time_us =*/ 0,
 /*.data =*/ data,
-/*.name =*/ { 0 },
 /*.extra =*/ NULL,
+/*.name =*/ { 0 },
 /*.padding =*/ { 0 },
 };

@@ -15818,9 +15817,10 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor *
 }

 // check if already visited
-if (node->node_id != -1) {
+if (node->visited) {
 return;
 }
+node->visited = true;

 for (int i = 0; i < GGML_MAX_SRC; ++i) {
 if (node->src[i]) {
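The graph builder now tracks visitation with a boolean flag instead of checking `node_id != -1`. A minimal self-contained sketch of the pattern, using a simplified stand-in node type (the real code operates on `struct ggml_tensor` inside `ggml_visit_parents`):

#include <stdbool.h>
#include <stddef.h>

#define MAX_SRC 2   /* stand-in for GGML_MAX_SRC */

struct node {
    bool visited;               /* replaces the old `int node_id` sentinel check */
    struct node * src[MAX_SRC]; /* parent tensors */
};

static void visit(struct node * n) {
    if (n->visited) {   /* already collected into the graph */
        return;
    }
    n->visited = true;  /* mark before recursing so shared parents are visited only once */
    for (int i = 0; i < MAX_SRC; ++i) {
        if (n->src[i]) {
            visit(n->src[i]);
        }
    }
    /* the real code then appends n to cgraph->nodes or cgraph->leafs */
}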
@@ -15839,7 +15839,6 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor *
 ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
 }

-node->node_id = cgraph->n_leafs;
 cgraph->leafs[cgraph->n_leafs] = node;
 cgraph->n_leafs++;
 } else {
@@ -15849,7 +15848,6 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor *
 ggml_format_name(node, "node_%d", cgraph->n_nodes);
 }

-node->node_id = cgraph->n_nodes;
 cgraph->nodes[cgraph->n_nodes] = node;
 cgraph->grads[cgraph->n_nodes] = node->grad;
 cgraph->n_nodes++;
@@ -15883,10 +15881,10 @@ void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor *
 // TODO: this can be removed when ggml_build_forward_expand is removed
 void ggml_graph_close(struct ggml_cgraph * cgraph) {
 for (int i = 0; i < cgraph->n_nodes; ++i) {
-cgraph->nodes[i]->node_id = -1;
+cgraph->nodes[i]->visited = false;
 }
 for (int i = 0; i < cgraph->n_leafs; ++i) {
-cgraph->leafs[i]->node_id = -1;
+cgraph->leafs[i]->visited = false;
 }
 cgraph->closed = true;
 }
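With the marker now a boolean, `ggml_graph_close` resets `visited` to false (instead of `node_id` to -1) on every node and leaf, so the same tensors can later be collected into another graph.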
ggml.h (11 lines changed)
@@ -422,10 +422,9 @@ extern "C" {
 struct ggml_tensor * grad;
 struct ggml_tensor * src[GGML_MAX_SRC];

-int node_id; // used to build graphs
-int n_children;
+bool visited; // used to build graphs
+int n_children; // used by the allocator
 int n_views;
-bool freed; // debug

 // performance
 int perf_runs;
@@ -434,11 +433,11 @@ extern "C" {

 void * data;

-char name[GGML_MAX_NAME];
-
 void * extra; // extra things e.g. for ggml-cuda.cu

-char padding[8];
+char name[GGML_MAX_NAME];
+
+char padding[12];
 };

 static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
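For reference, the affected part of `struct ggml_tensor` after this commit, assembled from the two ggml.h hunks above (excerpt only; surrounding members and exact indentation elided):

struct ggml_tensor * grad;
struct ggml_tensor * src[GGML_MAX_SRC];

bool visited;   // used to build graphs (was: int node_id; the bool freed debug field is removed)
int n_children; // used by the allocator
int n_views;

// ... performance counters, data ...

void * extra; // extra things e.g. for ggml-cuda.cu

char name[GGML_MAX_NAME]; // moved from before extra

char padding[12]; // was char padding[8]

The wider explicit padding presumably compensates for the removed and narrowed fields so that `GGML_TENSOR_SIZE` stays stable; the commit itself does not state this.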