
Commit 672dda1

ggml : fixed runtime bugs and compile errors related to GGML_PERF and GGML_DEBUG (#2219)
* fixed runtime bugs and compile errors related to GGML_PERF and GGML_DEBUG

* remove ifdef GGML_PERF; update fmt
1 parent 27ab66e · commit 672dda1
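Both macros named in the title are compile-time opt-ins, so the bugs fixed here only surface in instrumented builds. As context, a sketch of the gating pattern, paraphrased from ggml.c of this era rather than quoted verbatim:

// Paraphrased from ggml.c (not verbatim): GGML_PRINT_DEBUG compiles away
// unless GGML_DEBUG >= 1, and the perf helpers read as 0 unless GGML_PERF
// is defined. That is why breakage in these paths goes unnoticed in
// default builds.
#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#ifdef GGML_PERF
#define ggml_perf_time_us() ggml_time_us()
#define ggml_perf_cycles()  ggml_cycles()
#else
#define ggml_perf_time_us() 0
#define ggml_perf_cycles()  0
#endif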

File tree

1 file changed (+5, -10)


ggml.c (+5, -10)
@@ -4412,8 +4412,8 @@ void ggml_free(struct ggml_context * ctx) {
         if (&g_state.contexts[i].context == ctx) {
             g_state.contexts[i].used = false;

-            GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
-                    __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);
+            GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
+                    __func__, i, ggml_used_mem(ctx));

             if (ctx->mem_buffer_owned) {
                 GGML_ALIGNED_FREE(ctx->mem_buffer);
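The removed print dereferenced ctx->objects_end, which is NULL for a context freed before any object was allocated; it also printed ctx->n_objects, which the new format string drops (the "update fmt" part of the commit message). ggml_used_mem() handles the empty case. A paraphrase of what it computes, assuming the implementation of this era (not quoted verbatim):

// Paraphrase of ggml_used_mem(): NULL-safe where the old debug print
// was not, returning 0 for a context with no objects.
size_t ggml_used_mem(const struct ggml_context * ctx) {
    return ctx->objects_end == NULL
        ? 0
        : ctx->objects_end->offs + ctx->objects_end->size;
}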
@@ -16317,8 +16317,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
                 if (GGML_OP_HAS_FINALIZE[node->op]) {
                     params.nth = n_tasks_arr[node_n];
                     ggml_compute_forward(&params, node);
-                    ggml_graph_compute_perf_stats_node(node, state->shared);
                 }
+                ggml_graph_compute_perf_stats_node(node, state->shared);
             }

             // distribute new work or execute it direct if 1T
@@ -16348,8 +16348,9 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
                 if (GGML_OP_HAS_FINALIZE[node->op]) {
                     params.type = GGML_TASK_FINALIZE;
                     ggml_compute_forward(&params, node);
-                    ggml_graph_compute_perf_stats_node(node, state->shared);
                 }
+
+                ggml_graph_compute_perf_stats_node(node, state->shared);
             } else {
                 break;
             }
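These two hunks hoist ggml_graph_compute_perf_stats_node() out of the GGML_OP_HAS_FINALIZE branch, so per-node counters are accumulated once for every node instead of only for ops that have a finalize pass. For reference, the helper does roughly the following (a paraphrase assuming the perf fields of this era, not the verbatim source):

// Rough paraphrase of ggml_graph_compute_perf_stats_node(): fold the
// cycles/time elapsed since the node was started into the tensor's
// counters. It must run exactly once per node for the totals to be
// meaningful, hence moving it out of the FINALIZE-only branch.
static void ggml_graph_compute_perf_stats_node(
        struct ggml_tensor * node,
        const struct ggml_compute_state_shared * st) {
    int64_t cycles_cur  = ggml_perf_cycles()  - st->perf_node_start_cycles;
    int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;

    node->perf_runs++;
    node->perf_cycles  += cycles_cur;
    node->perf_time_us += time_us_cur;
}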
@@ -16891,9 +16892,6 @@ static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char
 }

 void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
-    //assert(cgraph->work == NULL);
-    //assert(cgraph->work_size == 0);
-
     uint64_t size_eval = 0;

     // compute size of intermediate results
@@ -17332,9 +17330,6 @@ void ggml_graph_print(const struct ggml_cgraph * cgraph) {

     GGML_PRINT("=== GRAPH ===\n");

-    GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads);
-    GGML_PRINT_DEBUG("total work size = %zu bytes\n", cgraph->work_size);
-
     GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
     for (int i = 0; i < cgraph->n_nodes; i++) {
         struct ggml_tensor * node = cgraph->nodes[i];
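The lines dropped by the last two hunks referenced cgraph->work, cgraph->work_size, and cgraph->n_threads. Those fields appear to have been removed from struct ggml_cgraph when the work buffer moved into ggml_cplan, leaving dead references that only fail to compile once GGML_DEBUG is enabled; presumably the "compile errors" half of the commit title.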
