@@ -4412,8 +4412,8 @@ void ggml_free(struct ggml_context * ctx) {
         if (&g_state.contexts[i].context == ctx) {
             g_state.contexts[i].used = false;
 
-            GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
-                    __func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);
+            GGML_PRINT_DEBUG("%s: context %d has been freed. memory used = %zu\n",
+                    __func__, i, ggml_used_mem(ctx));
 
             if (ctx->mem_buffer_owned) {
                 GGML_ALIGNED_FREE(ctx->mem_buffer);
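
Note on the hunk above: the debug message in ggml_free() now reports memory usage through the public ggml_used_mem() accessor instead of reading ctx->n_objects and ctx->objects_end directly; the accessor presumably also handles an empty context (NULL objects_end), which the old expression would have dereferenced. A minimal sketch of the lifecycle in which this log line fires (the 16 MiB size and tensor shape are arbitrary example values, not from the commit):

    #include <stdio.h>
    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,   // ggml allocates, so mem_buffer_owned is true
            /*.no_alloc   =*/ false,
        };

        struct ggml_context * ctx = ggml_init(params);
        struct ggml_tensor  * a   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
        (void) a;

        // same quantity the freed-context message now reports
        printf("used: %zu bytes\n", ggml_used_mem(ctx));

        // with debug printing enabled, this now logs:
        //   "ggml_free: context 0 has been freed. memory used = ..."
        ggml_free(ctx);
        return 0;
    }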
@@ -16317,8 +16317,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
                 if (GGML_OP_HAS_FINALIZE[node->op]) {
                     params.nth = n_tasks_arr[node_n];
                     ggml_compute_forward(&params, node);
-                    ggml_graph_compute_perf_stats_node(node, state->shared);
                 }
+                ggml_graph_compute_perf_stats_node(node, state->shared);
             }
 
             // distribute new work or execute it direct if 1T
@@ -16348,8 +16348,9 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
                 if (GGML_OP_HAS_FINALIZE[node->op]) {
                     params.type = GGML_TASK_FINALIZE;
                     ggml_compute_forward(&params, node);
-                    ggml_graph_compute_perf_stats_node(node, state->shared);
                 }
+
+                ggml_graph_compute_perf_stats_node(node, state->shared);
             } else {
                 break;
             }
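
The two hunks above make the same fix in both code paths of ggml_graph_compute_thread(): ggml_graph_compute_perf_stats_node() used to be called only inside the GGML_OP_HAS_FINALIZE branch, so nodes whose op has no finalize pass never accumulated timing. Hoisting the call past the branch records per-node perf counters unconditionally. A rough sketch of reading the counters back after a compute (field and function names as in the ggml of this era; illustrative, not part of the commit):

    #include <stdio.h>
    #include "ggml.h"

    // with the call hoisted, every node carries counters,
    // not only ops that have a FINALIZE pass
    static void print_node_perf(const struct ggml_cgraph * gf) {
        for (int i = 0; i < gf->n_nodes; i++) {
            const struct ggml_tensor * node = gf->nodes[i];
            printf("node %3d: %-16s runs=%3d time=%lld us\n",
                    i, ggml_op_name(node->op), node->perf_runs,
                    (long long) node->perf_time_us);
        }
    }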
@@ -16891,9 +16892,6 @@ static void ggml_graph_export_node(const struct ggml_tensor * tensor, const char
 }
 
 void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
-    //assert(cgraph->work == NULL);
-    //assert(cgraph->work_size == 0);
-
     uint64_t size_eval = 0;
 
     // compute size of intermediate results
@@ -17332,9 +17330,6 @@ void ggml_graph_print(const struct ggml_cgraph * cgraph) {
 
     GGML_PRINT("=== GRAPH ===\n");
 
-    GGML_PRINT_DEBUG("n_threads = %d\n", cgraph->n_threads);
-    GGML_PRINT_DEBUG("total work size = %zu bytes\n", cgraph->work_size);
-
     GGML_PRINT("n_nodes = %d\n", cgraph->n_nodes);
     for (int i = 0; i < cgraph->n_nodes; i++) {
         struct ggml_tensor * node = cgraph->nodes[i];
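
The last two hunks drop references to cgraph->work, cgraph->work_size, and cgraph->n_threads: the already commented-out asserts in ggml_graph_export() and the debug prints in ggml_graph_print(). This is consistent with the refactor that moves the scratch buffer and thread count out of ggml_cgraph and into a caller-owned plan; assuming that ggml_cplan API, usage looks roughly like this (sketch, not code from this commit):

    #include <stdlib.h>
    #include "ggml.h"

    void compute_with_plan(struct ggml_cgraph * gf) {
        // the plan reports how much scratch memory the graph needs;
        // the caller, not the cgraph, now owns that buffer
        struct ggml_cplan cplan = ggml_graph_plan(gf, /*n_threads=*/4);

        uint8_t * work = NULL;
        if (cplan.work_size > 0) {
            work = malloc(cplan.work_size);
            cplan.work_data = work;
        }

        ggml_graph_compute(gf, &cplan);

        free(work);
    }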