From 59d1de0bd1c3a68df3515f2819ca26521ad59985 Mon Sep 17 00:00:00 2001 From: Rafal Rudnicki Date: Thu, 14 Nov 2024 14:18:30 +0100 Subject: [PATCH] cleanup --- src/pool/pool_disjoint.c | 222 ++++++++++++++++----------------------- 1 file changed, 93 insertions(+), 129 deletions(-) diff --git a/src/pool/pool_disjoint.c b/src/pool/pool_disjoint.c index cb4ec6828..c3879f890 100644 --- a/src/pool/pool_disjoint.c +++ b/src/pool/pool_disjoint.c @@ -36,9 +36,7 @@ void destroy_slab(slab_t *slab); void *slab_get(const slab_t *slab); void *slab_get_end(const slab_t *slab); -bucket_t *slab_get_bucket(slab_t *slab); void *slab_get_chunk(slab_t *slab); -size_t slab_get_num_chunks(const slab_t *slab); size_t slab_get_chunk_size(const slab_t *slab); size_t slab_get_num_allocated(const slab_t *slab); @@ -59,7 +57,6 @@ bool bucket_can_pool(bucket_t *bucket, bool *to_pool); void bucket_on_free_chunk(bucket_t *bucket, slab_t *slab, bool *to_pool); void bucket_decrement_pool(bucket_t *bucket, bool *from_pool); void *bucket_get_chunk(bucket_t *bucket, bool *from_pool); -size_t bucket_get_size(bucket_t *bucket); size_t bucket_chunk_cut_off(bucket_t *bucket); size_t bucket_capacity(bucket_t *bucket); void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab, @@ -75,27 +72,10 @@ slab_list_item_t *bucket_get_avail_full_slab(bucket_t *bucket, bool *from_pool); void bucket_free_slab(bucket_t *bucket, slab_t *slab, bool *to_pool); const umf_disjoint_pool_shared_limits_t *bucket_get_limits(bucket_t *bucket); -umf_disjoint_pool_params_t *bucket_get_params(bucket_t *bucket); umf_memory_provider_handle_t bucket_get_mem_handle(bucket_t *bucket); utils_mutex_t *bucket_get_known_slabs_map_lock(bucket_t *bucket); critnib *bucket_get_known_slabs(bucket_t *bucket); -bucket_t *disjoint_pool_findBucket(disjoint_pool_t *pool, size_t Size); -umf_result_t disjoint_pool_deallocate(disjoint_pool_t *pool, void *Ptr, - bool *ToPool); -umf_disjoint_pool_shared_limits_t * -disjoint_pool_getLimits(disjoint_pool_t *pool); -void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t Size, - bool *FromPool); -void *disjoint_pool_allocate_align(disjoint_pool_t *pool, size_t Size, - size_t Alignment, bool *FromPool); - -umf_memory_provider_handle_t disjoint_pool_getMemHandle(disjoint_pool_t *pool); -utils_mutex_t *disjoint_pool_getKnownSlabsMapLock(disjoint_pool_t *pool); -critnib *disjoint_pool_getKnownSlabs(disjoint_pool_t *pool); -size_t disjoint_pool_SlabMinSize(disjoint_pool_t *pool); -umf_disjoint_pool_params_t *disjoint_pool_getParams(disjoint_pool_t *pool); - static __TLS umf_result_t TLS_last_allocation_error; // Allocations are a minimum of 4KB/64KB/2MB even when a smaller size is @@ -147,13 +127,13 @@ void annotate_memory_undefined(void *ptr, size_t size); typedef struct slab_list_item_t slab_list_item_t; typedef struct bucket_t { - size_t Size; + size_t size; // List of slabs which have at least 1 available chunk. - slab_list_item_t *AvailableSlabs; + slab_list_item_t *available_slabs; // List of slabs with 0 available chunk. - slab_list_item_t *UnavailableSlabs; + slab_list_item_t *unavailable_slabs; // Protects the bucket and all the corresponding slabs utils_mutex_t bucket_lock; @@ -181,18 +161,18 @@ typedef struct bucket_t { // if any of them is entirely free. Instead we keep a counter of entirely // empty slabs within the Available list to speed up the process of checking // if a slab in this bucket is already pooled. 
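
The hunks above set the pattern for the rest of the cleanup: trivial getters such as bucket_get_size and slab_get_bucket are dropped in favor of direct field access, and bucket_t members move from CamelCase to snake_case. A minimal sketch of the before/after pattern (bucket_t simplified here; the real struct also carries slab lists, a lock, and statistics):

// Minimal sketch of the getter-removal pattern applied throughout this
// patch, with a simplified bucket_t.
#include <stddef.h>

typedef struct bucket_t {
    size_t size; // chunk size served by this bucket (was `Size`)
} bucket_t;

// Before the cleanup, every read went through a one-line getter:
//     size_t bucket_get_size(bucket_t *bucket) { return bucket->Size; }
// After it, internal callers simply read the field:
static size_t slab_chunk_size_of(const bucket_t *bucket) {
    return bucket->size;
}
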
- size_t chunkedSlabsInPool; + size_t chunked_slabs_in_pool; // Statistics - size_t allocPoolCount; - size_t freeCount; - size_t currSlabsInUse; - size_t currSlabsInPool; - size_t maxSlabsInPool; + size_t alloc_pool_count; + size_t free_count; + size_t curr_slabs_in_use; + size_t curr_slabs_in_pool; + size_t max_slabs_in_pool; // Statistics - size_t allocCount; - size_t maxSlabsInUse; + size_t alloc_count; + size_t max_slabs_in_use; } bucket_t; @@ -276,8 +256,6 @@ typedef struct disjoint_pool_t { size_t ProviderMinPageSize; } disjoint_pool_t; -size_t bucket_get_size(bucket_t *bucket); - void slab_reg(slab_t *slab); void slab_unreg(slab_t *slab); @@ -285,6 +263,7 @@ slab_t *create_slab(bucket_t *bucket) { // In case bucket size is not a multiple of SlabMinSize, we would have // some padding at the end of the slab. slab_t *slab = umf_ba_global_alloc(sizeof(slab_t)); + // TODO check res and errors here and everywhere // TODO use logger slab->num_allocated = 0; @@ -296,7 +275,7 @@ slab_t *create_slab(bucket_t *bucket) { slab->iter->val = slab; slab->iter->prev = slab->iter->next = NULL; - slab->num_chunks = bucket_slab_min_size(bucket) / bucket_get_size(bucket); + slab->num_chunks = bucket_slab_min_size(bucket) / bucket->size; slab->chunks = umf_ba_global_alloc(sizeof(bool) * slab->num_chunks); memset(slab->chunks, 0, sizeof(bool) * slab->num_chunks); @@ -338,8 +317,6 @@ size_t slab_get_num_allocated(const slab_t *slab) { return slab->num_allocated; } -size_t slab_get_num_chunks(const slab_t *slab) { return slab->num_chunks; } - // Return the index of the first available chunk, SIZE_MAX otherwise size_t slab_find_first_available_chunk_idx(const slab_t *slab) { // Use the first free chunk index as a hint for the search. @@ -387,10 +364,7 @@ void *slab_get_end(const slab_t *slab) { // TODO remove? why need getter/setter? 
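
slab_find_first_available_chunk_idx above scans the slab's per-chunk occupancy array. A self-contained sketch of that first-fit scan (names simplified; the real version seeds the loop with the slab's cached first-free-chunk index, modeled here by `hint`):

// Sketch of the first-fit scan behind slab_find_first_available_chunk_idx:
// chunks[] holds one bool per chunk (true = allocated).
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static size_t find_first_available(const bool *chunks, size_t num_chunks,
                                   size_t hint) {
    for (size_t i = hint; i < num_chunks; i++) {
        if (!chunks[i]) {
            return i; // index of the first unallocated chunk
        }
    }
    return SIZE_MAX; // slab is full
}
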
void *slab_get(const slab_t *slab) { return slab->mem_ptr; } -bucket_t *slab_get_bucket(slab_t *slab) { return slab->bucket; } -size_t slab_get_chunk_size(const slab_t *slab) { - return bucket_get_size(slab->bucket); -} +size_t slab_get_chunk_size(const slab_t *slab) { return slab->bucket->size; } void slab_free_chunk(slab_t *slab, void *ptr) { // This method should be called through bucket(since we might remove the @@ -424,7 +398,7 @@ bool slab_has_avail(const slab_t *slab) { } void slab_reg(slab_t *slab) { - bucket_t *bucket = slab_get_bucket(slab); + bucket_t *bucket = slab->bucket; // NOTE: changed vs original - slab is already aligned to bucket_slab_min_size // I also decr end_addr by 1 void *start_addr = (void *)ALIGN_DOWN((size_t)slab_get(slab), @@ -439,7 +413,7 @@ void slab_reg(slab_t *slab) { } void slab_unreg(slab_t *slab) { - bucket_t *bucket = slab_get_bucket(slab); + bucket_t *bucket = slab->bucket; // NOTE: changed vs original - slab is already aligned to bucket_slab_min_size // I also decr end_addr by 1 void *start_addr = (void *)ALIGN_DOWN((size_t)slab_get(slab), @@ -457,18 +431,18 @@ bucket_t *create_bucket(size_t Sz, disjoint_pool_t *pool, umf_disjoint_pool_shared_limits_t *shared_limits) { bucket_t *bucket = (bucket_t *)umf_ba_global_alloc(sizeof(bucket_t)); - bucket->Size = Sz; + bucket->size = Sz; bucket->pool = pool; - bucket->AvailableSlabs = NULL; - bucket->UnavailableSlabs = NULL; - bucket->chunkedSlabsInPool = 0; - bucket->allocPoolCount = 0; - bucket->freeCount = 0; - bucket->currSlabsInUse = 0; - bucket->currSlabsInPool = 0; - bucket->maxSlabsInPool = 0; - bucket->allocCount = 0; - bucket->maxSlabsInUse = 0; + bucket->available_slabs = NULL; + bucket->unavailable_slabs = NULL; + bucket->chunked_slabs_in_pool = 0; + bucket->alloc_pool_count = 0; + bucket->free_count = 0; + bucket->curr_slabs_in_use = 0; + bucket->curr_slabs_in_pool = 0; + bucket->max_slabs_in_pool = 0; + bucket->alloc_count = 0; + bucket->max_slabs_in_use = 0; bucket->shared_limits = shared_limits; assert(shared_limits); @@ -482,8 +456,8 @@ void destroy_bucket(bucket_t *bucket) { slab_list_item_t *it = NULL, *tmp = NULL; // TODO check eng // use extra tmp to store next iterator before the slab is destroyed - LL_FOREACH_SAFE(bucket->AvailableSlabs, it, tmp) { destroy_slab(it->val); } - LL_FOREACH_SAFE(bucket->UnavailableSlabs, it, tmp) { + LL_FOREACH_SAFE(bucket->available_slabs, it, tmp) { destroy_slab(it->val); } + LL_FOREACH_SAFE(bucket->unavailable_slabs, it, tmp) { destroy_slab(it->val); } @@ -498,11 +472,11 @@ void bucket_on_free_chunk(bucket_t *bucket, slab_t *slab, bool *ToPool) { // In case if the slab was previously full and now has 1 available // chunk, it should be moved to the list of available slabs - if (slab_get_num_allocated(slab) == (slab_get_num_chunks(slab) - 1)) { + if (slab_get_num_allocated(slab) == (slab->num_chunks - 1)) { slab_list_item_t *slab_it = slab->iter; assert(slab_it->val != NULL); - DL_DELETE(bucket->UnavailableSlabs, slab_it); - DL_PREPEND(bucket->AvailableSlabs, slab_it); + DL_DELETE(bucket->unavailable_slabs, slab_it); + DL_PREPEND(bucket->available_slabs, slab_it); } // Check if slab is empty, and pool it if we can. 
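
The bucket_on_free_chunk hunk above moves a slab's iterator node between the two intrusive lists with utlist macros. A self-contained sketch of that move (list node simplified; utlist.h assumed available, as elsewhere in UMF):

// Sketch of the unavailable -> available move performed when a previously
// full slab regains a free chunk.
#include <utlist.h> // DL_DELETE / DL_PREPEND

typedef struct slab_list_item_t {
    void *val; // the slab this node represents
    struct slab_list_item_t *prev, *next;
} slab_list_item_t;

static void move_to_available(slab_list_item_t **unavailable_slabs,
                              slab_list_item_t **available_slabs,
                              slab_list_item_t *it) {
    DL_DELETE(*unavailable_slabs, it); // unlink from the full-slab list
    DL_PREPEND(*available_slabs, it);  // freshly freed slab is tried first
}
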
@@ -517,40 +491,35 @@ void bucket_on_free_chunk(bucket_t *bucket, slab_t *slab, bool *ToPool) { slab_list_item_t *slab_it = slab->iter; assert(slab_it->val != NULL); slab_unreg(slab_it->val); - DL_DELETE(bucket->AvailableSlabs, slab_it); + DL_DELETE(bucket->available_slabs, slab_it); destroy_slab(slab_it->val); } } } -// Return the allocation size of this bucket. -size_t bucket_get_size(bucket_t *bucket) { return bucket->Size; } +void bucket_count_free(bucket_t *bucket) { ++bucket->free_count; } -disjoint_pool_t *bucket_get_alloc_ctx(bucket_t *bucket) { return bucket->pool; } - -void bucket_count_free(bucket_t *bucket) { ++bucket->freeCount; } - -void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *Slab, - bool *ToPool) { +void bucket_free_chunk(bucket_t *bucket, void *ptr, slab_t *slab, + bool *to_pool) { utils_mutex_lock(&bucket->bucket_lock); - slab_free_chunk(Slab, ptr); - bucket_on_free_chunk(bucket, Slab, ToPool); + slab_free_chunk(slab, ptr); + bucket_on_free_chunk(bucket, slab, to_pool); utils_mutex_unlock(&bucket->bucket_lock); } -void bucket_count_alloc(bucket_t *bucket, bool FromPool) { - ++bucket->allocCount; - if (FromPool) { - ++bucket->allocPoolCount; +void bucket_count_alloc(bucket_t *bucket, bool from_pool) { + ++bucket->alloc_count; + if (from_pool) { + ++bucket->alloc_pool_count; } } -void *bucket_get_chunk(bucket_t *bucket, bool *FromPool) { +void *bucket_get_chunk(bucket_t *bucket, bool *from_pool) { utils_mutex_lock(&bucket->bucket_lock); - slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, FromPool); + slab_list_item_t *slab_it = bucket_get_avail_slab(bucket, from_pool); if (slab_it == NULL) { utils_mutex_unlock(&bucket->bucket_lock); return NULL; @@ -560,8 +529,8 @@ void *bucket_get_chunk(bucket_t *bucket, bool *FromPool) { // If the slab is full, move it to unavailable slabs and update its iterator if (!(slab_has_avail(slab_it->val))) { - DL_DELETE(bucket->AvailableSlabs, slab_it); - DL_PREPEND(bucket->UnavailableSlabs, slab_it); + DL_DELETE(bucket->available_slabs, slab_it); + DL_PREPEND(bucket->unavailable_slabs, slab_it); } utils_mutex_unlock(&bucket->bucket_lock); @@ -574,19 +543,19 @@ size_t bucket_chunk_cut_off(bucket_t *bucket) { size_t bucket_slab_alloc_size(bucket_t *bucket) { // return max - return (bucket_get_size(bucket) > bucket_slab_min_size(bucket)) - ? bucket_get_size(bucket) + return (bucket->size > bucket_slab_min_size(bucket)) + ? bucket->size : bucket_slab_min_size(bucket); } size_t bucket_slab_min_size(bucket_t *bucket) { - return bucket_get_params(bucket)->SlabMinSize; + return bucket->pool->params.SlabMinSize; } slab_list_item_t *bucket_get_avail_full_slab(bucket_t *bucket, bool *from_pool) { // Return a slab that will be used for a single allocation. 
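
bucket_slab_alloc_size above is just a max over the bucket's chunk size and the slab minimum. A worked example under an assumed SlabMinSize of 64 KiB (the real value comes from the pool parameters):

// Worked example of bucket_slab_alloc_size: a bucket whose chunk size
// exceeds the slab minimum allocates a slab of exactly one chunk; smaller
// buckets carve many chunks out of one minimum-sized slab.
#include <assert.h>
#include <stddef.h>

static size_t slab_alloc_size(size_t bucket_size, size_t slab_min_size) {
    return bucket_size > slab_min_size ? bucket_size : slab_min_size;
}

static void slab_alloc_size_example(void) {
    const size_t slab_min = 64 * 1024; // assumed SlabMinSize
    // Small bucket: many 256 B chunks share one 64 KiB slab.
    assert(slab_alloc_size(256, slab_min) == 64 * 1024);
    // Large bucket: the slab is exactly one chunk of bucket->size bytes.
    assert(slab_alloc_size(1u << 20, slab_min) == (1u << 20));
}
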
- if (bucket->AvailableSlabs == NULL) { + if (bucket->available_slabs == NULL) { slab_t *slab = create_slab(bucket); if (slab == NULL) { //assert(0); @@ -594,14 +563,14 @@ slab_list_item_t *bucket_get_avail_full_slab(bucket_t *bucket, } slab_reg(slab); - DL_PREPEND(bucket->AvailableSlabs, slab->iter); + DL_PREPEND(bucket->available_slabs, slab->iter); *from_pool = false; bucket_update_stats(bucket, 1, 0); } else { bucket_decrement_pool(bucket, from_pool); } - return bucket->AvailableSlabs; + return bucket->available_slabs; } void *bucket_get_slab(bucket_t *bucket, bool *from_pool) { @@ -615,8 +584,8 @@ void *bucket_get_slab(bucket_t *bucket, bool *from_pool) { slab_t *slab = slab_it->val; void *ptr = slab_get(slab); - DL_DELETE(bucket->AvailableSlabs, slab_it); - DL_PREPEND(bucket->UnavailableSlabs, slab_it); + DL_DELETE(bucket->available_slabs, slab_it); + DL_PREPEND(bucket->unavailable_slabs, slab_it); utils_mutex_unlock(&bucket->bucket_lock); return ptr; @@ -628,18 +597,18 @@ void bucket_free_slab(bucket_t *bucket, slab_t *slab, bool *to_pool) { slab_list_item_t *slab_it = slab->iter; assert(slab_it->val != NULL); if (bucket_can_pool(bucket, to_pool)) { - DL_DELETE(bucket->UnavailableSlabs, slab_it); - DL_PREPEND(bucket->AvailableSlabs, slab_it); + DL_DELETE(bucket->unavailable_slabs, slab_it); + DL_PREPEND(bucket->available_slabs, slab_it); } else { slab_unreg(slab_it->val); - DL_DELETE(bucket->UnavailableSlabs, slab_it); + DL_DELETE(bucket->unavailable_slabs, slab_it); destroy_slab(slab_it->val); } utils_mutex_unlock(&bucket->bucket_lock); } slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket, bool *from_pool) { - if (bucket->AvailableSlabs == NULL) { + if (bucket->available_slabs == NULL) { slab_t *slab = create_slab(bucket); if (slab == NULL) { // TODO log @@ -648,14 +617,14 @@ slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket, bool *from_pool) { } slab_reg(slab); - DL_PREPEND(bucket->AvailableSlabs, slab->iter); + DL_PREPEND(bucket->available_slabs, slab->iter); bucket_update_stats(bucket, 1, 0); *from_pool = false; } else { - if (slab_get_num_allocated(bucket->AvailableSlabs->val) == 0) { + if (slab_get_num_allocated(bucket->available_slabs->val) == 0) { // If this was an empty slab, it was in the pool. // Now it is no longer in the pool, so update count. - --bucket->chunkedSlabsInPool; + --bucket->chunked_slabs_in_pool; bucket_decrement_pool(bucket, from_pool); } else { // Allocation from existing slab is treated as from pool for statistics. @@ -663,38 +632,38 @@ slab_list_item_t *bucket_get_avail_slab(bucket_t *bucket, bool *from_pool) { } } - return bucket->AvailableSlabs; + return bucket->available_slabs; } size_t bucket_capacity(bucket_t *bucket) { // For buckets used in chunked mode, just one slab in pool is sufficient. // For larger buckets, the capacity could be more and is adjustable. 
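
bucket_update_stats, called above with (+1, 0) when a fresh slab enters use, maintains current counters plus running peaks. A sketch of that high-water-mark bookkeeping (utils_max in the real code is assumed to be a plain max helper):

// Sketch of the per-counter bookkeeping done by bucket_update_stats.
#include <stddef.h>

static void update_stat(size_t *curr, size_t *peak, int delta) {
    *curr += delta; // delta is +1 or -1: a slab entered or left this state
    if (*curr > *peak) {
        *peak = *curr; // record a new peak for the trace output
    }
}
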
- if (bucket_get_size(bucket) <= bucket_chunk_cut_off(bucket)) { + if (bucket->size <= bucket_chunk_cut_off(bucket)) { return 1; } else { - return bucket_get_params(bucket)->Capacity; + return bucket->pool->params.Capacity; } } size_t bucket_max_poolable_size(bucket_t *bucket) { - return bucket_get_params(bucket)->MaxPoolableSize; + return bucket->pool->params.MaxPoolableSize; } void bucket_update_stats(bucket_t *bucket, int in_use, int in_pool) { - if (bucket_get_params(bucket)->PoolTrace == 0) { + if (bucket->pool->params.PoolTrace == 0) { return; } - bucket->currSlabsInUse += in_use; - bucket->maxSlabsInUse = - utils_max(bucket->currSlabsInUse, bucket->maxSlabsInUse); - bucket->currSlabsInPool += in_pool; - bucket->maxSlabsInPool = - utils_max(bucket->currSlabsInPool, bucket->maxSlabsInPool); + bucket->curr_slabs_in_use += in_use; + bucket->max_slabs_in_use = + utils_max(bucket->curr_slabs_in_use, bucket->max_slabs_in_use); + bucket->curr_slabs_in_pool += in_pool; + bucket->max_slabs_in_pool = + utils_max(bucket->curr_slabs_in_pool, bucket->max_slabs_in_pool); // Increment or decrement current pool sizes based on whether // slab was added to or removed from pool. - bucket_get_params(bucket)->CurPoolSize += + bucket->pool->params.CurPoolSize += in_pool * bucket_slab_alloc_size(bucket); } @@ -711,15 +680,14 @@ bool bucket_can_pool(bucket_t *bucket, bool *to_pool) { size_t NewFreeSlabsInBucket; // Check if this bucket is used in chunked form or as full slabs. - bool chunkedBucket = - bucket_get_size(bucket) <= bucket_chunk_cut_off(bucket); + bool chunkedBucket = bucket->size <= bucket_chunk_cut_off(bucket); if (chunkedBucket) { - NewFreeSlabsInBucket = bucket->chunkedSlabsInPool + 1; + NewFreeSlabsInBucket = bucket->chunked_slabs_in_pool + 1; } else { // TODO optimize size_t avail_num = 0; slab_list_item_t *it = NULL; - DL_FOREACH(bucket->AvailableSlabs, it) { avail_num++; } + DL_FOREACH(bucket->available_slabs, it) { avail_num++; } NewFreeSlabsInBucket = avail_num + 1; } @@ -743,7 +711,7 @@ bool bucket_can_pool(bucket_t *bucket, bool *to_pool) { &pool_size, &new_pool_size)) { #endif if (chunkedBucket) { - ++bucket->chunkedSlabsInPool; + ++bucket->chunked_slabs_in_pool; } bucket_update_stats(bucket, -1, 1); @@ -758,10 +726,6 @@ bool bucket_can_pool(bucket_t *bucket, bool *to_pool) { return false; } -umf_disjoint_pool_params_t *bucket_get_params(bucket_t *bucket) { - return &bucket->pool->params; -} - umf_memory_provider_handle_t bucket_get_mem_handle(bucket_t *bucket) { return bucket->pool->MemHandle; } @@ -775,7 +739,7 @@ utils_mutex_t *bucket_get_known_slabs_map_lock(bucket_t *bucket) { } void slab_reg_by_addr(void *addr, slab_t *slab) { - bucket_t *bucket = slab_get_bucket(slab); + bucket_t *bucket = slab->bucket; utils_mutex_t *lock = bucket_get_known_slabs_map_lock(bucket); critnib *slabs = bucket_get_known_slabs(bucket); @@ -799,7 +763,7 @@ void slab_reg_by_addr(void *addr, slab_t *slab) { } void slab_unreg_by_addr(void *addr, slab_t *slab) { - bucket_t *bucket = slab_get_bucket(slab); + bucket_t *bucket = slab->bucket; utils_mutex_t *lock = bucket_get_known_slabs_map_lock(bucket); critnib *slabs = bucket_get_known_slabs(bucket); @@ -851,12 +815,12 @@ umf_disjoint_pool_shared_limits_t *AllocImpl_getLimits(disjoint_pool_t *pool) { bucket_t *AllocImpl_findBucket(disjoint_pool_t *pool, size_t Size) { size_t calculatedIdx = AllocImpl_sizeToIdx(pool, Size); bucket_t *bucket = pool->buckets[calculatedIdx]; - assert(bucket_get_size(bucket) >= Size); + assert(bucket->size >= Size); 
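
slab_reg_by_addr above publishes a slab in the pool-wide critnib map under its aligned start address, with the known-slabs lock held. A condensed sketch of that registration (utils_mutex_t, critnib, and slab_t as declared in this file; critnib_insert's signature, an integer key plus a value pointer and update flag, is assumed to match UMF's critnib):

// Sketch of the address-keyed slab registration in slab_reg_by_addr.
#include <stdint.h>

static void slab_reg_by_addr_sketch(utils_mutex_t *lock,
                                    critnib *known_slabs, void *addr,
                                    slab_t *slab) {
    utils_mutex_lock(lock);
    critnib_insert(known_slabs, (uintptr_t)addr, slab, 0 /* no overwrite */);
    utils_mutex_unlock(lock);
}
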
     (void)bucket;
 
     if (calculatedIdx > 0) {
         bucket_t *bucket_prev = pool->buckets[calculatedIdx - 1];
-        assert(bucket_get_size(bucket_prev) < Size);
+        assert(bucket_prev->size < Size);
         (void)bucket_prev;
     }
 
@@ -876,8 +840,8 @@ void AllocImpl_printStats(disjoint_pool_t *pool, bool *TitlePrinted,
         //(*B).printStats(TitlePrinted, MTName);
         bucket_t *bucket = pool->buckets[i];
         *HighPeakSlabsInUse =
-            utils_max(bucket->maxSlabsInUse, *HighPeakSlabsInUse);
-        if (bucket->allocCount) {
+            utils_max(bucket->max_slabs_in_use, *HighPeakSlabsInUse);
+        if (bucket->alloc_count) {
             *HighBucketSize =
                 utils_max(bucket_slab_alloc_size(bucket), *HighBucketSize);
         }
@@ -958,7 +922,7 @@ void *AllocImpl_allocate(disjoint_pool_t *pool, size_t Size, bool *FromPool) {
     }
 
     VALGRIND_DO_MEMPOOL_ALLOC(pool, Ptr, Size);
-    annotate_memory_undefined(Ptr, bucket_get_size(bucket));
+    annotate_memory_undefined(Ptr, bucket->size);
 
     return Ptr;
 }
@@ -1046,15 +1010,15 @@ umf_result_t AllocImpl_deallocate(disjoint_pool_t *pool, void *Ptr,
         // Unlock the map before freeing the chunk, it may be locked on write
         // there
         utils_mutex_unlock(&pool->known_slabs_map_lock);
-        bucket_t *bucket = slab_get_bucket(slab);
+        bucket_t *bucket = slab->bucket;
 
         if (pool->params.PoolTrace > 1) {
             bucket_count_free(bucket);
         }
 
         VALGRIND_DO_MEMPOOL_FREE(pool, Ptr);
-        annotate_memory_inaccessible(Ptr, bucket_get_size(bucket));
-        if (bucket_get_size(bucket) <= bucket_chunk_cut_off(bucket)) {
+        annotate_memory_inaccessible(Ptr, bucket->size);
+        if (bucket->size <= bucket_chunk_cut_off(bucket)) {
             bucket_free_chunk(bucket, Ptr, slab, ToPool);
         } else {
             bucket_free_slab(bucket, slab, ToPool);
@@ -1076,7 +1040,7 @@ umf_result_t AllocImpl_deallocate(disjoint_pool_t *pool, void *Ptr,
 // TODO?
 std::ostream &operator<<(std::ostream &Os, slab_t &Slab) {
     Os << "Slab<" << slab_get(&Slab) << ", " << slab_get_end(&Slab) << ", "
-       << slab_get_bucket(&Slab)->getSize() << ">";
+       << Slab.bucket->size << ">";
     return Os;
 }
 */
@@ -1084,7 +1048,7 @@ std::ostream &operator<<(std::ostream &Os, slab_t &Slab) {
 /*
 // TODO move
 void Bucket::printStats(bool &TitlePrinted, const std::string &Label) {
-    if (allocCount) {
+    if (alloc_count) {
         if (!TitlePrinted) {
             std::cout << Label << " memory statistics\n";
             std::cout << std::setw(14) << "Bucket Size" << std::setw(12)
@@ -1094,10 +1058,10 @@ void Bucket::printStats(bool &TitlePrinted, const std::string &Label) {
                       << "Peak Slabs in Pool" << std::endl;
             TitlePrinted = true;
         }
-        std::cout << std::setw(14) << getSize() << std::setw(12) << allocCount
-                  << std::setw(12) << freeCount << std::setw(18)
-                  << allocPoolCount << std::setw(20) << maxSlabsInUse
-                  << std::setw(21) << maxSlabsInPool << std::endl;
+        std::cout << std::setw(14) << getSize() << std::setw(12) << alloc_count
+                  << std::setw(12) << free_count << std::setw(18)
+                  << alloc_pool_count << std::setw(20) << max_slabs_in_use
+                  << std::setw(21) << max_slabs_in_pool << std::endl;
     }
 }
 */
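
The pair of asserts in AllocImpl_findBucket encodes the bucket-ordering invariant: buckets are sorted by ascending chunk size, and a request maps to the smallest bucket that can hold it. A simplified, linear equivalent of the size-to-index mapping (the real AllocImpl_sizeToIdx derives the index arithmetically from log2 of the size):

// Linear sketch of the mapping that the asserts in AllocImpl_findBucket
// rely on: buckets[i]->size >= size and buckets[i - 1]->size < size.
#include <stddef.h>

static size_t size_to_idx_sketch(bucket_t **buckets, size_t num_buckets,
                                 size_t size) {
    for (size_t i = 0; i < num_buckets; i++) {
        if (buckets[i]->size >= size) {
            return i; // smallest bucket whose chunk size covers the request
        }
    }
    return num_buckets - 1; // oversized requests fall into the last bucket
}
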