From: Mitch Skiba Date: Thu, 17 May 2018 19:06:08 +0000 (+0000) Subject: lib: Implement an allocator for 32 bit ID numbers X-Git-Tag: frr-7.1-dev~196^2~1 X-Git-Url: https://git.puffer.fish/?a=commitdiff_plain;h=a94eca0968d914d58dff819c2411542377f56bae;p=matthieu%2Ffrr.git lib: Implement an allocator for 32 bit ID numbers This commit introduces lib/id_alloc, which has facilities for both an ID number allocator, and less efficient ID holding pools. The pools are meant to be a temporary holding area for ID numbers meant to be re-used, and are implemented as a linked-list stack. The allocator itself is much more efficient with memory. Based on sizeof values on my 64 bit desktop, the allocator requires around 155 KiB per million IDs tracked. IDs are ultimately tracked in a bit-map split into many "pages." The allocator tracks a list of pages that have free bits, and which sections of each page have free IDs, so there isn't any scanning required to find a free ID. (The library utility ffs, or "Find First Set," is generally a single CPU instruction.) At the moment, totally empty pages will not be freed, so the memory utilization of this allocator will remain at the high water mark. The initial intended use case is for BGP's TX Addpath IDs to be pulled from an allocator that tracks which IDs are in use, rather than a free running counter. The allocator reserves ID #0 as a sentinel value for an invalid ID numbers, and BGP will want ID #1 reserved as well. To support this, the allocator allows for IDs to be explicitly reserved, though be aware this is only practical to use with low numbered IDs because the allocator must allocate pages in order. Signed-off-by Mitchell Skiba --- diff --git a/lib/id_alloc.c b/lib/id_alloc.c new file mode 100644 index 0000000000..b4d37dbdcf --- /dev/null +++ b/lib/id_alloc.c @@ -0,0 +1,406 @@ +/* + * FRR ID Number Allocator + * Copyright (C) 2018 Amazon.com, Inc. 
or its affiliates + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "id_alloc.h" + +#include "log.h" +#include "lib_errors.h" +#include "memory.h" + +#include + +DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR, "ID Number Allocator") +DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR_NAME, "ID Number Allocator Name") +DEFINE_MTYPE_STATIC(LIB, IDALLOC_DIRECTORY, "ID Number Allocator Directory") +DEFINE_MTYPE_STATIC(LIB, IDALLOC_SUBDIRECTORY, + "ID Number Allocator Subdirectory") +DEFINE_MTYPE_STATIC(LIB, IDALLOC_PAGE, "ID Number Allocator Page") +DEFINE_MTYPE_STATIC(LIB, IDALLOC_POOL, "ID Number temporary holding pool entry") + +#if UINT_MAX >= UINT32_MAX +#define FFS32(x) ffs(x) +#else +/* ints less than 32 bits? Yikes. */ +#define FFS32(x) ffsl(x) +#endif + +#define DIR_MASK ((1<> DIR_SHIFT) & DIR_MASK) +#define ID_SUBDIR(id) ((id >> SUBDIR_SHIFT) & SUBDIR_MASK) +#define ID_PAGE(id) ((id >> PAGE_SHIFT) & PAGE_MASK) +#define ID_WORD(id) ((id >> WORD_SHIFT) & WORD_MASK) +#define ID_OFFSET(id) ((id >> OFFSET_SHIFT) & OFFSET_MASK) + +/* + * Find the page that an ID number belongs to in an allocator. + * Optionally create the page if it doesn't exist. 
+ */ +static struct id_alloc_page *find_or_create_page(struct id_alloc *alloc, + uint32_t id, int create) +{ + struct id_alloc_dir *dir = NULL; + struct id_alloc_subdir *subdir = NULL; + struct id_alloc_page *page = NULL; + + dir = alloc->sublevels[ID_DIR(id)]; + if (dir == NULL) { + if (create) { + dir = XCALLOC(MTYPE_IDALLOC_DIRECTORY, sizeof(*dir)); + alloc->sublevels[ID_DIR(id)] = dir; + } else { + return NULL; + } + } + + subdir = dir->sublevels[ID_SUBDIR(id)]; + if (subdir == NULL) { + if (create) { + subdir = XCALLOC(MTYPE_IDALLOC_SUBDIRECTORY, + sizeof(*subdir)); + dir->sublevels[ID_SUBDIR(id)] = subdir; + } else { + return NULL; + } + } + + page = subdir->sublevels[ID_PAGE(id)]; + if (page == NULL && create) { + page = XCALLOC(MTYPE_IDALLOC_PAGE, sizeof(*page)); + page->base_value = id; + subdir->sublevels[ID_PAGE(id)] = page; + + alloc->capacity += 1 << PAGE_SHIFT; + page->next_has_free = alloc->has_free; + alloc->has_free = page; + } else if (page != NULL && create) { + flog_err( + EC_LIB_ID_CONSISTENCY, + "ID Allocator %s attempt to re-create page at %" PRIu32, + alloc->name, id); + } + + return page; +} + +/* + * Return an ID number back to the allocator. + * While this ID can be re-assigned through idalloc_allocate, the underlying + * memory will not be freed. If this is the first free ID in the page, the page + * will be added to the allocator's list of pages with free IDs. + */ +void idalloc_free(struct id_alloc *alloc, uint32_t id) +{ + struct id_alloc_page *page = NULL; + + int word, offset; + uint32_t old_word, old_word_mask; + + page = find_or_create_page(alloc, id, 0); + if (!page) { + flog_err(EC_LIB_ID_CONSISTENCY, + "ID Allocator %s cannot free #%" PRIu32 + ". ID Block does not exist.", + alloc->name, id); + return; + } + + word = ID_WORD(id); + offset = ID_OFFSET(id); + + if ((page->allocated_mask[word] & (1 << offset)) == 0) { + flog_err(EC_LIB_ID_CONSISTENCY, + "ID Allocator %s cannot free #%" PRIu32 + ". 
ID was not allocated at the time of free.", + alloc->name, id); + return; + } + + old_word = page->allocated_mask[word]; + page->allocated_mask[word] &= ~(((uint32_t)1) << offset); + alloc->allocated -= 1; + + if (old_word == UINT32_MAX) { + /* first bit in this block of 32 to be freed.*/ + + old_word_mask = page->full_word_mask; + page->full_word_mask &= ~(((uint32_t)1) << word); + + if (old_word_mask == UINT32_MAX) { + /* first bit in page freed, add this to the allocator's + * list of pages with free space + */ + page->next_has_free = alloc->has_free; + alloc->has_free = page; + } + } +} + +/* + * Add a allocation page to the end of the allocator's current range. + * Returns null if the allocator has had all possible pages allocated already. + */ +static struct id_alloc_page *create_next_page(struct id_alloc *alloc) +{ + if (alloc->capacity == 0 && alloc->sublevels[0]) + return NULL; /* All IDs allocated and the capacity looped. */ + + return find_or_create_page(alloc, alloc->capacity, 1); +} + +/* + * Marks an ID within an allocator page as in use. + * If the ID was the last free ID in the page, the page is removed from the + * allocator's list of free IDs. In the typical allocation case, this page is + * the first page in the list, and removing the page is fast. If instead an ID + * is being reserved by number, this may end up scanning the whole single linked + * list of pages in order to remove it. 
+ */ +static void reserve_bit(struct id_alloc *alloc, struct id_alloc_page *page, + int word, int offset) +{ + struct id_alloc_page *itr; + + page->allocated_mask[word] |= ((uint32_t)1) << offset; + alloc->allocated += 1; + + if (page->allocated_mask[word] == UINT32_MAX) { + page->full_word_mask |= ((uint32_t)1) << word; + if (page->full_word_mask == UINT32_MAX) { + if (alloc->has_free == page) { + /* allocate always pulls from alloc->has_free */ + alloc->has_free = page->next_has_free; + } else { + /* reserve could pull from any page with free + * bits + */ + itr = alloc->has_free; + while (itr) { + if (itr->next_has_free == page) { + itr->next_has_free = + page->next_has_free; + return; + } + + itr = itr->next_has_free; + } + } + } + } +} + +/* + * Reserve an ID number from the allocator. Returns IDALLOC_INVALID (0) if the + * allocator has no more IDs available. + */ +uint32_t idalloc_allocate(struct id_alloc *alloc) +{ + struct id_alloc_page *page; + int word, offset; + uint32_t return_value; + + if (alloc->has_free == NULL) + create_next_page(alloc); + + if (alloc->has_free == NULL) { + flog_err(EC_LIB_ID_EXHAUST, + "ID Allocator %s has run out of IDs.", alloc->name); + return IDALLOC_INVALID; + } + + page = alloc->has_free; + word = FFS32(~(page->full_word_mask)) - 1; + + if (word < 0 || word >= 32) { + flog_err(EC_LIB_ID_CONSISTENCY, + "ID Allocator %s internal error. Page starting at %d is inconsistent.", + alloc->name, page->base_value); + return IDALLOC_INVALID; + } + + offset = FFS32(~(page->allocated_mask[word])) - 1; + if (offset < 0 || offset >= 32) { + flog_err(EC_LIB_ID_CONSISTENCY, + "ID Allocator %s internal error. Page starting at %d is inconsistent on word %d", + alloc->name, page->base_value, word); + return IDALLOC_INVALID; + } + return_value = page->base_value + word * 32 + offset; + + reserve_bit(alloc, page, word, offset); + + return return_value; +} + +/* + * Tries to allocate a specific ID from the allocator. 
Returns IDALLOC_INVALID + * when the ID being "reserved" has allready been assigned/reserved. This should + * only be done with low numbered IDs, as the allocator needs to reserve bit-map + * pages in order + */ +uint32_t idalloc_reserve(struct id_alloc *alloc, uint32_t id) +{ + struct id_alloc_page *page; + int word, offset; + + while (alloc->capacity <= id) + create_next_page(alloc); + + word = ID_WORD(id); + offset = ID_OFFSET(id); + page = find_or_create_page(alloc, id, 0); + /* page can't be null because the loop above ensured it was created. */ + + if (page->allocated_mask[word] & (((uint32_t)1) << offset)) { + flog_err(EC_LIB_ID_CONSISTENCY, + "ID Allocator %s could not reserve %" PRIu32 + " because it is already allocated.", + alloc->name, id); + return IDALLOC_INVALID; + } + + reserve_bit(alloc, page, word, offset); + return id; +} + +/* + * Set up an empty ID allocator, with IDALLOC_INVALID pre-reserved. + */ +struct id_alloc *idalloc_new(const char *name) +{ + struct id_alloc *ret; + + ret = XCALLOC(MTYPE_IDALLOC_ALLOCATOR, sizeof(*ret)); + ret->name = XSTRDUP(MTYPE_IDALLOC_ALLOCATOR_NAME, name); + + idalloc_reserve(ret, IDALLOC_INVALID); + + return ret; +} + +/* + * Free a subdir, and all pages below it. + */ +static void idalloc_destroy_subdir(struct id_alloc_subdir *subdir) +{ + int i; + + for (i = 0; i < IDALLOC_PAGE_COUNT; i++) { + if (subdir->sublevels[i]) + XFREE(MTYPE_IDALLOC_PAGE, subdir->sublevels[i]); + else + break; + } + XFREE(MTYPE_IDALLOC_SUBDIRECTORY, subdir); +} + +/* + * Free a dir, and all subdirs/pages below it. + */ +static void idalloc_destroy_dir(struct id_alloc_dir *dir) +{ + int i; + + for (i = 0; i < IDALLOC_SUBDIR_COUNT; i++) { + if (dir->sublevels[i]) + idalloc_destroy_subdir(dir->sublevels[i]); + else + break; + } + XFREE(MTYPE_IDALLOC_DIRECTORY, dir); +} + +/* + * Free all memory associated with an ID allocator. 
 */
void idalloc_destroy(struct id_alloc *alloc)
{
	int i;

	/* Dirs are created contiguously from index 0, so stop at the first
	 * empty slot.
	 */
	for (i = 0; i < IDALLOC_DIR_COUNT; i++) {
		if (alloc->sublevels[i])
			idalloc_destroy_dir(alloc->sublevels[i]);
		else
			break;
	}

	XFREE(MTYPE_IDALLOC_ALLOCATOR_NAME, alloc->name);
	XFREE(MTYPE_IDALLOC_ALLOCATOR, alloc);
}

/*
 * Give an ID number to a temporary holding pool.
 * The pool is a linked-list stack; the caller owns the pool head pointer.
 */
void idalloc_free_to_pool(struct id_alloc_pool **pool_ptr, uint32_t id)
{
	struct id_alloc_pool *new_pool;

	new_pool = XMALLOC(MTYPE_IDALLOC_POOL, sizeof(*new_pool));
	new_pool->id = id;
	new_pool->next = *pool_ptr;
	*pool_ptr = new_pool;
}

/*
 * Free all ID numbers held in a holding pool back to the main allocator.
 */
void idalloc_drain_pool(struct id_alloc *alloc, struct id_alloc_pool **pool_ptr)
{
	struct id_alloc_pool *current, *next;

	while (*pool_ptr) {
		current = *pool_ptr;
		next = current->next;
		idalloc_free(alloc, current->id);
		XFREE(MTYPE_IDALLOC_POOL, current);
		*pool_ptr = next;
	}
}

/*
 * Allocate an ID from either a holding pool, or the main allocator. IDs will
 * only be pulled from the main allocator when the pool is empty.
 */
uint32_t idalloc_allocate_prefer_pool(struct id_alloc *alloc,
				      struct id_alloc_pool **pool_ptr)
{
	uint32_t ret;
	struct id_alloc_pool *pool_head = *pool_ptr;

	if (pool_head) {
		/* Pop the stack head and recycle its ID. */
		ret = pool_head->id;
		*pool_ptr = pool_head->next;
		XFREE(MTYPE_IDALLOC_POOL, pool_head);
		return ret;
	} else {
		return idalloc_allocate(alloc);
	}
}
diff --git a/lib/id_alloc.h b/lib/id_alloc.h
new file mode 100644
index 0000000000..efe355658a
--- /dev/null
+++ b/lib/id_alloc.h
@@ -0,0 +1,90 @@
+/*
+ * FRR ID Number Allocator
+ * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; see the file COPYING; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef _ZEBRA_ID_ALLOC_H +#define _ZEBRA_ID_ALLOC_H + +#include +#include +#include + +#define IDALLOC_INVALID 0 + +#define IDALLOC_DIR_BITS 8 +#define IDALLOC_SUBDIR_BITS 7 +#define IDALLOC_PAGE_BITS 7 +#define IDALLOC_WORD_BITS 5 +#define IDALLOC_OFFSET_BITS 5 + +#define IDALLOC_DIR_COUNT (1 << IDALLOC_DIR_BITS) +#define IDALLOC_SUBDIR_COUNT (1 << IDALLOC_SUBDIR_BITS) +#define IDALLOC_PAGE_COUNT (1 << IDALLOC_PAGE_BITS) +#define IDALLOC_WORD_COUNT (1 << IDALLOC_WORD_BITS) + +struct id_alloc_page { + /* Bitmask of allocations. 1s indicates the ID is already allocated. */ + uint32_t allocated_mask[IDALLOC_WORD_COUNT]; + + /* Bitmask for free space in allocated_mask. 1s indicate whole 32 bit + * section is full. + */ + uint32_t full_word_mask; + + /* The ID that bit 0 in allocated_mask corresponds to. 
*/ + uint32_t base_value; + + struct id_alloc_page + *next_has_free; /* Next page with at least one bit open */ +}; + +struct id_alloc_subdir { + struct id_alloc_page *sublevels[IDALLOC_PAGE_COUNT]; +}; + +struct id_alloc_dir { + struct id_alloc_subdir *sublevels[IDALLOC_SUBDIR_COUNT]; +}; + +struct id_alloc { + struct id_alloc_dir *sublevels[IDALLOC_DIR_COUNT]; + + struct id_alloc_page *has_free; + + char *name; + + uint32_t allocated, capacity; +}; + +struct id_alloc_pool { + struct id_alloc_pool *next; + uint32_t id; +}; + +void idalloc_free(struct id_alloc *alloc, uint32_t id); +void idalloc_free_to_pool(struct id_alloc_pool **pool_ptr, uint32_t id); +void idalloc_drain_pool(struct id_alloc *alloc, + struct id_alloc_pool **pool_ptr); +uint32_t idalloc_allocate(struct id_alloc *alloc); +uint32_t idalloc_allocate_prefer_pool(struct id_alloc *alloc, + struct id_alloc_pool **pool_ptr); +uint32_t idalloc_reserve(struct id_alloc *alloc, uint32_t id); +struct id_alloc *idalloc_new(const char *name); +void idalloc_destroy(struct id_alloc *alloc); + +#endif diff --git a/lib/lib_errors.h b/lib/lib_errors.h index 3831116dd3..38c75f913e 100644 --- a/lib/lib_errors.h +++ b/lib/lib_errors.h @@ -72,6 +72,8 @@ enum lib_log_refs { EC_LIB_SYSREPO_INIT, EC_LIB_SYSREPO_DATA_CONVERT, EC_LIB_LIBSYSREPO, + EC_LIB_ID_CONSISTENCY, + EC_LIB_ID_EXHAUST, }; extern void lib_error_init(void); diff --git a/lib/subdir.am b/lib/subdir.am index 356796fbb1..c144c2c2e1 100644 --- a/lib/subdir.am +++ b/lib/subdir.am @@ -29,6 +29,7 @@ lib_libfrr_la_SOURCES = \ lib/graph.c \ lib/hash.c \ lib/hook.c \ + lib/id_alloc.c \ lib/if.c \ lib/if_rmap.c \ lib/imsg-buffer.c \ @@ -147,6 +148,7 @@ pkginclude_HEADERS += \ lib/graph.h \ lib/hash.h \ lib/hook.h \ + lib/id_alloc.h \ lib/if.h \ lib/if_rmap.h \ lib/imsg.h \ diff --git a/tests/lib/test_idalloc.c b/tests/lib/test_idalloc.c new file mode 100644 index 0000000000..3053c1c074 --- /dev/null +++ b/tests/lib/test_idalloc.c @@ -0,0 +1,193 @@ +#include 
"id_alloc.h" + +#include +#include +#include +#include + +#define IDS_PER_PAGE (1<<(IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS)) +char allocated_markers[IDS_PER_PAGE*3]; + +int main(int argc, char **argv) +{ + int i, val; + uint32_t pg; + struct id_alloc *a; + + /* 1. Rattle test, shake it a little and make sure it doesn't make any + * noise :) + */ + a = idalloc_new("Rattle test"); + for (i = 0; i < 1000000; i++) + assert(idalloc_allocate(a) != 0); + + idalloc_destroy(a); + + /* 2. Reserve a few low IDs, make sure they are skipped by normal + * allocation. + */ + a = idalloc_new("Low Reservations"); + assert(idalloc_reserve(a, 1) == 1); + assert(idalloc_reserve(a, 3) == 3); + assert(idalloc_reserve(a, 5) == 5); + for (i = 0; i < 100; i++) { + val = idalloc_allocate(a); + assert(val != 1 && val != 3 && val != 5); + } + idalloc_destroy(a); + + /* 3. Single page testing. Check that IDs are kept unique, and all IDs + * in the existing page are allocated before a new page is added. + */ + memset(allocated_markers, 0, sizeof(allocated_markers)); + allocated_markers[IDALLOC_INVALID] = 1; + + a = idalloc_new("Single Page"); + + /* reserve the rest of the first page */ + for (i = 0; i < IDS_PER_PAGE - 1; i++) { + val = idalloc_allocate(a); + assert(val < IDS_PER_PAGE); + assert(allocated_markers[val] == 0); + assert(a->capacity == IDS_PER_PAGE); + allocated_markers[val] = 1; + } + /* Check that the count is right */ + assert(a->allocated == IDS_PER_PAGE); + + /* Free some IDs out of the middle. 
*/ + idalloc_free(a, 300); + allocated_markers[300] = 0; + idalloc_free(a, 400); + allocated_markers[400] = 0; + idalloc_free(a, 500); + allocated_markers[500] = 0; + + assert(a->allocated == IDS_PER_PAGE-3); + + /* Allocate the three IDs back and make sure they are pulled from the + * set just freed + */ + for (i = 0; i < 3; i++) { + val = idalloc_allocate(a); + assert(val < IDS_PER_PAGE); + assert(allocated_markers[val] == 0); + assert(a->capacity == IDS_PER_PAGE); + allocated_markers[val] = 1; + } + idalloc_destroy(a); + + /* 4. Multi-page testing. */ + memset(allocated_markers, 0, sizeof(allocated_markers)); + allocated_markers[IDALLOC_INVALID] = 1; + + a = idalloc_new("Multi-page"); + + /* reserve the rest of the first page and all of the second and third */ + for (i = 0; i < 3 * IDS_PER_PAGE - 1; i++) { + val = idalloc_allocate(a); + assert(val < 3*IDS_PER_PAGE); + assert(allocated_markers[val] == 0); + allocated_markers[val] = 1; + } + assert(a->capacity == 3*IDS_PER_PAGE); + assert(a->allocated == 3*IDS_PER_PAGE); + + /* Free two IDs from each page. */ + for (i = 0; i < 3; i++) { + idalloc_free(a, 7 + i*IDS_PER_PAGE); + allocated_markers[7 + i*IDS_PER_PAGE] = 0; + + idalloc_free(a, 4 + i*IDS_PER_PAGE); + allocated_markers[4 + i*IDS_PER_PAGE] = 0; + } + + assert(a->allocated == 3*IDS_PER_PAGE - 6); + + /* Allocate the six IDs back and make sure they are pulled from the set + * just freed. + */ + for (i = 0; i < 6; i++) { + val = idalloc_allocate(a); + assert(val < 3*IDS_PER_PAGE); + assert(allocated_markers[val] == 0); + assert(a->capacity == 3*IDS_PER_PAGE); + allocated_markers[val] = 1; + } + + assert(a->capacity == 3*IDS_PER_PAGE); + assert(a->allocated == 3*IDS_PER_PAGE); + + /* Walk each allocated ID. Free it, then re-allocate it back. 
*/ + for (i = 1; i < 3 * IDS_PER_PAGE - 1; i++) { + idalloc_free(a, i); + val = idalloc_allocate(a); + assert(val == i); + assert(a->capacity == 3*IDS_PER_PAGE); + assert(a->allocated == 3*IDS_PER_PAGE); + } + idalloc_destroy(a); + + /* 5. Weird Reservations + * idalloc_reserve exists primarily to black out low numbered IDs that + * are reserved for special cases. However, we will test it for more + * complex use cases to avoid unpleasant surprises. + */ + + memset(allocated_markers, 0, sizeof(allocated_markers)); + allocated_markers[IDALLOC_INVALID] = 1; + + a = idalloc_new("Weird Reservations"); + + /* Start with 3 pages fully allocated. */ + for (i = 0; i < 3 * IDS_PER_PAGE - 1; i++) { + val = idalloc_allocate(a); + assert(val < 3*IDS_PER_PAGE); + assert(allocated_markers[val] == 0); + allocated_markers[val] = 1; + } + assert(a->capacity == 3*IDS_PER_PAGE); + assert(a->allocated == 3*IDS_PER_PAGE); + + /* Free a bit out of each of the three pages. Then reserve one of the + * three freed IDs. Finally, allocate the other two freed IDs. Do this + * each of three ways. (Reserve out of the first, seconds then third + * page.) + * The intent here is to exercise the rare cases on reserve_bit's + * linked-list removal in the case that it is not removing the first + * page with a free bit in its list of pages with free bits. 
+ */ + + for (pg = 0; pg < 3; pg++) { + /* free a bit out of each of the three pages */ + for (i = 0; i < 3; i++) { + idalloc_free(a, i*IDS_PER_PAGE + 17); + allocated_markers[i*IDS_PER_PAGE + 17] = 0; + } + + assert(a->capacity == 3*IDS_PER_PAGE); + assert(a->allocated == 3*IDS_PER_PAGE-3); + + /* Reserve one of the freed IDs */ + assert(idalloc_reserve(a, pg*IDS_PER_PAGE + 17) == + pg*IDS_PER_PAGE + 17); + allocated_markers[pg*IDS_PER_PAGE + 17] = 1; + + assert(a->capacity == 3*IDS_PER_PAGE); + assert(a->allocated == 3*IDS_PER_PAGE-2); + + /* Allocate the other two back */ + for (i = 0; i < 2; i++) { + val = idalloc_allocate(a); + assert(val < 3*IDS_PER_PAGE); + assert(allocated_markers[val] == 0); + allocated_markers[val] = 1; + } + assert(a->capacity == 3*IDS_PER_PAGE); + assert(a->allocated == 3*IDS_PER_PAGE); + } + idalloc_destroy(a); + + puts("ID Allocator test successful.\n"); + return 0; +} diff --git a/tests/lib/test_idalloc.py b/tests/lib/test_idalloc.py new file mode 100644 index 0000000000..22de082be4 --- /dev/null +++ b/tests/lib/test_idalloc.py @@ -0,0 +1,6 @@ +import frrtest + +class TestIDAlloc(frrtest.TestMultiOut): + program = './test_idalloc' + +TestIDAlloc.onesimple('ID Allocator test successful.') diff --git a/tests/subdir.am b/tests/subdir.am index a4a754b088..b7dfa9d5f8 100644 --- a/tests/subdir.am +++ b/tests/subdir.am @@ -50,6 +50,7 @@ check_PROGRAMS = \ tests/lib/test_heavy_thread \ tests/lib/test_heavy_wq \ tests/lib/test_heavy \ + tests/lib/test_idalloc \ tests/lib/test_memory \ tests/lib/test_nexthop_iter \ tests/lib/test_privs \ @@ -193,6 +194,9 @@ tests_lib_test_heavy_wq_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_heavy_wq_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_heavy_wq_LDADD = $(ALL_TESTS_LDADD) -lm tests_lib_test_heavy_wq_SOURCES = tests/lib/test_heavy_wq.c tests/helpers/c/main.c +tests_lib_test_idalloc_CFLAGS = $(TESTS_CFLAGS) +tests_lib_test_idalloc_LDADD = $(ALL_TESTS_LDADD) +tests_lib_test_idalloc_SOURCES = 
tests/lib/test_idalloc.c tests_lib_test_memory_CFLAGS = $(TESTS_CFLAGS) tests_lib_test_memory_CPPFLAGS = $(TESTS_CPPFLAGS) tests_lib_test_memory_LDADD = $(ALL_TESTS_LDADD)