--- /dev/null
+/*
+ * FRR ID Number Allocator
+ * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "id_alloc.h"
+
+#include "log.h"
+#include "lib_errors.h"
+#include "memory.h"
+
+#include <inttypes.h>
+
+DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR, "ID Number Allocator")
+DEFINE_MTYPE_STATIC(LIB, IDALLOC_ALLOCATOR_NAME, "ID Number Allocator Name")
+DEFINE_MTYPE_STATIC(LIB, IDALLOC_DIRECTORY, "ID Number Allocator Directory")
+DEFINE_MTYPE_STATIC(LIB, IDALLOC_SUBDIRECTORY,
+ "ID Number Allocator Subdirectory")
+DEFINE_MTYPE_STATIC(LIB, IDALLOC_PAGE, "ID Number Allocator Page")
+DEFINE_MTYPE_STATIC(LIB, IDALLOC_POOL, "ID Number temporary holding pool entry")
+
+#if UINT_MAX >= UINT32_MAX
+#define FFS32(x) ffs(x)
+#else
+/* ints less than 32 bits? Yikes. */
+#define FFS32(x) ffsl(x)
+#endif
+
+#define DIR_MASK ((1<<IDALLOC_DIR_BITS)-1)
+#define SUBDIR_MASK ((1<<IDALLOC_SUBDIR_BITS)-1)
+#define PAGE_MASK ((1<<IDALLOC_PAGE_BITS)-1)
+#define WORD_MASK ((1<<IDALLOC_WORD_BITS)-1)
+#define OFFSET_MASK ((1<<IDALLOC_OFFSET_BITS)-1)
+
+#define DIR_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS + \
+ IDALLOC_PAGE_BITS + IDALLOC_SUBDIR_BITS)
+#define SUBDIR_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS + \
+ IDALLOC_PAGE_BITS)
+#define PAGE_SHIFT (IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS)
+#define WORD_SHIFT (IDALLOC_OFFSET_BITS)
+#define OFFSET_SHIFT (0)
+
+#define ID_DIR(id) ((id >> DIR_SHIFT) & DIR_MASK)
+#define ID_SUBDIR(id) ((id >> SUBDIR_SHIFT) & SUBDIR_MASK)
+#define ID_PAGE(id) ((id >> PAGE_SHIFT) & PAGE_MASK)
+#define ID_WORD(id) ((id >> WORD_SHIFT) & WORD_MASK)
+#define ID_OFFSET(id) ((id >> OFFSET_SHIFT) & OFFSET_MASK)
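+
+/*
+ * A 32-bit ID splits into five fields, most significant to least significant:
+ * DIR (8 bits), SUBDIR (7), PAGE (7), WORD (5), OFFSET (5). As a worked
+ * example with those widths, the ID 0x12345678 decomposes as:
+ *
+ *   ID_DIR    = (0x12345678 >> 24) & 0xff = 0x12
+ *   ID_SUBDIR = (0x12345678 >> 17) & 0x7f = 0x1a
+ *   ID_PAGE   = (0x12345678 >> 10) & 0x7f = 0x15
+ *   ID_WORD   = (0x12345678 >>  5) & 0x1f = 0x13
+ *   ID_OFFSET = (0x12345678 >>  0) & 0x1f = 0x18
+ */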
+
+/*
+ * Find the page that an ID number belongs to in an allocator.
+ * Optionally create the page if it doesn't exist.
+ */
+static struct id_alloc_page *find_or_create_page(struct id_alloc *alloc,
+ uint32_t id, int create)
+{
+ struct id_alloc_dir *dir = NULL;
+ struct id_alloc_subdir *subdir = NULL;
+ struct id_alloc_page *page = NULL;
+
+ dir = alloc->sublevels[ID_DIR(id)];
+ if (dir == NULL) {
+ if (create) {
+ dir = XCALLOC(MTYPE_IDALLOC_DIRECTORY, sizeof(*dir));
+ alloc->sublevels[ID_DIR(id)] = dir;
+ } else {
+ return NULL;
+ }
+ }
+
+ subdir = dir->sublevels[ID_SUBDIR(id)];
+ if (subdir == NULL) {
+ if (create) {
+ subdir = XCALLOC(MTYPE_IDALLOC_SUBDIRECTORY,
+ sizeof(*subdir));
+ dir->sublevels[ID_SUBDIR(id)] = subdir;
+ } else {
+ return NULL;
+ }
+ }
+
+ page = subdir->sublevels[ID_PAGE(id)];
+ if (page == NULL && create) {
+ page = XCALLOC(MTYPE_IDALLOC_PAGE, sizeof(*page));
+ page->base_value = id;
+ subdir->sublevels[ID_PAGE(id)] = page;
+
+ alloc->capacity += 1 << PAGE_SHIFT;
+ page->next_has_free = alloc->has_free;
+ alloc->has_free = page;
+ } else if (page != NULL && create) {
+ flog_err(
+ EC_LIB_ID_CONSISTENCY,
+ "ID Allocator %s attempt to re-create page at %" PRIu32,
+ alloc->name, id);
+ }
+
+ return page;
+}
+
+/*
+ * Return an ID number back to the allocator.
+ * While this ID can be re-assigned through idalloc_allocate, the underlying
+ * memory will not be freed. If this is the first free ID in the page, the page
+ * will be added to the allocator's list of pages with free IDs.
+ */
+void idalloc_free(struct id_alloc *alloc, uint32_t id)
+{
+ struct id_alloc_page *page = NULL;
+
+ int word, offset;
+ uint32_t old_word, old_word_mask;
+
+ page = find_or_create_page(alloc, id, 0);
+ if (!page) {
+ flog_err(EC_LIB_ID_CONSISTENCY,
+ "ID Allocator %s cannot free #%" PRIu32
+ ". ID Block does not exist.",
+ alloc->name, id);
+ return;
+ }
+
+ word = ID_WORD(id);
+ offset = ID_OFFSET(id);
+
+ if ((page->allocated_mask[word] & (((uint32_t)1) << offset)) == 0) {
+ flog_err(EC_LIB_ID_CONSISTENCY,
+ "ID Allocator %s cannot free #%" PRIu32
+ ". ID was not allocated at the time of free.",
+ alloc->name, id);
+ return;
+ }
+
+ old_word = page->allocated_mask[word];
+ page->allocated_mask[word] &= ~(((uint32_t)1) << offset);
+ alloc->allocated -= 1;
+
+ if (old_word == UINT32_MAX) {
+ /* first bit in this block of 32 to be freed.*/
+
+ old_word_mask = page->full_word_mask;
+ page->full_word_mask &= ~(((uint32_t)1) << word);
+
+ if (old_word_mask == UINT32_MAX) {
+ /* first bit in page freed, add this to the allocator's
+ * list of pages with free space
+ */
+ page->next_has_free = alloc->has_free;
+ alloc->has_free = page;
+ }
+ }
+}
+
+/*
+ * Add an allocation page to the end of the allocator's current range.
+ * Returns NULL if the allocator has already allocated all possible pages.
+ */
+static struct id_alloc_page *create_next_page(struct id_alloc *alloc)
+{
+ if (alloc->capacity == 0 && alloc->sublevels[0])
+ return NULL; /* All IDs allocated and the capacity looped. */
+
+ return find_or_create_page(alloc, alloc->capacity, 1);
+}
+
+/*
+ * Marks an ID within an allocator page as in use.
+ * If the ID was the last free ID in the page, the page is removed from the
+ * allocator's list of pages with free IDs. In the typical allocation case,
+ * this page is the first page in the list, so removing it is fast. If instead
+ * an ID is being reserved by number, this may end up scanning the whole singly
+ * linked list of pages in order to remove it.
+ */
+static void reserve_bit(struct id_alloc *alloc, struct id_alloc_page *page,
+ int word, int offset)
+{
+ struct id_alloc_page *itr;
+
+ page->allocated_mask[word] |= ((uint32_t)1) << offset;
+ alloc->allocated += 1;
+
+ if (page->allocated_mask[word] == UINT32_MAX) {
+ page->full_word_mask |= ((uint32_t)1) << word;
+ if (page->full_word_mask == UINT32_MAX) {
+ if (alloc->has_free == page) {
+ /* allocate always pulls from alloc->has_free */
+ alloc->has_free = page->next_has_free;
+ } else {
+ /* reserve could pull from any page with free
+ * bits
+ */
+ itr = alloc->has_free;
+ while (itr) {
+ if (itr->next_has_free == page) {
+ itr->next_has_free =
+ page->next_has_free;
+ return;
+ }
+
+ itr = itr->next_has_free;
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Allocate an ID number from the allocator. Returns IDALLOC_INVALID (0) if the
+ * allocator has no more IDs available.
+ */
+uint32_t idalloc_allocate(struct id_alloc *alloc)
+{
+ struct id_alloc_page *page;
+ int word, offset;
+ uint32_t return_value;
+
+ if (alloc->has_free == NULL)
+ create_next_page(alloc);
+
+ if (alloc->has_free == NULL) {
+ flog_err(EC_LIB_ID_EXHAUST,
+ "ID Allocator %s has run out of IDs.", alloc->name);
+ return IDALLOC_INVALID;
+ }
+
+ page = alloc->has_free;
+ word = FFS32(~(page->full_word_mask)) - 1;
+
+ if (word < 0 || word >= 32) {
+ flog_err(EC_LIB_ID_CONSISTENCY,
+ "ID Allocator %s internal error. Page starting at %d is inconsistent.",
+ alloc->name, page->base_value);
+ return IDALLOC_INVALID;
+ }
+
+ offset = FFS32(~(page->allocated_mask[word])) - 1;
+ if (offset < 0 || offset >= 32) {
+ flog_err(EC_LIB_ID_CONSISTENCY,
+ "ID Allocator %s internal error. Page starting at %d is inconsistent on word %d",
+ alloc->name, page->base_value, word);
+ return IDALLOC_INVALID;
+ }
+ return_value = page->base_value + word * 32 + offset;
+
+ reserve_bit(alloc, page, word, offset);
+
+ return return_value;
+}
+
+/*
+ * Tries to reserve a specific ID from the allocator. Returns IDALLOC_INVALID
+ * when the ID being reserved has already been assigned/reserved. This should
+ * only be done with low-numbered IDs, as the allocator needs to create all
+ * bit-map pages up to the reserved ID.
+ */
+uint32_t idalloc_reserve(struct id_alloc *alloc, uint32_t id)
+{
+ struct id_alloc_page *page;
+ int word, offset;
+
+ while (alloc->capacity <= id)
+ create_next_page(alloc);
+
+ word = ID_WORD(id);
+ offset = ID_OFFSET(id);
+ page = find_or_create_page(alloc, id, 0);
+ /* page can't be null because the loop above ensured it was created. */
+
+ if (page->allocated_mask[word] & (((uint32_t)1) << offset)) {
+ flog_err(EC_LIB_ID_CONSISTENCY,
+ "ID Allocator %s could not reserve %" PRIu32
+ " because it is already allocated.",
+ alloc->name, id);
+ return IDALLOC_INVALID;
+ }
+
+ reserve_bit(alloc, page, word, offset);
+ return id;
+}
+
+/*
+ * Set up an empty ID allocator, with IDALLOC_INVALID pre-reserved.
+ */
+struct id_alloc *idalloc_new(const char *name)
+{
+ struct id_alloc *ret;
+
+ ret = XCALLOC(MTYPE_IDALLOC_ALLOCATOR, sizeof(*ret));
+ ret->name = XSTRDUP(MTYPE_IDALLOC_ALLOCATOR_NAME, name);
+
+ idalloc_reserve(ret, IDALLOC_INVALID);
+
+ return ret;
+}
+
+/*
+ * Free a subdir, and all pages below it.
+ */
+static void idalloc_destroy_subdir(struct id_alloc_subdir *subdir)
+{
+ int i;
+
+ for (i = 0; i < IDALLOC_PAGE_COUNT; i++) {
+ if (subdir->sublevels[i])
+ XFREE(MTYPE_IDALLOC_PAGE, subdir->sublevels[i]);
+ else
+ break;
+ }
+ XFREE(MTYPE_IDALLOC_SUBDIRECTORY, subdir);
+}
+
+/*
+ * Free a dir, and all subdirs/pages below it.
+ */
+static void idalloc_destroy_dir(struct id_alloc_dir *dir)
+{
+ int i;
+
+ for (i = 0; i < IDALLOC_SUBDIR_COUNT; i++) {
+ if (dir->sublevels[i])
+ idalloc_destroy_subdir(dir->sublevels[i]);
+ else
+ break;
+ }
+ XFREE(MTYPE_IDALLOC_DIRECTORY, dir);
+}
+
+/*
+ * Free all memory associated with an ID allocator.
+ */
+void idalloc_destroy(struct id_alloc *alloc)
+{
+ int i;
+
+ for (i = 0; i < IDALLOC_DIR_COUNT; i++) {
+ if (alloc->sublevels[i])
+ idalloc_destroy_dir(alloc->sublevels[i]);
+ else
+ break;
+ }
+
+ XFREE(MTYPE_IDALLOC_ALLOCATOR_NAME, alloc->name);
+ XFREE(MTYPE_IDALLOC_ALLOCATOR, alloc);
+}
+
+/*
+ * Give an ID number back to a temporary holding pool.
+ */
+void idalloc_free_to_pool(struct id_alloc_pool **pool_ptr, uint32_t id)
+{
+ struct id_alloc_pool *new_pool;
+
+ new_pool = XMALLOC(MTYPE_IDALLOC_POOL, sizeof(*new_pool));
+ new_pool->id = id;
+ new_pool->next = *pool_ptr;
+ *pool_ptr = new_pool;
+}
+
+/*
+ * Free all ID numbers held in a holding pool back to the main allocator.
+ */
+void idalloc_drain_pool(struct id_alloc *alloc, struct id_alloc_pool **pool_ptr)
+{
+ struct id_alloc_pool *current, *next;
+
+ while (*pool_ptr) {
+ current = *pool_ptr;
+ next = current->next;
+ idalloc_free(alloc, current->id);
+ XFREE(MTYPE_IDALLOC_POOL, current);
+ *pool_ptr = next;
+ }
+}
+
+/*
+ * Allocate an ID from either a holding pool or the main allocator. IDs will
+ * only be pulled from the main allocator when the pool is empty.
+ */
+uint32_t idalloc_allocate_prefer_pool(struct id_alloc *alloc,
+ struct id_alloc_pool **pool_ptr)
+{
+ uint32_t ret;
+ struct id_alloc_pool *pool_head = *pool_ptr;
+
+ if (pool_head) {
+ ret = pool_head->id;
+ *pool_ptr = pool_head->next;
+ XFREE(MTYPE_IDALLOC_POOL, pool_head);
+ return ret;
+ } else {
+ return idalloc_allocate(alloc);
+ }
+}
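+
+/*
+ * A minimal sketch of the holding-pool pattern, assuming the caller keeps a
+ * NULL-initialized struct id_alloc_pool pointer alongside its allocator
+ * (old_id, new_id, and a below are placeholder names):
+ *
+ *   struct id_alloc_pool *pool = NULL;
+ *
+ *   idalloc_free_to_pool(&pool, old_id);             // park an ID for reuse
+ *   new_id = idalloc_allocate_prefer_pool(a, &pool); // reuses old_id first
+ *   idalloc_drain_pool(a, &pool);                    // return leftovers to a
+ */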
--- /dev/null
+/*
+ * FRR ID Number Allocator
+ * Copyright (C) 2018 Amazon.com, Inc. or its affiliates
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ZEBRA_ID_ALLOC_H
+#define _ZEBRA_ID_ALLOC_H
+
+#include <strings.h>
+#include <limits.h>
+#include <stdint.h>
+
+#define IDALLOC_INVALID 0
+
+#define IDALLOC_DIR_BITS 8
+#define IDALLOC_SUBDIR_BITS 7
+#define IDALLOC_PAGE_BITS 7
+#define IDALLOC_WORD_BITS 5
+#define IDALLOC_OFFSET_BITS 5
+
+#define IDALLOC_DIR_COUNT (1 << IDALLOC_DIR_BITS)
+#define IDALLOC_SUBDIR_COUNT (1 << IDALLOC_SUBDIR_BITS)
+#define IDALLOC_PAGE_COUNT (1 << IDALLOC_PAGE_BITS)
+#define IDALLOC_WORD_COUNT (1 << IDALLOC_WORD_BITS)
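+
+/* The five field widths above sum to 32 bits, so every uint32_t is a valid
+ * ID. One page covers 2^(IDALLOC_WORD_BITS + IDALLOC_OFFSET_BITS) = 1024 IDs,
+ * stored as 32 words of 32 allocation bits each.
+ */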
+
+struct id_alloc_page {
+ /* Bitmask of allocations. A 1 indicates the ID is already allocated. */
+ uint32_t allocated_mask[IDALLOC_WORD_COUNT];
+
+ /* Bitmask tracking which words of allocated_mask are full. A 1 means
+ * the corresponding 32-bit word is completely allocated.
+ */
+ uint32_t full_word_mask;
+
+ /* The ID that bit 0 in allocated_mask corresponds to. */
+ uint32_t base_value;
+
+ struct id_alloc_page
+ *next_has_free; /* Next page with at least one bit open */
+};
+
+struct id_alloc_subdir {
+ struct id_alloc_page *sublevels[IDALLOC_PAGE_COUNT];
+};
+
+struct id_alloc_dir {
+ struct id_alloc_subdir *sublevels[IDALLOC_SUBDIR_COUNT];
+};
+
+struct id_alloc {
+ struct id_alloc_dir *sublevels[IDALLOC_DIR_COUNT];
+
+ struct id_alloc_page *has_free;
+
+ char *name;
+
+ uint32_t allocated, capacity;
+};
+
+struct id_alloc_pool {
+ struct id_alloc_pool *next;
+ uint32_t id;
+};
+
+void idalloc_free(struct id_alloc *alloc, uint32_t id);
+void idalloc_free_to_pool(struct id_alloc_pool **pool_ptr, uint32_t id);
+void idalloc_drain_pool(struct id_alloc *alloc,
+ struct id_alloc_pool **pool_ptr);
+uint32_t idalloc_allocate(struct id_alloc *alloc);
+uint32_t idalloc_allocate_prefer_pool(struct id_alloc *alloc,
+ struct id_alloc_pool **pool_ptr);
+uint32_t idalloc_reserve(struct id_alloc *alloc, uint32_t id);
+struct id_alloc *idalloc_new(const char *name);
+void idalloc_destroy(struct id_alloc *alloc);
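+
+/* A minimal usage sketch. idalloc_new() pre-reserves IDALLOC_INVALID (0), so
+ * 0 is never handed out and can safely signal "no ID":
+ *
+ *   struct id_alloc *a = idalloc_new("example");
+ *   idalloc_reserve(a, 1);             // black out a special-case ID
+ *   uint32_t id = idalloc_allocate(a); // 0 (IDALLOC_INVALID) on exhaustion
+ *   if (id != IDALLOC_INVALID)
+ *           idalloc_free(a, id);       // return the ID for reuse
+ *   idalloc_destroy(a);
+ */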
+
+#endif
--- /dev/null
+#include "id_alloc.h"
+
+#include <inttypes.h>
+#include <string.h>
+#include <assert.h>
+#include <stdio.h>
+
+#define IDS_PER_PAGE (1<<(IDALLOC_OFFSET_BITS + IDALLOC_WORD_BITS))
+char allocated_markers[IDS_PER_PAGE*3];
+
+int main(int argc, char **argv)
+{
+ int i, val;
+ uint32_t pg;
+ struct id_alloc *a;
+
+ /* 1. Rattle test, shake it a little and make sure it doesn't make any
+ * noise :)
+ */
+ a = idalloc_new("Rattle test");
+ for (i = 0; i < 1000000; i++)
+ assert(idalloc_allocate(a) != 0);
+
+ idalloc_destroy(a);
+
+ /* 2. Reserve a few low IDs, make sure they are skipped by normal
+ * allocation.
+ */
+ a = idalloc_new("Low Reservations");
+ assert(idalloc_reserve(a, 1) == 1);
+ assert(idalloc_reserve(a, 3) == 3);
+ assert(idalloc_reserve(a, 5) == 5);
+ for (i = 0; i < 100; i++) {
+ val = idalloc_allocate(a);
+ assert(val != 1 && val != 3 && val != 5);
+ }
+ idalloc_destroy(a);
+
+ /* 3. Single page testing. Check that IDs are kept unique, and all IDs
+ * in the existing page are allocated before a new page is added.
+ */
+ memset(allocated_markers, 0, sizeof(allocated_markers));
+ allocated_markers[IDALLOC_INVALID] = 1;
+
+ a = idalloc_new("Single Page");
+
+ /* reserve the rest of the first page */
+ for (i = 0; i < IDS_PER_PAGE - 1; i++) {
+ val = idalloc_allocate(a);
+ assert(val < IDS_PER_PAGE);
+ assert(allocated_markers[val] == 0);
+ assert(a->capacity == IDS_PER_PAGE);
+ allocated_markers[val] = 1;
+ }
+ /* Check that the count is right */
+ assert(a->allocated == IDS_PER_PAGE);
+
+ /* Free some IDs out of the middle. */
+ idalloc_free(a, 300);
+ allocated_markers[300] = 0;
+ idalloc_free(a, 400);
+ allocated_markers[400] = 0;
+ idalloc_free(a, 500);
+ allocated_markers[500] = 0;
+
+ assert(a->allocated == IDS_PER_PAGE-3);
+
+ /* Allocate the three IDs back and make sure they are pulled from the
+ * set just freed
+ */
+ for (i = 0; i < 3; i++) {
+ val = idalloc_allocate(a);
+ assert(val < IDS_PER_PAGE);
+ assert(allocated_markers[val] == 0);
+ assert(a->capacity == IDS_PER_PAGE);
+ allocated_markers[val] = 1;
+ }
+ idalloc_destroy(a);
+
+ /* 4. Multi-page testing. */
+ memset(allocated_markers, 0, sizeof(allocated_markers));
+ allocated_markers[IDALLOC_INVALID] = 1;
+
+ a = idalloc_new("Multi-page");
+
+ /* reserve the rest of the first page and all of the second and third */
+ for (i = 0; i < 3 * IDS_PER_PAGE - 1; i++) {
+ val = idalloc_allocate(a);
+ assert(val < 3*IDS_PER_PAGE);
+ assert(allocated_markers[val] == 0);
+ allocated_markers[val] = 1;
+ }
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ assert(a->allocated == 3*IDS_PER_PAGE);
+
+ /* Free two IDs from each page. */
+ for (i = 0; i < 3; i++) {
+ idalloc_free(a, 7 + i*IDS_PER_PAGE);
+ allocated_markers[7 + i*IDS_PER_PAGE] = 0;
+
+ idalloc_free(a, 4 + i*IDS_PER_PAGE);
+ allocated_markers[4 + i*IDS_PER_PAGE] = 0;
+ }
+
+ assert(a->allocated == 3*IDS_PER_PAGE - 6);
+
+ /* Allocate the six IDs back and make sure they are pulled from the set
+ * just freed.
+ */
+ for (i = 0; i < 6; i++) {
+ val = idalloc_allocate(a);
+ assert(val < 3*IDS_PER_PAGE);
+ assert(allocated_markers[val] == 0);
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ allocated_markers[val] = 1;
+ }
+
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ assert(a->allocated == 3*IDS_PER_PAGE);
+
+ /* Walk each allocated ID. Free it, then re-allocate it back. */
+ for (i = 1; i < 3 * IDS_PER_PAGE - 1; i++) {
+ idalloc_free(a, i);
+ val = idalloc_allocate(a);
+ assert(val == i);
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ assert(a->allocated == 3*IDS_PER_PAGE);
+ }
+ idalloc_destroy(a);
+
+ /* 5. Weird Reservations
+ * idalloc_reserve exists primarily to black out low numbered IDs that
+ * are reserved for special cases. However, we will test it for more
+ * complex use cases to avoid unpleasant surprises.
+ */
+
+ memset(allocated_markers, 0, sizeof(allocated_markers));
+ allocated_markers[IDALLOC_INVALID] = 1;
+
+ a = idalloc_new("Weird Reservations");
+
+ /* Start with 3 pages fully allocated. */
+ for (i = 0; i < 3 * IDS_PER_PAGE - 1; i++) {
+ val = idalloc_allocate(a);
+ assert(val < 3*IDS_PER_PAGE);
+ assert(allocated_markers[val] == 0);
+ allocated_markers[val] = 1;
+ }
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ assert(a->allocated == 3*IDS_PER_PAGE);
+
+ /* Free a bit out of each of the three pages. Then reserve one of the
+ * three freed IDs. Finally, allocate the other two freed IDs. Do this
+ * each of three ways. (Reserve out of the first, second, then third
+ * page.)
+ * The intent here is to exercise the rare cases on reserve_bit's
+ * linked-list removal in the case that it is not removing the first
+ * page with a free bit in its list of pages with free bits.
+ */
+
+ for (pg = 0; pg < 3; pg++) {
+ /* free a bit out of each of the three pages */
+ for (i = 0; i < 3; i++) {
+ idalloc_free(a, i*IDS_PER_PAGE + 17);
+ allocated_markers[i*IDS_PER_PAGE + 17] = 0;
+ }
+
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ assert(a->allocated == 3*IDS_PER_PAGE-3);
+
+ /* Reserve one of the freed IDs */
+ assert(idalloc_reserve(a, pg*IDS_PER_PAGE + 17) ==
+ pg*IDS_PER_PAGE + 17);
+ allocated_markers[pg*IDS_PER_PAGE + 17] = 1;
+
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ assert(a->allocated == 3*IDS_PER_PAGE-2);
+
+ /* Allocate the other two back */
+ for (i = 0; i < 2; i++) {
+ val = idalloc_allocate(a);
+ assert(val < 3*IDS_PER_PAGE);
+ assert(allocated_markers[val] == 0);
+ allocated_markers[val] = 1;
+ }
+ assert(a->capacity == 3*IDS_PER_PAGE);
+ assert(a->allocated == 3*IDS_PER_PAGE);
+ }
+ idalloc_destroy(a);
+
+ puts("ID Allocator test successful.\n");
+ return 0;
+}