author    Donald Sharp <sharpd@nvidia.com>    2023-04-19 08:13:18 -0400
committer Donald Sharp <sharpd@nvidia.com>    2023-06-26 14:59:21 -0400
commit    161972c9fe108ffe3de851a537d9b34efeb09e31 (patch)
tree      6de17e2202d18969ee4aae4c85ecaccdc89c4f89 /lib/vrf.c
parent    dee79c33a425d264e53e0e5d0ad51b1bc13945d0 (diff)
*: Rearrange vrf_bitmap_X api to reduce memory footprint
When running all daemons with config for most of them, FRR has:

sharpd@janelle:~/frr$ vtysh -c "show debug hashtable" | grep "VRF BIT HASH" | wc -l
3570

That is 3570 hashes for bitmaps associated with the vrf, which is a very large
number of hashes. Let's do two things:

a) Reduce the initial size of the created hashes from 32 to 2.

b) Delay creation of the hash *until* a set operation happens, since the
absence of a hash directly implies an unset value if/when checked.

This reduces the number of hashes to 61 in my setup for normal operation.

Signed-off-by: Donald Sharp <sharpd@nvidia.com>
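As a rough caller-side sketch of what this API change means for daemons (the struct and field names below are hypothetical, not taken from this commit): every vrf_bitmap_* function now takes a pointer to the bitmap handle, the handle starts out NULL, and the smaller hash is only allocated the first time a bit is actually set.

#include <assert.h>

#include "vrf.h"	/* FRR's lib/vrf.h; exact include path depends on the build */

/* Hypothetical daemon state holding one of these bitmaps. */
struct my_daemon_state {
	vrf_bitmap_t redist_vrfs;	/* opaque handle, stays NULL until first set */
};

static void my_daemon_start(struct my_daemon_state *s)
{
	/* Old API: s->redist_vrfs = vrf_bitmap_init(); allocated a 32-bucket
	 * hash up front.  New API: pass a pointer; the handle is just set to
	 * NULL and no hash exists yet. */
	vrf_bitmap_init(&s->redist_vrfs);

	/* Checking a never-set bitmap returns 0 without allocating anything. */
	assert(!vrf_bitmap_check(&s->redist_vrfs, VRF_DEFAULT));

	/* The 2-bucket hash is created lazily on the first set. */
	vrf_bitmap_set(&s->redist_vrfs, VRF_DEFAULT);
	assert(vrf_bitmap_check(&s->redist_vrfs, VRF_DEFAULT));
}

static void my_daemon_stop(struct my_daemon_state *s)
{
	/* Safe even if no set ever happened and the handle is still NULL. */
	vrf_bitmap_free(&s->redist_vrfs);
}

Since an absent hash is treated as "everything unset", daemons that mostly check (and rarely set) these bitmaps never pay for a hash at all, which is where the drop from 3570 to 61 hashes comes from.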
Diffstat (limited to 'lib/vrf.c')
-rw-r--r--    lib/vrf.c    56
1 file changed, 41 insertions(+), 15 deletions(-)
diff --git a/lib/vrf.c b/lib/vrf.c
index 73f5d8ff72..b279e8b7bf 100644
--- a/lib/vrf.c
+++ b/lib/vrf.c
@@ -384,54 +384,80 @@ static void vrf_hash_bitmap_free(void *data)
XFREE(MTYPE_VRF_BITMAP, bit);
}
-vrf_bitmap_t vrf_bitmap_init(void)
+void vrf_bitmap_init(vrf_bitmap_t *pbmap)
{
- return hash_create_size(32, vrf_hash_bitmap_key, vrf_hash_bitmap_cmp,
- "VRF BIT HASH");
+ *pbmap = NULL;
}
-void vrf_bitmap_free(vrf_bitmap_t bmap)
+void vrf_bitmap_free(vrf_bitmap_t *pbmap)
{
- struct hash *vrf_hash = bmap;
+ struct hash *vrf_hash;
+
+ if (!*pbmap)
+ return;
+
+ vrf_hash = *pbmap;
hash_clean_and_free(&vrf_hash, vrf_hash_bitmap_free);
}
-void vrf_bitmap_set(vrf_bitmap_t bmap, vrf_id_t vrf_id)
+void vrf_bitmap_set(vrf_bitmap_t *pbmap, vrf_id_t vrf_id)
{
struct vrf_bit_set lookup = { .vrf_id = vrf_id };
- struct hash *vrf_hash = bmap;
+ struct hash *vrf_hash;
struct vrf_bit_set *bit;
- if (vrf_hash == NULL || vrf_id == VRF_UNKNOWN)
+ if (vrf_id == VRF_UNKNOWN)
return;
+ if (!*pbmap)
+ *pbmap = vrf_hash =
+ hash_create_size(2, vrf_hash_bitmap_key,
+ vrf_hash_bitmap_cmp, "VRF BIT HASH");
+ else
+ vrf_hash = *pbmap;
+
bit = hash_get(vrf_hash, &lookup, vrf_hash_bitmap_alloc);
bit->set = true;
}
-void vrf_bitmap_unset(vrf_bitmap_t bmap, vrf_id_t vrf_id)
+void vrf_bitmap_unset(vrf_bitmap_t *pbmap, vrf_id_t vrf_id)
{
struct vrf_bit_set lookup = { .vrf_id = vrf_id };
- struct hash *vrf_hash = bmap;
+ struct hash *vrf_hash;
struct vrf_bit_set *bit;
- if (vrf_hash == NULL || vrf_id == VRF_UNKNOWN)
+ if (vrf_id == VRF_UNKNOWN)
+ return;
+
+ /*
+ * If the hash is not created then unsetting is unnecessary
+ */
+ if (!*pbmap)
+ return;
+
+ vrf_hash = *pbmap;
+
+ /*
+ * If we can't look it up, no need to unset it!
+ */
+ bit = hash_lookup(vrf_hash, &lookup);
+ if (!bit)
return;
- bit = hash_get(vrf_hash, &lookup, vrf_hash_bitmap_alloc);
bit->set = false;
}
-int vrf_bitmap_check(vrf_bitmap_t bmap, vrf_id_t vrf_id)
+int vrf_bitmap_check(vrf_bitmap_t *pbmap, vrf_id_t vrf_id)
{
struct vrf_bit_set lookup = { .vrf_id = vrf_id };
- struct hash *vrf_hash = bmap;
+ struct hash *vrf_hash;
struct vrf_bit_set *bit;
- if (vrf_hash == NULL || vrf_id == VRF_UNKNOWN)
+ if (!*pbmap || vrf_id == VRF_UNKNOWN)
return 0;
+ vrf_hash = *pbmap;
bit = hash_lookup(vrf_hash, &lookup);
if (bit)
return bit->set;