summaryrefslogtreecommitdiff
path: root/bgpd/bgp_io.c
diff options
context:
space:
mode:
author: Quentin Young <qlyoung@nvidia.com>  2021-04-26 18:59:48 -0400
committer: Quentin Young <qlyoung@nvidia.com>  2021-04-29 12:12:32 -0400
commit: 338f4a78cc0078e5f59780fe883084c26842157b (patch)
tree: e629cb1aafacc7f7f50e0a83413744e961d45970 /bgpd/bgp_io.c
parent: 6c55ee964e600cba385afabb8438b9d09eb509f2 (diff)
bgpd: avoid allocating very large stack buffer
As pointed out on code review of BGP extended messages, increasing the maximum BGP message size has the consequence of growing the dynamically sized stack buffer up to 650K. While unlikely to exceed modern stack sizes, it is still unreasonably large. Remedy this with a heap buffer.

Signed-off-by: Quentin Young <qlyoung@nvidia.com>
Diffstat (limited to 'bgpd/bgp_io.c')
-rw-r--r--  bgpd/bgp_io.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index fec96700da..99d0344c9f 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -465,10 +465,10 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
size_t readsize; // how many bytes we want to read
ssize_t nbytes; // how many bytes we actually read
uint16_t status = 0;
- uint8_t ibw[peer->max_packet_size * BGP_READ_PACKET_MAX];
- readsize = MIN(ringbuf_space(peer->ibuf_work), sizeof(ibw));
- nbytes = read(peer->fd, ibw, readsize);
+ readsize =
+ MIN(ringbuf_space(peer->ibuf_work), sizeof(peer->ibuf_scratch));
+ nbytes = read(peer->fd, peer->ibuf_scratch, readsize);
/* EAGAIN or EWOULDBLOCK; come back later */
if (nbytes < 0 && ERRNO_IO_RETRY(errno)) {
@@ -497,7 +497,7 @@ static uint16_t bgp_read(struct peer *peer, int *code_p)
SET_FLAG(status, BGP_IO_FATAL_ERR);
} else {
- assert(ringbuf_put(peer->ibuf_work, ibw, nbytes)
+ assert(ringbuf_put(peer->ibuf_work, peer->ibuf_scratch, nbytes)
== (size_t)nbytes);
}