-rw-r--r--  bgpd/bgp_fsm.c            |  16
-rw-r--r--  bgpd/bgp_io.c             |  90
-rw-r--r--  bgpd/bgp_packet.c         |  21
-rw-r--r--  bgpd/bgp_snmp.c           |  18
-rw-r--r--  bgpd/bgp_vty.c            | 179
-rw-r--r--  bgpd/bgp_zebra.c          |   5
-rw-r--r--  bgpd/bgpd.c               |   7
-rw-r--r--  bgpd/bgpd.h               |  20
-rw-r--r--  bgpd/rfapi/rfapi.c        |   3
-rw-r--r--  bgpd/rfapi/vnc_zebra.c    |   3
-rw-r--r--  lib/ringbuf.c             |  15
-rw-r--r--  lib/ringbuf.h             |  10
-rw-r--r--  pimd/pim_cmd.c            |   6
-rw-r--r--  tests/lib/test_ringbuf.c  |   4
14 files changed, 219 insertions(+), 178 deletions(-)
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 9e58e466e1..de453de0c8 100644
--- a/bgpd/bgp_fsm.c
+++ b/bgpd/bgp_fsm.c
@@ -27,6 +27,7 @@
#include "thread.h"
#include "log.h"
#include "stream.h"
+#include "ringbuf.h"
#include "memory.h"
#include "plist.h"
#include "workqueue.h"
@@ -155,7 +156,6 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
stream_fifo_clean(peer->ibuf);
stream_fifo_clean(peer->obuf);
- stream_reset(peer->ibuf_work);
/*
* this should never happen, since bgp_process_packet() is the
@@ -183,7 +183,9 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
stream_fifo_push(peer->ibuf,
stream_fifo_pop(from_peer->ibuf));
- stream_copy(peer->ibuf_work, from_peer->ibuf_work);
+ ringbuf_wipe(peer->ibuf_work);
+ ringbuf_copy(peer->ibuf_work, from_peer->ibuf_work,
+ ringbuf_remain(from_peer->ibuf_work));
}
pthread_mutex_unlock(&from_peer->io_mtx);
pthread_mutex_unlock(&peer->io_mtx);
@@ -264,14 +266,16 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
}
}
+
+ // Note: peer_xfer_stats() must be called with I/O turned OFF
+ if (from_peer)
+ peer_xfer_stats(peer, from_peer);
+
bgp_reads_on(peer);
bgp_writes_on(peer);
thread_add_timer_msec(bm->master, bgp_process_packet, peer, 0,
&peer->t_process_packet);
- if (from_peer)
- peer_xfer_stats(peer, from_peer);
-
return (peer);
}
@@ -1097,7 +1101,7 @@ int bgp_stop(struct peer *peer)
stream_fifo_clean(peer->obuf);
if (peer->ibuf_work)
- stream_reset(peer->ibuf_work);
+ ringbuf_wipe(peer->ibuf_work);
if (peer->obuf_work)
stream_reset(peer->obuf_work);
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index cc9c1bda56..5ab14d5cd6 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -29,6 +29,7 @@
#include "memory.h" // for MTYPE_TMP, XCALLOC, XFREE
#include "network.h" // for ERRNO_IO_RETRY
#include "stream.h" // for stream_get_endp, stream_getw_from, str...
+#include "ringbuf.h" // for ringbuf_remain, ringbuf_peek, ringbuf_...
#include "thread.h" // for THREAD_OFF, THREAD_ARG, thread, thread...
#include "zassert.h" // for assert
@@ -263,14 +264,12 @@ static int bgp_process_reads(struct thread *thread)
/* static buffer for transferring packets */
static unsigned char pktbuf[BGP_MAX_PACKET_SIZE];
/* shorter alias to peer's input buffer */
- struct stream *ibw = peer->ibuf_work;
- /* offset of start of current packet */
- size_t offset = stream_get_getp(ibw);
+ struct ringbuf *ibw = peer->ibuf_work;
/* packet size as given by header */
- u_int16_t pktsize = 0;
+ uint16_t pktsize = 0;
/* check that we have enough data for a header */
- if (STREAM_READABLE(ibw) < BGP_HEADER_SIZE)
+ if (ringbuf_remain(ibw) < BGP_HEADER_SIZE)
break;
/* validate header */
@@ -282,16 +281,18 @@ static int bgp_process_reads(struct thread *thread)
}
/* header is valid; retrieve packet size */
- pktsize = stream_getw_from(ibw, offset + BGP_MARKER_SIZE);
+ ringbuf_peek(ibw, BGP_MARKER_SIZE, &pktsize, sizeof(pktsize));
+
+ pktsize = ntohs(pktsize);
/* if this fails we are seriously screwed */
assert(pktsize <= BGP_MAX_PACKET_SIZE);
/* If we have that much data, chuck it into its own
* stream and append to input queue for processing. */
- if (STREAM_READABLE(ibw) >= pktsize) {
+ if (ringbuf_remain(ibw) >= pktsize) {
struct stream *pkt = stream_new(pktsize);
- stream_get(pktbuf, ibw, pktsize);
+ assert(ringbuf_get(ibw, pktbuf, pktsize) == pktsize);
stream_put(pkt, pktbuf, pktsize);
pthread_mutex_lock(&peer->io_mtx);
@@ -305,28 +306,12 @@ static int bgp_process_reads(struct thread *thread)
break;
}
- /*
- * After reading:
- * 1. Move unread data to stream start to make room for more.
- * 2. Reschedule and return when we have additional data.
- *
- * XXX: Heavy abuse of stream API. This needs a ring buffer.
- */
- if (more && STREAM_WRITEABLE(peer->ibuf_work) < BGP_MAX_PACKET_SIZE) {
- void *from = stream_pnt(peer->ibuf_work);
- void *to = peer->ibuf_work->data;
- size_t siz = STREAM_READABLE(peer->ibuf_work);
- memmove(to, from, siz);
- stream_set_getp(peer->ibuf_work, 0);
- stream_set_endp(peer->ibuf_work, siz);
- }
-
- assert(STREAM_WRITEABLE(peer->ibuf_work) >= BGP_MAX_PACKET_SIZE);
+ assert(ringbuf_space(peer->ibuf_work) >= BGP_MAX_PACKET_SIZE);
/* handle invalid header */
if (fatal) {
/* wipe buffer just in case someone screwed up */
- stream_reset(peer->ibuf_work);
+ ringbuf_wipe(peer->ibuf_work);
} else {
thread_add_read(fpt->master, bgp_process_reads, peer, peer->fd,
&peer->t_read);
@@ -464,14 +449,16 @@ static uint16_t bgp_read(struct peer *peer)
size_t readsize; // how many bytes we want to read
ssize_t nbytes; // how many bytes we actually read
uint16_t status = 0;
+ static uint8_t ibw[BGP_MAX_PACKET_SIZE * BGP_READ_PACKET_MAX];
- readsize = STREAM_WRITEABLE(peer->ibuf_work);
-
- nbytes = stream_read_try(peer->ibuf_work, peer->fd, readsize);
+ readsize = MIN(ringbuf_space(peer->ibuf_work), sizeof(ibw));
+ nbytes = read(peer->fd, ibw, readsize);
- switch (nbytes) {
+ /* EAGAIN or EWOULDBLOCK; come back later */
+ if (nbytes < 0 && ERRNO_IO_RETRY(errno)) {
+ SET_FLAG(status, BGP_IO_TRANS_ERR);
/* Fatal error; tear down session */
- case -1:
+ } else if (nbytes < 0) {
zlog_err("%s [Error] bgp_read_packet error: %s", peer->host,
safe_strerror(errno));
@@ -485,10 +472,8 @@ static uint16_t bgp_read(struct peer *peer)
BGP_EVENT_ADD(peer, TCP_fatal_error);
SET_FLAG(status, BGP_IO_FATAL_ERR);
- break;
-
/* Received EOF / TCP session closed */
- case 0:
+ } else if (nbytes == 0) {
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [Event] BGP connection closed fd %d",
peer->host, peer->fd);
@@ -503,14 +488,9 @@ static uint16_t bgp_read(struct peer *peer)
BGP_EVENT_ADD(peer, TCP_connection_closed);
SET_FLAG(status, BGP_IO_FATAL_ERR);
- break;
-
- /* EAGAIN or EWOULDBLOCK; come back later */
- case -2:
- SET_FLAG(status, BGP_IO_TRANS_ERR);
- break;
- default:
- break;
+ } else {
+ assert(ringbuf_put(peer->ibuf_work, ibw, nbytes)
+ == (size_t)nbytes);
}
return status;
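
The hunk above replaces stream_read_try()'s -1/-2 return convention with a plain read(2) and explicit errno checks, pushing whatever arrives into the ring buffer. A minimal sketch of that pattern follows; it assumes FRR's lib/ringbuf.h, and do_read() plus the two status bits are illustrative stand-ins, not bgpd symbols.

/* Illustrative sketch only -- not part of the diff. */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include "lib/ringbuf.h"

#define TRANS_ERR 0x01 /* EAGAIN/EWOULDBLOCK: try again later */
#define FATAL_ERR 0x02 /* hard error or EOF: tear the session down */

static uint16_t do_read(int fd, struct ringbuf *ibuf_work)
{
	static uint8_t ibw[4096 * 10]; /* stand-in for the per-peer read chunk */
	uint16_t status = 0;

	/* never ask for more than the ring buffer can absorb */
	size_t readsize = ringbuf_space(ibuf_work);
	if (readsize > sizeof(ibw))
		readsize = sizeof(ibw);

	ssize_t nbytes = read(fd, ibw, readsize);

	if (nbytes < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		status |= TRANS_ERR;	/* transient; reschedule the read */
	else if (nbytes <= 0)
		status |= FATAL_ERR;	/* error, or the peer closed the socket */
	else
		/* must succeed in full, since readsize was clamped to free space */
		assert(ringbuf_put(ibuf_work, ibw, nbytes) == (size_t)nbytes);

	return status;
}
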
@@ -519,27 +499,35 @@ static uint16_t bgp_read(struct peer *peer)
/*
* Called after we have read a BGP packet header. Validates marker, message
* type and packet length. If any of these aren't correct, sends a notify.
+ *
+ * Assumes that there are at least BGP_HEADER_SIZE readable bytes in the input
+ * buffer.
*/
static bool validate_header(struct peer *peer)
{
uint16_t size;
uint8_t type;
- struct stream *pkt = peer->ibuf_work;
- size_t getp = stream_get_getp(pkt);
+ struct ringbuf *pkt = peer->ibuf_work;
- static uint8_t marker[BGP_MARKER_SIZE] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ static uint8_t m_correct[BGP_MARKER_SIZE] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ uint8_t m_rx[BGP_MARKER_SIZE] = {0x00};
- if (memcmp(marker, stream_pnt(pkt), BGP_MARKER_SIZE) != 0) {
+ if (ringbuf_peek(pkt, 0, m_rx, BGP_MARKER_SIZE) != BGP_MARKER_SIZE)
+ return false;
+
+ if (memcmp(m_correct, m_rx, BGP_MARKER_SIZE) != 0) {
bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
BGP_NOTIFY_HEADER_NOT_SYNC);
return false;
}
- /* Get size and type in host byte order. */
- size = stream_getw_from(pkt, getp + BGP_MARKER_SIZE);
- type = stream_getc_from(pkt, getp + BGP_MARKER_SIZE + 2);
+ /* Get size and type in network byte order. */
+ ringbuf_peek(pkt, BGP_MARKER_SIZE, &size, sizeof(size));
+ ringbuf_peek(pkt, BGP_MARKER_SIZE + 2, &type, sizeof(type));
+
+ size = ntohs(size);
/* BGP type check. */
if (type != BGP_MSG_OPEN && type != BGP_MSG_UPDATE
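
Since validate_header() now reads the marker, length and type fields with ringbuf_peek() before anything is consumed, a compact sketch of that non-destructive parse may help. It assumes FRR's lib/ringbuf.h and the standard 19-byte BGP header (16-byte marker, 2-byte length in network order, 1-byte type); peek_bgp_header() is a hypothetical helper, not a bgpd function.

/* Illustrative sketch only -- not part of the diff. */
#include <arpa/inet.h> /* ntohs */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "lib/ringbuf.h"

#define HDR_MARKER 16
#define HDR_SIZE   19 /* marker + 2-byte length + 1-byte type */

static bool peek_bgp_header(struct ringbuf *ibw, uint16_t *len, uint8_t *type)
{
	static const uint8_t marker[HDR_MARKER] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	uint8_t rx[HDR_MARKER];

	if (ringbuf_remain(ibw) < HDR_SIZE)
		return false; /* not enough data yet; wait for the next read */

	/* peek does not move the read pointer, so a partial packet simply
	 * stays in the buffer until more data arrives */
	ringbuf_peek(ibw, 0, rx, sizeof(rx));
	if (memcmp(rx, marker, sizeof(rx)) != 0)
		return false; /* header not in sync */

	ringbuf_peek(ibw, HDR_MARKER, len, sizeof(*len));
	ringbuf_peek(ibw, HDR_MARKER + 2, type, sizeof(*type));
	*len = ntohs(*len); /* length is carried in network byte order */
	return true;
}

Only once ringbuf_remain() also covers the full advertised length does the read loop actually consume the packet with ringbuf_get(), which is what bgp_process_reads() does above.
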
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index 4b018aef4d..eed5fdc65d 100644
--- a/bgpd/bgp_packet.c
+++ b/bgpd/bgp_packet.c
@@ -595,6 +595,7 @@ static int bgp_write_notify(struct peer *peer)
assert(type == BGP_MSG_NOTIFY);
/* Type should be notify. */
+ atomic_fetch_add_explicit(&peer->notify_out, 1, memory_order_relaxed);
peer->notify_out++;
/* Double start timer. */
@@ -1682,7 +1683,7 @@ static int bgp_notify_receive(struct peer *peer, bgp_size_t size)
}
/* peer count update */
- peer->notify_in++;
+ atomic_fetch_add_explicit(&peer->notify_in, 1, memory_order_relaxed);
peer->last_reset = PEER_DOWN_NOTIFY_RECEIVED;
@@ -2192,7 +2193,8 @@ int bgp_process_packet(struct thread *thread)
*/
switch (type) {
case BGP_MSG_OPEN:
- peer->open_in++;
+ atomic_fetch_add_explicit(&peer->open_in, 1,
+ memory_order_relaxed);
mprc = bgp_open_receive(peer, size);
if (mprc == BGP_Stop)
zlog_err(
@@ -2200,7 +2202,8 @@ int bgp_process_packet(struct thread *thread)
__FUNCTION__, peer->host);
break;
case BGP_MSG_UPDATE:
- peer->update_in++;
+ atomic_fetch_add_explicit(&peer->update_in, 1,
+ memory_order_relaxed);
peer->readtime = monotime(NULL);
mprc = bgp_update_receive(peer, size);
if (mprc == BGP_Stop)
@@ -2209,7 +2212,8 @@ int bgp_process_packet(struct thread *thread)
__FUNCTION__, peer->host);
break;
case BGP_MSG_NOTIFY:
- peer->notify_in++;
+ atomic_fetch_add_explicit(&peer->notify_in, 1,
+ memory_order_relaxed);
mprc = bgp_notify_receive(peer, size);
if (mprc == BGP_Stop)
zlog_err(
@@ -2218,7 +2222,8 @@ int bgp_process_packet(struct thread *thread)
break;
case BGP_MSG_KEEPALIVE:
peer->readtime = monotime(NULL);
- peer->keepalive_in++;
+ atomic_fetch_add_explicit(&peer->keepalive_in, 1,
+ memory_order_relaxed);
mprc = bgp_keepalive_receive(peer, size);
if (mprc == BGP_Stop)
zlog_err(
@@ -2227,7 +2232,8 @@ int bgp_process_packet(struct thread *thread)
break;
case BGP_MSG_ROUTE_REFRESH_NEW:
case BGP_MSG_ROUTE_REFRESH_OLD:
- peer->refresh_in++;
+ atomic_fetch_add_explicit(&peer->refresh_in, 1,
+ memory_order_relaxed);
mprc = bgp_route_refresh_receive(peer, size);
if (mprc == BGP_Stop)
zlog_err(
@@ -2235,7 +2241,8 @@ int bgp_process_packet(struct thread *thread)
__FUNCTION__, peer->host);
break;
case BGP_MSG_CAPABILITY:
- peer->dynamic_cap_in++;
+ atomic_fetch_add_explicit(&peer->dynamic_cap_in, 1,
+ memory_order_relaxed);
mprc = bgp_capability_receive(peer, size);
if (mprc == BGP_Stop)
zlog_err(
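
All of the counter bumps above follow the same C11 pattern: a relaxed atomic increment on the packet-processing path, matched by relaxed loads wherever the value is only displayed (the vty and SNMP changes later in this diff). A self-contained sketch of that pattern, with illustrative field names rather than the actual struct peer members:

/* Illustrative sketch only -- needs C11 <stdatomic.h>. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct stats {
	_Atomic uint32_t update_in;
	_Atomic uint32_t keepalive_in;
};

/* called from the packet-processing thread */
static void on_update(struct stats *s)
{
	atomic_fetch_add_explicit(&s->update_in, 1, memory_order_relaxed);
}

/* called from a display thread; relaxed ordering is enough because
 * nothing else is synchronized through these counters */
static void show(struct stats *s)
{
	uint32_t u = atomic_load_explicit(&s->update_in, memory_order_relaxed);
	uint32_t k = atomic_load_explicit(&s->keepalive_in, memory_order_relaxed);
	printf("updates %u keepalives %u\n", u, k);
}
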
diff --git a/bgpd/bgp_snmp.c b/bgpd/bgp_snmp.c
index 484ea7c433..8317a252e1 100644
--- a/bgpd/bgp_snmp.c
+++ b/bgpd/bgp_snmp.c
@@ -512,6 +512,7 @@ static u_char *bgpPeerTable(struct variable *v, oid name[], size_t *length,
{
static struct in_addr addr;
struct peer *peer;
+ uint32_t ui, uo;
if (smux_header_table(v, name, length, exact, var_len, write_method)
== MATCH_FAILED)
@@ -571,21 +572,20 @@ static u_char *bgpPeerTable(struct variable *v, oid name[], size_t *length,
return SNMP_INTEGER(peer->as);
break;
case BGPPEERINUPDATES:
- return SNMP_INTEGER(peer->update_in);
+ ui = atomic_load_explicit(&peer->update_in,
+ memory_order_relaxed);
+ return SNMP_INTEGER(ui);
break;
case BGPPEEROUTUPDATES:
- return SNMP_INTEGER(peer->update_out);
+ uo = atomic_load_explicit(&peer->update_out,
+ memory_order_relaxed);
+ return SNMP_INTEGER(uo);
break;
case BGPPEERINTOTALMESSAGES:
- return SNMP_INTEGER(peer->open_in + peer->update_in
- + peer->keepalive_in + peer->notify_in
- + peer->refresh_in + peer->dynamic_cap_in);
+ return SNMP_INTEGER(PEER_TOTAL_RX(peer));
break;
case BGPPEEROUTTOTALMESSAGES:
- return SNMP_INTEGER(peer->open_out + peer->update_out
- + peer->keepalive_out + peer->notify_out
- + peer->refresh_out
- + peer->dynamic_cap_out);
+ return SNMP_INTEGER(PEER_TOTAL_TX(peer));
break;
case BGPPEERLASTERROR: {
static u_char lasterror[2];
diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c
index d1d684ccb3..50e5bcf096 100644
--- a/bgpd/bgp_vty.c
+++ b/bgpd/bgp_vty.c
@@ -7053,17 +7053,9 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
json_object_int_add(json_peer, "remoteAs", peer->as);
json_object_int_add(json_peer, "version", 4);
json_object_int_add(json_peer, "msgRcvd",
- peer->open_in + peer->update_in
- + peer->keepalive_in
- + peer->notify_in
- + peer->refresh_in
- + peer->dynamic_cap_in);
+ PEER_TOTAL_RX(peer));
json_object_int_add(json_peer, "msgSent",
- peer->open_out + peer->update_out
- + peer->keepalive_out
- + peer->notify_out
- + peer->refresh_out
- + peer->dynamic_cap_out);
+ PEER_TOTAL_TX(peer));
json_object_int_add(json_peer, "tableVersion",
peer->version[afi][safi]);
@@ -7120,48 +7112,18 @@ static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
" ");
vty_out(vty, "4 %10u %7u %7u %8" PRIu64 " %4d %4zd %8s",
- peer->as,
- atomic_load_explicit(&peer->open_in,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->update_in,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->keepalive_in,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->notify_in,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->refresh_in,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->dynamic_cap_in,
- memory_order_relaxed),
- atomic_load_explicit(&peer->open_out,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->update_out,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->keepalive_out,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->notify_out,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->refresh_out,
- memory_order_relaxed)
- + atomic_load_explicit(
- &peer->dynamic_cap_out,
- memory_order_relaxed),
- peer->version[afi][safi], 0, peer->obuf->count,
+ peer->as, PEER_TOTAL_RX(peer),
+ PEER_TOTAL_TX(peer), peer->version[afi][safi],
+ 0, peer->obuf->count,
peer_uptime(peer->uptime, timebuf,
BGP_UPTIME_LEN, 0, NULL));
if (peer->status == Established)
- vty_out(vty, " %12ld",
- peer->pcount[afi][pfx_rcd_safi]);
+ if (peer->afc_recv[afi][pfx_rcd_safi])
+ vty_out(vty, " %12ld",
+ peer->pcount[afi][pfx_rcd_safi]);
+ else
+ vty_out(vty, " NoNeg");
else {
if (CHECK_FLAG(peer->flags, PEER_FLAG_SHUTDOWN))
vty_out(vty, " Idle (Admin)");
@@ -8288,17 +8250,29 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, u_char use_json,
if (p->status == Established) {
time_t uptime;
- struct tm *tm;
uptime = bgp_clock();
uptime -= p->uptime;
- tm = gmtime(&uptime);
epoch_tbuf = time(NULL) - uptime;
+#if CONFDATE > 20200101
+ CPP_NOTICE("bgpTimerUp should be deprecated and can be removed now");
+#endif
+ /*
+ * bgpTimerUp was a millisecond value that was only accurate
+ * up to 1 day of uptime; past that, the value returned
+ * became garbage. So in order to provide
+ * some level of backwards compatibility,
+ * we still provide the data, but now
+ * we are returning the correct value
+ * and also adding a new bgpTimerUpMsec
+ * which will allow us to deprecate
+ * this eventually
+ */
json_object_int_add(json_neigh, "bgpTimerUp",
- (tm->tm_sec * 1000)
- + (tm->tm_min * 60000)
- + (tm->tm_hour * 3600000));
+ uptime * 1000);
+ json_object_int_add(json_neigh, "bgpTimerUpMsec",
+ uptime * 1000);
json_object_string_add(json_neigh, "bgpTimerUpString",
peer_uptime(p->uptime, timebuf,
BGP_UPTIME_LEN, 0,
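
The comment above explains the motivation; as a concrete illustration, here is a small sketch of the old and new computations (function names are illustrative). gmtime() folds whole days into tm_yday/tm_mday, which the old expression never consulted, so any uptime past 24 hours was lost.

/* Illustrative sketch only -- not part of the diff. */
#include <stdint.h>
#include <time.h>

static int64_t uptime_msec_old(time_t uptime) /* seconds since session up */
{
	/* gmtime() splits the value into h/m/s within a single day; the
	 * day count lands in tm_yday, which this expression ignores, so
	 * after 24 hours the result wraps and becomes meaningless */
	struct tm *tm = gmtime(&uptime);
	return (tm->tm_sec * 1000) + (tm->tm_min * 60000)
	       + (tm->tm_hour * 3600000);
}

static int64_t uptime_msec_new(time_t uptime)
{
	/* the replacement simply scales the raw second count */
	return (int64_t)uptime * 1000;
}
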
@@ -9329,34 +9303,44 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, u_char use_json,
json_object_int_add(json_stat, "depthInq", 0);
json_object_int_add(json_stat, "depthOutq",
(unsigned long)p->obuf->count);
- json_object_int_add(json_stat, "opensSent", p->open_out);
- json_object_int_add(json_stat, "opensRecv", p->open_in);
+ json_object_int_add(json_stat, "opensSent",
+ atomic_load_explicit(&p->open_out,
+ memory_order_relaxed));
+ json_object_int_add(json_stat, "opensRecv",
+ atomic_load_explicit(&p->open_in,
+ memory_order_relaxed));
json_object_int_add(json_stat, "notificationsSent",
- p->notify_out);
+ atomic_load_explicit(&p->notify_out,
+ memory_order_relaxed));
json_object_int_add(json_stat, "notificationsRecv",
- p->notify_in);
- json_object_int_add(json_stat, "updatesSent", p->update_out);
- json_object_int_add(json_stat, "updatesRecv", p->update_in);
+ atomic_load_explicit(&p->notify_in,
+ memory_order_relaxed));
+ json_object_int_add(json_stat, "updatesSent",
+ atomic_load_explicit(&p->update_out,
+ memory_order_relaxed));
+ json_object_int_add(json_stat, "updatesRecv",
+ atomic_load_explicit(&p->update_in,
+ memory_order_relaxed));
json_object_int_add(json_stat, "keepalivesSent",
- p->keepalive_out);
+ atomic_load_explicit(&p->keepalive_out,
+ memory_order_relaxed));
json_object_int_add(json_stat, "keepalivesRecv",
- p->keepalive_in);
+ atomic_load_explicit(&p->keepalive_in,
+ memory_order_relaxed));
json_object_int_add(json_stat, "routeRefreshSent",
- p->refresh_out);
+ atomic_load_explicit(&p->refresh_out,
+ memory_order_relaxed));
json_object_int_add(json_stat, "routeRefreshRecv",
- p->refresh_in);
+ atomic_load_explicit(&p->refresh_in,
+ memory_order_relaxed));
json_object_int_add(json_stat, "capabilitySent",
- p->dynamic_cap_out);
+ atomic_load_explicit(&p->dynamic_cap_out,
+ memory_order_relaxed));
json_object_int_add(json_stat, "capabilityRecv",
- p->dynamic_cap_in);
- json_object_int_add(json_stat, "totalSent",
- p->open_out + p->notify_out + p->update_out
- + p->keepalive_out + p->refresh_out
- + p->dynamic_cap_out);
- json_object_int_add(json_stat, "totalRecv",
- p->open_in + p->notify_in + p->update_in
- + p->keepalive_in + p->refresh_in
- + p->dynamic_cap_in);
+ atomic_load_explicit(&p->dynamic_cap_in,
+ memory_order_relaxed));
+ json_object_int_add(json_stat, "totalSent", PEER_TOTAL_TX(p));
+ json_object_int_add(json_stat, "totalRecv", PEER_TOTAL_RX(p));
json_object_object_add(json_neigh, "messageStats", json_stat);
} else {
/* Packet counts. */
@@ -9365,25 +9349,38 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, u_char use_json,
vty_out(vty, " Outq depth is %lu\n",
(unsigned long)p->obuf->count);
vty_out(vty, " Sent Rcvd\n");
- vty_out(vty, " Opens: %10d %10d\n", p->open_out,
- p->open_in);
- vty_out(vty, " Notifications: %10d %10d\n", p->notify_out,
- p->notify_in);
- vty_out(vty, " Updates: %10d %10d\n", p->update_out,
- p->update_in);
- vty_out(vty, " Keepalives: %10d %10d\n", p->keepalive_out,
- p->keepalive_in);
- vty_out(vty, " Route Refresh: %10d %10d\n", p->refresh_out,
- p->refresh_in);
+ vty_out(vty, " Opens: %10d %10d\n",
+ atomic_load_explicit(&p->open_out,
+ memory_order_relaxed),
+ atomic_load_explicit(&p->open_in,
+ memory_order_relaxed));
+ vty_out(vty, " Notifications: %10d %10d\n",
+ atomic_load_explicit(&p->notify_out,
+ memory_order_relaxed),
+ atomic_load_explicit(&p->notify_in,
+ memory_order_relaxed));
+ vty_out(vty, " Updates: %10d %10d\n",
+ atomic_load_explicit(&p->update_out,
+ memory_order_relaxed),
+ atomic_load_explicit(&p->update_in,
+ memory_order_relaxed));
+ vty_out(vty, " Keepalives: %10d %10d\n",
+ atomic_load_explicit(&p->keepalive_out,
+ memory_order_relaxed),
+ atomic_load_explicit(&p->keepalive_in,
+ memory_order_relaxed));
+ vty_out(vty, " Route Refresh: %10d %10d\n",
+ atomic_load_explicit(&p->refresh_out,
+ memory_order_relaxed),
+ atomic_load_explicit(&p->refresh_in,
+ memory_order_relaxed));
vty_out(vty, " Capability: %10d %10d\n",
- p->dynamic_cap_out, p->dynamic_cap_in);
- vty_out(vty, " Total: %10d %10d\n",
- p->open_out + p->notify_out + p->update_out
- + p->keepalive_out + p->refresh_out
- + p->dynamic_cap_out,
- p->open_in + p->notify_in + p->update_in
- + p->keepalive_in + p->refresh_in
- + p->dynamic_cap_in);
+ atomic_load_explicit(&p->dynamic_cap_out,
+ memory_order_relaxed),
+ atomic_load_explicit(&p->dynamic_cap_in,
+ memory_order_relaxed));
+ vty_out(vty, " Total: %10d %10d\n", PEER_TOTAL_TX(p),
+ PEER_TOTAL_RX(p));
}
if (use_json) {
diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c
index 1cf04abfce..b2db54ef79 100644
--- a/bgpd/bgp_zebra.c
+++ b/bgpd/bgp_zebra.c
@@ -1516,8 +1516,9 @@ void bgp_update_redist_vrf_bitmaps(struct bgp *bgp, vrf_id_t old_vrf_id)
for (afi = AFI_IP; afi < AFI_MAX; afi++)
for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
- if (vrf_bitmap_check(zclient->redist[afi][i],
- old_vrf_id)) {
+ if ((old_vrf_id == VRF_UNKNOWN)
+ || vrf_bitmap_check(zclient->redist[afi][i],
+ old_vrf_id)) {
vrf_bitmap_unset(zclient->redist[afi][i],
old_vrf_id);
vrf_bitmap_set(zclient->redist[afi][i],
diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c
index 4d8e4ffe37..d76d5b0b7e 100644
--- a/bgpd/bgpd.c
+++ b/bgpd/bgpd.c
@@ -24,6 +24,7 @@
#include "thread.h"
#include "buffer.h"
#include "stream.h"
+#include "ringbuf.h"
#include "command.h"
#include "sockunion.h"
#include "sockopt.h"
@@ -1162,7 +1163,9 @@ struct peer *peer_new(struct bgp *bgp)
*/
peer->obuf_work =
stream_new(BGP_MAX_PACKET_SIZE + BGP_MAX_PACKET_SIZE_OVERFLOW);
- peer->ibuf_work = stream_new(BGP_MAX_PACKET_SIZE * BGP_READ_PACKET_MAX);
+ peer->ibuf_work =
+ ringbuf_new(BGP_MAX_PACKET_SIZE * BGP_READ_PACKET_MAX);
+
peer->scratch = stream_new(BGP_MAX_PACKET_SIZE);
bgp_sync_init(peer);
@@ -2179,7 +2182,7 @@ int peer_delete(struct peer *peer)
}
if (peer->ibuf_work) {
- stream_free(peer->ibuf_work);
+ ringbuf_del(peer->ibuf_work);
peer->ibuf_work = NULL;
}
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index ce56587727..20b99c171e 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -598,8 +598,8 @@ struct peer {
struct stream_fifo *ibuf; // packets waiting to be processed
struct stream_fifo *obuf; // packets waiting to be written
- struct stream *ibuf_work; // WiP buffer used by bgp_read() only
- struct stream *obuf_work; // WiP buffer used to construct packets
+ struct ringbuf *ibuf_work; // WiP buffer used by bgp_read() only
+ struct stream *obuf_work; // WiP buffer used to construct packets
struct stream *curr; // the current packet being parsed
@@ -829,6 +829,22 @@ struct peer {
/* workqueues */
struct work_queue *clear_node_queue;
+#define PEER_TOTAL_RX(peer) \
+ atomic_load_explicit(&peer->open_in, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->update_in, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->notify_in, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->refresh_in, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->keepalive_in, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->dynamic_cap_in, memory_order_relaxed)
+
+#define PEER_TOTAL_TX(peer) \
+ atomic_load_explicit(&peer->open_out, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->update_out, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->notify_out, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->refresh_out, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->keepalive_out, memory_order_relaxed) + \
+ atomic_load_explicit(&peer->dynamic_cap_out, memory_order_relaxed)
+
/* Statistics field */
_Atomic uint32_t open_in; /* Open message input count */
_Atomic uint32_t open_out; /* Open message output count */
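
Callers elsewhere in this diff use the new totals as ordinary integer expressions, e.g. SNMP_INTEGER(PEER_TOTAL_RX(peer)). One hedged observation, not something the change itself addresses: the macro bodies expand to an unparenthesized sum of relaxed loads, so an operator that binds tighter than + at the call site would apply only to the last term. A hypothetical illustration:

/* Illustrative only -- assumes bgpd.h's struct peer and the macros above. */
#include "bgpd/bgpd.h"

static uint32_t peer_total_msgs(struct peer *peer)
{
	uint32_t rx = PEER_TOTAL_RX(peer);
	uint32_t tx = PEER_TOTAL_TX(peer);

	/* note: writing PEER_TOTAL_RX(peer) * 2 would multiply only the
	 * final atomic_load_explicit() term, since the macro body is an
	 * unparenthesized sum -- assign to a temporary first, as here */
	return rx + tx;
}
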
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index 5ba7a96a8f..1e3c5a0352 100644
--- a/bgpd/rfapi/rfapi.c
+++ b/bgpd/rfapi/rfapi.c
@@ -31,6 +31,7 @@
#include "lib/linklist.h"
#include "lib/command.h"
#include "lib/stream.h"
+#include "lib/ringbuf.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_ecommunity.h"
@@ -1310,7 +1311,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
stream_fifo_free(rfd->peer->obuf);
if (rfd->peer->ibuf_work)
- stream_free(rfd->peer->ibuf_work);
+ ringbuf_del(rfd->peer->ibuf_work);
if (rfd->peer->obuf_work)
stream_free(rfd->peer->obuf_work);
diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c
index 07be7833b6..f8b38468f5 100644
--- a/bgpd/rfapi/vnc_zebra.c
+++ b/bgpd/rfapi/vnc_zebra.c
@@ -30,6 +30,7 @@
#include "lib/command.h"
#include "lib/zclient.h"
#include "lib/stream.h"
+#include "lib/ringbuf.h"
#include "lib/memory.h"
#include "bgpd/bgpd.h"
@@ -198,7 +199,7 @@ static void vnc_redistribute_add(struct prefix *p, u_int32_t metric,
stream_fifo_free(vncHD1VR.peer->obuf);
if (vncHD1VR.peer->ibuf_work)
- stream_free(vncHD1VR.peer->ibuf_work);
+ ringbuf_del(vncHD1VR.peer->ibuf_work);
if (vncHD1VR.peer->obuf_work)
stream_free(vncHD1VR.peer->obuf_work);
diff --git a/lib/ringbuf.c b/lib/ringbuf.c
index f8e9a9bd8a..11db502a94 100644
--- a/lib/ringbuf.c
+++ b/lib/ringbuf.c
@@ -58,7 +58,7 @@ size_t ringbuf_put(struct ringbuf *buf, const void *data, size_t size)
size_t space = ringbuf_space(buf);
size_t copysize = MIN(size, space);
size_t tocopy = copysize;
- if (tocopy > buf->size - buf->end) {
+ if (tocopy >= buf->size - buf->end) {
size_t ts = buf->size - buf->end;
memcpy(buf->data + buf->end, dp, ts);
buf->end = 0;
@@ -102,7 +102,7 @@ size_t ringbuf_peek(struct ringbuf *buf, size_t offset, void *data, size_t size)
if (tocopy >= buf->size - cstart) {
size_t ts = buf->size - cstart;
memcpy(dp, buf->data + cstart, ts);
- buf->start = cstart = 0;
+ cstart = 0;
tocopy -= ts;
dp += ts;
}
@@ -110,6 +110,16 @@ size_t ringbuf_peek(struct ringbuf *buf, size_t offset, void *data, size_t size)
return copysize;
}
+size_t ringbuf_copy(struct ringbuf *to, struct ringbuf *from, size_t size)
+{
+ size_t tocopy = MIN(ringbuf_space(to), size);
+ uint8_t *cbuf = XCALLOC(MTYPE_TMP, tocopy);
+ tocopy = ringbuf_peek(from, 0, cbuf, tocopy);
+ size_t put = ringbuf_put(to, cbuf, tocopy);
+ XFREE(MTYPE_TMP, cbuf);
+ return put;
+}
+
void ringbuf_reset(struct ringbuf *buf)
{
buf->start = buf->end = 0;
@@ -120,5 +130,4 @@ void ringbuf_wipe(struct ringbuf *buf)
{
memset(buf->data, 0x00, buf->size);
ringbuf_reset(buf);
- buf->empty = true;
}
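
ringbuf_copy() above only peeks from the source, so it transfers at most MIN(size, free space in the destination) bytes and leaves the source readable afterwards; that matches how peer_xfer_conn() uses it earlier in this diff, wiping the destination and then copying everything the source still holds. A minimal standalone usage sketch, assuming FRR's lib/ringbuf.h:

/* Illustrative sketch only -- not part of the diff. */
#include <assert.h>
#include <string.h>
#include "lib/ringbuf.h"

static void copy_example(void)
{
	struct ringbuf *from = ringbuf_new(64);
	struct ringbuf *to = ringbuf_new(64);
	char out[16];

	ringbuf_put(from, "hello world", 11);

	/* copy whatever the source still holds; the source is only peeked,
	 * so its contents remain in place afterwards */
	size_t copied = ringbuf_copy(to, from, ringbuf_remain(from));
	assert(copied == 11);
	assert(ringbuf_remain(from) == 11);

	assert(ringbuf_get(to, out, copied) == copied);
	assert(memcmp(out, "hello world", 11) == 0);

	ringbuf_del(from);
	ringbuf_del(to);
}
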
diff --git a/lib/ringbuf.h b/lib/ringbuf.h
index 2288a2716f..15049e3eea 100644
--- a/lib/ringbuf.h
+++ b/lib/ringbuf.h
@@ -99,6 +99,16 @@ size_t ringbuf_peek(struct ringbuf *buf, size_t offset, void *data,
size_t size);
/*
+ * Copy data from one ringbuf to another.
+ *
+ * @param to destination ringbuf
+ * @param from source ringbuf
+ * @param size how much data to copy
+ * @return amount of data copied
+ */
+size_t ringbuf_copy(struct ringbuf *to, struct ringbuf *from, size_t size);
+
+/*
* Reset buffer. Does not wipe.
*
* @param buf
diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c
index 374e2c52a9..76ba505ad4 100644
--- a/pimd/pim_cmd.c
+++ b/pimd/pim_cmd.c
@@ -7333,6 +7333,7 @@ static int interface_pim_use_src_cmd_worker(struct vty *vty, const char *source)
{
int result;
struct in_addr source_addr;
+ int ret = CMD_SUCCESS;
VTY_DECLVAR_CONTEXT(interface, ifp);
result = inet_pton(AF_INET, source, &source_addr);
@@ -7347,16 +7348,19 @@ static int interface_pim_use_src_cmd_worker(struct vty *vty, const char *source)
case PIM_SUCCESS:
break;
case PIM_IFACE_NOT_FOUND:
+ ret = CMD_WARNING_CONFIG_FAILED;
vty_out(vty, "Pim not enabled on this interface\n");
break;
case PIM_UPDATE_SOURCE_DUP:
+ ret = CMD_WARNING;
vty_out(vty, "%% Source already set to %s\n", source);
break;
default:
+ ret = CMD_WARNING_CONFIG_FAILED;
vty_out(vty, "%% Source set failed\n");
}
- return result ? CMD_WARNING_CONFIG_FAILED : CMD_SUCCESS;
+ return ret;
}
DEFUN (interface_pim_use_source,
diff --git a/tests/lib/test_ringbuf.c b/tests/lib/test_ringbuf.c
index c2f4f76a6f..7ba5a29b62 100644
--- a/tests/lib/test_ringbuf.c
+++ b/tests/lib/test_ringbuf.c
@@ -65,7 +65,7 @@ int main(int argc, char **argv)
validate_state(soil, BUFSIZ, BUFSIZ);
assert(soil->start == 0);
- assert(soil->end == BUFSIZ);
+ assert(soil->end == 0);
/* read 15 bytes of garbage */
printf("Validating read...\n");
@@ -73,7 +73,7 @@ int main(int argc, char **argv)
validate_state(soil, BUFSIZ, BUFSIZ - 15);
assert(soil->start == 15);
- assert(soil->end == BUFSIZ);
+ assert(soil->end == 0);
/* put another 10 bytes and validate wraparound */
printf("Validating wraparound...\n");