bgp_io_thread_started = false;
}
-static int bgp_io_dummy(struct thread *thread)
-{
- return 0;
-}
+/* Unused callback for thread_add_read() */
+static int bgp_io_dummy(struct thread *thread) { return 0; }
void *bgp_io_start(void *arg)
{
struct thread task;
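+ /* bgp_io_thread_run gates the event loop below; bgp_io_thread_started
+ * is polled by bgp_{reads,writes}_{on,off}() */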
- atomic_store_explicit(&bgp_io_thread_run, true, memory_order_relaxed);
+ atomic_store_explicit(&bgp_io_thread_run, true, memory_order_seq_cst);
atomic_store_explicit(&bgp_io_thread_started, true,
- memory_order_relaxed);
+ memory_order_seq_cst);
while (bgp_io_thread_run) {
if (thread_fetch(fpt->master, &task)) {
static int bgp_io_finish(struct thread *thread)
{
- atomic_store_explicit(&bgp_io_thread_run, false, memory_order_relaxed);
+ atomic_store_explicit(&bgp_io_thread_run, false, memory_order_seq_cst);
return 0;
}
void bgp_writes_on(struct peer *peer)
{
- while (!atomic_load_explicit(&bgp_io_thread_started,
- memory_order_relaxed))
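+ /* spin until bgp_io_start() has marked the I/O thread as started */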
+ while (
+ !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
;
assert(peer->status != Deleted);
void bgp_writes_off(struct peer *peer)
{
- while (!atomic_load_explicit(&bgp_io_thread_started,
- memory_order_relaxed))
+ while (
+ !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
;
struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
void bgp_reads_on(struct peer *peer)
{
- while (!atomic_load_explicit(&bgp_io_thread_started,
- memory_order_relaxed))
+ while (
+ !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
;
assert(peer->status != Deleted);
void bgp_reads_off(struct peer *peer)
{
- while (!atomic_load_explicit(&bgp_io_thread_started,
- memory_order_relaxed))
+ while (
+ !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
;
struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
uint32_t wpkt_quanta_old;
// cache current write quanta
- wpkt_quanta_old = atomic_load_explicit(&peer->bgp->wpkt_quanta,
- memory_order_relaxed);
+ wpkt_quanta_old =
+ atomic_load_explicit(&peer->bgp->wpkt_quanta, memory_order_relaxed);
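+ /* caching the quanta keeps the loop bound stable even if
+ * bgp->wpkt_quanta is updated concurrently */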
while (count < wpkt_quanta_old && (s = stream_fifo_head(peer->obuf))) {
int writenum;
switch (type) {
case BGP_MSG_OPEN:
- peer->open_out++;
+ atomic_fetch_add_explicit(&peer->open_out, 1,
+ memory_order_relaxed);
break;
case BGP_MSG_UPDATE:
- peer->update_out++;
+ atomic_fetch_add_explicit(&peer->update_out, 1,
+ memory_order_relaxed);
break;
case BGP_MSG_NOTIFY:
- peer->notify_out++;
+ atomic_fetch_add_explicit(&peer->notify_out, 1,
+ memory_order_relaxed);
/* Double start timer. */
peer->v_start *= 2;
goto done;
case BGP_MSG_KEEPALIVE:
- peer->keepalive_out++;
+ atomic_fetch_add_explicit(&peer->keepalive_out, 1,
+ memory_order_relaxed);
break;
case BGP_MSG_ROUTE_REFRESH_NEW:
case BGP_MSG_ROUTE_REFRESH_OLD:
- peer->refresh_out++;
+ atomic_fetch_add_explicit(&peer->refresh_out, 1,
+ memory_order_relaxed);
break;
case BGP_MSG_CAPABILITY:
- peer->dynamic_cap_out++;
+ atomic_fetch_add_explicit(&peer->dynamic_cap_out, 1,
+ memory_order_relaxed);
break;
}
done : {
/* Update last_update if UPDATEs were written. */
if (peer->update_out > oc)
- peer->last_update = bgp_clock();
+ atomic_store_explicit(&peer->last_update, bgp_clock(),
+ memory_order_relaxed);
/* If we TXed any flavor of packet update last_write */
if (update_last_write)
- peer->last_write = bgp_clock();
+ atomic_store_explicit(&peer->last_write, bgp_clock(),
+ memory_order_relaxed);
}
return status;
if (CHECK_FLAG(peer->sflags,
PEER_STATUS_NSF_MODE)) {
peer->last_reset =
- PEER_DOWN_NSF_CLOSE_SESSION;
+ PEER_DOWN_NSF_CLOSE_SESSION;
SET_FLAG(peer->sflags,
PEER_STATUS_NSF_WAIT);
} else
peer->last_reset =
- PEER_DOWN_CLOSE_SESSION;
+ PEER_DOWN_CLOSE_SESSION;
}
BGP_EVENT_ADD(peer, TCP_fatal_error);
case 0: // TCP session closed
if (bgp_debug_neighbor_events(peer))
zlog_debug(
- "%s [Event] BGP connection closed fd %d",
- peer->host, peer->fd);
+ "%s [Event] BGP connection closed fd %d",
+ peer->host, peer->fd);
if (peer->status == Established) {
if (CHECK_FLAG(peer->sflags,
PEER_STATUS_NSF_MODE)) {
peer->last_reset =
- PEER_DOWN_NSF_CLOSE_SESSION;
+ PEER_DOWN_NSF_CLOSE_SESSION;
SET_FLAG(peer->sflags,
PEER_STATUS_NSF_WAIT);
} else
peer->last_reset =
- PEER_DOWN_CLOSE_SESSION;
+ PEER_DOWN_CLOSE_SESSION;
}
BGP_EVENT_ADD(peer, TCP_connection_closed);
size_t getp = stream_get_getp(pkt);
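+ /* valid BGP headers carry a marker of sixteen 0xff bytes (RFC 4271) */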
static uint8_t marker[BGP_MARKER_SIZE] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
if (memcmp(marker, stream_pnt(pkt), BGP_MARKER_SIZE) != 0) {
bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
&& type != BGP_MSG_ROUTE_REFRESH_NEW
&& type != BGP_MSG_ROUTE_REFRESH_OLD
&& type != BGP_MSG_CAPABILITY) {
- if (bgp_debug_neighbor_events(peer))
+ if (bgp_debug_neighbor_events(peer)) {
+ // XXX: zlog is not MT-safe
zlog_debug("%s unknown message type 0x%02x", peer->host,
type);
+ }
bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
BGP_NOTIFY_HEADER_BAD_MESTYPE,
- (u_char *)&type, 1);
+ (u_char *) &type, 1);
return false;
}
&& size < BGP_MSG_ROUTE_REFRESH_MIN_SIZE)
|| (type == BGP_MSG_CAPABILITY
&& size < BGP_MSG_CAPABILITY_MIN_SIZE)) {
- if (bgp_debug_neighbor_events(peer))
+ if (bgp_debug_neighbor_events(peer)) {
+ // XXX: zlog is not MT-safe
zlog_debug("%s bad message length - %d for %s",
peer->host, size,
type == 128 ? "ROUTE-REFRESH"
- : bgp_type_str[(int)type]);
+ : bgp_type_str[(int) type]);
+ }
bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
BGP_NOTIFY_HEADER_BAD_MESLEN,
- (u_char *)&size, 2);
+ (u_char *) &size, 2);
return false;
}
(CHECK_FLAG(peer->config, PEER_CONFIG_TIMER) \
|| CHECK_FLAG(peer->config, PEER_GROUP_CONFIG_TIMER))
- u_int32_t holdtime;
- u_int32_t keepalive;
- u_int32_t connect;
- u_int32_t routeadv;
+ _Atomic uint32_t holdtime;
+ _Atomic uint32_t keepalive;
+ _Atomic uint32_t connect;
+ _Atomic uint32_t routeadv;
/* Timer values. */
- u_int32_t v_start;
- u_int32_t v_connect;
- u_int32_t v_holdtime;
- u_int32_t v_keepalive;
- u_int32_t v_routeadv;
- u_int32_t v_pmax_restart;
- u_int32_t v_gr_restart;
+ _Atomic uint32_t v_start;
+ _Atomic uint32_t v_connect;
+ _Atomic uint32_t v_holdtime;
+ _Atomic uint32_t v_keepalive;
+ _Atomic uint32_t v_routeadv;
+ _Atomic uint32_t v_pmax_restart;
+ _Atomic uint32_t v_gr_restart;
/* Threads. */
struct thread *t_read;
struct thread *t_process_packet;
/* Thread flags. */
- u_int16_t thread_flags;
+ _Atomic uint16_t thread_flags;
#define PEER_THREAD_WRITES_ON (1 << 0)
#define PEER_THREAD_READS_ON (1 << 1)
#define PEER_THREAD_KEEPALIVES_ON (1 << 2)
struct work_queue *clear_node_queue;
/* Statistics field */
- u_int32_t open_in; /* Open message input count */
- u_int32_t open_out; /* Open message output count */
- u_int32_t update_in; /* Update message input count */
- u_int32_t update_out; /* Update message ouput count */
- time_t update_time; /* Update message received time. */
- u_int32_t keepalive_in; /* Keepalive input count */
- u_int32_t keepalive_out; /* Keepalive output count */
- u_int32_t notify_in; /* Notify input count */
- u_int32_t notify_out; /* Notify output count */
- u_int32_t refresh_in; /* Route Refresh input count */
- u_int32_t refresh_out; /* Route Refresh output count */
- u_int32_t dynamic_cap_in; /* Dynamic Capability input count. */
- u_int32_t dynamic_cap_out; /* Dynamic Capability output count. */
+ _Atomic uint32_t open_in; /* Open message input count */
+ _Atomic uint32_t open_out; /* Open message output count */
+ _Atomic uint32_t update_in; /* Update message input count */
+ _Atomic uint32_t update_out; /* Update message output count */
+ _Atomic time_t update_time; /* Update message received time. */
+ _Atomic uint32_t keepalive_in; /* Keepalive input count */
+ _Atomic uint32_t keepalive_out; /* Keepalive output count */
+ _Atomic uint32_t notify_in; /* Notify input count */
+ _Atomic uint32_t notify_out; /* Notify output count */
+ _Atomic uint32_t refresh_in; /* Route Refresh input count */
+ _Atomic uint32_t refresh_out; /* Route Refresh output count */
+ _Atomic uint32_t dynamic_cap_in; /* Dynamic Capability input count. */
+ _Atomic uint32_t dynamic_cap_out; /* Dynamic Capability output count. */
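+ /* (output counters are bumped with atomic_fetch_add_explicit()) */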
/* BGP state count */
u_int32_t established; /* Established */
/* Syncronization list and time. */
struct bgp_synchronize *sync[AFI_MAX][SAFI_MAX];
time_t synctime;
- time_t last_write; /* timestamp when the last msg was written */
- time_t last_update; /* timestamp when the last UPDATE msg was written */
+ /* timestamp when the last msg was written */
+ _Atomic time_t last_write;
+ /* timestamp when the last UPDATE msg was written */
+ _Atomic time_t last_update;
/* Send prefix count. */
unsigned long scount[AFI_MAX][SAFI_MAX];