bgpd: update atomic memory orders
author Quentin Young <qlyoung@cumulusnetworks.com>
Wed, 5 Jul 2017 15:38:57 +0000 (11:38 -0400)
committer Quentin Young <qlyoung@cumulusnetworks.com>
Thu, 30 Nov 2017 21:18:04 +0000 (16:18 -0500)
Use best-performing memory orders where appropriate.
Also update some style and add missing comments.

Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
bgpd/bgp_io.c
bgpd/bgpd.h
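
The pattern the diff below converges on can be sketched with plain C11 atomics: sequentially consistent stores and loads for the I/O thread lifecycle flags, where the flag must not become visible before the setup it guards, and relaxed operations for per-peer statistics counters, where only the tally matters. This is a minimal stand-alone sketch, not FRR code; the names io_run, update_out, io_thread_setup() and io_thread_loop() are illustrative and only mirror fields touched in the diff.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic bool io_run;         /* lifecycle flag: ordering matters */
static _Atomic uint32_t update_out; /* statistic: only the count matters */

static void io_thread_setup(void)
{
        /* seq_cst: the flag becomes visible only after any setup done
         * before this store, in a single global order with other
         * seq_cst operations. */
        atomic_store_explicit(&io_run, true, memory_order_seq_cst);
}

static void io_thread_loop(void)
{
        while (atomic_load_explicit(&io_run, memory_order_seq_cst)) {
                /* ...perform one unit of I/O work... */

                /* relaxed: the counter is only ever read as a tally,
                 * so it needs no ordering with other memory accesses. */
                atomic_fetch_add_explicit(&update_out, 1,
                                          memory_order_relaxed);
                break; /* single pass for this sketch */
        }
}

int main(void)
{
        io_thread_setup();
        io_thread_loop();
        atomic_store_explicit(&io_run, false, memory_order_seq_cst);
        printf("updates sent: %u\n",
               (unsigned)atomic_load_explicit(&update_out,
                                              memory_order_relaxed));
        return 0;
}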

diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index 26629c1b2fc481d85f663dc2b5ecf2ddbf67cf0b..f80757f85f74376afbd31dfaf2a99342f7a742e0 100644
--- a/bgpd/bgp_io.c
+++ b/bgpd/bgp_io.c
@@ -61,10 +61,8 @@ void bgp_io_init()
        bgp_io_thread_started = false;
 }
 
-static int bgp_io_dummy(struct thread *thread)
-{
-       return 0;
-}
+/* Unused callback for thread_add_read() */
+static int bgp_io_dummy(struct thread *thread) { return 0; }
 
 void *bgp_io_start(void *arg)
 {
@@ -81,9 +79,9 @@ void *bgp_io_start(void *arg)
 
        struct thread task;
 
-       atomic_store_explicit(&bgp_io_thread_run, true, memory_order_relaxed);
+       atomic_store_explicit(&bgp_io_thread_run, true, memory_order_seq_cst);
        atomic_store_explicit(&bgp_io_thread_started, true,
-                             memory_order_relaxed);
+                             memory_order_seq_cst);
 
        while (bgp_io_thread_run) {
                if (thread_fetch(fpt->master, &task)) {
@@ -99,7 +97,7 @@ void *bgp_io_start(void *arg)
 
 static int bgp_io_finish(struct thread *thread)
 {
-       atomic_store_explicit(&bgp_io_thread_run, false, memory_order_relaxed);
+       atomic_store_explicit(&bgp_io_thread_run, false, memory_order_seq_cst);
        return 0;
 }
 
@@ -114,8 +112,8 @@ int bgp_io_stop(void **result, struct frr_pthread *fpt)
 
 void bgp_writes_on(struct peer *peer)
 {
-       while (!atomic_load_explicit(&bgp_io_thread_started,
-                                    memory_order_relaxed))
+       while (
+           !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
                ;
 
        assert(peer->status != Deleted);
@@ -134,8 +132,8 @@ void bgp_writes_on(struct peer *peer)
 
 void bgp_writes_off(struct peer *peer)
 {
-       while (!atomic_load_explicit(&bgp_io_thread_started,
-                                    memory_order_relaxed))
+       while (
+           !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
                ;
 
        struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
@@ -148,8 +146,8 @@ void bgp_writes_off(struct peer *peer)
 
 void bgp_reads_on(struct peer *peer)
 {
-       while (!atomic_load_explicit(&bgp_io_thread_started,
-                                    memory_order_relaxed))
+       while (
+           !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
                ;
 
        assert(peer->status != Deleted);
@@ -171,8 +169,8 @@ void bgp_reads_on(struct peer *peer)
 
 void bgp_reads_off(struct peer *peer)
 {
-       while (!atomic_load_explicit(&bgp_io_thread_started,
-                                    memory_order_relaxed))
+       while (
+           !atomic_load_explicit(&bgp_io_thread_started, memory_order_seq_cst))
                ;
 
        struct frr_pthread *fpt = frr_pthread_get(PTHREAD_IO);
@@ -376,8 +374,8 @@ static uint16_t bgp_write(struct peer *peer)
        uint32_t wpkt_quanta_old;
 
        // cache current write quanta
-       wpkt_quanta_old = atomic_load_explicit(&peer->bgp->wpkt_quanta,
-                                              memory_order_relaxed);
+       wpkt_quanta_old =
+           atomic_load_explicit(&peer->bgp->wpkt_quanta, memory_order_relaxed);
 
        while (count < wpkt_quanta_old && (s = stream_fifo_head(peer->obuf))) {
                int writenum;
@@ -405,13 +403,16 @@ static uint16_t bgp_write(struct peer *peer)
 
                switch (type) {
                case BGP_MSG_OPEN:
-                       peer->open_out++;
+                       atomic_fetch_add_explicit(&peer->open_out, 1,
+                                                 memory_order_relaxed);
                        break;
                case BGP_MSG_UPDATE:
-                       peer->update_out++;
+                       atomic_fetch_add_explicit(&peer->update_out, 1,
+                                                 memory_order_relaxed);
                        break;
                case BGP_MSG_NOTIFY:
-                       peer->notify_out++;
+                       atomic_fetch_add_explicit(&peer->notify_out, 1,
+                                                 memory_order_relaxed);
                        /* Double start timer. */
                        peer->v_start *= 2;
 
@@ -427,14 +428,17 @@ static uint16_t bgp_write(struct peer *peer)
                        goto done;
 
                case BGP_MSG_KEEPALIVE:
-                       peer->keepalive_out++;
+                       atomic_fetch_add_explicit(&peer->keepalive_out, 1,
+                                                 memory_order_relaxed);
                        break;
                case BGP_MSG_ROUTE_REFRESH_NEW:
                case BGP_MSG_ROUTE_REFRESH_OLD:
-                       peer->refresh_out++;
+                       atomic_fetch_add_explicit(&peer->refresh_out, 1,
+                                                 memory_order_relaxed);
                        break;
                case BGP_MSG_CAPABILITY:
-                       peer->dynamic_cap_out++;
+                       atomic_fetch_add_explicit(&peer->dynamic_cap_out, 1,
+                                                 memory_order_relaxed);
                        break;
                }
 
@@ -447,11 +451,13 @@ static uint16_t bgp_write(struct peer *peer)
 done : {
        /* Update last_update if UPDATEs were written. */
        if (peer->update_out > oc)
-               peer->last_update = bgp_clock();
+               atomic_store_explicit(&peer->last_update, bgp_clock(),
+                                     memory_order_relaxed);
 
        /* If we TXed any flavor of packet update last_write */
        if (update_last_write)
-               peer->last_write = bgp_clock();
+               atomic_store_explicit(&peer->last_write, bgp_clock(),
+                                     memory_order_relaxed);
 }
 
        return status;
@@ -483,12 +489,12 @@ static uint16_t bgp_read(struct peer *peer)
                                if (CHECK_FLAG(peer->sflags,
                                               PEER_STATUS_NSF_MODE)) {
                                        peer->last_reset =
-                                               PEER_DOWN_NSF_CLOSE_SESSION;
+                                           PEER_DOWN_NSF_CLOSE_SESSION;
                                        SET_FLAG(peer->sflags,
                                                 PEER_STATUS_NSF_WAIT);
                                } else
                                        peer->last_reset =
-                                               PEER_DOWN_CLOSE_SESSION;
+                                           PEER_DOWN_CLOSE_SESSION;
                        }
 
                        BGP_EVENT_ADD(peer, TCP_fatal_error);
@@ -498,19 +504,19 @@ static uint16_t bgp_read(struct peer *peer)
                case 0: // TCP session closed
                        if (bgp_debug_neighbor_events(peer))
                                zlog_debug(
-                                       "%s [Event] BGP connection closed fd %d",
-                                       peer->host, peer->fd);
+                                   "%s [Event] BGP connection closed fd %d",
+                                   peer->host, peer->fd);
 
                        if (peer->status == Established) {
                                if (CHECK_FLAG(peer->sflags,
                                               PEER_STATUS_NSF_MODE)) {
                                        peer->last_reset =
-                                               PEER_DOWN_NSF_CLOSE_SESSION;
+                                           PEER_DOWN_NSF_CLOSE_SESSION;
                                        SET_FLAG(peer->sflags,
                                                 PEER_STATUS_NSF_WAIT);
                                } else
                                        peer->last_reset =
-                                               PEER_DOWN_CLOSE_SESSION;
+                                           PEER_DOWN_CLOSE_SESSION;
                        }
 
                        BGP_EVENT_ADD(peer, TCP_connection_closed);
@@ -541,8 +547,8 @@ static bool validate_header(struct peer *peer)
        size_t getp = stream_get_getp(pkt);
 
        static uint8_t marker[BGP_MARKER_SIZE] = {
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-               0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+           0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+           0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
        if (memcmp(marker, stream_pnt(pkt), BGP_MARKER_SIZE) != 0) {
                bgp_notify_send(peer, BGP_NOTIFY_HEADER_ERR,
@@ -560,13 +566,15 @@ static bool validate_header(struct peer *peer)
            && type != BGP_MSG_ROUTE_REFRESH_NEW
            && type != BGP_MSG_ROUTE_REFRESH_OLD
            && type != BGP_MSG_CAPABILITY) {
-               if (bgp_debug_neighbor_events(peer))
+               if (bgp_debug_neighbor_events(peer)) {
+                       // XXX: zlog is not MT-safe
                        zlog_debug("%s unknown message type 0x%02x", peer->host,
                                   type);
+               }
 
                bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
                                          BGP_NOTIFY_HEADER_BAD_MESTYPE,
-                                         (u_char *)&type, 1);
+                                         (u_char *) &type, 1);
                return false;
        }
 
@@ -582,15 +590,17 @@ static bool validate_header(struct peer *peer)
                && size < BGP_MSG_ROUTE_REFRESH_MIN_SIZE)
            || (type == BGP_MSG_CAPABILITY
                && size < BGP_MSG_CAPABILITY_MIN_SIZE)) {
-               if (bgp_debug_neighbor_events(peer))
+               if (bgp_debug_neighbor_events(peer)) {
+                       // XXX: zlog is not MT-safe
                        zlog_debug("%s bad message length - %d for %s",
                                   peer->host, size,
                                   type == 128 ? "ROUTE-REFRESH"
-                                              : bgp_type_str[(int)type]);
+                                              : bgp_type_str[(int) type]);
+               }
 
                bgp_notify_send_with_data(peer, BGP_NOTIFY_HEADER_ERR,
                                          BGP_NOTIFY_HEADER_BAD_MESLEN,
-                                         (u_char *)&size, 2);
+                                         (u_char *) &size, 2);
                return false;
        }
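
The bgp_writes_on()/bgp_writes_off()/bgp_reads_on()/bgp_reads_off() hunks above all share the same startup guard: spin until the I/O thread has flagged itself as started, now with memory_order_seq_cst so the flag cannot appear set before the thread's own initialization is visible. A stand-alone sketch of that handshake, assuming a hypothetical worker_started flag and worker_main()/schedule_work() functions rather than FRR's frr_pthread machinery (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool worker_started;

static void *worker_main(void *arg)
{
        (void)arg;
        /* ...set up the event loop, allocate per-thread state... */

        /* Publish "started" only after setup; seq_cst keeps this store
         * ordered after the setup from every thread's point of view. */
        atomic_store_explicit(&worker_started, true, memory_order_seq_cst);
        return NULL;
}

static void schedule_work(void)
{
        /* Spin until the worker has announced itself, mirroring the
         * guard at the top of bgp_writes_on() and friends. */
        while (!atomic_load_explicit(&worker_started, memory_order_seq_cst))
                ;
        puts("worker is up; safe to hand it work");
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker_main, NULL);
        schedule_work();
        pthread_join(tid, NULL);
        return 0;
}

A busy-wait is a reasonable choice only when the window between thread creation and the started store is short, as it is for a one-time startup handshake like this.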
 
diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h
index 015565e368e443fe74b7dc5224831376443a61e1..086e59486b349bd13ff1fb18c985fb6e41ed7993 100644
--- a/bgpd/bgpd.h
+++ b/bgpd/bgpd.h
@@ -789,19 +789,19 @@ struct peer {
        (CHECK_FLAG(peer->config, PEER_CONFIG_TIMER)                         \
         || CHECK_FLAG(peer->config, PEER_GROUP_CONFIG_TIMER))
 
-       u_int32_t holdtime;
-       u_int32_t keepalive;
-       u_int32_t connect;
-       u_int32_t routeadv;
+       _Atomic uint32_t holdtime;
+       _Atomic uint32_t keepalive;
+       _Atomic uint32_t connect;
+       _Atomic uint32_t routeadv;
 
        /* Timer values. */
-       u_int32_t v_start;
-       u_int32_t v_connect;
-       u_int32_t v_holdtime;
-       u_int32_t v_keepalive;
-       u_int32_t v_routeadv;
-       u_int32_t v_pmax_restart;
-       u_int32_t v_gr_restart;
+       _Atomic uint32_t v_start;
+       _Atomic uint32_t v_connect;
+       _Atomic uint32_t v_holdtime;
+       _Atomic uint32_t v_keepalive;
+       _Atomic uint32_t v_routeadv;
+       _Atomic uint32_t v_pmax_restart;
+       _Atomic uint32_t v_gr_restart;
 
        /* Threads. */
        struct thread *t_read;
@@ -818,7 +818,7 @@ struct peer {
        struct thread *t_process_packet;
 
        /* Thread flags. */
-       u_int16_t thread_flags;
+       _Atomic uint16_t thread_flags;
 #define PEER_THREAD_WRITES_ON         (1 << 0)
 #define PEER_THREAD_READS_ON          (1 << 1)
 #define PEER_THREAD_KEEPALIVES_ON     (1 << 2)
@@ -826,19 +826,19 @@ struct peer {
        struct work_queue *clear_node_queue;
 
        /* Statistics field */
-       u_int32_t open_in;       /* Open message input count */
-       u_int32_t open_out;     /* Open message output count */
-       u_int32_t update_in;       /* Update message input count */
-       u_int32_t update_out;      /* Update message ouput count */
-       time_t update_time;     /* Update message received time. */
-       u_int32_t keepalive_in;    /* Keepalive input count */
-       u_int32_t keepalive_out;   /* Keepalive output count */
-       u_int32_t notify_in;       /* Notify input count */
-       u_int32_t notify_out;      /* Notify output count */
-       u_int32_t refresh_in;      /* Route Refresh input count */
-       u_int32_t refresh_out;     /* Route Refresh output count */
-       u_int32_t dynamic_cap_in;  /* Dynamic Capability input count.  */
-       u_int32_t dynamic_cap_out; /* Dynamic Capability output count.  */
+       _Atomic uint32_t open_in;         /* Open message input count */
+       _Atomic uint32_t open_out;        /* Open message output count */
+       _Atomic uint32_t update_in;       /* Update message input count */
+       _Atomic uint32_t update_out;      /* Update message ouput count */
+       _Atomic time_t update_time;       /* Update message received time. */
+       _Atomic uint32_t keepalive_in;    /* Keepalive input count */
+       _Atomic uint32_t keepalive_out;   /* Keepalive output count */
+       _Atomic uint32_t notify_in;       /* Notify input count */
+       _Atomic uint32_t notify_out;      /* Notify output count */
+       _Atomic uint32_t refresh_in;      /* Route Refresh input count */
+       _Atomic uint32_t refresh_out;     /* Route Refresh output count */
+       _Atomic uint32_t dynamic_cap_in;  /* Dynamic Capability input count.  */
+       _Atomic uint32_t dynamic_cap_out; /* Dynamic Capability output count.  */
 
        /* BGP state count */
        u_int32_t established; /* Established */
@@ -851,8 +851,10 @@ struct peer {
        /* Syncronization list and time.  */
        struct bgp_synchronize *sync[AFI_MAX][SAFI_MAX];
        time_t synctime;
-       time_t last_write;  /* timestamp when the last msg was written */
-       time_t last_update; /* timestamp when the last UPDATE msg was written */
+       /* timestamp when the last UPDATE msg was written */
+       _Atomic time_t last_write;
+       /* timestamp when the last msg was written */
+       _Atomic time_t last_update;
 
        /* Send prefix count. */
        unsigned long scount[AFI_MAX][SAFI_MAX];
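
On the bgpd.h side, the plain counters and timestamps in struct peer become _Atomic so the I/O thread can bump them with relaxed fetch-adds while other threads read them for display. A minimal sketch of that split, with hypothetical peer_stats, note_update_sent() and show_stats() standing in for the real struct peer and its users:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct peer_stats {
        _Atomic uint32_t update_out; /* UPDATE messages sent */
        _Atomic time_t last_update;  /* when the last UPDATE went out */
};

/* Writer side: called from the I/O thread after an UPDATE is written. */
static void note_update_sent(struct peer_stats *s)
{
        atomic_fetch_add_explicit(&s->update_out, 1, memory_order_relaxed);
        atomic_store_explicit(&s->last_update, time(NULL),
                              memory_order_relaxed);
}

/* Reader side: called from another thread, e.g. to print statistics. */
static void show_stats(struct peer_stats *s)
{
        uint32_t n = atomic_load_explicit(&s->update_out,
                                          memory_order_relaxed);
        time_t t = atomic_load_explicit(&s->last_update,
                                        memory_order_relaxed);

        printf("updates sent: %u, last at %lld\n", (unsigned)n,
               (long long)t);
}

int main(void)
{
        struct peer_stats s = {0};

        note_update_sent(&s);
        show_stats(&s);
        return 0;
}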