lib: add frr_with_mutex() block-wrapper
author    David Lamparter <equinox@diac24.net>
          Fri, 21 Jun 2019 08:58:02 +0000 (10:58 +0200)
committer David Lamparter <equinox@diac24.net>
          Tue, 3 Sep 2019 15:15:17 +0000 (17:15 +0200)
frr_with_mutex(...) { ... } locks and automatically unlocks the listed
mutex(es) when the block is exited.  This adds a bit of safety against
forgetting the unlock in error paths and the like, and makes the code
slightly more readable.

Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
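
For reference, a minimal usage sketch of the two variants this commit
introduces (this example is editorial, not part of the commit;
counter_mtx and the functions around it are hypothetical, and the
macros come from lib/frr_pthread.h below):

#include "frr_pthread.h"

static pthread_mutex_t counter_mtx = PTHREAD_MUTEX_INITIALIZER;
static unsigned int counter;

/* variant 1: braced block; the mutex is unlocked on every path out of
 * the block, including break and return.
 */
unsigned int counter_bump(void)
{
	unsigned int ret;

	frr_with_mutex(&counter_mtx) {
		ret = ++counter;
	}
	return ret;
}

/* variant 2: no extra indentation; the mutex is unlocked when the
 * enclosing scope (here, the whole function) is exited.
 */
unsigned int counter_get(void)
{
	frr_mutex_lock_autounlock(&counter_mtx);

	return counter;
}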
19 files changed:
.clang-format
bgpd/bgp_fsm.c
bgpd/bgp_io.c
bgpd/bgp_keepalives.c
bgpd/bgp_packet.c
bgpd/rfapi/rfapi.c
bgpd/rfapi/vnc_zebra.c
lib/ferr.c
lib/frr_pthread.c
lib/frr_pthread.h
lib/hash.c
lib/log.c
lib/northbound.c
lib/privs.c
lib/stream.c
lib/thread.c
tools/coccinelle/frr_with_mutex.cocci [new file with mode: 0644]
zebra/zebra_rib.c
zebra/zserv.c

diff --git a/.clang-format b/.clang-format
index 4bd962747fa492853dc386e13b6154d252f6417b..cc68de7b551fee419c12532bdafd0e21e9f68a4f 100644 (file)
@@ -28,6 +28,8 @@ ForEachMacros:
   - frr_each
   - frr_each_safe
   - frr_each_from
+  - frr_with_mutex
+  - frr_elevate_privs
   - LIST_FOREACH
   - LIST_FOREACH_SAFE
   - SLIST_FOREACH
diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c
index 4d395e3749e0fc82e63c76c5bec9228e7d561a99..4049e8c15b563b6232fcbca51ff1dd0d8865d741 100644 (file)
@@ -179,9 +179,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
         * on various buffers. Those need to be transferred or dropped,
         * otherwise we'll get spurious failures during session establishment.
         */
-       pthread_mutex_lock(&peer->io_mtx);
-       pthread_mutex_lock(&from_peer->io_mtx);
-       {
+       frr_with_mutex(&peer->io_mtx, &from_peer->io_mtx) {
                fd = peer->fd;
                peer->fd = from_peer->fd;
                from_peer->fd = fd;
@@ -222,8 +220,6 @@ static struct peer *peer_xfer_conn(struct peer *from_peer)
                ringbuf_copy(peer->ibuf_work, from_peer->ibuf_work,
                             ringbuf_remain(from_peer->ibuf_work));
        }
-       pthread_mutex_unlock(&from_peer->io_mtx);
-       pthread_mutex_unlock(&peer->io_mtx);
 
        peer->as = from_peer->as;
        peer->v_holdtime = from_peer->v_holdtime;
@@ -1166,8 +1162,7 @@ int bgp_stop(struct peer *peer)
        BGP_TIMER_OFF(peer->t_routeadv);
 
        /* Clear input and output buffer.  */
-       pthread_mutex_lock(&peer->io_mtx);
-       {
+       frr_with_mutex(&peer->io_mtx) {
                if (peer->ibuf)
                        stream_fifo_clean(peer->ibuf);
                if (peer->obuf)
@@ -1183,7 +1178,6 @@ int bgp_stop(struct peer *peer)
                        peer->curr = NULL;
                }
        }
-       pthread_mutex_unlock(&peer->io_mtx);
 
        /* Close of file descriptor. */
        if (peer->fd >= 0) {
diff --git a/bgpd/bgp_io.c b/bgpd/bgp_io.c
index f111822e53ca5f46460b0147af65aa415ae848e3..c2d06a4b5a078941cb9261f6ad36d2e88f071877 100644 (file)
@@ -132,12 +132,10 @@ static int bgp_process_writes(struct thread *thread)
 
        struct frr_pthread *fpt = bgp_pth_io;
 
-       pthread_mutex_lock(&peer->io_mtx);
-       {
+       frr_with_mutex(&peer->io_mtx) {
                status = bgp_write(peer);
                reschedule = (stream_fifo_head(peer->obuf) != NULL);
        }
-       pthread_mutex_unlock(&peer->io_mtx);
 
        /* no problem */
        if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -184,11 +182,9 @@ static int bgp_process_reads(struct thread *thread)
 
        struct frr_pthread *fpt = bgp_pth_io;
 
-       pthread_mutex_lock(&peer->io_mtx);
-       {
+       frr_with_mutex(&peer->io_mtx) {
                status = bgp_read(peer);
        }
-       pthread_mutex_unlock(&peer->io_mtx);
 
        /* error checking phase */
        if (CHECK_FLAG(status, BGP_IO_TRANS_ERR)) {
@@ -237,11 +233,9 @@ static int bgp_process_reads(struct thread *thread)
                        assert(ringbuf_get(ibw, pktbuf, pktsize) == pktsize);
                        stream_put(pkt, pktbuf, pktsize);
 
-                       pthread_mutex_lock(&peer->io_mtx);
-                       {
+                       frr_with_mutex(&peer->io_mtx) {
                                stream_fifo_push(peer->ibuf, pkt);
                        }
-                       pthread_mutex_unlock(&peer->io_mtx);
 
                        added_pkt = true;
                } else
diff --git a/bgpd/bgp_keepalives.c b/bgpd/bgp_keepalives.c
index bec3bdcb8d0e1320b957e00b8c4f7694a2c71848..6de1c216a65b4bc155061ab3978bd4544dfafcdd 100644 (file)
@@ -245,8 +245,7 @@ void bgp_keepalives_on(struct peer *peer)
         */
        assert(peerhash_mtx);
 
-       pthread_mutex_lock(peerhash_mtx);
-       {
+       frr_with_mutex(peerhash_mtx) {
                holder.peer = peer;
                if (!hash_lookup(peerhash, &holder)) {
                        struct pkat *pkat = pkat_new(peer);
@@ -255,7 +254,6 @@ void bgp_keepalives_on(struct peer *peer)
                }
                SET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
        }
-       pthread_mutex_unlock(peerhash_mtx);
        bgp_keepalives_wake();
 }
 
@@ -275,8 +273,7 @@ void bgp_keepalives_off(struct peer *peer)
         */
        assert(peerhash_mtx);
 
-       pthread_mutex_lock(peerhash_mtx);
-       {
+       frr_with_mutex(peerhash_mtx) {
                holder.peer = peer;
                struct pkat *res = hash_release(peerhash, &holder);
                if (res) {
@@ -285,16 +282,13 @@ void bgp_keepalives_off(struct peer *peer)
                }
                UNSET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
        }
-       pthread_mutex_unlock(peerhash_mtx);
 }
 
 void bgp_keepalives_wake(void)
 {
-       pthread_mutex_lock(peerhash_mtx);
-       {
+       frr_with_mutex(peerhash_mtx) {
                pthread_cond_signal(peerhash_cond);
        }
-       pthread_mutex_unlock(peerhash_mtx);
 }
 
 int bgp_keepalives_stop(struct frr_pthread *fpt, void **result)
diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c
index cd94f421ef198b3d323e44d665c1b884558eeaa8..c7c1780c21c334a9f4cce3d785ce71bc27b4af04 100644 (file)
@@ -124,9 +124,9 @@ int bgp_packet_set_size(struct stream *s)
  */
 static void bgp_packet_add(struct peer *peer, struct stream *s)
 {
-       pthread_mutex_lock(&peer->io_mtx);
-       stream_fifo_push(peer->obuf, s);
-       pthread_mutex_unlock(&peer->io_mtx);
+       frr_with_mutex(&peer->io_mtx) {
+               stream_fifo_push(peer->obuf, s);
+       }
 }
 
 static struct stream *bgp_update_packet_eor(struct peer *peer, afi_t afi,
@@ -665,7 +665,7 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
        struct stream *s;
 
        /* Lock I/O mutex to prevent other threads from pushing packets */
-       pthread_mutex_lock(&peer->io_mtx);
+       frr_mutex_lock_autounlock(&peer->io_mtx);
        /* ============================================== */
 
        /* Allocate new stream. */
@@ -756,9 +756,6 @@ void bgp_notify_send_with_data(struct peer *peer, uint8_t code,
        stream_fifo_push(peer->obuf, s);
 
        bgp_write_notify(peer);
-
-       /* ============================================== */
-       pthread_mutex_unlock(&peer->io_mtx);
 }
 
 /*
@@ -2237,11 +2234,9 @@ int bgp_process_packet(struct thread *thread)
                bgp_size_t size;
                char notify_data_length[2];
 
-               pthread_mutex_lock(&peer->io_mtx);
-               {
+               frr_with_mutex(&peer->io_mtx) {
                        peer->curr = stream_fifo_pop(peer->ibuf);
                }
-               pthread_mutex_unlock(&peer->io_mtx);
 
                if (peer->curr == NULL) // no packets to process, hmm...
                        return 0;
@@ -2360,15 +2355,13 @@ int bgp_process_packet(struct thread *thread)
 
        if (fsm_update_result != FSM_PEER_TRANSFERRED
            && fsm_update_result != FSM_PEER_STOPPED) {
-               pthread_mutex_lock(&peer->io_mtx);
-               {
+               frr_with_mutex(&peer->io_mtx) {
                        // more work to do, come back later
                        if (peer->ibuf->count > 0)
                                thread_add_timer_msec(
                                        bm->master, bgp_process_packet, peer, 0,
                                        &peer->t_process_packet);
                }
-               pthread_mutex_unlock(&peer->io_mtx);
        }
 
        return 0;
diff --git a/bgpd/rfapi/rfapi.c b/bgpd/rfapi/rfapi.c
index e7905e56225530f5fe2fe3df84604524a187a65d..9b8f64ee678d4d7c6e786ed7f07f901f3d4ae8df 100644 (file)
@@ -1282,8 +1282,7 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
         * since this peer is not on the I/O thread, this lock is not strictly
         * necessary, but serves as a reminder to those who may meddle...
         */
-       pthread_mutex_lock(&rfd->peer->io_mtx);
-       {
+       frr_with_mutex(&rfd->peer->io_mtx) {
                // we don't need any I/O related facilities
                if (rfd->peer->ibuf)
                        stream_fifo_free(rfd->peer->ibuf);
@@ -1300,7 +1299,6 @@ static int rfapi_open_inner(struct rfapi_descriptor *rfd, struct bgp *bgp,
                rfd->peer->obuf_work = NULL;
                rfd->peer->ibuf_work = NULL;
        }
-       pthread_mutex_unlock(&rfd->peer->io_mtx);
 
        { /* base code assumes have valid host pointer */
                char buf[BUFSIZ];
diff --git a/bgpd/rfapi/vnc_zebra.c b/bgpd/rfapi/vnc_zebra.c
index 481500dfb445ec12130259908c14ab1a4276b89a..80a590f56a13173f49ee59de0366f6752666c50a 100644 (file)
@@ -191,8 +191,7 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
                         * is not strictly necessary, but serves as a reminder
                         * to those who may meddle...
                         */
-                       pthread_mutex_lock(&vncHD1VR.peer->io_mtx);
-                       {
+                       frr_with_mutex(&vncHD1VR.peer->io_mtx) {
                                // we don't need any I/O related facilities
                                if (vncHD1VR.peer->ibuf)
                                        stream_fifo_free(vncHD1VR.peer->ibuf);
@@ -209,7 +208,6 @@ static void vnc_redistribute_add(struct prefix *p, uint32_t metric,
                                vncHD1VR.peer->obuf_work = NULL;
                                vncHD1VR.peer->ibuf_work = NULL;
                        }
-                       pthread_mutex_unlock(&vncHD1VR.peer->io_mtx);
 
                        /* base code assumes have valid host pointer */
                        vncHD1VR.peer->host =
diff --git a/lib/ferr.c b/lib/ferr.c
index fd5fb50172e07f902c4ffa983a22586dd017cc72..ccf63dea17986260dd56247013abd58b5eafacc4 100644 (file)
@@ -33,6 +33,7 @@
 #include "command.h"
 #include "json.h"
 #include "linklist.h"
+#include "frr_pthread.h"
 
 DEFINE_MTYPE_STATIC(LIB, ERRINFO, "error information")
 
@@ -83,14 +84,12 @@ void log_ref_add(struct log_ref *ref)
 {
        uint32_t i = 0;
 
-       pthread_mutex_lock(&refs_mtx);
-       {
+       frr_with_mutex(&refs_mtx) {
                while (ref[i].code != END_FERR) {
                        hash_get(refs, &ref[i], hash_alloc_intern);
                        i++;
                }
        }
-       pthread_mutex_unlock(&refs_mtx);
 }
 
 struct log_ref *log_ref_get(uint32_t code)
@@ -99,11 +98,9 @@ struct log_ref *log_ref_get(uint32_t code)
        struct log_ref *ref;
 
        holder.code = code;
-       pthread_mutex_lock(&refs_mtx);
-       {
+       frr_with_mutex(&refs_mtx) {
                ref = hash_lookup(refs, &holder);
        }
-       pthread_mutex_unlock(&refs_mtx);
 
        return ref;
 }
@@ -118,11 +115,9 @@ void log_ref_display(struct vty *vty, uint32_t code, bool json)
        if (json)
                top = json_object_new_object();
 
-       pthread_mutex_lock(&refs_mtx);
-       {
+       frr_with_mutex(&refs_mtx) {
                errlist = code ? list_new() : hash_to_list(refs);
        }
-       pthread_mutex_unlock(&refs_mtx);
 
        if (code) {
                ref = log_ref_get(code);
@@ -189,23 +184,19 @@ DEFUN_NOSH(show_error_code,
 
 void log_ref_init(void)
 {
-       pthread_mutex_lock(&refs_mtx);
-       {
+       frr_with_mutex(&refs_mtx) {
                refs = hash_create(ferr_hash_key, ferr_hash_cmp,
                                   "Error Reference Texts");
        }
-       pthread_mutex_unlock(&refs_mtx);
 }
 
 void log_ref_fini(void)
 {
-       pthread_mutex_lock(&refs_mtx);
-       {
+       frr_with_mutex(&refs_mtx) {
                hash_clean(refs, NULL);
                hash_free(refs);
                refs = NULL;
        }
-       pthread_mutex_unlock(&refs_mtx);
 }
 
 void log_ref_vty_init(void)
diff --git a/lib/frr_pthread.c b/lib/frr_pthread.c
index bdb6c2a3973a2101b8c5a637dc097f33d9969bfc..21dfc9256ff9fd7708062b51d4f0c3af829166b1 100644 (file)
@@ -49,21 +49,17 @@ static struct list *frr_pthread_list;
 
 void frr_pthread_init(void)
 {
-       pthread_mutex_lock(&frr_pthread_list_mtx);
-       {
+       frr_with_mutex(&frr_pthread_list_mtx) {
                frr_pthread_list = list_new();
                frr_pthread_list->del = (void (*)(void *))&frr_pthread_destroy;
        }
-       pthread_mutex_unlock(&frr_pthread_list_mtx);
 }
 
 void frr_pthread_finish(void)
 {
-       pthread_mutex_lock(&frr_pthread_list_mtx);
-       {
+       frr_with_mutex(&frr_pthread_list_mtx) {
                list_delete(&frr_pthread_list);
        }
-       pthread_mutex_unlock(&frr_pthread_list_mtx);
 }
 
 struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
@@ -94,11 +90,9 @@ struct frr_pthread *frr_pthread_new(struct frr_pthread_attr *attr,
        pthread_mutex_init(fpt->running_cond_mtx, NULL);
        pthread_cond_init(fpt->running_cond, NULL);
 
-       pthread_mutex_lock(&frr_pthread_list_mtx);
-       {
+       frr_with_mutex(&frr_pthread_list_mtx) {
                listnode_add(frr_pthread_list, fpt);
        }
-       pthread_mutex_unlock(&frr_pthread_list_mtx);
 
        return fpt;
 }
@@ -162,23 +156,19 @@ int frr_pthread_run(struct frr_pthread *fpt, const pthread_attr_t *attr)
 
 void frr_pthread_wait_running(struct frr_pthread *fpt)
 {
-       pthread_mutex_lock(fpt->running_cond_mtx);
-       {
+       frr_with_mutex(fpt->running_cond_mtx) {
                while (!fpt->running)
                        pthread_cond_wait(fpt->running_cond,
                                          fpt->running_cond_mtx);
        }
-       pthread_mutex_unlock(fpt->running_cond_mtx);
 }
 
 void frr_pthread_notify_running(struct frr_pthread *fpt)
 {
-       pthread_mutex_lock(fpt->running_cond_mtx);
-       {
+       frr_with_mutex(fpt->running_cond_mtx) {
                fpt->running = true;
                pthread_cond_signal(fpt->running_cond);
        }
-       pthread_mutex_unlock(fpt->running_cond_mtx);
 }
 
 int frr_pthread_stop(struct frr_pthread *fpt, void **result)
@@ -190,14 +180,12 @@ int frr_pthread_stop(struct frr_pthread *fpt, void **result)
 
 void frr_pthread_stop_all(void)
 {
-       pthread_mutex_lock(&frr_pthread_list_mtx);
-       {
+       frr_with_mutex(&frr_pthread_list_mtx) {
                struct listnode *n;
                struct frr_pthread *fpt;
                for (ALL_LIST_ELEMENTS_RO(frr_pthread_list, n, fpt))
                        frr_pthread_stop(fpt, NULL);
        }
-       pthread_mutex_unlock(&frr_pthread_list_mtx);
 }
 
 /*
diff --git a/lib/frr_pthread.h b/lib/frr_pthread.h
index 6096a50370e20ef55add3d0b390c6d663fd0501c..f70c8a0db40b6fd7fd491c0e451960cca1442c59 100644 (file)
@@ -215,6 +215,54 @@ void frr_pthread_stop_all(void);
 #define pthread_condattr_setclock(A, B)
 #endif
 
+/* mutex auto-lock/unlock */
+
+/* variant 1:
+ * (for short blocks, multiple mutexes supported)
+ * break & return can be used for aborting the block
+ *
+ * frr_with_mutex(&mtx, &mtx2) {
+ *    if (error)
+ *       break;
+ *    ...
+ * }
+ */
+#define _frr_with_mutex(mutex)                                                 \
+       *NAMECTR(_mtx_) __attribute__((                                        \
+               unused, cleanup(_frr_mtx_unlock))) = _frr_mtx_lock(mutex),     \
+       /* end */
+
+#define frr_with_mutex(...)                                                    \
+       for (pthread_mutex_t MACRO_REPEAT(_frr_with_mutex, ##__VA_ARGS__)      \
+            *_once = NULL; _once == NULL; _once = (void *)1)                  \
+       /* end */
+
+/* variant 2:
+ * (more suitable for long blocks, no extra indentation)
+ *
+ * frr_mutex_lock_autounlock(&mtx);
+ * ...
+ */
+#define frr_mutex_lock_autounlock(mutex)                                       \
+       pthread_mutex_t *NAMECTR(_mtx_)                                        \
+               __attribute__((unused, cleanup(_frr_mtx_unlock))) =            \
+                                   _frr_mtx_lock(mutex)                       \
+       /* end */
+
+static inline pthread_mutex_t *_frr_mtx_lock(pthread_mutex_t *mutex)
+{
+       pthread_mutex_lock(mutex);
+       return mutex;
+}
+
+static inline void _frr_mtx_unlock(pthread_mutex_t **mutex)
+{
+       if (!*mutex)
+               return;
+       pthread_mutex_unlock(*mutex);
+       *mutex = NULL;
+}
+
 #ifdef __cplusplus
 }
 #endif
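
The automatic unlock above rests on the variable cleanup attribute
supported by GCC and clang: _frr_mtx_lock() stores the locked mutex in
a hidden local pointer, and the compiler invokes _frr_mtx_unlock() on
that pointer whenever it goes out of scope, no matter how the scope is
exited; frr_with_mutex() additionally wraps its guard variables in a
one-iteration for loop so that the scope is exactly the braced block.
A standalone sketch of the same pattern, independent of the FRR macros
(names here are illustrative):

#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cb(pthread_mutex_t **mtxp)
{
	/* runs automatically on every exit path out of the scope */
	if (*mtxp)
		pthread_mutex_unlock(*mtxp);
}

int guarded_op(int fail)
{
	pthread_mutex_t *guard __attribute__((unused, cleanup(unlock_cb))) =
		(pthread_mutex_lock(&mtx), &mtx);

	if (fail)
		return -1;	/* unlock_cb() fires here... */

	return 0;		/* ...and here */
}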
diff --git a/lib/hash.c b/lib/hash.c
index 9d9d39702ec9deacb8d85e45849e9ce60e0ac592..7f8a237047f48d3d990c52efe957edee43fccb51 100644 (file)
@@ -28,6 +28,7 @@
 #include "vty.h"
 #include "command.h"
 #include "libfrr.h"
+#include "frr_pthread.h"
 
 DEFINE_MTYPE_STATIC(LIB, HASH, "Hash")
 DEFINE_MTYPE_STATIC(LIB, HASH_BACKET, "Hash Bucket")
@@ -54,14 +55,12 @@ struct hash *hash_create_size(unsigned int size,
        hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
        hash->stats.empty = hash->size;
 
-       pthread_mutex_lock(&_hashes_mtx);
-       {
+       frr_with_mutex(&_hashes_mtx) {
                if (!_hashes)
                        _hashes = list_new();
 
                listnode_add(_hashes, hash);
        }
-       pthread_mutex_unlock(&_hashes_mtx);
 
        return hash;
 }
@@ -311,8 +310,7 @@ struct list *hash_to_list(struct hash *hash)
 
 void hash_free(struct hash *hash)
 {
-       pthread_mutex_lock(&_hashes_mtx);
-       {
+       frr_with_mutex(&_hashes_mtx) {
                if (_hashes) {
                        listnode_delete(_hashes, hash);
                        if (_hashes->count == 0) {
@@ -320,7 +318,6 @@ void hash_free(struct hash *hash)
                        }
                }
        }
-       pthread_mutex_unlock(&_hashes_mtx);
 
        XFREE(MTYPE_HASH, hash->name);
 
diff --git a/lib/log.c b/lib/log.c
index 51a0ddd6b7c2454e5e2a5f3eec748b4cd602ee1f..f1c0fabfbab944a49eeb692d72dac1d20f239f3d 100644 (file)
--- a/lib/log.c
+++ b/lib/log.c
@@ -31,6 +31,7 @@
 #include "lib_errors.h"
 #include "lib/hook.h"
 #include "printfrr.h"
+#include "frr_pthread.h"
 
 #ifndef SUNOS_5
 #include <sys/un.h>
@@ -83,89 +84,70 @@ static int zlog_filter_lookup(const char *lookup)
 
 void zlog_filter_clear(void)
 {
-       pthread_mutex_lock(&loglock);
-       zlog_filter_count = 0;
-       pthread_mutex_unlock(&loglock);
+       frr_with_mutex(&loglock) {
+               zlog_filter_count = 0;
+       }
 }
 
 int zlog_filter_add(const char *filter)
 {
-       pthread_mutex_lock(&loglock);
+       frr_with_mutex(&loglock) {
+               if (zlog_filter_count >= ZLOG_FILTERS_MAX)
+                       return 1;
 
-       int ret = 0;
+               if (zlog_filter_lookup(filter) != -1)
+                       /* Filter already present */
+                       return -1;
 
-       if (zlog_filter_count >= ZLOG_FILTERS_MAX) {
-               ret = 1;
-               goto done;
-       }
+               strlcpy(zlog_filters[zlog_filter_count], filter,
+                       sizeof(zlog_filters[0]));
 
-       if (zlog_filter_lookup(filter) != -1) {
-               /* Filter already present */
-               ret = -1;
-               goto done;
-       }
+               if (zlog_filters[zlog_filter_count][0] == '\0')
+                       /* Filter was either empty or didn't get copied
+                        * correctly
+                        */
+                       return -1;
 
-       strlcpy(zlog_filters[zlog_filter_count], filter,
-               sizeof(zlog_filters[0]));
-
-       if (zlog_filters[zlog_filter_count][0] == '\0') {
-               /* Filter was either empty or didn't get copied correctly */
-               ret = -1;
-               goto done;
+               zlog_filter_count++;
        }
-
-       zlog_filter_count++;
-
-done:
-       pthread_mutex_unlock(&loglock);
-       return ret;
+       return 0;
 }
 
 int zlog_filter_del(const char *filter)
 {
-       pthread_mutex_lock(&loglock);
-
-       int found_idx = zlog_filter_lookup(filter);
-       int last_idx = zlog_filter_count - 1;
-       int ret = 0;
+       frr_with_mutex(&loglock) {
+               int found_idx = zlog_filter_lookup(filter);
+               int last_idx = zlog_filter_count - 1;
 
-       if (found_idx == -1) {
-               /* Didn't find the filter to delete */
-               ret = -1;
-               goto done;
-       }
-
-       /* Adjust the filter array */
-       memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
-               (last_idx - found_idx) * sizeof(zlog_filters[0]));
+               if (found_idx == -1)
+                       /* Didn't find the filter to delete */
+                       return -1;
 
-       zlog_filter_count--;
+               /* Adjust the filter array */
+               memmove(zlog_filters[found_idx], zlog_filters[found_idx + 1],
+                       (last_idx - found_idx) * sizeof(zlog_filters[0]));
 
-done:
-       pthread_mutex_unlock(&loglock);
-       return ret;
+               zlog_filter_count--;
+       }
+       return 0;
 }
 
 /* Dump all filters to buffer, delimited by new line */
 int zlog_filter_dump(char *buf, size_t max_size)
 {
-       pthread_mutex_lock(&loglock);
-
-       int ret = 0;
        int len = 0;
 
-       for (int i = 0; i < zlog_filter_count; i++) {
-               ret = snprintf(buf + len, max_size - len, " %s\n",
-                              zlog_filters[i]);
-               len += ret;
-               if ((ret < 0) || ((size_t)len >= max_size)) {
-                       len = -1;
-                       goto done;
+       frr_with_mutex(&loglock) {
+               for (int i = 0; i < zlog_filter_count; i++) {
+                       int ret;
+                       ret = snprintf(buf + len, max_size - len, " %s\n",
+                                      zlog_filters[i]);
+                       len += ret;
+                       if ((ret < 0) || ((size_t)len >= max_size))
+                               return -1;
                }
        }
 
-done:
-       pthread_mutex_unlock(&loglock);
        return len;
 }
 
@@ -363,7 +345,7 @@ search:
 /* va_list version of zlog. */
 void vzlog(int priority, const char *format, va_list args)
 {
-       pthread_mutex_lock(&loglock);
+       frr_mutex_lock_autounlock(&loglock);
 
        char proto_str[32] = "";
        int original_errno = errno;
@@ -430,36 +412,31 @@ out:
        if (msg != buf)
                XFREE(MTYPE_TMP, msg);
        errno = original_errno;
-       pthread_mutex_unlock(&loglock);
 }
 
 int vzlog_test(int priority)
 {
-       pthread_mutex_lock(&loglock);
-
-       int ret = 0;
+       frr_mutex_lock_autounlock(&loglock);
 
        struct zlog *zl = zlog_default;
 
        /* When zlog_default is also NULL, use stderr for logging. */
        if (zl == NULL)
-               ret = 1;
+               return 1;
        /* Syslog output */
        else if (priority <= zl->maxlvl[ZLOG_DEST_SYSLOG])
-               ret = 1;
+               return 1;
        /* File output. */
        else if ((priority <= zl->maxlvl[ZLOG_DEST_FILE]) && zl->fp)
-               ret = 1;
+               return 1;
        /* stdout output. */
        else if (priority <= zl->maxlvl[ZLOG_DEST_STDOUT])
-               ret = 1;
+               return 1;
        /* Terminal monitor. */
        else if (priority <= zl->maxlvl[ZLOG_DEST_MONITOR])
-               ret = 1;
-
-       pthread_mutex_unlock(&loglock);
+               return 1;
 
-       return ret;
+       return 0;
 }
 
 /*
@@ -870,9 +847,9 @@ void openzlog(const char *progname, const char *protoname,
 
        openlog(progname, syslog_flags, zl->facility);
 
-       pthread_mutex_lock(&loglock);
-       zlog_default = zl;
-       pthread_mutex_unlock(&loglock);
+       frr_with_mutex(&loglock) {
+               zlog_default = zl;
+       }
 
 #ifdef HAVE_GLIBC_BACKTRACE
        /* work around backtrace() using lazily resolved dynamically linked
@@ -889,7 +866,8 @@ void openzlog(const char *progname, const char *protoname,
 
 void closezlog(void)
 {
-       pthread_mutex_lock(&loglock);
+       frr_mutex_lock_autounlock(&loglock);
+
        struct zlog *zl = zlog_default;
 
        closelog();
@@ -901,15 +879,14 @@ void closezlog(void)
 
        XFREE(MTYPE_ZLOG, zl);
        zlog_default = NULL;
-       pthread_mutex_unlock(&loglock);
 }
 
 /* Called from command.c. */
 void zlog_set_level(zlog_dest_t dest, int log_level)
 {
-       pthread_mutex_lock(&loglock);
-       zlog_default->maxlvl[dest] = log_level;
-       pthread_mutex_unlock(&loglock);
+       frr_with_mutex(&loglock) {
+               zlog_default->maxlvl[dest] = log_level;
+       }
 }
 
 int zlog_set_file(const char *filename, int log_level)
@@ -929,15 +906,15 @@ int zlog_set_file(const char *filename, int log_level)
        if (fp == NULL) {
                ret = 0;
        } else {
-               pthread_mutex_lock(&loglock);
-               zl = zlog_default;
-
-               /* Set flags. */
-               zl->filename = XSTRDUP(MTYPE_ZLOG, filename);
-               zl->maxlvl[ZLOG_DEST_FILE] = log_level;
-               zl->fp = fp;
-               logfile_fd = fileno(fp);
-               pthread_mutex_unlock(&loglock);
+               frr_with_mutex(&loglock) {
+                       zl = zlog_default;
+
+                       /* Set flags. */
+                       zl->filename = XSTRDUP(MTYPE_ZLOG, filename);
+                       zl->maxlvl[ZLOG_DEST_FILE] = log_level;
+                       zl->fp = fp;
+                       logfile_fd = fileno(fp);
+               }
        }
 
        return ret;
@@ -946,7 +923,7 @@ int zlog_set_file(const char *filename, int log_level)
 /* Reset opend file. */
 int zlog_reset_file(void)
 {
-       pthread_mutex_lock(&loglock);
+       frr_mutex_lock_autounlock(&loglock);
 
        struct zlog *zl = zlog_default;
 
@@ -959,8 +936,6 @@ int zlog_reset_file(void)
        XFREE(MTYPE_ZLOG, zl->filename);
        zl->filename = NULL;
 
-       pthread_mutex_unlock(&loglock);
-
        return 1;
 }
 
diff --git a/lib/northbound.c b/lib/northbound.c
index 48b450e969c9e0f7537b99e0218ed4c6c88d4ebf..a814f23e14421d0b65a832d7fc4e5b33758c234a 100644 (file)
@@ -26,6 +26,7 @@
 #include "command.h"
 #include "debug.h"
 #include "db.h"
+#include "frr_pthread.h"
 #include "northbound.h"
 #include "northbound_cli.h"
 #include "northbound_db.h"
@@ -723,8 +724,7 @@ int nb_running_lock(enum nb_client client, const void *user)
 {
        int ret = -1;
 
-       pthread_mutex_lock(&running_config_mgmt_lock.mtx);
-       {
+       frr_with_mutex(&running_config_mgmt_lock.mtx) {
                if (!running_config_mgmt_lock.locked) {
                        running_config_mgmt_lock.locked = true;
                        running_config_mgmt_lock.owner_client = client;
@@ -732,7 +732,6 @@ int nb_running_lock(enum nb_client client, const void *user)
                        ret = 0;
                }
        }
-       pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
 
        return ret;
 }
@@ -741,8 +740,7 @@ int nb_running_unlock(enum nb_client client, const void *user)
 {
        int ret = -1;
 
-       pthread_mutex_lock(&running_config_mgmt_lock.mtx);
-       {
+       frr_with_mutex(&running_config_mgmt_lock.mtx) {
                if (running_config_mgmt_lock.locked
                    && running_config_mgmt_lock.owner_client == client
                    && running_config_mgmt_lock.owner_user == user) {
@@ -752,7 +750,6 @@ int nb_running_unlock(enum nb_client client, const void *user)
                        ret = 0;
                }
        }
-       pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
 
        return ret;
 }
@@ -761,14 +758,12 @@ int nb_running_lock_check(enum nb_client client, const void *user)
 {
        int ret = -1;
 
-       pthread_mutex_lock(&running_config_mgmt_lock.mtx);
-       {
+       frr_with_mutex(&running_config_mgmt_lock.mtx) {
                if (!running_config_mgmt_lock.locked
                    || (running_config_mgmt_lock.owner_client == client
                        && running_config_mgmt_lock.owner_user == user))
                        ret = 0;
        }
-       pthread_mutex_unlock(&running_config_mgmt_lock.mtx);
 
        return ret;
 }
diff --git a/lib/privs.c b/lib/privs.c
index a3314c6c3ca977a5782d26e6851ec3720336b5f2..09efedf68460ac3ab3ce749d4203242810ef6fbb 100644 (file)
@@ -24,6 +24,7 @@
 #include "log.h"
 #include "privs.h"
 #include "memory.h"
+#include "frr_pthread.h"
 #include "lib_errors.h"
 #include "lib/queue.h"
 
@@ -760,8 +761,7 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
         * Serialize 'raise' operations; particularly important for
         * OSes where privs are process-wide.
         */
-       pthread_mutex_lock(&(privs->mutex));
-       {
+       frr_with_mutex(&(privs->mutex)) {
                /* Locate ref-counting object to use */
                refs = get_privs_refs(privs);
 
@@ -775,7 +775,6 @@ struct zebra_privs_t *_zprivs_raise(struct zebra_privs_t *privs,
                        refs->raised_in_funcname = funcname;
                }
        }
-       pthread_mutex_unlock(&(privs->mutex));
 
        return privs;
 }
@@ -791,8 +790,7 @@ void _zprivs_lower(struct zebra_privs_t **privs)
        /* Serialize 'lower privs' operation - particularly important
         * when OS privs are process-wide.
         */
-       pthread_mutex_lock(&(*privs)->mutex);
-       {
+       frr_with_mutex(&(*privs)->mutex) {
                refs = get_privs_refs(*privs);
 
                if (--(refs->refcount) == 0) {
@@ -806,7 +804,6 @@ void _zprivs_lower(struct zebra_privs_t **privs)
                        refs->raised_in_funcname = NULL;
                }
        }
-       pthread_mutex_unlock(&(*privs)->mutex);
 
        *privs = NULL;
 }
diff --git a/lib/stream.c b/lib/stream.c
index dfd13ca186445a375d85564058831253076efc05..2e1a0193a28bec2accd052f390f9a45521fa51d1 100644 (file)
@@ -28,6 +28,7 @@
 #include "network.h"
 #include "prefix.h"
 #include "log.h"
+#include "frr_pthread.h"
 #include "lib_errors.h"
 
 DEFINE_MTYPE_STATIC(LIB, STREAM, "Stream")
@@ -1136,11 +1137,9 @@ void stream_fifo_push(struct stream_fifo *fifo, struct stream *s)
 
 void stream_fifo_push_safe(struct stream_fifo *fifo, struct stream *s)
 {
-       pthread_mutex_lock(&fifo->mtx);
-       {
+       frr_with_mutex(&fifo->mtx) {
                stream_fifo_push(fifo, s);
        }
-       pthread_mutex_unlock(&fifo->mtx);
 }
 
 /* Delete first stream from fifo. */
@@ -1170,11 +1169,9 @@ struct stream *stream_fifo_pop_safe(struct stream_fifo *fifo)
 {
        struct stream *ret;
 
-       pthread_mutex_lock(&fifo->mtx);
-       {
+       frr_with_mutex(&fifo->mtx) {
                ret = stream_fifo_pop(fifo);
        }
-       pthread_mutex_unlock(&fifo->mtx);
 
        return ret;
 }
@@ -1188,11 +1185,9 @@ struct stream *stream_fifo_head_safe(struct stream_fifo *fifo)
 {
        struct stream *ret;
 
-       pthread_mutex_lock(&fifo->mtx);
-       {
+       frr_with_mutex(&fifo->mtx) {
                ret = stream_fifo_head(fifo);
        }
-       pthread_mutex_unlock(&fifo->mtx);
 
        return ret;
 }
@@ -1212,11 +1207,9 @@ void stream_fifo_clean(struct stream_fifo *fifo)
 
 void stream_fifo_clean_safe(struct stream_fifo *fifo)
 {
-       pthread_mutex_lock(&fifo->mtx);
-       {
+       frr_with_mutex(&fifo->mtx) {
                stream_fifo_clean(fifo);
        }
-       pthread_mutex_unlock(&fifo->mtx);
 }
 
 size_t stream_fifo_count_safe(struct stream_fifo *fifo)
diff --git a/lib/thread.c b/lib/thread.c
index 943b849ebf10d5bd00d207adb5bf1e25a5e7fa0a..90c794e88dc8ac1c828824c3d18d74c2066fe206 100644 (file)
@@ -33,6 +33,7 @@
 #include "network.h"
 #include "jhash.h"
 #include "frratomic.h"
+#include "frr_pthread.h"
 #include "lib_errors.h"
 
 DEFINE_MTYPE_STATIC(LIB, THREAD, "Thread")
@@ -173,8 +174,7 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
        tmp.funcname = "TOTAL";
        tmp.types = filter;
 
-       pthread_mutex_lock(&masters_mtx);
-       {
+       frr_with_mutex(&masters_mtx) {
                for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
                        const char *name = m->name ? m->name : "main";
 
@@ -206,7 +206,6 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
                        vty_out(vty, "\n");
                }
        }
-       pthread_mutex_unlock(&masters_mtx);
 
        vty_out(vty, "\n");
        vty_out(vty, "Total thread statistics\n");
@@ -240,11 +239,9 @@ static void cpu_record_clear(uint8_t filter)
        struct thread_master *m;
        struct listnode *ln;
 
-       pthread_mutex_lock(&masters_mtx);
-       {
+       frr_with_mutex(&masters_mtx) {
                for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
-                       pthread_mutex_lock(&m->mtx);
-                       {
+                       frr_with_mutex(&m->mtx) {
                                void *args[2] = {tmp, m->cpu_record};
                                hash_iterate(
                                        m->cpu_record,
@@ -252,10 +249,8 @@ static void cpu_record_clear(uint8_t filter)
                                                  void *))cpu_record_hash_clear,
                                        args);
                        }
-                       pthread_mutex_unlock(&m->mtx);
                }
        }
-       pthread_mutex_unlock(&masters_mtx);
 }
 
 static uint8_t parse_filter(const char *filterstr)
@@ -370,13 +365,11 @@ DEFUN (show_thread_poll,
        struct listnode *node;
        struct thread_master *m;
 
-       pthread_mutex_lock(&masters_mtx);
-       {
+       frr_with_mutex(&masters_mtx) {
                for (ALL_LIST_ELEMENTS_RO(masters, node, m)) {
                        show_thread_poll_helper(vty, m);
                }
        }
-       pthread_mutex_unlock(&masters_mtx);
 
        return CMD_SUCCESS;
 }
@@ -487,26 +480,22 @@ struct thread_master *thread_master_create(const char *name)
                                   sizeof(struct pollfd) * rv->handler.pfdsize);
 
        /* add to list of threadmasters */
-       pthread_mutex_lock(&masters_mtx);
-       {
+       frr_with_mutex(&masters_mtx) {
                if (!masters)
                        masters = list_new();
 
                listnode_add(masters, rv);
        }
-       pthread_mutex_unlock(&masters_mtx);
 
        return rv;
 }
 
 void thread_master_set_name(struct thread_master *master, const char *name)
 {
-       pthread_mutex_lock(&master->mtx);
-       {
+       frr_with_mutex(&master->mtx) {
                XFREE(MTYPE_THREAD_MASTER, master->name);
                master->name = XSTRDUP(MTYPE_THREAD_MASTER, name);
        }
-       pthread_mutex_unlock(&master->mtx);
 }
 
 #define THREAD_UNUSED_DEPTH 10
@@ -569,13 +558,11 @@ static void thread_array_free(struct thread_master *m,
  */
 void thread_master_free_unused(struct thread_master *m)
 {
-       pthread_mutex_lock(&m->mtx);
-       {
+       frr_with_mutex(&m->mtx) {
                struct thread *t;
                while ((t = thread_list_pop(&m->unuse)))
                        thread_free(m, t);
        }
-       pthread_mutex_unlock(&m->mtx);
 }
 
 /* Stop thread scheduler. */
@@ -583,14 +570,12 @@ void thread_master_free(struct thread_master *m)
 {
        struct thread *t;
 
-       pthread_mutex_lock(&masters_mtx);
-       {
+       frr_with_mutex(&masters_mtx) {
                listnode_delete(masters, m);
                if (masters->count == 0) {
                        list_delete(&masters);
                }
        }
-       pthread_mutex_unlock(&masters_mtx);
 
        thread_array_free(m, m->read);
        thread_array_free(m, m->write);
@@ -621,11 +606,9 @@ unsigned long thread_timer_remain_msec(struct thread *thread)
 {
        int64_t remain;
 
-       pthread_mutex_lock(&thread->mtx);
-       {
+       frr_with_mutex(&thread->mtx) {
                remain = monotime_until(&thread->u.sands, NULL) / 1000LL;
        }
-       pthread_mutex_unlock(&thread->mtx);
 
        return remain < 0 ? 0 : remain;
 }
@@ -642,11 +625,9 @@ unsigned long thread_timer_remain_second(struct thread *thread)
 struct timeval thread_timer_remain(struct thread *thread)
 {
        struct timeval remain;
-       pthread_mutex_lock(&thread->mtx);
-       {
+       frr_with_mutex(&thread->mtx) {
                monotime_until(&thread->u.sands, &remain);
        }
-       pthread_mutex_unlock(&thread->mtx);
        return remain;
 }
 
@@ -770,14 +751,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
        struct thread **thread_array;
 
        assert(fd >= 0 && fd < m->fd_limit);
-       pthread_mutex_lock(&m->mtx);
-       {
-               if (t_ptr
-                   && *t_ptr) // thread is already scheduled; don't reschedule
-               {
-                       pthread_mutex_unlock(&m->mtx);
-                       return NULL;
-               }
+       frr_with_mutex(&m->mtx) {
+               if (t_ptr && *t_ptr)
+                       // thread is already scheduled; don't reschedule
+                       break;
 
                /* default to a new pollfd */
                nfds_t queuepos = m->handler.pfdcount;
@@ -817,12 +794,10 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
                        m->handler.pfdcount++;
 
                if (thread) {
-                       pthread_mutex_lock(&thread->mtx);
-                       {
+                       frr_with_mutex(&thread->mtx) {
                                thread->u.fd = fd;
                                thread_array[thread->u.fd] = thread;
                        }
-                       pthread_mutex_unlock(&thread->mtx);
 
                        if (t_ptr) {
                                *t_ptr = thread;
@@ -832,7 +807,6 @@ struct thread *funcname_thread_add_read_write(int dir, struct thread_master *m,
 
                AWAKEN(m);
        }
-       pthread_mutex_unlock(&m->mtx);
 
        return thread;
 }
@@ -850,19 +824,14 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
        assert(type == THREAD_TIMER);
        assert(time_relative);
 
-       pthread_mutex_lock(&m->mtx);
-       {
-               if (t_ptr
-                   && *t_ptr) // thread is already scheduled; don't reschedule
-               {
-                       pthread_mutex_unlock(&m->mtx);
+       frr_with_mutex(&m->mtx) {
+               if (t_ptr && *t_ptr)
+                       // thread is already scheduled; don't reschedule
                        return NULL;
-               }
 
                thread = thread_get(m, type, func, arg, debugargpass);
 
-               pthread_mutex_lock(&thread->mtx);
-               {
+               frr_with_mutex(&thread->mtx) {
                        monotime(&thread->u.sands);
                        timeradd(&thread->u.sands, time_relative,
                                 &thread->u.sands);
@@ -872,11 +841,9 @@ funcname_thread_add_timer_timeval(struct thread_master *m,
                                thread->ref = t_ptr;
                        }
                }
-               pthread_mutex_unlock(&thread->mtx);
 
                AWAKEN(m);
        }
-       pthread_mutex_unlock(&m->mtx);
 
        return thread;
 }
@@ -933,26 +900,20 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
                                         void *arg, int val,
                                         struct thread **t_ptr, debugargdef)
 {
-       struct thread *thread;
+       struct thread *thread = NULL;
 
        assert(m != NULL);
 
-       pthread_mutex_lock(&m->mtx);
-       {
-               if (t_ptr
-                   && *t_ptr) // thread is already scheduled; don't reschedule
-               {
-                       pthread_mutex_unlock(&m->mtx);
-                       return NULL;
-               }
+       frr_with_mutex(&m->mtx) {
+               if (t_ptr && *t_ptr)
+                       // thread is already scheduled; don't reschedule
+                       break;
 
                thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
-               pthread_mutex_lock(&thread->mtx);
-               {
+               frr_with_mutex(&thread->mtx) {
                        thread->u.val = val;
                        thread_list_add_tail(&m->event, thread);
                }
-               pthread_mutex_unlock(&thread->mtx);
 
                if (t_ptr) {
                        *t_ptr = thread;
@@ -961,7 +922,6 @@ struct thread *funcname_thread_add_event(struct thread_master *m,
 
                AWAKEN(m);
        }
-       pthread_mutex_unlock(&m->mtx);
 
        return thread;
 }
@@ -1143,15 +1103,13 @@ void thread_cancel_event(struct thread_master *master, void *arg)
 {
        assert(master->owner == pthread_self());
 
-       pthread_mutex_lock(&master->mtx);
-       {
+       frr_with_mutex(&master->mtx) {
                struct cancel_req *cr =
                        XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
                cr->eventobj = arg;
                listnode_add(master->cancel_req, cr);
                do_thread_cancel(master);
        }
-       pthread_mutex_unlock(&master->mtx);
 }
 
 /**
@@ -1167,15 +1125,13 @@ void thread_cancel(struct thread *thread)
 
        assert(master->owner == pthread_self());
 
-       pthread_mutex_lock(&master->mtx);
-       {
+       frr_with_mutex(&master->mtx) {
                struct cancel_req *cr =
                        XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
                cr->thread = thread;
                listnode_add(master->cancel_req, cr);
                do_thread_cancel(master);
        }
-       pthread_mutex_unlock(&master->mtx);
 }
 
 /**
@@ -1208,8 +1164,7 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
        assert(!(thread && eventobj) && (thread || eventobj));
        assert(master->owner != pthread_self());
 
-       pthread_mutex_lock(&master->mtx);
-       {
+       frr_with_mutex(&master->mtx) {
                master->canceled = false;
 
                if (thread) {
@@ -1228,7 +1183,6 @@ void thread_cancel_async(struct thread_master *master, struct thread **thread,
                while (!master->canceled)
                        pthread_cond_wait(&master->cancel_cond, &master->mtx);
        }
-       pthread_mutex_unlock(&master->mtx);
 }
 /* ------------------------------------------------------------------------- */
 
@@ -1527,22 +1481,18 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
 int thread_should_yield(struct thread *thread)
 {
        int result;
-       pthread_mutex_lock(&thread->mtx);
-       {
+       frr_with_mutex(&thread->mtx) {
                result = monotime_since(&thread->real, NULL)
                         > (int64_t)thread->yield;
        }
-       pthread_mutex_unlock(&thread->mtx);
        return result;
 }
 
 void thread_set_yield_time(struct thread *thread, unsigned long yield_time)
 {
-       pthread_mutex_lock(&thread->mtx);
-       {
+       frr_with_mutex(&thread->mtx) {
                thread->yield = yield_time;
        }
-       pthread_mutex_unlock(&thread->mtx);
 }
 
 void thread_getrusage(RUSAGE_T *r)
@@ -1637,20 +1587,16 @@ void funcname_thread_execute(struct thread_master *m,
        struct thread *thread;
 
        /* Get or allocate new thread to execute. */
-       pthread_mutex_lock(&m->mtx);
-       {
+       frr_with_mutex(&m->mtx) {
                thread = thread_get(m, THREAD_EVENT, func, arg, debugargpass);
 
                /* Set its event value. */
-               pthread_mutex_lock(&thread->mtx);
-               {
+               frr_with_mutex(&thread->mtx) {
                        thread->add_type = THREAD_EXECUTE;
                        thread->u.val = val;
                        thread->ref = &thread;
                }
-               pthread_mutex_unlock(&thread->mtx);
        }
-       pthread_mutex_unlock(&m->mtx);
 
        /* Execute thread doing all accounting. */
        thread_call(thread);
diff --git a/tools/coccinelle/frr_with_mutex.cocci b/tools/coccinelle/frr_with_mutex.cocci
new file mode 100644
index 0000000..ec8b739
--- /dev/null
+++ b/tools/coccinelle/frr_with_mutex.cocci
@@ -0,0 +1,23 @@
+@@
+expression E;
+iterator name frr_with_mutex;
+@@
+
+- pthread_mutex_lock(E);
++ frr_with_mutex(E) {
+- {
+    ...
+- }
+- pthread_mutex_unlock(E);
++ }
+
+
+@@
+expression E;
+@@
+
+- pthread_mutex_lock(E);
++ frr_with_mutex(E) {
+  ...
+- pthread_mutex_unlock(E);
++ }
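
(The semantic patch above automates the conversion applied throughout
this commit.  Assuming coccinelle is installed, it can be run with
spatch, e.g. "spatch --sp-file tools/coccinelle/frr_with_mutex.cocci
--in-place lib/hash.c"; the exact option spelling may vary between
coccinelle versions, and without --in-place spatch only prints the
proposed diff instead of rewriting the file.)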
diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c
index 335cc8294c531d282b6ccb2a0b4b1804963f2968..a7058e7928a005c2a0f595541b578896131fe397 100644 (file)
@@ -37,6 +37,7 @@
 #include "vrf.h"
 #include "workqueue.h"
 #include "nexthop_group_private.h"
+#include "frr_pthread.h"
 
 #include "zebra/zebra_router.h"
 #include "zebra/connected.h"
@@ -3204,12 +3205,10 @@ static int rib_process_dplane_results(struct thread *thread)
                TAILQ_INIT(&ctxlist);
 
                /* Take lock controlling queue of results */
-               pthread_mutex_lock(&dplane_mutex);
-               {
+               frr_with_mutex(&dplane_mutex) {
                        /* Dequeue list of context structs */
                        dplane_ctx_list_append(&ctxlist, &rib_dplane_q);
                }
-               pthread_mutex_unlock(&dplane_mutex);
 
                /* Dequeue context block */
                ctx = dplane_ctx_dequeue(&ctxlist);
@@ -3300,12 +3299,10 @@ static int rib_process_dplane_results(struct thread *thread)
 static int rib_dplane_results(struct dplane_ctx_q *ctxlist)
 {
        /* Take lock controlling queue of results */
-       pthread_mutex_lock(&dplane_mutex);
-       {
+       frr_with_mutex(&dplane_mutex) {
                /* Enqueue context blocks */
                dplane_ctx_list_append(&rib_dplane_q, ctxlist);
        }
-       pthread_mutex_unlock(&dplane_mutex);
 
        /* Ensure event is signalled to zebra main pthread */
        thread_add_event(zrouter.master, rib_process_dplane_results, NULL, 0,
diff --git a/zebra/zserv.c b/zebra/zserv.c
index 70b4594813ecfd7b6ccefc480f4eeae4a3aa3b8b..bd75b66a20c0c43f109fbf89761e717fc8893ee5 100644 (file)
@@ -231,13 +231,11 @@ static int zserv_write(struct thread *thread)
 
        cache = stream_fifo_new();
 
-       pthread_mutex_lock(&client->obuf_mtx);
-       {
+       frr_with_mutex(&client->obuf_mtx) {
                while (stream_fifo_head(client->obuf_fifo))
                        stream_fifo_push(cache,
                                         stream_fifo_pop(client->obuf_fifo));
        }
-       pthread_mutex_unlock(&client->obuf_mtx);
 
        if (cache->tail) {
                msg = cache->tail;
@@ -427,13 +425,11 @@ static int zserv_read(struct thread *thread)
                                      memory_order_relaxed);
 
                /* publish read packets on client's input queue */
-               pthread_mutex_lock(&client->ibuf_mtx);
-               {
+               frr_with_mutex(&client->ibuf_mtx) {
                        while (cache->head)
                                stream_fifo_push(client->ibuf_fifo,
                                                 stream_fifo_pop(cache));
                }
-               pthread_mutex_unlock(&client->ibuf_mtx);
 
                /* Schedule job to process those packets */
                zserv_event(client, ZSERV_PROCESS_MESSAGES);
@@ -499,8 +495,7 @@ static int zserv_process_messages(struct thread *thread)
        uint32_t p2p = zrouter.packets_to_process;
        bool need_resched = false;
 
-       pthread_mutex_lock(&client->ibuf_mtx);
-       {
+       frr_with_mutex(&client->ibuf_mtx) {
                uint32_t i;
                for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
                     ++i) {
@@ -516,7 +511,6 @@ static int zserv_process_messages(struct thread *thread)
                if (stream_fifo_head(client->ibuf_fifo))
                        need_resched = true;
        }
-       pthread_mutex_unlock(&client->ibuf_mtx);
 
        while (stream_fifo_head(cache)) {
                msg = stream_fifo_pop(cache);
@@ -535,11 +529,9 @@ static int zserv_process_messages(struct thread *thread)
 
 int zserv_send_message(struct zserv *client, struct stream *msg)
 {
-       pthread_mutex_lock(&client->obuf_mtx);
-       {
+       frr_with_mutex(&client->obuf_mtx) {
                stream_fifo_push(client->obuf_fifo, msg);
        }
-       pthread_mutex_unlock(&client->obuf_mtx);
 
        zserv_client_event(client, ZSERV_CLIENT_WRITE);