Diffstat (limited to 'lib')
-rw-r--r--  lib/agentx.c          6
-rw-r--r--  lib/event.c          52
-rw-r--r--  lib/event.h          17
-rw-r--r--  lib/frr_zmq.c         8
-rw-r--r--  lib/libfrr_trace.h    4
-rw-r--r--  lib/northbound_cli.c  4
-rw-r--r--  lib/spf_backoff.c     6
-rw-r--r--  lib/zlog_5424.c       4
8 files changed, 50 insertions(+), 51 deletions(-)
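This commit renames FRR's task-cancellation API from the thread_* spelling to event_*. A minimal before/after sketch of a call site (the daemon state here is hypothetical; event_cancel() and struct event come from lib/event.h as changed below):

	#include "event.h"

	static struct event *t_periodic;	/* hypothetical pending task */

	static void stop_periodic(void)
	{
		/* pre-commit spelling: thread_cancel(&t_periodic); */
		event_cancel(&t_periodic);	/* cancels the task and NULLs t_periodic */
	}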
diff --git a/lib/agentx.c b/lib/agentx.c
index fc724e7d70..7f66f9f9c7 100644
--- a/lib/agentx.c
+++ b/lib/agentx.c
@@ -97,7 +97,7 @@ static void agentx_events_update(void)
struct event **thr;
int fd, thr_fd;
- thread_cancel(&timeout_thr);
+ event_cancel(&timeout_thr);
FD_ZERO(&fds);
snmp_select_info(&maxfd, &fds, &timeout, &block);
@@ -119,7 +119,7 @@ static void agentx_events_update(void)
if (thr_fd == fd) {
struct listnode *nextln = listnextnode(ln);
if (!FD_ISSET(fd, &fds)) {
- thread_cancel(thr);
+ event_cancel(thr);
XFREE(MTYPE_TMP, thr);
list_delete_node(events, ln);
}
@@ -142,7 +142,7 @@ static void agentx_events_update(void)
while (ln) {
struct listnode *nextln = listnextnode(ln);
thr = listgetdata(ln);
- thread_cancel(thr);
+ event_cancel(thr);
XFREE(MTYPE_TMP, thr);
list_delete_node(events, ln);
ln = nextln;
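agentx.c tracks one heap-allocated struct event * holder per SNMP file descriptor, so each cancellation in these hunks pairs event_cancel() with freeing the holder and unlinking its list node. Condensed from the hunks above (loop bookkeeping elided):

	thr = listgetdata(ln);		/* struct event ** stored in the list */
	event_cancel(thr);		/* stop the pending read task */
	XFREE(MTYPE_TMP, thr);		/* free the holder itself */
	list_delete_node(events, ln);	/* drop it from the tracking list */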
diff --git a/lib/event.c b/lib/event.c
index f081ec4274..a3d2ec7a39 100644
--- a/lib/event.c
+++ b/lib/event.c
@@ -38,7 +38,7 @@ struct cancel_req {
};
/* Flags for task cancellation */
-#define THREAD_CANCEL_FLAG_READY 0x01
+#define EVENT_CANCEL_FLAG_READY 0x01
static int thread_timer_cmp(const struct event *a, const struct event *b)
{
@@ -1169,8 +1169,8 @@ void _event_add_event(const struct xref_threadsched *xref,
* - POLLIN
* - POLLOUT
*/
-static void thread_cancel_rw(struct thread_master *master, int fd, short state,
- int idx_hint)
+static void event_cancel_rw(struct thread_master *master, int fd, short state,
+ int idx_hint)
{
bool found = false;
@@ -1267,7 +1267,7 @@ static void cancel_arg_helper(struct thread_master *master,
}
/* If requested, stop here and ignore io and timers */
- if (CHECK_FLAG(cr->flags, THREAD_CANCEL_FLAG_READY))
+ if (CHECK_FLAG(cr->flags, EVENT_CANCEL_FLAG_READY))
return;
/* Check the io tasks */
@@ -1283,7 +1283,7 @@ static void cancel_arg_helper(struct thread_master *master,
fd = pfd->fd;
/* Found a match to cancel: clean up fd arrays */
- thread_cancel_rw(master, pfd->fd, pfd->events, i);
+ event_cancel_rw(master, pfd->fd, pfd->events, i);
/* Clean up thread arrays */
master->read[fd] = NULL;
@@ -1328,7 +1328,7 @@ static void cancel_arg_helper(struct thread_master *master,
* @param master the thread master to process
* @REQUIRE master->mtx
*/
-static void do_thread_cancel(struct thread_master *master)
+static void do_event_cancel(struct thread_master *master)
{
struct thread_list_head *list = NULL;
struct event **thread_array = NULL;
@@ -1364,11 +1364,11 @@ static void do_thread_cancel(struct thread_master *master)
/* Determine the appropriate queue to cancel the thread from */
switch (thread->type) {
case THREAD_READ:
- thread_cancel_rw(master, thread->u.fd, POLLIN, -1);
+ event_cancel_rw(master, thread->u.fd, POLLIN, -1);
thread_array = master->read;
break;
case THREAD_WRITE:
- thread_cancel_rw(master, thread->u.fd, POLLOUT, -1);
+ event_cancel_rw(master, thread->u.fd, POLLOUT, -1);
thread_array = master->write;
break;
case THREAD_TIMER:
@@ -1401,7 +1401,7 @@ static void do_thread_cancel(struct thread_master *master)
if (master->cancel_req)
list_delete_all_node(master->cancel_req);
- /* Wake up any threads which may be blocked in thread_cancel_async() */
+ /* Wake up any threads which may be blocked in event_cancel_async() */
master->canceled = true;
pthread_cond_broadcast(&master->cancel_cond);
}
@@ -1426,7 +1426,7 @@ static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
frr_with_mutex (&m->mtx) {
cr->eventobj = arg;
listnode_add(m->cancel_req, cr);
- do_thread_cancel(m);
+ do_event_cancel(m);
}
}
@@ -1438,7 +1438,7 @@ static void cancel_event_helper(struct thread_master *m, void *arg, int flags)
* @param m the thread_master to cancel from
* @param arg the argument passed when creating the event
*/
-void thread_cancel_event(struct thread_master *master, void *arg)
+void event_cancel_event(struct thread_master *master, void *arg)
{
cancel_event_helper(master, arg, 0);
}
@@ -1451,11 +1451,11 @@ void thread_cancel_event(struct thread_master *master, void *arg)
* @param m the thread_master to cancel from
* @param arg the argument passed when creating the event
*/
-void thread_cancel_event_ready(struct thread_master *m, void *arg)
+void event_cancel_event_ready(struct thread_master *m, void *arg)
{
/* Only cancel ready/event tasks */
- cancel_event_helper(m, arg, THREAD_CANCEL_FLAG_READY);
+ cancel_event_helper(m, arg, EVENT_CANCEL_FLAG_READY);
}
/**
@@ -1465,7 +1465,7 @@ void thread_cancel_event_ready(struct thread_master *m, void *arg)
*
* @param thread task to cancel
*/
-void thread_cancel(struct event **thread)
+void event_cancel(struct event **thread)
{
struct thread_master *master;
@@ -1474,10 +1474,10 @@ void thread_cancel(struct event **thread)
master = (*thread)->master;
- frrtrace(9, frr_libfrr, thread_cancel, master,
- (*thread)->xref->funcname, (*thread)->xref->xref.file,
- (*thread)->xref->xref.line, NULL, (*thread)->u.fd,
- (*thread)->u.val, (*thread)->arg, (*thread)->u.sands.tv_sec);
+ frrtrace(9, frr_libfrr, event_cancel, master, (*thread)->xref->funcname,
+ (*thread)->xref->xref.file, (*thread)->xref->xref.line, NULL,
+ (*thread)->u.fd, (*thread)->u.val, (*thread)->arg,
+ (*thread)->u.sands.tv_sec);
assert(master->owner == pthread_self());
@@ -1486,7 +1486,7 @@ void thread_cancel(struct event **thread)
XCALLOC(MTYPE_TMP, sizeof(struct cancel_req));
cr->thread = *thread;
listnode_add(master->cancel_req, cr);
- do_thread_cancel(master);
+ do_event_cancel(master);
}
*thread = NULL;
@@ -1516,19 +1516,19 @@ void thread_cancel(struct event **thread)
* @param thread pointer to thread to cancel
* @param eventobj the event
*/
-void thread_cancel_async(struct thread_master *master, struct event **thread,
- void *eventobj)
+void event_cancel_async(struct thread_master *master, struct event **thread,
+ void *eventobj)
{
assert(!(thread && eventobj) && (thread || eventobj));
if (thread && *thread)
- frrtrace(9, frr_libfrr, thread_cancel_async, master,
+ frrtrace(9, frr_libfrr, event_cancel_async, master,
(*thread)->xref->funcname, (*thread)->xref->xref.file,
(*thread)->xref->xref.line, NULL, (*thread)->u.fd,
(*thread)->u.val, (*thread)->arg,
(*thread)->u.sands.tv_sec);
else
- frrtrace(9, frr_libfrr, thread_cancel_async, master, NULL, NULL,
+ frrtrace(9, frr_libfrr, event_cancel_async, master, NULL, NULL,
0, NULL, 0, 0, eventobj, 0);
assert(master->owner != pthread_self());
@@ -1638,10 +1638,10 @@ static void thread_process_io(struct thread_master *m, unsigned int num)
ready++;
/*
- * Unless someone has called thread_cancel from another
+ * Unless someone has called event_cancel from another
* pthread, the only thing that could have changed in
* m->handler.pfds while we were asleep is the .events
- * field in a given pollfd. Barring thread_cancel() that
+ * field in a given pollfd. Barring event_cancel() that
* value should be a superset of the values we have in our
* copy, so there's no need to update it. Similarily,
* barring deletion, the fd should still be a valid index
@@ -1758,7 +1758,7 @@ struct event *thread_fetch(struct thread_master *m, struct event *fetch)
pthread_mutex_lock(&m->mtx);
/* Process any pending cancellation requests */
- do_thread_cancel(m);
+ do_event_cancel(m);
/*
* Attempt to flush ready queue before going into poll().
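Taken together, the assertions in event_cancel() and event_cancel_async() encode the threading contract: synchronous cancel only from the pthread that owns the master, asynchronous cancel only from another pthread, and the async variant takes exactly one of a task pointer or an event argument. A hedged sketch of both sides (master, t_task, and arg are hypothetical):

	/* from the event loop's own pthread */
	event_cancel(&t_task);		/* asserts master->owner == pthread_self() */

	/* from any other pthread; pass a task OR an arg, never both */
	event_cancel_async(master, &t_task, NULL);
	event_cancel_async(master, NULL, arg);

The async variant blocks the caller until the owner services the request: do_event_cancel() sets master->canceled and broadcasts master->cancel_cond, as shown in the hunk at line 1401 above.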
diff --git a/lib/event.h b/lib/event.h
index 375cd46bbf..e5b6506fd0 100644
--- a/lib/event.h
+++ b/lib/event.h
@@ -158,10 +158,10 @@ struct cpu_thread_history {
/*
* Please consider this macro deprecated, and do not use it in new code.
*/
-#define THREAD_OFF(thread) \
- do { \
- if ((thread)) \
- thread_cancel(&(thread)); \
+#define THREAD_OFF(thread) \
+ do { \
+ if ((thread)) \
+ event_cancel(&(thread)); \
} while (0)
/*
@@ -241,13 +241,12 @@ extern void _thread_execute(const struct xref_threadsched *xref,
struct thread_master *master,
void (*fn)(struct event *), void *arg, int val);
-extern void thread_cancel(struct event **event);
-extern void thread_cancel_async(struct thread_master *, struct event **,
- void *);
+extern void event_cancel(struct event **event);
+extern void event_cancel_async(struct thread_master *, struct event **, void *);
/* Cancel ready tasks with an arg matching 'arg' */
-extern void thread_cancel_event_ready(struct thread_master *m, void *arg);
+extern void event_cancel_event_ready(struct thread_master *m, void *arg);
/* Cancel all tasks with an arg matching 'arg', including timers and io */
-extern void thread_cancel_event(struct thread_master *m, void *arg);
+extern void event_cancel_event(struct thread_master *m, void *arg);
extern struct event *thread_fetch(struct thread_master *, struct event *event);
extern void thread_call(struct event *event);
extern unsigned long thread_timer_remain_second(struct event *event);
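event.h keeps THREAD_OFF() but its body is now just a guarded event_cancel(), and the comment above it asks that new code avoid the macro. A hypothetical call site, before and after:

	THREAD_OFF(vty->t_confirmed_commit_timeout);	/* deprecated macro */
	event_cancel(&vty->t_confirmed_commit_timeout);	/* preferred direct call */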
diff --git a/lib/frr_zmq.c b/lib/frr_zmq.c
index 65e1cf5cfb..4a860fe253 100644
--- a/lib/frr_zmq.c
+++ b/lib/frr_zmq.c
@@ -190,7 +190,7 @@ int _frrzmq_event_add_read(const struct xref_threadsched *xref,
cb->in_cb = false;
if (events & ZMQ_POLLIN) {
- thread_cancel(&cb->read.thread);
+ event_cancel(&cb->read.thread);
event_add_event(master, frrzmq_read_msg, cbp, fd,
&cb->read.thread);
@@ -296,7 +296,7 @@ int _frrzmq_event_add_write(const struct xref_threadsched *xref,
cb->in_cb = false;
if (events & ZMQ_POLLOUT) {
- thread_cancel(&cb->write.thread);
+ event_cancel(&cb->write.thread);
_event_add_event(xref, master, frrzmq_write_msg, cbp, fd,
&cb->write.thread);
@@ -311,7 +311,7 @@ void frrzmq_thread_cancel(struct frrzmq_cb **cb, struct cb_core *core)
if (!cb || !*cb)
return;
core->cancelled = true;
- thread_cancel(&core->thread);
+ event_cancel(&core->thread);
/* If cancelled from within a callback, don't try to free memory
* in this path.
@@ -344,7 +344,7 @@ void frrzmq_check_events(struct frrzmq_cb **cbp, struct cb_core *core,
if ((events & event) && core->thread && !core->cancelled) {
struct thread_master *tm = core->thread->master;
- thread_cancel(&core->thread);
+ event_cancel(&core->thread);
if (event == ZMQ_POLLIN)
event_add_event(tm, frrzmq_read_msg, cbp, cb->fd,
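frr_zmq.c repeatedly converts a still-armed poll task into an immediate event once libzmq reports pending input, so the message is drained on the next loop iteration rather than waiting for another fd wakeup. The core of that pattern, condensed from the hunks above:

	if (events & ZMQ_POLLIN) {
		event_cancel(&cb->read.thread);		/* drop the fd poll task */
		event_add_event(master, frrzmq_read_msg,	/* run the reader now */
				cbp, fd, &cb->read.thread);
	}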
diff --git a/lib/libfrr_trace.h b/lib/libfrr_trace.h
index 2a1bb2f6c3..ed1dcfb159 100644
--- a/lib/libfrr_trace.h
+++ b/lib/libfrr_trace.h
@@ -103,8 +103,8 @@ THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_timer)
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_event)
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_read)
THREAD_OPERATION_TRACEPOINT_INSTANCE(schedule_write)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel)
-THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_cancel_async)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel)
+THREAD_OPERATION_TRACEPOINT_INSTANCE(event_cancel_async)
THREAD_OPERATION_TRACEPOINT_INSTANCE(thread_call)
TRACEPOINT_EVENT(
diff --git a/lib/northbound_cli.c b/lib/northbound_cli.c
index 704904304c..edfca75cd2 100644
--- a/lib/northbound_cli.c
+++ b/lib/northbound_cli.c
@@ -265,7 +265,7 @@ int nb_cli_rpc(struct vty *vty, const char *xpath, struct list *input,
void nb_cli_confirmed_commit_clean(struct vty *vty)
{
- thread_cancel(&vty->t_confirmed_commit_timeout);
+ event_cancel(&vty->t_confirmed_commit_timeout);
nb_config_free(vty->confirmed_commit_rollback);
vty->confirmed_commit_rollback = NULL;
}
@@ -328,7 +328,7 @@ static int nb_cli_commit(struct vty *vty, bool force,
"%% Resetting confirmed-commit timeout to %u minute(s)\n\n",
confirmed_timeout);
- thread_cancel(&vty->t_confirmed_commit_timeout);
+ event_cancel(&vty->t_confirmed_commit_timeout);
event_add_timer(master, nb_cli_confirmed_commit_timeout,
vty, confirmed_timeout * 60,
&vty->t_confirmed_commit_timeout);
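The northbound hunk shows the cancel-then-rearm idiom for resetting a running timer. A sketch with hypothetical names (my_state, my_timeout_handler); event_cancel() and event_add_timer() are the library calls used above:

	static void rearm_timeout(struct thread_master *master,
				  struct my_state *st, unsigned int minutes)
	{
		event_cancel(&st->t_timeout);	/* drop the pending timer */
		event_add_timer(master, my_timeout_handler, st,
				minutes * 60, &st->t_timeout);
	}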
diff --git a/lib/spf_backoff.c b/lib/spf_backoff.c
index f1fb84e0b5..367386e6a4 100644
--- a/lib/spf_backoff.c
+++ b/lib/spf_backoff.c
@@ -97,8 +97,8 @@ void spf_backoff_free(struct spf_backoff *backoff)
if (!backoff)
return;
- thread_cancel(&backoff->t_holddown);
- thread_cancel(&backoff->t_timetolearn);
+ event_cancel(&backoff->t_holddown);
+ event_cancel(&backoff->t_timetolearn);
XFREE(MTYPE_SPF_BACKOFF_NAME, backoff->name);
XFREE(MTYPE_SPF_BACKOFF, backoff);
@@ -150,7 +150,7 @@ long spf_backoff_schedule(struct spf_backoff *backoff)
break;
case SPF_BACKOFF_SHORT_WAIT:
case SPF_BACKOFF_LONG_WAIT:
- thread_cancel(&backoff->t_holddown);
+ event_cancel(&backoff->t_holddown);
event_add_timer_msec(backoff->m, spf_backoff_holddown_elapsed,
backoff, backoff->holddown,
&backoff->t_holddown);
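spf_backoff_free() also illustrates the teardown ordering: cancel every outstanding timer before freeing the structure they point into, so no callback can fire against freed memory. The same idiom with a hypothetical context struct:

	static void my_ctx_free(struct my_ctx *ctx)
	{
		if (!ctx)
			return;
		event_cancel(&ctx->t_retry);	/* must precede XFREE: the timer
						 * callback dereferences ctx */
		XFREE(MTYPE_TMP, ctx);
	}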
diff --git a/lib/zlog_5424.c b/lib/zlog_5424.c
index 7bfc89bb22..2a0e6aff85 100644
--- a/lib/zlog_5424.c
+++ b/lib/zlog_5424.c
@@ -1053,7 +1053,7 @@ bool zlog_5424_apply_dst(struct zlog_cfg_5424 *zcf)
{
int fd = -1;
- thread_cancel(&zcf->t_reconnect);
+ event_cancel(&zcf->t_reconnect);
if (zcf->prio_min != ZLOG_DISABLED)
fd = zlog_5424_open(zcf, -1);
@@ -1106,7 +1106,7 @@ bool zlog_5424_rotate(struct zlog_cfg_5424 *zcf)
if (!zcf->active)
return true;
- thread_cancel(&zcf->t_reconnect);
+ event_cancel(&zcf->t_reconnect);
/* need to retain the socket type because it also influences
* other fields (packets) and we can't atomically swap these