-rw-r--r--  lib/event.c                    |  4 ++--
-rw-r--r--  lib/event.h                    |  4 ++--
-rw-r--r--  lib/workqueue.c                |  6 +++---
-rw-r--r--  ospfd/ospf_lsa.c               |  2 +-
-rw-r--r--  tests/lib/test_heavy_thread.c  |  2 +-
-rw-r--r--  zebra/zebra_fpm.c              |  2 +-
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/lib/event.c b/lib/event.c
index 8355b3cdd4..59f8928d1b 100644
--- a/lib/event.c
+++ b/lib/event.c
@@ -1910,7 +1910,7 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
    for CPU time. On balance, wall clock time seems to make sense.
    Plus it has the added benefit that gettimeofday should be faster
    than calling getrusage. */
-int thread_should_yield(struct event *thread)
+int event_should_yield(struct event *thread)
 {
 	int result;
 	frr_with_mutex (&thread->mtx) {
@@ -1920,7 +1920,7 @@ int thread_should_yield(struct event *thread)
 	return result;
 }
 
-void thread_set_yield_time(struct event *thread, unsigned long yield_time)
+void event_set_yield_time(struct event *thread, unsigned long yield_time)
 {
 	frr_with_mutex (&thread->mtx) {
 		thread->yield = yield_time;
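
Note: the first hunk's context ends before the body of the comparison. Per the comment above it, the check deliberately uses wall-clock rather than CPU time. A minimal sketch of what such a check can look like, assuming FRR's monotime_since() helper and a `real` (dispatch-time) timestamp on struct event; these names are assumptions, not part of this diff:

/* Sketch only: elapsed wall-clock time since the callback was
 * dispatched is compared against the budget set by
 * event_set_yield_time().  monotime_since() and thread->real are
 * assumed helpers/fields, not shown in this diff. */
int event_should_yield(struct event *thread)
{
	int result;

	frr_with_mutex (&thread->mtx) {
		result = monotime_since(&thread->real, NULL)
			 > (int64_t)thread->yield;
	}
	return result;
}
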
diff --git a/lib/event.h b/lib/event.h
index 68f97bb852..03d7d6d192 100644
--- a/lib/event.h
+++ b/lib/event.h
@@ -254,9 +254,9 @@ extern void event_call(struct event *event);
 extern unsigned long event_timer_remain_second(struct event *event);
 extern struct timeval event_timer_remain(struct event *event);
 extern unsigned long event_timer_remain_msec(struct event *event);
-extern int thread_should_yield(struct event *event);
+extern int event_should_yield(struct event *event);
 /* set yield time for thread */
-extern void thread_set_yield_time(struct event *event, unsigned long);
+extern void event_set_yield_time(struct event *event, unsigned long);
 
 /* Internal libfrr exports */
 extern void thread_getrusage(RUSAGE_T *);
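
Seen from a caller, the renamed pair splits into a scheduling side and a callback side: event_set_yield_time() grants an event a time slice in microseconds, and a long-running callback polls event_should_yield() to decide when to reschedule itself, as lib/workqueue.c and tests/lib/test_heavy_thread.c do below. A minimal sketch; heavy_job, heavy_job_start, process_one_item(), struct work_state, the global `master` event loop, and the 20 ms budget are illustrative only:

/* Callback side: do bounded work, yield once the slice is used up. */
static void heavy_job(struct event *thread)
{
	struct work_state *ws = EVENT_ARG(thread);

	while (ws->i < ws->max) {
		process_one_item(ws, ws->i++);
		if (event_should_yield(thread)) {
			/* budget exhausted: requeue ourselves and return */
			event_add_timer_msec(master, heavy_job, ws, 0, NULL);
			return;
		}
	}
}

/* Scheduling side: optionally grant a larger slice than the
 * EVENT_YIELD_TIME_SLOT default before the job first runs. */
static void heavy_job_start(struct work_state *ws)
{
	struct event *t = NULL;

	event_add_event(master, heavy_job, ws, 0, &t);
	if (thread_is_scheduled(t))
		event_set_yield_time(t, 20 * 1000); /* 20 ms, illustrative */
}
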
diff --git a/lib/workqueue.c b/lib/workqueue.c
index b63dafd7e9..7660663449 100644
--- a/lib/workqueue.c
+++ b/lib/workqueue.c
@@ -128,7 +128,7 @@ static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
 		/* set thread yield time, if needed */
 		if (thread_is_scheduled(wq->thread) &&
 		    wq->spec.yield != EVENT_YIELD_TIME_SLOT)
-			thread_set_yield_time(wq->thread, wq->spec.yield);
+			event_set_yield_time(wq->thread, wq->spec.yield);
 		return 1;
 	} else
 		return 0;
@@ -311,8 +311,8 @@ void work_queue_run(struct event *thread)
 		cycles++;
 
 		/* test if we should yield */
-		if (!(cycles % wq->cycles.granularity)
-		    && thread_should_yield(thread)) {
+		if (!(cycles % wq->cycles.granularity) &&
+		    event_should_yield(thread)) {
 			yielded = 1;
 			goto stats;
 		}
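
Besides the rename, the second hunk rewraps the condition so the && sits at the end of the line, matching FRR's checkpatch style; behavior is unchanged. Note the ordering: the cheap `cycles % granularity` test is evaluated first, so event_should_yield(), which takes the event's mutex, is only consulted once every `granularity` cycles of the hot loop.
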
diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c
index ea5dc651ac..1c5d1bddf8 100644
--- a/ospfd/ospf_lsa.c
+++ b/ospfd/ospf_lsa.c
@@ -3074,7 +3074,7 @@ void ospf_maxage_lsa_remover(struct event *thread)
 		}
 
 		/* TODO: maybe convert this function to a work-queue */
-		if (thread_should_yield(thread)) {
+		if (event_should_yield(thread)) {
 			OSPF_TIMER_ON(ospf->t_maxage,
 				      ospf_maxage_lsa_remover, 0);
 			route_unlock_node(
diff --git a/tests/lib/test_heavy_thread.c b/tests/lib/test_heavy_thread.c
index 9084bf55eb..fd8e910fd5 100644
--- a/tests/lib/test_heavy_thread.c
+++ b/tests/lib/test_heavy_thread.c
@@ -67,7 +67,7 @@ static void clear_something(struct event *thread)
 	while (ws->i < ITERS_MAX) {
 		slow_func(ws->vty, ws->str, ws->i);
 		ws->i++;
-		if (thread_should_yield(thread)) {
+		if (event_should_yield(thread)) {
 			event_add_timer_msec(master, clear_something, ws, 0,
 					     NULL);
 			return;
diff --git a/zebra/zebra_fpm.c b/zebra/zebra_fpm.c
index a97873c384..d453c6f25c 100644
--- a/zebra/zebra_fpm.c
+++ b/zebra/zebra_fpm.c
@@ -285,7 +285,7 @@ union g_addr ipv4ll_gateway;
  */
 static inline int zfpm_thread_should_yield(struct event *t)
 {
-	return thread_should_yield(t);
+	return event_should_yield(t);
 }
 
 /*