summary refs log tree commit diff
path: root/lib/thread.c
diff options
context:
space:
mode:
Diffstat (limited to 'lib/thread.c')
-rw-r--r--    lib/thread.c    99
1 file changed, 60 insertions, 39 deletions
diff --git a/lib/thread.c b/lib/thread.c
index 5db470ef48..4a5c61d036 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -47,6 +47,9 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
write(m->io_pipe[1], &wakebyte, 1); \
} while (0);
+/* max # of thread_fetch() calls before we force a poll() */
+#define MAX_TICK_IO 1000
+
/* control variable for initializer */
pthread_once_t init_once = PTHREAD_ONCE_INIT;
pthread_key_t thread_current;
@@ -337,9 +340,6 @@ static void cancelreq_del(void *cr)
/* initializer, only ever called once */
static void initializer()
{
- if (!masters)
- masters = list_new();
-
pthread_key_create(&thread_current, NULL);
}
@@ -412,9 +412,12 @@ struct thread_master *thread_master_create(const char *name)
rv->handler.copy = XCALLOC(MTYPE_THREAD_MASTER,
sizeof(struct pollfd) * rv->handler.pfdsize);
- /* add to list */
+ /* add to list of threadmasters */
pthread_mutex_lock(&masters_mtx);
{
+ if (!masters)
+ masters = list_new();
+
listnode_add(masters, rv);
}
pthread_mutex_unlock(&masters_mtx);
@@ -548,6 +551,10 @@ void thread_master_free(struct thread_master *m)
pthread_mutex_lock(&masters_mtx);
{
listnode_delete(masters, m);
+ if (masters->count == 0) {
+ list_free (masters);
+ masters = NULL;
+ }
}
pthread_mutex_unlock(&masters_mtx);
@@ -1312,16 +1319,16 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
/* Process any pending cancellation requests */
do_thread_cancel(m);
- /* Post events to ready queue. This must come before the
- * following block
- * since events should occur immediately */
+ /*
+ * Post events to ready queue. This must come before the
+ * following block since events should occur immediately
+ */
thread_process(&m->event);
- /* If there are no tasks on the ready queue, we will poll()
- * until a timer
- * expires or we receive I/O, whichever comes first. The
- * strategy for doing
- * this is:
+ /*
+ * If there are no tasks on the ready queue, we will poll()
+ * until a timer expires or we receive I/O, whichever comes
+ * first. The strategy for doing this is:
*
* - If there are events pending, set the poll() timeout to zero
* - If there are no events pending, but there are timers
@@ -1333,9 +1340,8 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
* - If nothing is pending, it's time for the application to die
*
* In every case except the last, we need to hit poll() at least
- * once per
- * loop to avoid starvation by events */
-
+ * once per loop to avoid starvation by events
+ */
if (m->ready.count == 0)
tw = thread_timer_wait(m->timer, &tv);
@@ -1348,37 +1354,53 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
break;
}
- /* Copy pollfd array + # active pollfds in it. Not necessary to
- * copy
- * the array size as this is fixed. */
+ /*
+ * Copy pollfd array + # active pollfds in it. Not necessary to
+ * copy the array size as this is fixed.
+ */
m->handler.copycount = m->handler.pfdcount;
memcpy(m->handler.copy, m->handler.pfds,
m->handler.copycount * sizeof(struct pollfd));
- pthread_mutex_unlock(&m->mtx);
- {
- num = fd_poll(m, m->handler.copy, m->handler.pfdsize,
- m->handler.copycount, tw);
- }
- pthread_mutex_lock(&m->mtx);
+ /*
+ * Attempt to flush ready queue before going into poll().
+ * This is performance-critical. Think twice before modifying.
+ */
+ if (m->ready.count == 0 || m->tick_since_io >= MAX_TICK_IO) {
+ pthread_mutex_unlock(&m->mtx);
+ {
+ m->tick_since_io = 0;
+ num = fd_poll(m, m->handler.copy,
+ m->handler.pfdsize,
+ m->handler.copycount, tw);
+ }
+ pthread_mutex_lock(&m->mtx);
+
+ /* Handle any errors received in poll() */
+ if (num < 0) {
+ if (errno == EINTR) {
+ pthread_mutex_unlock(&m->mtx);
+ /* loop around to signal handler */
+ continue;
+ }
- /* Handle any errors received in poll() */
- if (num < 0) {
- if (errno == EINTR) {
+ /* else die */
+ zlog_warn("poll() error: %s",
+ safe_strerror(errno));
pthread_mutex_unlock(&m->mtx);
- continue; /* loop around to signal handler */
+ fetch = NULL;
+ break;
}
- /* else die */
- zlog_warn("poll() error: %s", safe_strerror(errno));
- pthread_mutex_unlock(&m->mtx);
- fetch = NULL;
- break;
- }
+ /*
+ * Since we could have received more cancellation
+ * requests during poll(), process those
+ */
+ do_thread_cancel(m);
- /* Since we could have received more cancellation requests
- * during poll(), process those */
- do_thread_cancel(m);
+ } else {
+ m->tick_since_io++;
+ }
/* Post timers to ready queue. */
monotime(&now);
@@ -1388,8 +1410,7 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
if (num > 0)
thread_process_io(m, num);
- /* If we have a ready task, break the loop and return it to the
- * caller */
+ /* have a ready task ==> return it to caller */
if ((thread = thread_trim_head(&m->ready))) {
fetch = thread_run(m, thread, fetch);
if (fetch->ref)