Merge pull request #895 from qlyoung/flush-ready

lib: flush ready queue before poll() again
This commit is contained in:
Jafar Al-Gharaibeh 2017-08-02 14:24:41 -05:00 committed by GitHub
commit dacb17162a
2 changed files with 35 additions and 19 deletions

View File

@@ -47,6 +47,9 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
write(m->io_pipe[1], &wakebyte, 1); \ write(m->io_pipe[1], &wakebyte, 1); \
} while (0); } while (0);
/* max # of thread_fetch() calls before we force a poll() */
#define MAX_TICK_IO 1000
/* control variable for initializer */ /* control variable for initializer */
pthread_once_t init_once = PTHREAD_ONCE_INIT; pthread_once_t init_once = PTHREAD_ONCE_INIT;
pthread_key_t thread_current; pthread_key_t thread_current;
@@ -1355,9 +1358,16 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
memcpy(m->handler.copy, m->handler.pfds, memcpy(m->handler.copy, m->handler.pfds,
m->handler.copycount * sizeof(struct pollfd)); m->handler.copycount * sizeof(struct pollfd));
/*
* Attempt to flush ready queue before going into poll().
* This is performance-critical. Think twice before modifying.
*/
if (m->ready.count == 0 || m->tick_since_io >= MAX_TICK_IO) {
pthread_mutex_unlock(&m->mtx); pthread_mutex_unlock(&m->mtx);
{ {
num = fd_poll(m, m->handler.copy, m->handler.pfdsize, m->tick_since_io = 0;
num = fd_poll(m, m->handler.copy,
m->handler.pfdsize,
m->handler.copycount, tw); m->handler.copycount, tw);
} }
pthread_mutex_lock(&m->mtx); pthread_mutex_lock(&m->mtx);
@@ -1366,20 +1376,25 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
if (num < 0) { if (num < 0) {
if (errno == EINTR) { if (errno == EINTR) {
pthread_mutex_unlock(&m->mtx); pthread_mutex_unlock(&m->mtx);
continue; /* loop around to signal handler */ /* loop around to signal handler */
continue;
} }
/* else die */ /* else die */
zlog_warn("poll() error: %s", safe_strerror(errno)); zlog_warn("poll() error: %s",
safe_strerror(errno));
pthread_mutex_unlock(&m->mtx); pthread_mutex_unlock(&m->mtx);
fetch = NULL; fetch = NULL;
break; break;
} }
/* Since we could have received more cancellation requests /* Since we could have received more cancellation
* during poll(), process those */ * requests during poll(), process those */
do_thread_cancel(m); do_thread_cancel(m);
} else
m->tick_since_io++;
/* Post timers to ready queue. */ /* Post timers to ready queue. */
monotime(&now); monotime(&now);
thread_process_timers(m->timer, &now); thread_process_timers(m->timer, &now);

View File

@@ -70,6 +70,7 @@ struct cancel_req {
struct thread_master { struct thread_master {
char *name; char *name;
int tick_since_io;
struct thread **read; struct thread **read;
struct thread **write; struct thread **write;
struct pqueue *timer; struct pqueue *timer;