Merge pull request #1087 from qlyoung/fix-weirdness

lib: fix thread scheduling weirdness
David Lamparter 2017-09-05 18:19:45 +02:00 committed by GitHub
commit 75048747e9
2 changed files with 31 additions and 46 deletions
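The substance of the change: thread_fetch() now hands a task from the ready queue back to the caller before it ever reaches poll(), and the MAX_TICK_IO / tick_since_io counter that used to decide when poll() was allowed to run is deleted outright. A minimal standalone sketch of the resulting ordering, using a toy queue and illustrative names rather than FRR's actual API:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Toy ready queue of function pointers; FRR's is a struct thread list. */
typedef void (*task_fn)(void);

#define QMAX 16
static task_fn ready[QMAX];
static int nready;

static void push_task(task_fn fn)
{
    if (nready < QMAX)
        ready[nready++] = fn;
}

static task_fn trim_head(void)
{
    return nready ? ready[--nready] : NULL;
}

static void say_hello(void) { puts("hello from a ready task"); }

int main(void)
{
    int pipefd[2];
    struct pollfd pfd;
    task_fn t;

    if (pipe(pipefd) < 0)
        return 1;
    write(pipefd[1], "x", 1); /* pretend I/O is already pending */

    pfd.fd = pipefd[0];
    pfd.events = POLLIN;

    push_task(say_hello);

    for (int i = 0; i < 3; i++) {
        /* flush the ready queue before going into poll() */
        if ((t = trim_head())) {
            t(); /* "return it to the caller", in FRR terms */
            continue;
        }

        /* ready queue empty: only now do we touch poll() */
        if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN)) {
            char buf[1];
            read(pfd.fd, buf, sizeof(buf));
            puts("handled I/O, would schedule its task here");
        }
    }
    return 0;
}

Because a ready task short-circuits the loop iteration, poll() is reached exactly when the ready queue is empty, which is what makes a separate fairness counter unnecessary.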

lib/thread.c

@@ -47,9 +47,6 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
         write(m->io_pipe[1], &wakebyte, 1);                                   \
     } while (0);
 
-/* max # of thread_fetch() calls before we force a poll() */
-#define MAX_TICK_IO 1000
-
 /* control variable for initializer */
 pthread_once_t init_once = PTHREAD_ONCE_INIT;
 pthread_key_t thread_current;
@@ -552,7 +549,7 @@ void thread_master_free(struct thread_master *m)
     {
         listnode_delete(masters, m);
         if (masters->count == 0) {
-            list_free (masters);
+            list_free(masters);
             masters = NULL;
         }
     }
@@ -1319,6 +1316,20 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
         /* Process any pending cancellation requests */
         do_thread_cancel(m);
 
+        /*
+         * Attempt to flush ready queue before going into poll().
+         * This is performance-critical. Think twice before modifying.
+         */
+        if ((thread = thread_trim_head(&m->ready))) {
+            fetch = thread_run(m, thread, fetch);
+            if (fetch->ref)
+                *fetch->ref = NULL;
+            pthread_mutex_unlock(&m->mtx);
+            break;
+        }
+
+        /* otherwise, tick through scheduling sequence */
+
         /*
          * Post events to ready queue. This must come before the
          * following block since events should occur immediately
@@ -1362,16 +1373,9 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
         memcpy(m->handler.copy, m->handler.pfds,
                m->handler.copycount * sizeof(struct pollfd));
 
-        /*
-         * Attempt to flush ready queue before going into poll().
-         * This is performance-critical. Think twice before modifying.
-         */
-        if (m->ready.count == 0 || m->tick_since_io >= MAX_TICK_IO) {
-            pthread_mutex_unlock(&m->mtx);
-            {
-                m->tick_since_io = 0;
-                num = fd_poll(m, m->handler.copy,
-                              m->handler.pfdsize,
-                              m->handler.copycount, tw);
-            }
-            pthread_mutex_lock(&m->mtx);
+        pthread_mutex_unlock(&m->mtx);
+        {
+            num = fd_poll(m, m->handler.copy, m->handler.pfdsize,
+                          m->handler.copycount, tw);
+        }
+        pthread_mutex_lock(&m->mtx);
@@ -1385,23 +1389,12 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
             }
 
             /* else die */
-            zlog_warn("poll() error: %s",
-                      safe_strerror(errno));
+            zlog_warn("poll() error: %s", safe_strerror(errno));
             pthread_mutex_unlock(&m->mtx);
             fetch = NULL;
             break;
         }
 
-        /*
-         * Since we could have received more cancellation
-         * requests during poll(), process those
-         */
-        do_thread_cancel(m);
-    } else {
-        m->tick_since_io++;
-    }
-
         /* Post timers to ready queue. */
         monotime(&now);
         thread_process_timers(m->timer, &now);
@@ -1410,13 +1403,6 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
         if (num > 0)
             thread_process_io(m, num);
 
-        /* have a ready task ==> return it to caller */
-        if ((thread = thread_trim_head(&m->ready))) {
-            fetch = thread_run(m, thread, fetch);
-            if (fetch->ref)
-                *fetch->ref = NULL;
-        }
-
         pthread_mutex_unlock(&m->mtx);
     } while (!thread && m->spin);
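Taken together, the thread.c hunks replace a counter-based gate on poll() with an early return. Reduced to two predicates — a hedged distillation, where only MAX_TICK_IO and the gate condition are taken verbatim from the removed lines and the function names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TICK_IO 1000 /* the removed #define */

/* Old gate: enter poll() only when nothing is ready, or when we have
 * gone MAX_TICK_IO fetches without polling (the fairness escape hatch). */
static bool should_poll_old(int ready_count, int *tick_since_io)
{
    if (ready_count == 0 || *tick_since_io >= MAX_TICK_IO) {
        *tick_since_io = 0;
        return true;
    }
    (*tick_since_io)++;
    return false;
}

/* New gate: a ready task returns early from thread_fetch(), so the
 * poll() path is reached exactly when the ready queue is empty. */
static bool should_poll_new(int ready_count)
{
    return ready_count == 0;
}

int main(void)
{
    int ticks = 0;

    printf("old, 5 ready, ticks=0: %d\n", should_poll_old(5, &ticks));
    printf("new, 5 ready:          %d\n", should_poll_new(5));
    printf("new, 0 ready:          %d\n", should_poll_new(0));
    return 0;
}

The old escape hatch presumably existed because skipping poll() while tasks were ready could starve I/O for up to MAX_TICK_IO fetches; with one task returned per call, the queue drains naturally and the hatch (and its tick_since_io state, below) can go.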

lib/thread.h

@@ -70,7 +70,6 @@ struct cancel_req {
 struct thread_master {
     char *name;
-    int tick_since_io;
     struct thread **read;
     struct thread **write;
     struct pqueue *timer;