mirror of https://git.proxmox.com/git/mirror_frr
Merge pull request #1087 from qlyoung/fix-weirdness

lib: fix thread scheduling weirdness

This commit is contained in: commit 75048747e9
lib/thread.c | 76
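
The diff below reworks the fetch loop in thread_fetch(): it drops the MAX_TICK_IO / tick_since_io heuristic, flushes the ready queue before entering poll(), and otherwise ticks through the scheduling sequence on every iteration. As a rough illustration of that ordering only, here is a minimal, self-contained C sketch; the helper names (ready_pop, post_work, wait_for_io, fetch_next) are hypothetical stand-ins, not the FRR API:

#include <stdio.h>

/* Tiny stand-in "ready queue": a fixed array of pending task ids. */
static int ready[8];
static int nready;

/* roughly like thread_trim_head(&m->ready): take one ready task, 0 if none */
static int ready_pop(void)
{
	return nready > 0 ? ready[--nready] : 0;
}

/* stands in for posting events/timers to the ready queue */
static void post_work(void)
{
	ready[nready++] = 42;
}

/* stands in for fd_poll(): pretend poll() returned 0 fds and no error */
static int wait_for_io(void)
{
	return 0;
}

/*
 * Scheduling sequence sketched from the new thread_fetch():
 *   1. flush the ready queue before going into poll();
 *   2. otherwise post work, poll for I/O, and loop.
 */
static int fetch_next(void)
{
	int task, num;

	for (;;) {
		/* a ready task ==> return it to the caller */
		if ((task = ready_pop()))
			return task;

		/* otherwise, tick through the scheduling sequence */
		post_work();

		num = wait_for_io();
		if (num < 0)
			return 0; /* poll() error ==> give up */
		/* (I/O handlers would be posted to the ready queue here) */
	}
}

int main(void)
{
	printf("fetched task %d\n", fetch_next());
	return 0;
}

The point mirrored here is only the ordering: ready work is drained before poll() is entered, instead of forcing a poll() only after MAX_TICK_IO consecutive fetches.
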
@@ -47,9 +47,6 @@ DEFINE_MTYPE_STATIC(LIB, THREAD_STATS, "Thread stats")
 		write(m->io_pipe[1], &wakebyte, 1); \
 	} while (0);
 
-/* max # of thread_fetch() calls before we force a poll() */
-#define MAX_TICK_IO 1000
-
 /* control variable for initializer */
 pthread_once_t init_once = PTHREAD_ONCE_INIT;
 pthread_key_t thread_current;
@@ -552,7 +549,7 @@ void thread_master_free(struct thread_master *m)
 	{
 		listnode_delete(masters, m);
 		if (masters->count == 0) {
-			list_free (masters);
+			list_free(masters);
 			masters = NULL;
 		}
 	}
@@ -1319,6 +1316,20 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
 		/* Process any pending cancellation requests */
 		do_thread_cancel(m);
 
+		/*
+		 * Attempt to flush ready queue before going into poll().
+		 * This is performance-critical. Think twice before modifying.
+		 */
+		if ((thread = thread_trim_head(&m->ready))) {
+			fetch = thread_run(m, thread, fetch);
+			if (fetch->ref)
+				*fetch->ref = NULL;
+			pthread_mutex_unlock(&m->mtx);
+			break;
+		}
+
+		/* otherwise, tick through scheduling sequence */
+
 		/*
 		 * Post events to ready queue. This must come before the
 		 * following block since events should occur immediately
@@ -1362,44 +1373,26 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
 		memcpy(m->handler.copy, m->handler.pfds,
 		       m->handler.copycount * sizeof(struct pollfd));
 
-		/*
-		 * Attempt to flush ready queue before going into poll().
-		 * This is performance-critical. Think twice before modifying.
-		 */
-		if (m->ready.count == 0 || m->tick_since_io >= MAX_TICK_IO) {
-			pthread_mutex_unlock(&m->mtx);
-			{
-				m->tick_since_io = 0;
-				num = fd_poll(m, m->handler.copy,
-					      m->handler.pfdsize,
-					      m->handler.copycount, tw);
-			}
-			pthread_mutex_lock(&m->mtx);
+		pthread_mutex_unlock(&m->mtx);
+		{
+			num = fd_poll(m, m->handler.copy, m->handler.pfdsize,
+				      m->handler.copycount, tw);
+		}
+		pthread_mutex_lock(&m->mtx);
 
-			/* Handle any errors received in poll() */
-			if (num < 0) {
-				if (errno == EINTR) {
-					pthread_mutex_unlock(&m->mtx);
-					/* loop around to signal handler */
-					continue;
-				}
-
-				/* else die */
-				zlog_warn("poll() error: %s",
-					  safe_strerror(errno));
-				pthread_mutex_unlock(&m->mtx);
-				fetch = NULL;
-				break;
-			}
-
-			/*
-			 * Since we could have received more cancellation
-			 * requests during poll(), process those
-			 */
-			do_thread_cancel(m);
-
-		} else {
-			m->tick_since_io++;
-		}
+		/* Handle any errors received in poll() */
+		if (num < 0) {
+			if (errno == EINTR) {
+				pthread_mutex_unlock(&m->mtx);
+				/* loop around to signal handler */
+				continue;
+			}
+
+			/* else die */
+			zlog_warn("poll() error: %s", safe_strerror(errno));
+			pthread_mutex_unlock(&m->mtx);
+			fetch = NULL;
+			break;
+		}
 
 		/* Post timers to ready queue. */
@@ -1410,13 +1403,6 @@ struct thread *thread_fetch(struct thread_master *m, struct thread *fetch)
 		if (num > 0)
 			thread_process_io(m, num);
 
-		/* have a ready task ==> return it to caller */
-		if ((thread = thread_trim_head(&m->ready))) {
-			fetch = thread_run(m, thread, fetch);
-			if (fetch->ref)
-				*fetch->ref = NULL;
-		}
-
 		pthread_mutex_unlock(&m->mtx);
 
 	} while (!thread && m->spin);
@@ -70,7 +70,6 @@ struct cancel_req {
 struct thread_master {
 	char *name;
 
-	int tick_since_io;
 	struct thread **read;
 	struct thread **write;
 	struct pqueue *timer;