zebra: fix race condition in i/o pthread shutdown

I mistakenly used an external mechanism to cause a pthread to shut
itself down instead of using the one built into frr_pthread.[ch]. This
created a race condition whereby a pthread could schedule work onto a
dead pthread and cause it to reanimate.

Signed-off-by: Quentin Young <qlyoung@cumulusnetworks.com>
Author: Quentin Young
Date:   2018-05-29 08:17:08 +00:00
Parent: c0ea1ae701
Commit: c2ca5ee66d

2 changed files with 3 additions and 15 deletions
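
To make the failure mode concrete, here is a minimal, self-contained sketch of the shutdown model the fix switches to. This is not FRR code: it uses plain pthreads and stdatomic, and the names (worker, worker_loop, worker_enqueue, worker_stop) are hypothetical. The point it illustrates is that the thread's own fetch-and-run loop checks an atomic "running" flag, analogous to the flag built into frr_pthread, so clearing that flag stops the loop even if other pthreads keep scheduling work onto it. The removed client->dead flag, by contrast, was only consulted at the top of individual handlers, leaving the loop itself alive to pick up anything scheduled after the flag was set.

/*
 * Minimal sketch, not FRR code: a worker pthread whose fetch-and-run loop
 * owns an atomic "running" flag, analogous to the flag built into
 * frr_pthread.[ch].  All names here (worker, worker_loop, worker_stop, ...)
 * are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct task {
	void (*fn)(void *arg);
	void *arg;
	struct task *next;
};

struct worker {
	pthread_t thread;
	pthread_mutex_t mtx;
	pthread_cond_t cv;
	struct task *queue;	/* simple LIFO list; ordering is irrelevant here */
	_Atomic bool running;	/* owned and checked by the loop itself */
};

static void *worker_loop(void *data)
{
	struct worker *w = data;

	/*
	 * Because the loop itself re-checks the flag on every iteration,
	 * clearing it guarantees exit; a late enqueue (the analogue of
	 * zserv_event() onto a closing client's pthread) can no longer
	 * "reanimate" the thread.
	 */
	while (atomic_load_explicit(&w->running, memory_order_seq_cst)) {
		struct task *t = NULL;

		pthread_mutex_lock(&w->mtx);
		while (!w->queue
		       && atomic_load_explicit(&w->running,
					       memory_order_seq_cst))
			pthread_cond_wait(&w->cv, &w->mtx);
		if (w->queue) {
			t = w->queue;
			w->queue = t->next;
		}
		pthread_mutex_unlock(&w->mtx);

		if (t)
			t->fn(t->arg);
	}
	return NULL;
}

static void worker_start(struct worker *w)
{
	pthread_mutex_init(&w->mtx, NULL);
	pthread_cond_init(&w->cv, NULL);
	w->queue = NULL;
	atomic_store_explicit(&w->running, true, memory_order_seq_cst);
	pthread_create(&w->thread, NULL, worker_loop, w);
}

/* Other pthreads schedule work here, as zserv_event() does in the diff below. */
static void worker_enqueue(struct worker *w, struct task *t)
{
	pthread_mutex_lock(&w->mtx);
	t->next = w->queue;
	w->queue = t;
	pthread_cond_signal(&w->cv);
	pthread_mutex_unlock(&w->mtx);
}

/*
 * Shutdown goes through the loop's own flag (the moral equivalent of
 * clearing client->pthread->running in the diff below), rather than a
 * separate "dead" flag that only individual handlers consult.
 */
static void worker_stop(struct worker *w)
{
	atomic_store_explicit(&w->running, false, memory_order_seq_cst);
	pthread_mutex_lock(&w->mtx);
	pthread_cond_broadcast(&w->cv);
	pthread_mutex_unlock(&w->mtx);
	pthread_join(w->thread, NULL);
}

Under this model the race described above disappears: scheduling a task onto a stopping worker may still enqueue it, but it can no longer restart the loop once the running flag is false.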

zebra/zserv.c

@@ -167,7 +167,8 @@ static void zserv_log_message(const char *errmsg, struct stream *msg,
  */
 static void zserv_client_close(struct zserv *client)
 {
-	atomic_store_explicit(&client->dead, true, memory_order_seq_cst);
+	atomic_store_explicit(&client->pthread->running, false,
+			      memory_order_seq_cst);
 	THREAD_OFF(client->t_read);
 	THREAD_OFF(client->t_write);
 	zserv_event(client, ZSERV_HANDLE_CLOSE);
@@ -200,9 +201,6 @@ static int zserv_write(struct thread *thread)
 	uint32_t wcmd;
 	struct stream_fifo *cache;
 
-	if (atomic_load_explicit(&client->dead, memory_order_seq_cst))
-		return 0;
-
 	/* If we have any data pending, try to flush it first */
 	switch (buffer_flush_all(client->wb, client->sock)) {
 	case BUFFER_ERROR:
@@ -221,7 +219,7 @@ static int zserv_write(struct thread *thread)
 	pthread_mutex_lock(&client->obuf_mtx);
 	{
-		while (client->obuf_fifo->head)
+		while (stream_fifo_head(client->obuf_fifo))
 			stream_fifo_push(cache,
 					 stream_fifo_pop(client->obuf_fifo));
 	}
@@ -251,7 +249,6 @@ static int zserv_write(struct thread *thread)
 					  memory_order_relaxed);
 		zserv_client_event(client, ZSERV_CLIENT_WRITE);
 		return 0;
-		break;
 	case BUFFER_EMPTY:
 		break;
 	}
@@ -304,9 +301,6 @@ static int zserv_read(struct thread *thread)
 	uint32_t p2p;
 	struct zmsghdr hdr;
 
-	if (atomic_load_explicit(&client->dead, memory_order_seq_cst))
-		return 0;
-
 	p2p_orig = atomic_load_explicit(&zebrad.packets_to_process,
 					memory_order_relaxed);
 	cache = stream_fifo_new();
@@ -449,9 +443,6 @@ zread_fail:
 static void zserv_client_event(struct zserv *client,
 			       enum zserv_client_event event)
 {
-	if (atomic_load_explicit(&client->dead, memory_order_seq_cst))
-		return;
-
 	switch (event) {
 	case ZSERV_CLIENT_READ:
 		thread_add_read(client->pthread->master, zserv_read, client,

zebra/zserv.h

@@ -53,9 +53,6 @@ struct zserv {
 	/* Client pthread */
 	struct frr_pthread *pthread;
 
-	/* Whether the thread is waiting to be killed */
-	_Atomic bool dead;
-
 	/* Client file descriptor. */
 	int sock;