mirror of
https://git.proxmox.com/git/mirror_frr
synced 2025-08-14 12:41:21 +00:00
*: Convert thread_should_yield and thread_set_yield_time
Convert thread_should_yield and thread_set_yield_time to event_should_yield and event_set_yield_time. Signed-off-by: Donald Sharp <sharpd@nvidia.com>
This commit is contained in:
parent
4f830a0799
commit
70c35c11f2
@@ -1910,7 +1910,7 @@ unsigned long thread_consumed_time(RUSAGE_T *now, RUSAGE_T *start,
|
||||
for CPU time. On balance, wall clock time seems to make sense.
|
||||
Plus it has the added benefit that gettimeofday should be faster
|
||||
than calling getrusage. */
|
||||
int thread_should_yield(struct event *thread)
|
||||
int event_should_yield(struct event *thread)
|
||||
{
|
||||
int result;
|
||||
frr_with_mutex (&thread->mtx) {
|
||||
@@ -1920,7 +1920,7 @@ int thread_should_yield(struct event *thread)
|
||||
return result;
|
||||
}
|
||||
|
||||
void thread_set_yield_time(struct event *thread, unsigned long yield_time)
|
||||
void event_set_yield_time(struct event *thread, unsigned long yield_time)
|
||||
{
|
||||
frr_with_mutex (&thread->mtx) {
|
||||
thread->yield = yield_time;
|
||||
|
@@ -254,9 +254,9 @@ extern void event_call(struct event *event);
|
||||
extern unsigned long event_timer_remain_second(struct event *event);
|
||||
extern struct timeval event_timer_remain(struct event *event);
|
||||
extern unsigned long event_timer_remain_msec(struct event *event);
|
||||
extern int thread_should_yield(struct event *event);
|
||||
extern int event_should_yield(struct event *event);
|
||||
/* set yield time for thread */
|
||||
extern void thread_set_yield_time(struct event *event, unsigned long);
|
||||
extern void event_set_yield_time(struct event *event, unsigned long);
|
||||
|
||||
/* Internal libfrr exports */
|
||||
extern void thread_getrusage(RUSAGE_T *);
|
||||
|
@@ -128,7 +128,7 @@ static int work_queue_schedule(struct work_queue *wq, unsigned int delay)
|
||||
/* set thread yield time, if needed */
|
||||
if (thread_is_scheduled(wq->thread) &&
|
||||
wq->spec.yield != EVENT_YIELD_TIME_SLOT)
|
||||
thread_set_yield_time(wq->thread, wq->spec.yield);
|
||||
event_set_yield_time(wq->thread, wq->spec.yield);
|
||||
return 1;
|
||||
} else
|
||||
return 0;
|
||||
@@ -311,8 +311,8 @@ void work_queue_run(struct event *thread)
|
||||
cycles++;
|
||||
|
||||
/* test if we should yield */
|
||||
if (!(cycles % wq->cycles.granularity)
|
||||
&& thread_should_yield(thread)) {
|
||||
if (!(cycles % wq->cycles.granularity) &&
|
||||
event_should_yield(thread)) {
|
||||
yielded = 1;
|
||||
goto stats;
|
||||
}
|
||||
|
@@ -3074,7 +3074,7 @@ void ospf_maxage_lsa_remover(struct event *thread)
|
||||
}
|
||||
|
||||
/* TODO: maybe convert this function to a work-queue */
|
||||
if (thread_should_yield(thread)) {
|
||||
if (event_should_yield(thread)) {
|
||||
OSPF_TIMER_ON(ospf->t_maxage,
|
||||
ospf_maxage_lsa_remover, 0);
|
||||
route_unlock_node(
|
||||
|
@@ -67,7 +67,7 @@ static void clear_something(struct event *thread)
|
||||
while (ws->i < ITERS_MAX) {
|
||||
slow_func(ws->vty, ws->str, ws->i);
|
||||
ws->i++;
|
||||
if (thread_should_yield(thread)) {
|
||||
if (event_should_yield(thread)) {
|
||||
event_add_timer_msec(master, clear_something, ws, 0,
|
||||
NULL);
|
||||
return;
|
||||
|
@@ -285,7 +285,7 @@ union g_addr ipv4ll_gateway;
|
||||
*/
|
||||
static inline int zfpm_thread_should_yield(struct event *t)
|
||||
{
|
||||
return thread_should_yield(t);
|
||||
return event_should_yield(t);
|
||||
}
|
||||
|
||||
/*
|
||||
|
Loading…
Reference in New Issue
Block a user