Merge tag 'slab-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
"This time it's mostly refactoring and improving APIs for slab users in
the kernel, along with some debugging improvements.
- kmem_cache_create() refactoring (Christian Brauner)
Over the years, kmem_cache_create() has been growing new
parameters, most of which are needed only by a small number of
caches - most recently the rcu_freeptr_offset parameter. To avoid
adding new parameters to kmem_cache_create() and adjusting all its
callers, or creating new wrappers such as kmem_cache_create_rcu(),
extra parameters can now be passed via the new struct
kmem_cache_args. Fields that are not explicitly initialized default
to values interpreted as unused.
kmem_cache_create() is for now a wrapper that works both with the
new form: kmem_cache_create(name, object_size, args, flags) and the
legacy form: kmem_cache_create(name, object_size, align, flags,
ctor) - see the first sketch after this list.
- kmem_cache_destroy() waits for kfree_rcu()'s in flight (Vlastimil
Babka, Uladzislau Rezki)
Since SLOB removal, kfree() is allowed for freeing objects
allocated by kmem_cache_create(). By extension, kfree_rcu() is
allowed as well, which makes it possible to convert simple
call_rcu() callbacks that only do kmem_cache_free(), as there was
never a kmem_cache_free_rcu() variant. However, for caches that can
be destroyed e.g. on module removal, the cache owners knew to issue
rcu_barrier() first to wait for the pending call_rcu()'s, and this
is not sufficient for pending kfree_rcu()'s due to its internal
batching optimizations. Ulad has provided a new
kvfree_rcu_barrier() and, to make the usage less error-prone,
kmem_cache_destroy() now calls it. Additionally, destroying
SLAB_TYPESAFE_BY_RCU caches now again issues rcu_barrier()
synchronously instead of using an async work, because the past
motivation for async work no longer applies. Users of custom
call_rcu() callbacks should however keep calling rcu_barrier()
before cache destruction (see the second sketch below).
- Debugging use-after-free in SLAB_TYPESAFE_BY_RCU caches (Jann Horn)
Currently, KASAN cannot catch UAFs in such caches as it is legal to
access them within a grace period, and we only track the grace
period when trying to free the underlying slab page. The new
CONFIG_SLUB_RCU_DEBUG option changes the freeing of individual
objects to be RCU-delayed, after which KASAN can poison them.
- Delayed memcg charging (Shakeel Butt)
In some cases, the memcg is unknown at allocation time, such as
when receiving network packets in softirq context. With
kmem_cache_charge(), these objects may now be charged later, once
the user and its memcg are known (see the third sketch below).
- Misc fixes and improvements (Pedro Falcato, Axel Rasmussen,
Christoph Lameter, Yan Zhen, Peng Fan, Xavier)"
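
To make the new API shape concrete, here is a minimal sketch of both
calling conventions; struct foo, struct bar and their fields are
hypothetical, and the field names follow struct kmem_cache_args as
introduced by this series:

  /* New form: rarely-used parameters live in struct kmem_cache_args;
   * fields omitted from the initializer are treated as unused. */
  struct kmem_cache_args args = {
          .align              = 64,
          .use_freeptr_offset = true,
          .freeptr_offset     = offsetof(struct foo, free_ptr),
  };
  struct kmem_cache *foo_cache =
          kmem_cache_create("foo", sizeof(struct foo), &args,
                            SLAB_TYPESAFE_BY_RCU);

  /* Legacy form, still accepted via the compatibility wrapper. */
  struct kmem_cache *bar_cache =
          kmem_cache_create("bar", sizeof(struct bar), 64, 0, NULL);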
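
The cleanup rules after this change can be sketched as follows
(my_cache, my_free_cb and the objects are hypothetical):

  /* Objects freed with kfree_rcu() need no manual barrier anymore:
   * kmem_cache_destroy() now calls kvfree_rcu_barrier() itself. */
  kfree_rcu(obj, rcu_head);
  kmem_cache_destroy(my_cache);

  /* Objects freed through a custom call_rcu() callback still do. */
  call_rcu(&obj2->rcu_head, my_free_cb);
  rcu_barrier();          /* wait for my_free_cb() invocations */
  kmem_cache_destroy(my_cache);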
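
A sketch of the intended kmem_cache_charge() usage, assuming the
bool-returning form added by this series (my_cache, obj and the
error path are hypothetical):

  /* Softirq context: the final owner's memcg is not known yet, so
   * allocate without accounting for now. */
  obj = kmem_cache_alloc(my_cache, GFP_ATOMIC);

  /* Later, in process context, once the owning user is known,
   * charge the already-allocated object to its memcg. */
  if (!kmem_cache_charge(obj, GFP_KERNEL)) {
          kmem_cache_free(my_cache, obj);
          return -ENOMEM;
  }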
* tag 'slab-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab: (34 commits)
mm, slab: restore kerneldoc for kmem_cache_create()
io_uring: port to struct kmem_cache_args
slab: make __kmem_cache_create() static inline
slab: make kmem_cache_create_usercopy() static inline
slab: remove kmem_cache_create_rcu()
file: port to struct kmem_cache_args
slab: create kmem_cache_create() compatibility layer
slab: port KMEM_CACHE_USERCOPY() to struct kmem_cache_args
slab: port KMEM_CACHE() to struct kmem_cache_args
slab: remove rcu_freeptr_offset from struct kmem_cache
slab: pass struct kmem_cache_args to do_kmem_cache_create()
slab: pull kmem_cache_open() into do_kmem_cache_create()
slab: pass struct kmem_cache_args to create_cache()
slab: port kmem_cache_create_usercopy() to struct kmem_cache_args
slab: port kmem_cache_create_rcu() to struct kmem_cache_args
slab: port kmem_cache_create() to struct kmem_cache_args
slab: add struct kmem_cache_args
slab: s/__kmem_cache_create/do_kmem_cache_create/g
memcg: add charging of already allocated slab objects
mm/slab: Optimize the code logic in find_mergeable()
...
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

struct rcu_gp_oldstate {
	unsigned long rgos_norm;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2

/*
 * Are the two oldstate values the same? See the Tree RCU version for
 * docbook header.
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm;
}

unsigned long get_state_synchronize_rcu(void);

static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = get_state_synchronize_rcu();
}

unsigned long start_poll_synchronize_rcu(void);

static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu();
}

bool poll_state_synchronize_rcu(unsigned long oldstate);

static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	return poll_state_synchronize_rcu(rgosp->rgos_norm);
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu(rgosp->rgos_norm);
}

static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

/*
 * Add one more declaration of kvfree() here. It is
 * not so straightforward to just include <linux/mm.h>
 * where it is defined due to getting many compile
 * errors caused by that include.
 */
extern void kvfree(const void *addr);

static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head) {
		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
		return;
	}

	// kvfree_rcu(one_arg) call.
	might_sleep();
	synchronize_rcu();
	kvfree(ptr);
}

static inline void kvfree_rcu_barrier(void)
{
	rcu_barrier();
}

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	__kvfree_call_rcu(head, ptr);
}
#endif

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(void)
{
	return 0;
}

static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu	NULL
#define rcutree_online_cpu	NULL
#define rcutree_offline_cpu	NULL
#define rcutree_dead_cpu	NULL
#define rcutree_dying_cpu	NULL
static inline void rcutree_report_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_RCUTINY_H */