bpf: Add bpf_refcount_acquire kfunc
Currently, BPF programs can interact with the lifetime of refcounted
local kptrs in the following ways:
  bpf_obj_new    - Initialize refcount to 1 as part of new object creation
  bpf_obj_drop   - Decrement refcount and free object if it's 0
  collection add - Pass ownership to the collection. No change to refcount
                   but collection is responsible for bpf_obj_dropping it
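For example, a minimal sketch of all three interactions (not from this patch; the struct layout, the global list ghead, its lock glock, and the should_drop condition are illustrative assumptions):

  struct node_data {
          long data;
          struct bpf_list_node node;
          struct bpf_refcount ref;
  };

  struct node_data *n = bpf_obj_new(typeof(*n)); /* refcount initialized to 1, n is owning */
  if (!n)
          return 0;

  if (should_drop) {
          bpf_obj_drop(n);                       /* refcount 1 -> 0, object freed */
  } else {
          bpf_spin_lock(&glock);
          bpf_list_push_back(&ghead, &n->node);  /* ownership passes to the list */
          bpf_spin_unlock(&glock);
  }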
In order to add a refcounted local kptr to multiple collections, we need
to be able to increment the refcount and acquire a new owning reference.
This patch adds a kfunc, bpf_refcount_acquire, implementing such an
operation.
bpf_refcount_acquire takes a refcounted local kptr and returns a new
owning reference to the same underlying memory as the input. The input
can be either owning or non-owning. To reinforce why this is safe,
consider the following code snippets:
struct node *n = bpf_obj_new(typeof(*n)); // A
struct node *m = bpf_refcount_acquire(n); // B
In the above snippet, n will be alive with refcount=1 after (A), and
since nothing changes that state before (B), it's obviously safe. If
n is instead added to some rbtree, we can still safely refcount_acquire
it:
struct node *n = bpf_obj_new(typeof(*n));
struct node *m;
bpf_spin_lock(&glock);
bpf_rbtree_add(&groot, &n->node, less); // A
m = bpf_refcount_acquire(n); // B
bpf_spin_unlock(&glock);
In the above snippet, after (A) n is a non-owning reference, and after
(B) m is an owning reference pointing to the same memory as n. Although
n has no ownership of that memory's lifetime, it's guaranteed to be
alive until the end of the critical section, and the verifier would
already have clobbered n if we were past the end of the critical
section, so it's safe to bump the refcount.
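As a rough sketch of the multi-collection use case this enables, a single node can end up owned by both an rbtree and a list. None of this is taken from the patch itself: the struct layout, locks, roots, program section, and helper macros (private(), __contains(), container_of as provided by the selftest headers) are assumptions in the style of the selftests, and later patches in this series rework the collection-add kfuncs to possibly fail for refcounted nodes, so treat this as the intended flow rather than exact final API usage:

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_experimental.h"  /* bpf_obj_new, bpf_refcount_acquire, ... */

  char _license[] SEC("license") = "GPL";

  struct node_data {
          long key;
          struct bpf_refcount ref;     /* enables bpf_refcount_acquire */
          struct bpf_rb_node rnode;    /* membership in the rbtree */
          struct bpf_list_node lnode;  /* membership in the list */
  };

  #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

  private(A) struct bpf_spin_lock tree_lock;
  private(A) struct bpf_rb_root groot __contains(node_data, rnode);
  private(B) struct bpf_spin_lock list_lock;
  private(B) struct bpf_list_head ghead __contains(node_data, lnode);

  static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
  {
          /* container_of assumed available from the selftest helpers */
          struct node_data *na = container_of(a, struct node_data, rnode);
          struct node_data *nb = container_of(b, struct node_data, rnode);

          return na->key < nb->key;
  }

  SEC("tc")
  long add_to_tree_and_list(void *ctx)
  {
          struct node_data *n, *m;

          n = bpf_obj_new(typeof(*n));
          if (!n)
                  return 1;

          /* refcount 1 -> 2; m is a second owning reference to the same object */
          m = bpf_refcount_acquire(n);

          bpf_spin_lock(&tree_lock);
          bpf_rbtree_add(&groot, &n->rnode, less);  /* tree now owns one reference */
          bpf_spin_unlock(&tree_lock);

          bpf_spin_lock(&list_lock);
          bpf_list_push_back(&ghead, &m->lnode);    /* list owns the other */
          bpf_spin_unlock(&list_lock);

          return 0;
  }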
Implementation details:
* From the verifier's perspective, bpf_refcount_acquire handling is
  similar to bpf_obj_new and bpf_obj_drop. Like the former, it returns a
  new owning reference matching the input type; like the latter, that
  type can be inferred from the concrete kptr input. The verifier changes
  in {check,fixup}_kfunc_call and check_kfunc_args are largely copied
  from the corresponding changes for those two kfuncs.
* An exception to the above is the new KF_ARG_PTR_TO_REFCOUNTED_KPTR
  arg, indicated by the new "__refcounted_kptr" kfunc arg suffix. This is
  necessary in order to handle both owning and non-owning input without
  adding special-casing to the "__alloc" arg handling. It's also a
  convenient place to confirm that the input type has a bpf_refcount
  field.
* The implemented kfunc is actually bpf_refcount_acquire_impl, with a
  'hidden' second arg that the verifier sets to the type's struct_meta
  in fixup_kfunc_call.
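Since bpf_refcount_acquire_impl is registered with KF_ACQUIRE, the returned owning reference is tracked like any other acquired reference: it must be passed to a collection or bpf_obj_drop'd before the program exits, otherwise verification fails. A small sketch, assuming a refcounted struct node as in the earlier snippets:

  struct node *n, *m;

  n = bpf_obj_new(typeof(*n));
  if (!n)
          return 0;

  m = bpf_refcount_acquire(n); /* refcount 1 -> 2, m must also be released */

  bpf_obj_drop(n);             /* refcount 2 -> 1 */
  bpf_obj_drop(m);             /* refcount 1 -> 0, object freed */
  return 0;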
Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230415201811.343116-5-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 7c50b1cb76
parent 1512217c47
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1917,6 +1917,20 @@ __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL);
 }
 
+__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
+{
+	struct btf_struct_meta *meta = meta__ign;
+	struct bpf_refcount *ref;
+
+	/* Could just cast directly to refcount_t *, but need some code using
+	 * bpf_refcount type so that it is emitted in vmlinux BTF
+	 */
+	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
+
+	refcount_inc((refcount_t *)ref);
+	return (void *)p__refcounted_kptr;
+}
+
 static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, bool tail)
 {
 	struct list_head *n = (void *)node, *h = (void *)head;
@@ -2276,6 +2290,7 @@ BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
 #endif
 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE)
 BTF_ID_FLAGS(func, bpf_list_push_front)
 BTF_ID_FLAGS(func, bpf_list_push_back)
 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -273,6 +273,11 @@ struct bpf_call_arg_meta {
 	struct btf_field *kptr_field;
 };
 
+struct btf_and_id {
+	struct btf *btf;
+	u32 btf_id;
+};
+
 struct bpf_kfunc_call_arg_meta {
 	/* In parameters */
 	struct btf *btf;
@@ -291,10 +296,10 @@ struct bpf_kfunc_call_arg_meta {
 		u64 value;
 		bool found;
 	} arg_constant;
-	struct {
-		struct btf *btf;
-		u32 btf_id;
-	} arg_obj_drop;
+	union {
+		struct btf_and_id arg_obj_drop;
+		struct btf_and_id arg_refcount_acquire;
+	};
 	struct {
 		struct btf_field *field;
 	} arg_list_head;
@@ -9403,6 +9408,11 @@ static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *a
 	return __kfunc_param_match_suffix(btf, arg, "__uninit");
 }
 
+static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
+{
+	return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr");
+}
+
 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
 					  const struct btf_param *arg,
 					  const char *name)
@@ -9542,15 +9552,16 @@ static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
 
 enum kfunc_ptr_arg_type {
 	KF_ARG_PTR_TO_CTX,
 	KF_ARG_PTR_TO_ALLOC_BTF_ID,    /* Allocated object */
-	KF_ARG_PTR_TO_KPTR,	       /* PTR_TO_KPTR but type specific */
+	KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */
+	KF_ARG_PTR_TO_KPTR,	       /* PTR_TO_KPTR but type specific */
 	KF_ARG_PTR_TO_DYNPTR,
 	KF_ARG_PTR_TO_ITER,
 	KF_ARG_PTR_TO_LIST_HEAD,
 	KF_ARG_PTR_TO_LIST_NODE,
 	KF_ARG_PTR_TO_BTF_ID,	       /* Also covers reg2btf_ids conversions */
 	KF_ARG_PTR_TO_MEM,
 	KF_ARG_PTR_TO_MEM_SIZE,	       /* Size derived from next argument, skip it */
 	KF_ARG_PTR_TO_CALLBACK,
 	KF_ARG_PTR_TO_RB_ROOT,
 	KF_ARG_PTR_TO_RB_NODE,
@@ -9559,6 +9570,7 @@ enum kfunc_ptr_arg_type {
 enum special_kfunc_type {
 	KF_bpf_obj_new_impl,
 	KF_bpf_obj_drop_impl,
+	KF_bpf_refcount_acquire_impl,
 	KF_bpf_list_push_front,
 	KF_bpf_list_push_back,
 	KF_bpf_list_pop_front,
@@ -9579,6 +9591,7 @@ enum special_kfunc_type {
 BTF_SET_START(special_kfunc_set)
 BTF_ID(func, bpf_obj_new_impl)
 BTF_ID(func, bpf_obj_drop_impl)
+BTF_ID(func, bpf_refcount_acquire_impl)
 BTF_ID(func, bpf_list_push_front)
 BTF_ID(func, bpf_list_push_back)
 BTF_ID(func, bpf_list_pop_front)
@@ -9597,6 +9610,7 @@ BTF_SET_END(special_kfunc_set)
 BTF_ID_LIST(special_kfunc_list)
 BTF_ID(func, bpf_obj_new_impl)
 BTF_ID(func, bpf_obj_drop_impl)
+BTF_ID(func, bpf_refcount_acquire_impl)
 BTF_ID(func, bpf_list_push_front)
 BTF_ID(func, bpf_list_push_back)
 BTF_ID(func, bpf_list_pop_front)
@@ -9649,6 +9663,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
 	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
 		return KF_ARG_PTR_TO_ALLOC_BTF_ID;
 
+	if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno]))
+		return KF_ARG_PTR_TO_REFCOUNTED_KPTR;
+
 	if (is_kfunc_arg_kptr_get(meta, argno)) {
 		if (!btf_type_is_ptr(ref_t)) {
 			verbose(env, "arg#0 BTF type must be a double pointer for kptr_get kfunc\n");
@@ -9952,7 +9969,8 @@ static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
 
 static bool is_bpf_graph_api_kfunc(u32 btf_id)
 {
-	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id);
+	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
+	       btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
 }
 
 static bool is_callback_calling_kfunc(u32 btf_id)
@@ -10171,6 +10189,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 	const char *func_name = meta->func_name, *ref_tname;
 	const struct btf *btf = meta->btf;
 	const struct btf_param *args;
+	struct btf_record *rec;
 	u32 i, nargs;
 	int ret;
 
@@ -10306,6 +10325,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 		case KF_ARG_PTR_TO_MEM:
 		case KF_ARG_PTR_TO_MEM_SIZE:
 		case KF_ARG_PTR_TO_CALLBACK:
+		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
 			/* Trusted by default */
 			break;
 		default:
@@ -10523,6 +10543,26 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 		case KF_ARG_PTR_TO_CALLBACK:
 			meta->subprogno = reg->subprogno;
 			break;
+		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
+			if (!type_is_ptr_alloc_obj(reg->type) && !type_is_non_owning_ref(reg->type)) {
+				verbose(env, "arg#%d is neither owning or non-owning ref\n", i);
+				return -EINVAL;
+			}
+
+			rec = reg_btf_record(reg);
+			if (!rec) {
+				verbose(env, "verifier internal error: Couldn't find btf_record\n");
+				return -EFAULT;
+			}
+
+			if (rec->refcount_off < 0) {
+				verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
+				return -EINVAL;
+			}
+
+			meta->arg_refcount_acquire.btf = reg->btf;
+			meta->arg_refcount_acquire.btf_id = reg->btf_id;
+			break;
 		}
 	}
 
@@ -10699,7 +10739,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 
 	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
 		/* Only exception is bpf_obj_new_impl */
-		if (meta.btf != btf_vmlinux || meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl]) {
+		if (meta.btf != btf_vmlinux ||
+		    (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
+		     meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
 			verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
 			return -EINVAL;
 		}
@@ -10747,6 +10789,15 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			insn_aux->obj_new_size = ret_t->size;
 			insn_aux->kptr_struct_meta =
 				btf_find_struct_meta(ret_btf, ret_btf_id);
+		} else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
+			mark_reg_known_zero(env, regs, BPF_REG_0);
+			regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
+			regs[BPF_REG_0].btf = meta.arg_refcount_acquire.btf;
+			regs[BPF_REG_0].btf_id = meta.arg_refcount_acquire.btf_id;
+
+			insn_aux->kptr_struct_meta =
+				btf_find_struct_meta(meta.arg_refcount_acquire.btf,
+						     meta.arg_refcount_acquire.btf_id);
 		} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
 			   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
 			struct btf_field *field = meta.arg_list_head.field;
@@ -17393,7 +17444,8 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		insn_buf[2] = addr[1];
 		insn_buf[3] = *insn;
 		*cnt = 4;
-	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
+	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
+		   desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
 
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -37,6 +37,19 @@ extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
 /* Convenience macro to wrap over bpf_obj_drop_impl */
 #define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL)
 
+/* Description
+ *	Increment the refcount on a refcounted local kptr, turning the
+ *	non-owning reference input into an owning reference in the process.
+ *
+ *	The 'meta' parameter is a hidden argument that is ignored.
+ * Returns
+ *	An owning reference to the object pointed to by 'kptr'
+ */
+extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
+
+/* Convenience macro to wrap over bpf_refcount_acquire_impl */
+#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)
+
 /* Description
  *	Add a new entry to the beginning of the BPF linked list.
  * Returns