x86/cfi: Clean up linkage
With the introduction of kCFI, the addition of ENDBR to SYM_FUNC_START* no longer suffices to make the function indirectly callable. This now requires the use of SYM_TYPED_FUNC_START. As such, remove the implicit ENDBR from SYM_FUNC_START* and add some explicit annotations to fix things up again.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Link: https://lore.kernel.org/r/20250207122546.409116003@infradead.org
commit 582077c940 (parent 2981557cb0)
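In practice the convention after this patch looks like the sketch below: a hypothetical foo.S with illustrative symbol names that are not part of the patch. A function that is only ever called directly keeps a plain SYM_FUNC_START but now needs an explicit ANNOTATE_NOENDBR (or an explicit ENDBR where an indirect entry really is wanted, as in the Xen and paravirt stubs further down), while an indirectly callable function is declared with SYM_TYPED_FUNC_START so kCFI emits the matching type hash:

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <linux/objtool.h>

/* Only ever called directly: SYM_FUNC_START no longer emits ENDBR,
 * so tell objtool the missing ENDBR is intentional. */
SYM_FUNC_START(foo_direct)
	ANNOTATE_NOENDBR
	xorl %eax, %eax
	RET
SYM_FUNC_END(foo_direct)

/* Indirectly callable: SYM_TYPED_FUNC_START emits the kCFI type hash
 * (and ENDBR under IBT), matching the C prototype used at the call site. */
SYM_TYPED_FUNC_START(foo_indirect)
	xorl %eax, %eax
	RET
SYM_FUNC_END(foo_indirect)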
@@ -17,6 +17,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/frame.h>
 
 #define STATE1 %xmm0

@@ -1071,6 +1072,7 @@ SYM_FUNC_END(_aesni_inc)
  * size_t len, u8 *iv)
  */
 SYM_FUNC_START(aesni_ctr_enc)
+	ANNOTATE_NOENDBR
 	FRAME_BEGIN
 	cmp $16, LEN
 	jb .Lctr_enc_just_ret

@@ -431,6 +431,7 @@ For 32-bit we have the following conventions - kernel is built with
 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 .macro THUNK name, func
 SYM_FUNC_START(\name)
+	ANNOTATE_NOENDBR
 	pushq %rbp
 	movq %rsp, %rbp
 

@@ -5,6 +5,7 @@
 
 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/msr-index.h>
 #include <asm/unwind_hints.h>
 #include <asm/segment.h>

@@ -17,6 +18,7 @@
 .pushsection .noinstr.text, "ax"
 
 SYM_FUNC_START(entry_ibpb)
+	ANNOTATE_NOENDBR
 	movl $MSR_IA32_PRED_CMD, %ecx
 	movl $PRED_CMD_IBPB, %eax
 	xorl %edx, %edx

@@ -175,6 +175,7 @@ SYM_CODE_END(entry_SYSCALL_64)
  */
 .pushsection .text, "ax"
 SYM_FUNC_START(__switch_to_asm)
+	ANNOTATE_NOENDBR
 	/*
 	 * Save callee-saved registers
 	 * This must match the order in inactive_task_frame

@@ -742,6 +743,7 @@ _ASM_NOKPROBE(common_interrupt_return)
  * Is in entry.text as it shouldn't be instrumented.
  */
 SYM_FUNC_START(asm_load_gs_index)
+	ANNOTATE_NOENDBR
 	FRAME_BEGIN
 	swapgs
 .Lgs_change:

@@ -1526,6 +1528,7 @@ SYM_CODE_END(rewind_stack_and_make_dead)
  * refactored in the future if needed.
  */
 SYM_FUNC_START(clear_bhb_loop)
+	ANNOTATE_NOENDBR
 	push %rbp
 	mov %rsp, %rbp
 	movl $5, %ecx

@@ -58,6 +58,7 @@ SYM_CODE_END(asm_fred_entrypoint_kernel)
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
 SYM_FUNC_START(asm_fred_entry_from_kvm)
+	ANNOTATE_NOENDBR
 	push %rbp
 	mov %rsp, %rbp
 

@@ -133,6 +133,7 @@ KBUILD_CFLAGS_32 += -fno-stack-protector
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += -DBUILD_VDSO
 
 ifdef CONFIG_MITIGATION_RETPOLINE
 ifneq ($(RETPOLINE_VDSO_CFLAGS),)

@@ -119,33 +119,27 @@
 
 /* SYM_FUNC_START -- use for global functions */
 #define SYM_FUNC_START(name)				\
-	SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)
 
 /* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
 #define SYM_FUNC_START_NOALIGN(name)			\
-	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
 
 /* SYM_FUNC_START_LOCAL -- use for local functions */
 #define SYM_FUNC_START_LOCAL(name)			\
-	SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)
 
 /* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
 #define SYM_FUNC_START_LOCAL_NOALIGN(name)		\
-	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
 
 /* SYM_FUNC_START_WEAK -- use for weak functions */
 #define SYM_FUNC_START_WEAK(name)			\
-	SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)	\
-	ENDBR
+	SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)
 
 /* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
 #define SYM_FUNC_START_WEAK_NOALIGN(name)		\
-	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)	\
-	ENDBR
+	SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
 
 #endif /* _ASM_X86_LINKAGE_H */
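For reference, a simplified view of what the cleaned-up macro now amounts to (eliding the SYM_START/SYM_ENTRY plumbing; illustration only, not the literal expansion):

/* SYM_FUNC_START(name), roughly: */
	.globl name
	SYM_F_ALIGN		/* function alignment */
name:
	/* body follows; no ENDBR is emitted here anymore. Functions that
	 * must be indirectly callable use SYM_TYPED_FUNC_START instead. */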
@@ -60,6 +60,7 @@ static inline void clear_page(void *page)
 }
 
 void copy_page(void *to, void *from);
+KCFI_REFERENCE(copy_page);
 
 #ifdef CONFIG_X86_5LEVEL
 /*

@@ -244,7 +244,17 @@ extern struct paravirt_patch_template pv_ops;
 
 int paravirt_disable_iospace(void);
 
-/* This generates an indirect call based on the operation type number. */
+/*
+ * This generates an indirect call based on the operation type number.
+ *
+ * Since alternatives run after enabling CET/IBT -- the latter setting/clearing
+ * capabilities and the former requiring all capabilities being finalized --
+ * these indirect calls are subject to IBT and the paravirt stubs should have
+ * ENDBR on.
+ *
+ * OTOH since this is effectively a __nocfi indirect call, the paravirt stubs
+ * don't need to bother with CFI prefixes.
+ */
 #define PARAVIRT_CALL					\
 	ANNOTATE_RETPOLINE_SAFE				\
 	"call *%[paravirt_opptr];"

@@ -42,14 +42,14 @@ static __always_inline void native_write_cr2(unsigned long val)
 	asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
 }
 
-static inline unsigned long __native_read_cr3(void)
+static __always_inline unsigned long __native_read_cr3(void)
 {
 	unsigned long val;
 	asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
 	return val;
 }
 
-static inline void native_write_cr3(unsigned long val)
+static __always_inline void native_write_cr3(unsigned long val)
 {
 	asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
 }

@@ -21,6 +21,7 @@ extern void *__memcpy(void *to, const void *from, size_t len);
 #define __HAVE_ARCH_MEMSET
 void *memset(void *s, int c, size_t n);
 void *__memset(void *s, int c, size_t n);
+KCFI_REFERENCE(__memset);
 
 /*
  * KMSAN needs to instrument as much code as possible. Use C versions of

@@ -70,6 +71,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
 #define __HAVE_ARCH_MEMMOVE
 void *memmove(void *dest, const void *src, size_t count);
 void *__memmove(void *dest, const void *src, size_t count);
+KCFI_REFERENCE(__memmove);
 
 int memcmp(const void *cs, const void *ct, size_t count);
 size_t strlen(const char *s);

@@ -14,6 +14,7 @@
  * rsi: PGD of the identity mapping
  */
 SYM_FUNC_START(asm_acpi_mp_play_dead)
+	ANNOTATE_NOENDBR
 	/* Turn off global entries. Following CR3 write will flush them. */
 	movq %cr4, %rdx
 	andq $~(X86_CR4_PGE), %rdx

@@ -17,6 +17,7 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 SYM_FUNC_START(wakeup_long64)
+	ANNOTATE_NOENDBR
 	movq saved_magic(%rip), %rax
 	movq $0x123456789abcdef0, %rdx
 	cmpq %rdx, %rax

@@ -926,11 +926,7 @@ struct bpf_insn;
 extern unsigned int __bpf_prog_runX(const void *ctx,
 				    const struct bpf_insn *insn);
 
-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
+KCFI_REFERENCE(__bpf_prog_runX);
 
 /* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
 asm (

@@ -947,7 +943,7 @@ asm (
 /* Must match bpf_callback_t */
 extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
 
-__ADDRESSABLE(__bpf_callback_fn);
+KCFI_REFERENCE(__bpf_callback_fn);
 
 /* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
 asm (

@@ -146,12 +146,14 @@ SYM_FUNC_END(ftrace_stub_graph)
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 SYM_FUNC_START(__fentry__)
+	ANNOTATE_NOENDBR
 	CALL_DEPTH_ACCOUNT
 	RET
 SYM_FUNC_END(__fentry__)
 EXPORT_SYMBOL(__fentry__)
 
 SYM_FUNC_START(ftrace_caller)
+	ANNOTATE_NOENDBR
 	/* save_mcount_regs fills in first two parameters */
 	save_mcount_regs
 

@@ -197,6 +199,7 @@ SYM_FUNC_END(ftrace_caller);
 STACK_FRAME_NON_STANDARD_FP(ftrace_caller)
 
 SYM_FUNC_START(ftrace_regs_caller)
+	ANNOTATE_NOENDBR
 	/* Save the current flags before any operations that can change them */
 	pushfq
 

@@ -310,6 +313,7 @@ SYM_FUNC_END(ftrace_regs_caller)
 STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
 
 SYM_FUNC_START(ftrace_stub_direct_tramp)
+	ANNOTATE_NOENDBR
 	CALL_DEPTH_ACCOUNT
 	RET
 SYM_FUNC_END(ftrace_stub_direct_tramp)

@@ -317,6 +321,7 @@ SYM_FUNC_END(ftrace_stub_direct_tramp)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 SYM_FUNC_START(__fentry__)
+	ANNOTATE_NOENDBR
 	CALL_DEPTH_ACCOUNT
 
 	cmpq $ftrace_stub, ftrace_trace_function

@@ -9,6 +9,7 @@
  */
 .pushsection .noinstr.text, "ax"
 SYM_FUNC_START(native_save_fl)
+	ENDBR
 	pushf
 	pop %_ASM_AX
 	RET

@@ -116,6 +116,16 @@ static noinstr void pv_native_write_cr2(unsigned long val)
 	native_write_cr2(val);
 }
 
+static noinstr unsigned long pv_native_read_cr3(void)
+{
+	return __native_read_cr3();
+}
+
+static noinstr void pv_native_write_cr3(unsigned long cr3)
+{
+	native_write_cr3(cr3);
+}
+
 static noinstr unsigned long pv_native_get_debugreg(int regno)
 {
 	return native_get_debugreg(regno);

@@ -203,8 +213,8 @@ struct paravirt_patch_template pv_ops = {
 #ifdef CONFIG_PARAVIRT_XXL
 	.mmu.read_cr2		= __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
 	.mmu.write_cr2		= pv_native_write_cr2,
-	.mmu.read_cr3		= __native_read_cr3,
-	.mmu.write_cr3		= native_write_cr3,
+	.mmu.read_cr3		= pv_native_read_cr3,
+	.mmu.write_cr3		= pv_native_write_cr3,
 
 	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
 	.mmu.pgd_free		= paravirt_nop,

@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/linkage.h>
 #include <linux/cfi_types.h>
+#include <linux/objtool.h>
 #include <asm/asm.h>
 
 /*

@@ -64,6 +65,7 @@ EXPORT_SYMBOL_GPL(clear_page_erms)
  * rcx: uncleared bytes or 0 if successful.
  */
 SYM_FUNC_START(rep_stos_alternative)
+	ANNOTATE_NOENDBR
 	cmpq $64,%rcx
 	jae .Lunrolled
 

@@ -8,6 +8,8 @@
 
 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <linux/objtool.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
 #include <asm/asm.h>

@@ -30,6 +32,7 @@
  * it simpler for us, we can clobber rsi/rdi and rax freely.
  */
 SYM_FUNC_START(rep_movs_alternative)
+	ANNOTATE_NOENDBR
 	cmpq $64,%rcx
 	jae .Llarge
 

@@ -5,6 +5,7 @@
 
 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/asm.h>
 
 /*

@@ -27,6 +28,7 @@
  * rax uncopied bytes or 0 if successful.
  */
 SYM_FUNC_START(__copy_user_nocache)
+	ANNOTATE_NOENDBR
 	/* If destination is not 7-byte aligned, we'll have to align it */
 	testb $7,%dil
 	jne .Lalign

@@ -28,6 +28,7 @@
 
 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/page_types.h>
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>

@@ -62,6 +63,7 @@
 
 	.text
 SYM_FUNC_START(__get_user_1)
+	ANNOTATE_NOENDBR
 	check_range size=1
 	ASM_STAC
 UACCESS movzbl (%_ASM_AX),%edx

@@ -72,6 +74,7 @@ SYM_FUNC_END(__get_user_1)
 EXPORT_SYMBOL(__get_user_1)
 
 SYM_FUNC_START(__get_user_2)
+	ANNOTATE_NOENDBR
 	check_range size=2
 	ASM_STAC
 UACCESS movzwl (%_ASM_AX),%edx

@@ -82,6 +85,7 @@ SYM_FUNC_END(__get_user_2)
 EXPORT_SYMBOL(__get_user_2)
 
 SYM_FUNC_START(__get_user_4)
+	ANNOTATE_NOENDBR
 	check_range size=4
 	ASM_STAC
 UACCESS movl (%_ASM_AX),%edx

@@ -92,6 +96,7 @@ SYM_FUNC_END(__get_user_4)
 EXPORT_SYMBOL(__get_user_4)
 
 SYM_FUNC_START(__get_user_8)
+	ANNOTATE_NOENDBR
 #ifndef CONFIG_X86_64
 	xor %ecx,%ecx
 #endif

@@ -111,6 +116,7 @@ EXPORT_SYMBOL(__get_user_8)
 
 /* .. and the same for __get_user, just without the range checks */
 SYM_FUNC_START(__get_user_nocheck_1)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 	ASM_BARRIER_NOSPEC
 UACCESS movzbl (%_ASM_AX),%edx

@@ -121,6 +127,7 @@ SYM_FUNC_END(__get_user_nocheck_1)
 EXPORT_SYMBOL(__get_user_nocheck_1)
 
 SYM_FUNC_START(__get_user_nocheck_2)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 	ASM_BARRIER_NOSPEC
 UACCESS movzwl (%_ASM_AX),%edx

@@ -131,6 +138,7 @@ SYM_FUNC_END(__get_user_nocheck_2)
 EXPORT_SYMBOL(__get_user_nocheck_2)
 
 SYM_FUNC_START(__get_user_nocheck_4)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 	ASM_BARRIER_NOSPEC
 UACCESS movl (%_ASM_AX),%edx

@@ -141,6 +149,7 @@ SYM_FUNC_END(__get_user_nocheck_4)
 EXPORT_SYMBOL(__get_user_nocheck_4)
 
 SYM_FUNC_START(__get_user_nocheck_8)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 	ASM_BARRIER_NOSPEC
 #ifdef CONFIG_X86_64

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 
 #include <asm/asm.h>
 

@@ -9,6 +10,7 @@
  * %rdi: w
  */
 SYM_FUNC_START(__sw_hweight32)
+	ANNOTATE_NOENDBR
 
 #ifdef CONFIG_X86_64
 	movl %edi, %eax				# w

@@ -42,6 +44,7 @@ EXPORT_SYMBOL(__sw_hweight32)
  */
 #ifdef CONFIG_X86_64
 SYM_FUNC_START(__sw_hweight64)
+	ANNOTATE_NOENDBR
 	pushq %rdi
 	pushq %rdx
 

@@ -13,6 +13,7 @@
  */
 #include <linux/export.h>
 #include <linux/linkage.h>
+#include <linux/objtool.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>

@@ -45,6 +46,7 @@
 
 .text
 SYM_FUNC_START(__put_user_1)
+	ANNOTATE_NOENDBR
 	check_range size=1
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)

@@ -55,6 +57,7 @@ SYM_FUNC_END(__put_user_1)
 EXPORT_SYMBOL(__put_user_1)
 
 SYM_FUNC_START(__put_user_nocheck_1)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 2:	movb %al,(%_ASM_CX)
 	xor %ecx,%ecx

@@ -64,6 +67,7 @@ SYM_FUNC_END(__put_user_nocheck_1)
 EXPORT_SYMBOL(__put_user_nocheck_1)
 
 SYM_FUNC_START(__put_user_2)
+	ANNOTATE_NOENDBR
 	check_range size=2
 	ASM_STAC
 3:	movw %ax,(%_ASM_CX)

@@ -74,6 +78,7 @@ SYM_FUNC_END(__put_user_2)
 EXPORT_SYMBOL(__put_user_2)
 
 SYM_FUNC_START(__put_user_nocheck_2)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 4:	movw %ax,(%_ASM_CX)
 	xor %ecx,%ecx

@@ -83,6 +88,7 @@ SYM_FUNC_END(__put_user_nocheck_2)
 EXPORT_SYMBOL(__put_user_nocheck_2)
 
 SYM_FUNC_START(__put_user_4)
+	ANNOTATE_NOENDBR
 	check_range size=4
 	ASM_STAC
 5:	movl %eax,(%_ASM_CX)

@@ -93,6 +99,7 @@ SYM_FUNC_END(__put_user_4)
 EXPORT_SYMBOL(__put_user_4)
 
 SYM_FUNC_START(__put_user_nocheck_4)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 6:	movl %eax,(%_ASM_CX)
 	xor %ecx,%ecx

@@ -102,6 +109,7 @@ SYM_FUNC_END(__put_user_nocheck_4)
 EXPORT_SYMBOL(__put_user_nocheck_4)
 
 SYM_FUNC_START(__put_user_8)
+	ANNOTATE_NOENDBR
 	check_range size=8
 	ASM_STAC
 7:	mov %_ASM_AX,(%_ASM_CX)

@@ -115,6 +123,7 @@ SYM_FUNC_END(__put_user_8)
 EXPORT_SYMBOL(__put_user_8)
 
 SYM_FUNC_START(__put_user_nocheck_8)
+	ANNOTATE_NOENDBR
 	ASM_STAC
 9:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32

@@ -326,6 +326,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
 #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
 
 SYM_FUNC_START(entry_untrain_ret)
+	ANNOTATE_NOENDBR
 	ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)

@@ -72,6 +72,7 @@ SYM_FUNC_START(sme_encrypt_execute)
 SYM_FUNC_END(sme_encrypt_execute)
 
 SYM_FUNC_START(__enc_copy)
+	ANNOTATE_NOENDBR
 /*
  * Routine used to encrypt memory in place.
  * This routine must be run outside of the kernel proper since

@@ -26,6 +26,7 @@
 	/* code below belongs to the image kernel */
 	.align PAGE_SIZE
 SYM_FUNC_START(restore_registers)
+	ANNOTATE_NOENDBR
 	/* go back to the original page tables */
 	movq %r9, %cr3
 

@@ -119,6 +120,7 @@ SYM_FUNC_END(restore_image)
 
 	/* code below has been relocated to a safe page */
 SYM_FUNC_START(core_restore_code)
+	ANNOTATE_NOENDBR
 	/* switch to temporary page tables */
 	movq %rax, %cr3
 	/* flush TLB */

@@ -51,6 +51,7 @@ SYM_FUNC_END(xen_hypercall_pv)
  * non-zero.
  */
 SYM_FUNC_START(xen_irq_disable_direct)
+	ENDBR
 	movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
 	RET
 SYM_FUNC_END(xen_irq_disable_direct)

@@ -90,6 +91,7 @@ SYM_FUNC_END(check_events)
  * then enter the hypervisor to get them handled.
  */
 SYM_FUNC_START(xen_irq_enable_direct)
+	ENDBR
 	FRAME_BEGIN
 	/* Unmask events */
 	movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)

@@ -120,6 +122,7 @@ SYM_FUNC_END(xen_irq_enable_direct)
  * x86 use opposite senses (mask vs enable).
  */
 SYM_FUNC_START(xen_save_fl_direct)
+	ENDBR
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
 	setz %ah
 	addb %ah, %ah

@@ -127,6 +130,7 @@ SYM_FUNC_START(xen_save_fl_direct)
 SYM_FUNC_END(xen_save_fl_direct)
 
 SYM_FUNC_START(xen_read_cr2)
+	ENDBR
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
 	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX

@@ -135,6 +139,7 @@ SYM_FUNC_START(xen_read_cr2)
 SYM_FUNC_END(xen_read_cr2);
 
 SYM_FUNC_START(xen_read_cr2_direct)
+	ENDBR
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
 	FRAME_END

@@ -133,11 +133,13 @@ SYM_FUNC_START(xen_hypercall_hvm)
 SYM_FUNC_END(xen_hypercall_hvm)
 
 SYM_FUNC_START(xen_hypercall_amd)
+	ANNOTATE_NOENDBR
 	vmmcall
 	RET
 SYM_FUNC_END(xen_hypercall_amd)
 
 SYM_FUNC_START(xen_hypercall_intel)
+	ANNOTATE_NOENDBR
 	vmcall
 	RET
 SYM_FUNC_END(xen_hypercall_intel)

@@ -212,6 +212,16 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 #endif /* __KERNEL__ */
 
+#if defined(CONFIG_CFI_CLANG) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
+/*
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typid.
+ */
+#define KCFI_REFERENCE(sym) __ADDRESSABLE(sym)
+#else
+#define KCFI_REFERENCE(sym)
+#endif
+
 /**
  * offset_to_ptr - convert a relative memory offset to an absolute pointer
  * @off: the address of the 32-bit offset value