mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-09-04 18:49:41 +00:00

While the GCC and Clang compilers already define __ASSEMBLER__ automatically when compiling assembly code, __ASSEMBLY__ is a macro that only gets defined by the Makefiles in the kernel. This can be very confusing when switching between userspace and kernelspace coding, or when dealing with uapi headers that should rather use __ASSEMBLER__ instead. So let's standardize on the __ASSEMBLER__ macro that is provided by the compilers now. This is a completely mechanical patch (done with a simple "sed -i" statement).

Cc: linux-snps-arc@lists.infradead.org
Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
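To illustrate the pattern this conversion targets, here is a minimal sketch of a header shared between C and assembly sources (the header name and declarations are hypothetical, not part of the patch). Since GCC and Clang predefine __ASSEMBLER__ whenever they preprocess assembly, the C-only parts can be guarded without relying on the Makefile-provided __ASSEMBLY__:

/* example.h - hypothetical header included from both C and assembly */
#ifndef _EXAMPLE_H
#define _EXAMPLE_H

#define EXAMPLE_FLAG	0x1	/* visible to both C and assembly */

#ifndef __ASSEMBLER__		/* predefined by the compiler itself */
struct example_regs {		/* C-only declarations */
	unsigned long flags;
};
#endif /* !__ASSEMBLER__ */

#endif /* _EXAMPLE_H */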
73 lines
1.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARC_JUMP_LABEL_H
#define _ASM_ARC_JUMP_LABEL_H

#ifndef __ASSEMBLER__

#include <linux/stringify.h>
#include <linux/types.h>

#define JUMP_LABEL_NOP_SIZE 4

/*
 * NOTE about '.balign 4':
 *
 * To make atomic updates of the patched instruction possible we need to
 * guarantee that the instruction doesn't cross an L1 cache line boundary.
 *
 * As of today we simply align the patchable instruction to 4 bytes using
 * the ".balign 4" directive. In that case the patched instruction is
 * aligned with one 16-bit NOP_S if required.
 *
 * However, aligning by 4 is much stricter than actually required. It is
 * enough that our 32-bit instruction doesn't cross an L1 cache line /
 * L1 I$ fetch block boundary, which can be achieved by using the
 * ".bundle_align_mode" assembler directive. That would save us from adding
 * useless NOP_S padding in most cases.
 *
 * TODO: switch to the ".bundle_align_mode" directive when it is supported
 * by the ARC toolchain.
 */

static __always_inline bool arch_static_branch(struct static_key *key,
					       bool branch)
{
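	/*
	 * Variant that defaults to "branch not taken": emit an aligned NOP
	 * that the jump label core can later patch into a branch. The
	 * ".word" triple records the instruction address, the branch
	 * target, and the key.
	 */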
	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
		 "1:						\n"
		 "nop						\n"
		 ".pushsection __jump_table, \"aw\"		\n"
		 ".word 1b, %l[l_yes], %c0			\n"
		 ".popsection					\n"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key *key,
						    bool branch)
{
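	/*
	 * Variant that defaults to "branch taken": emit an aligned
	 * unconditional branch to l_yes that can later be patched back
	 * into a NOP; the __jump_table entry is recorded as above.
	 */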
	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
		 "1:						\n"
		 "b %l[l_yes]					\n"
		 ".pushsection __jump_table, \"aw\"		\n"
		 ".word 1b, %l[l_yes], %c0			\n"
		 ".popsection					\n"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

typedef u32 jump_label_t;
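
/*
 * Layout of one __jump_table entry, matching the three ".word" values
 * emitted above: the address of the patchable instruction, the branch
 * target, and the address of the static_key (whose low bit encodes the
 * 'branch' argument).
 */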
struct jump_entry {
	jump_label_t code;
	jump_label_t target;
	jump_label_t key;
};

#endif /* __ASSEMBLER__ */
#endif