s390/fpu: provide and use lfpc, sfpc, and stfpc inline assemblies

Instead of open-coding lfpc, sfpc, and stfpc inline assemblies at
several locations, provide an fpu_* function for each instruction and
use the function instead.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2024-02-03 11:45:06 +01:00
parent 88d8136a08
commit f4e3de75d0
3 changed files with 32 additions and 10 deletions

View File

@ -45,6 +45,15 @@ static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
: "memory"); : "memory");
} }
/**
 * fpu_lfpc - Load the floating point control (FPC) register.
 * @fpc: pointer to the value to load into the FPC register
 *
 * Wraps the s390 "lfpc" instruction, which loads the FPC register from
 * the memory operand. instrument_read() tells the instrumentation layer
 * (e.g. KASAN/KMSAN) that *fpc is read, since the compiler cannot see
 * the access inside the asm. The "Q" constraint provides the
 * base+displacement memory operand lfpc requires; the "memory" clobber
 * keeps the compiler from reordering memory accesses around the load.
 */
static __always_inline void fpu_lfpc(unsigned int *fpc)
{
instrument_read(fpc, sizeof(*fpc));
asm volatile("lfpc %[fpc]"
:
: [fpc] "Q" (*fpc)
: "memory");
}
/**
 * fpu_lfpc_safe - Load floating point control register safely.
 * @fpc: new value for floating point control register
@ -82,5 +91,22 @@ static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
: "memory"); : "memory");
} }
/**
 * fpu_sfpc - Set the floating point control (FPC) register from a GPR.
 * @fpc: new value for the FPC register
 *
 * Wraps the s390 "sfpc" instruction, which sets the FPC register from a
 * general purpose register — hence the "d" (GPR) constraint, in contrast
 * to the memory-operand lfpc/stfpc variants. The "memory" clobber acts
 * as a compiler barrier so FP operations are not moved across the
 * control-register change.
 */
static __always_inline void fpu_sfpc(unsigned int fpc)
{
asm volatile("sfpc %[fpc]"
:
: [fpc] "d" (fpc)
: "memory");
}
/**
 * fpu_stfpc - Store the floating point control (FPC) register.
 * @fpc: destination for the current FPC register value
 *
 * Wraps the s390 "stfpc" instruction, which stores the FPC register to
 * the memory operand. instrument_write() informs the instrumentation
 * layer (e.g. KASAN/KMSAN) that *fpc is written, since the store happens
 * inside the asm where the compiler cannot see it. "=Q" marks *fpc as a
 * written base+displacement memory output; the "memory" clobber prevents
 * reordering around the store.
 */
static __always_inline void fpu_stfpc(unsigned int *fpc)
{
instrument_write(fpc, sizeof(*fpc));
asm volatile("stfpc %[fpc]"
: [fpc] "=Q" (*fpc)
:
: "memory");
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_FPU_INSN_H */ #endif /* __ASM_S390_FPU_INSN_H */

View File

@ -17,10 +17,8 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
* in use by the previous context. * in use by the previous context.
*/ */
flags &= state->mask; flags &= state->mask;
if (flags & KERNEL_FPC) { if (flags & KERNEL_FPC)
/* Save floating point control */ fpu_stfpc(&state->fpc);
asm volatile("stfpc %0" : "=Q" (state->fpc));
}
if (!cpu_has_vx()) { if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_LOW) if (flags & KERNEL_VXR_LOW)
save_fp_regs(state->fprs); save_fp_regs(state->fprs);
@ -80,10 +78,8 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
* current context. * current context.
*/ */
flags &= state->mask; flags &= state->mask;
if (flags & KERNEL_FPC) { if (flags & KERNEL_FPC)
/* Restore floating-point controls */ fpu_lfpc(&state->fpc);
asm volatile("lfpc %0" : : "Q" (state->fpc));
}
if (!cpu_has_vx()) { if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_LOW) if (flags & KERNEL_VXR_LOW)
load_fp_regs(state->fprs); load_fp_regs(state->fprs);
@ -176,7 +172,7 @@ void save_fpu_regs(void)
state = &current->thread.fpu; state = &current->thread.fpu;
regs = current->thread.fpu.regs; regs = current->thread.fpu.regs;
asm volatile("stfpc %0" : "=Q" (state->fpc)); fpu_stfpc(&state->fpc);
if (likely(cpu_has_vx())) { if (likely(cpu_has_vx())) {
asm volatile("lgr 1,%0\n" asm volatile("lgr 1,%0\n"
"VSTM 0,15,0,1\n" "VSTM 0,15,0,1\n"

View File

@ -191,7 +191,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
void execve_tail(void) void execve_tail(void)
{ {
current->thread.fpu.fpc = 0; current->thread.fpu.fpc = 0;
asm volatile("sfpc %0" : : "d" (0)); fpu_sfpc(0);
} }
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next) struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)