Mirror of https://github.com/qemu/qemu.git

Commit b00d86bc8b: reuse the code at the bottom of helper_check_breakpoints, which is what we currently call from *_tr_breakpoint_check.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
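
The helper_check_breakpoints code kept at the bottom of this file is what *_tr_breakpoint_check currently emits a call to; the same check is now also reachable from core code as arm_debug_check_breakpoint(). As a rough sketch of how such a CPUState-level entry point might be wired up (not part of this file; the TCGCPUOps struct, header path and field names are assumptions based on the hook interface this change appears to target), one could imagine:

/*
 * Illustrative sketch only, not from the QEMU tree: install the ARM debug
 * checks as TCG CPU hooks.  Check hw/core/tcg-cpu-ops.h in the tree for
 * the real structure definition before relying on these names.
 */
#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps arm_tcg_ops_sketch = {
    .debug_check_breakpoint = arm_debug_check_breakpoint, /* per-insn bp check */
    .debug_excp_handler     = arm_debug_excp_handler,     /* raise the exception */
    /* other hooks (translation, exception entry, ...) elided */
};

The file contents at this commit follow.
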
/*
 * ARM debug helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = arm_num_brps(cpu);
    int ctx_cmps = arm_num_ctx_cmps(cpu);
    int bt;
    uint32_t contextidr;
    uint64_t hcr_el2;

    /*
     * Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn >= brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);
    hcr_el2 = arm_hcr_el2_eff(env);

    switch (bt) {
    case 3: /* linked context ID match */
        switch (arm_current_el(env)) {
        default:
            /* Context matches never fire in AArch64 EL3 */
            return false;
        case 2:
            if (!(hcr_el2 & HCR_E2H)) {
                /* Context matches never fire in EL2 without E2H enabled. */
                return false;
            }
            contextidr = env->cp15.contextidr_el[2];
            break;
        case 1:
            contextidr = env->cp15.contextidr_el[1];
            break;
        case 0:
            if ((hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                contextidr = env->cp15.contextidr_el[2];
            } else {
                contextidr = env->cp15.contextidr_el[1];
            }
            break;
        }
        break;

    case 7:  /* linked contextidr_el1 match */
        contextidr = env->cp15.contextidr_el[1];
        break;
    case 13: /* linked contextidr_el2 match */
        contextidr = env->cp15.contextidr_el[2];
        break;

    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    case 15: /* linked full context ID match */
    default:
        /*
         * Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    /*
     * We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    return contextidr == (uint32_t)env->cp15.dbgbvr[lbn];
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /*
     * Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /*
             * The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /*
     * The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
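
/*
 * Worked example of the checks above (illustrative only, derived from the
 * field extractions and switches in bp_wp_matches): a control register
 * value with SSC=0b01, HMC=0, PAC=0b10 passes only for a Non-secure access
 * made at EL0; with SSC=0b00, HMC=1 instead, accesses from EL2 and EL3 are
 * allowed through as well.
 */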

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

bool arm_debug_check_breakpoint(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int n;

    /*
     * If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    if (arm_debug_check_breakpoint(env_cpu(env))) {
        HELPER(exception_internal(env, EXCP_DEBUG));
    }
}
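
/*
 * Note on the HELPER() call above: HELPER(x) token-pastes "helper_" onto
 * the first token of its argument, so HELPER(exception_internal(env,
 * EXCP_DEBUG)) expands to helper_exception_internal(env, EXCP_DEBUG); the
 * unusual placement of the parentheses does not change which function is
 * called.
 */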

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /*
     * Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /*
     * Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                    syn_watchpoint(same_el, 0, wnr),
                    arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /*
         * (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /*
         * FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

#if !defined(CONFIG_USER_ONLY)

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}
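
/*
 * Worked example of the XORs above: with SCTLR.B set, a one-byte access
 * that arrives here as address 0x1001 is restored to the architectural
 * address 0x1001 ^ 3 = 0x1002, and a two-byte access seen as 0x1002 is
 * restored to 0x1002 ^ 2 = 0x1000, i.e. back to the address the watchpoint
 * was programmed with.
 */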

#endif