Compare commits

...

11 Commits

Author SHA1 Message Date
Yulong Han
36d09b96d3 LoongArch: KVM: Add tracepoints for CPUCFG and CSR emulation exits
This patch adds tracepoints to track KVM exits caused by CPUCFG and
CSR emulation. Note that IOCSR emulation tracing is already covered
by the generic trace_kvm_iocsr().

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Yulong Han <wheatfox17@icloud.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:35 +08:00
Bibo Mao
46ecfb68dd LoongArch: KVM: Add stat information with kernel irqchip
Move the statistics for the kernel irqchip from the VM to the vCPU, since all
VM-exit events are vCPU-relative. Also add the corresponding entries to
kvm_vcpu_stats_desc[], so that the counters show up under
/sys/kernel/debug/kvm.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
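
For reference, a minimal userspace sketch of the descriptor-table pattern the KVM stats code follows: counters live in a plain struct, and a table of offsetof()-based descriptors lets generic code enumerate them by name, roughly what STATS_DESC_COUNTER() and kvm_vcpu_stats_desc[] do for /sys/kernel/debug/kvm. Names here (vcpu_stat, stat_desc, STAT) are illustrative, not the kernel's.

#include <stddef.h>
#include <stdio.h>

struct vcpu_stat {                      /* per-vCPU counters */
        unsigned long ipi_read_exits;
        unsigned long eiointc_write_exits;
};

struct stat_desc {                      /* one counter, described by name and offset */
        const char *name;
        size_t offset;
};

#define STAT(field) { #field, offsetof(struct vcpu_stat, field) }

static const struct stat_desc descs[] = {
        STAT(ipi_read_exits),
        STAT(eiointc_write_exits),
};

int main(void)
{
        struct vcpu_stat st = { .ipi_read_exits = 3, .eiointc_write_exits = 7 };

        /* generic walker: no per-counter code is needed to expose a new entry */
        for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
                printf("%s: %lu\n", descs[i].name,
                       *(unsigned long *)((char *)&st + descs[i].offset));
        return 0;
}
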
Bibo Mao
9c79c951a3 LoongArch: KVM: Replace eiointc_enable_irq() with eiointc_update_irq()
Function eiointc_enable_irq() checks the mask value with char type and
eventually calls eiointc_update_irq(), which updates the status of a single
irq directly.

Instead, check the mask value with unsigned long type and call
eiointc_update_irq() for each set bit, which is simpler and more direct.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
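
A minimal userspace sketch of the per-bit walk this change moves to (compare the "while (data) { irq = __ffs(data); ... }" loops in the diff below), assuming 64-bit enable words; __builtin_ctzll stands in for the kernel's __ffs(), and update_irq() is a hypothetical stand-in for eiointc_update_irq().

#include <stdint.h>
#include <stdio.h>

static void update_irq(int irq, int level)      /* stand-in for eiointc_update_irq() */
{
        printf("irq %d -> level %d\n", irq, level);
}

int main(void)
{
        uint64_t data = 0x8401;         /* bits that became enabled while pending */
        int index = 1;                  /* which 64-bit enable word */

        while (data) {
                int irq = __builtin_ctzll(data);        /* kernel: __ffs(data) */
                update_irq(irq + index * 64, 1);
                data &= ~(1ULL << irq);                 /* kernel: BIT_ULL(irq) */
        }
        return 0;
}
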
Bibo Mao
b0096e1e57 LoongArch: KVM: Use generic function loongarch_eiointc_write()
Since all eiointc iocsr register writes are 1/2/4/8 bytes in size, use the
generic function loongarch_eiointc_write() and remove
loongarch_eiointc_writeb(), loongarch_eiointc_writew() and
loongarch_eiointc_writel().

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
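
A sketch of the arithmetic a single generic writer relies on, assuming the emulated registers are backed by 64-bit words: a 1/2/4/8-byte guest write at byte offset "off" is merged into the backing word with a shifted field mask. reg_write() is an illustrative name, not the kernel function.

#include <stdint.h>
#include <stdio.h>

/* Merge a write of 'value' (masked by field_mask) at byte offset 'off' into *reg. */
static void reg_write(uint64_t *reg, unsigned int off, uint64_t value,
                      uint64_t field_mask)
{
        uint64_t mask = field_mask << (off * 8);
        uint64_t data = (value & field_mask) << (off * 8);

        *reg = (*reg & ~mask) | data;   /* untouched bytes keep their old value */
}

int main(void)
{
        uint64_t enable = 0x1122334455667788ULL;

        reg_write(&enable, 2, 0xABCD, 0xFFFF);          /* 2-byte write at byte offset 2 */
        printf("%016llx\n", (unsigned long long)enable);        /* 11223344abcd7788 */
        return 0;
}
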
Bibo Mao
3e394eac6a LoongArch: KVM: Use generic function loongarch_eiointc_read()
The generic function loongarch_eiointc_read() is used for 1/2/4/8-byte read
accesses. It reads 8 bytes from the emulated software state and shifts right
by the address offset.

Also, similar to kvm_complete_iocsr_read(), the destination register of
IOCSRRD.{B/H/W} is sign-extended from the byte/half word/word.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
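
The read side under the same 64-bit backing assumption, as a small sketch: read the whole word, shift right by the byte offset, then sign-extend for 1/2/4-byte accesses, matching the IOCSRRD.{B/H/W} semantics mentioned above. reg_read() is an illustrative name.

#include <stdint.h>
#include <stdio.h>

/* Return the sign-extended field of 'len' bytes at byte offset 'off' of reg. */
static long reg_read(uint64_t reg, unsigned int off, int len)
{
        uint64_t data = reg >> (off * 8);

        switch (len) {
        case 1: return (int8_t)data;    /* IOCSRRD.B: sign-extend the byte */
        case 2: return (int16_t)data;   /* IOCSRRD.H: sign-extend the half word */
        case 4: return (int32_t)data;   /* IOCSRRD.W: sign-extend the word */
        default: return (long)data;     /* 8-byte access (assumes 64-bit long) */
        }
}

int main(void)
{
        uint64_t reg = 0x00000000ff80beefULL;

        printf("%ld\n", reg_read(reg, 3, 1));   /* byte 0xff   -> -1 */
        printf("%ld\n", reg_read(reg, 2, 2));   /* half 0xff80 -> -128 */
        return 0;
}
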
Bibo Mao
d23bd878f6 LoongArch: KVM: Use standard bitops API with eiointc
Use standard bitops APIs such as test_bit(), rather than manually calculating
the offset and mask. Also use the non-atomic __set_bit() and __clear_bit()
rather than set_bit() and clear_bit(), since the global spinlock is already
held.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
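
A userspace sketch of the substitution, with my_test_bit()/my_set_bit() as illustrative stand-ins for the kernel's test_bit()/__set_bit() over an unsigned long array: the manual "irq / 32" index plus "BIT(irq & 0x1f)" mask becomes a single bit operation. In the kernel, the non-atomic __set_bit()/__clear_bit() variants are safe here because the eiointc spinlock is already held.

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static int my_test_bit(unsigned int nr, const unsigned long *addr)
{
        return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static void my_set_bit(unsigned int nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
        unsigned long coreisr[256 / BITS_PER_LONG] = { 0 };     /* 256 stands in for EIOINTC_IRQS */
        int irq = 71;

        /* old style: irq_index = irq / 32; irq_mask = BIT(irq & 0x1f); */
        my_set_bit(irq, coreisr);
        printf("irq %d pending: %d\n", irq, my_test_bit(irq, coreisr));
        return 0;
}
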
Bibo Mao
9afce1f1ee LoongArch: KVM: Remove never called default case statement
The IOCSR instruction supports 1/2/4/8-byte accesses, so len must be 1, 2, 4
or 8 when coming from the iocsr exit emulation function kvm_emu_iocsr().
Remove the never-reached default case from the switch statements.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
Bibo Mao
a0630332fd LoongArch: KVM: Remove unused parameter len
The parameter len is unused in several functions of the eiointc emulation
driver, so remove it.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
Bibo Mao
d42fd71333 LoongArch: KVM: Remove unnecessary local variable
The local variable device1 can be replaced with the existing variable device,
which makes the code more concise.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
Yury Norov (NVIDIA)
ca2e92dc45 LoongArch: KVM: Simplify kvm_deliver_intr()
The function open-codes the for_each_set_bit() macro, which makes it bulky.
Using the proper API makes all the housekeeping code go away.

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Yury Norov (NVIDIA) <yury.norov@gmail.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
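
An illustration in userspace C of what the simplification buys: the open-coded __ffs()/find_next_bit() walk collapses into a single iterator. FOR_EACH_SET_BIT below is a portable stand-in for the kernel's for_each_set_bit() on one word, deliver() stands in for kvm_irq_deliver(), and 13 stands in for INT_IPI + 1.

#include <stdio.h>

/* Portable stand-in for for_each_set_bit() over a single word. */
#define FOR_EACH_SET_BIT(bit, word, size)                       \
        for ((bit) = 0; (bit) < (size); (bit)++)                \
                if (((word) >> (bit)) & 1)

static void deliver(int priority)       /* stand-in for kvm_irq_deliver() */
{
        printf("deliver priority %d\n", priority);
}

int main(void)
{
        unsigned long pending = 0x15;   /* priorities 0, 2 and 4 pending */
        int priority;

        FOR_EACH_SET_BIT(priority, pending, 13)
                deliver(priority);
        return 0;
}
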
Yury Norov (NVIDIA)
640f8424ca LoongArch: KVM: Rework kvm_send_pv_ipi()
The function in fact traverses a "bitmap" stored in GPRs A1 and A2, but does
so in a non-obvious way by creating a single-word bitmap twice.

This patch switches the function to create a single 2-word bitmap, and also
employs the for_each_set_bit() macro, which helps to drop most of the
housekeeping code.

While at it, convert the function to return void, so that readers are not
confused by an unchecked result.

Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Yury Norov (NVIDIA) <yury.norov@gmail.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2025-07-21 09:26:32 +08:00
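
A userspace sketch of the reworked walk, assuming 64-bit registers: the two GPR values (A1/A2) form one 2-word bitmap and a single loop visits every set bit, adding the "min" cpu offset once. kick_cpu() is a hypothetical stand-in for the kvm_queue_irq() + kvm_vcpu_kick() step.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_U64 64

static void kick_cpu(unsigned int cpu)  /* stand-in for queueing SWI0 and kicking the vcpu */
{
        printf("IPI to vcpu %u\n", cpu);
}

int main(void)
{
        /* as read from GPRs A1 and A2: a 2-word bitmap of target cpus */
        uint64_t ipi_bitmap[2] = { 0x5ULL, 0x1ULL };
        unsigned int min = 16;          /* base cpu id, as read from A3 */

        for (unsigned int cpu = 0; cpu < 2 * BITS_PER_U64; cpu++)
                if ((ipi_bitmap[cpu / BITS_PER_U64] >> (cpu % BITS_PER_U64)) & 1)
                        kick_cpu(cpu + min);    /* kernel: for_each_set_bit() over both words */
        return 0;
}
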
8 changed files with 132 additions and 545 deletions

View File

@@ -50,12 +50,6 @@ struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
u64 pages;
u64 hugepages;
u64 ipi_read_exits;
u64 ipi_write_exits;
u64 eiointc_read_exits;
u64 eiointc_write_exits;
u64 pch_pic_read_exits;
u64 pch_pic_write_exits;
};
struct kvm_vcpu_stat {
@@ -65,6 +59,12 @@ struct kvm_vcpu_stat {
u64 cpucfg_exits;
u64 signal_exits;
u64 hypercall_exits;
u64 ipi_read_exits;
u64 ipi_write_exits;
u64 eiointc_read_exits;
u64 eiointc_write_exits;
u64 pch_pic_read_exits;
u64 pch_pic_write_exits;
};
#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0)

View File

@@ -289,9 +289,11 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
er = EMULATE_FAIL;
switch (((inst.word >> 24) & 0xff)) {
case 0x0: /* CPUCFG GSPR */
trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
er = kvm_emu_cpucfg(vcpu, inst);
break;
case 0x4: /* CSR{RD,WR,XCHG} GSPR */
trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
er = kvm_handle_csr(vcpu, inst);
break;
case 0x6: /* Cache, Idle and IOCSR GSPR */
@@ -821,32 +823,25 @@ static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
return RESUME_GUEST;
}
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
unsigned int min, cpu, i;
unsigned long ipi_bitmap;
unsigned int min, cpu;
struct kvm_vcpu *dest;
DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = {
kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
};
min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
if (!ipi_bitmap)
for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
if (!dest)
continue;
cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
while (cpu < BITS_PER_LONG) {
dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
if (!dest)
continue;
/* Send SWI0 to dest vcpu to emulate IPI interrupt */
kvm_queue_irq(dest, INT_SWI0);
kvm_vcpu_kick(dest);
}
/* Send SWI0 to dest vcpu to emulate IPI interrupt */
kvm_queue_irq(dest, INT_SWI0);
kvm_vcpu_kick(dest);
}
return 0;
}
/*

View File

@@ -9,7 +9,7 @@
static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
{
int ipnum, cpu, cpuid, irq_index, irq_mask, irq;
int ipnum, cpu, cpuid, irq;
struct kvm_vcpu *vcpu;
for (irq = 0; irq < EIOINTC_IRQS; irq++) {
@@ -18,8 +18,6 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
ipnum = count_trailing_zeros(ipnum);
ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
}
irq_index = irq / 32;
irq_mask = BIT(irq & 0x1f);
cpuid = s->coremap.reg_u8[irq];
vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
@@ -27,16 +25,16 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
continue;
cpu = vcpu->vcpu_id;
if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
set_bit(irq, s->sw_coreisr[cpu][ipnum]);
if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
else
clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
}
}
static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
{
int ipnum, cpu, found, irq_index, irq_mask;
int ipnum, cpu, found;
struct kvm_vcpu *vcpu;
struct kvm_interrupt vcpu_irq;
@@ -48,19 +46,16 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
cpu = s->sw_coremap[irq];
vcpu = kvm_get_vcpu(s->kvm, cpu);
irq_index = irq / 32;
irq_mask = BIT(irq & 0x1f);
if (level) {
/* if not enable return false */
if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
return;
s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
__set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
set_bit(irq, s->sw_coreisr[cpu][ipnum]);
__set_bit(irq, s->sw_coreisr[cpu][ipnum]);
} else {
s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
__clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
__clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
}
@@ -110,159 +105,14 @@ void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
unsigned long flags;
unsigned long *isr = (unsigned long *)s->isr.reg_u8;
level ? set_bit(irq, isr) : clear_bit(irq, isr);
spin_lock_irqsave(&s->lock, flags);
level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
eiointc_update_irq(s, irq, level);
spin_unlock_irqrestore(&s->lock, flags);
}
static inline void eiointc_enable_irq(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s, int index, u8 mask, int level)
{
u8 val;
int irq;
val = mask & s->isr.reg_u8[index];
irq = ffs(val);
while (irq != 0) {
/*
* enable bit change from 0 to 1,
* need to update irq by pending bits
*/
eiointc_update_irq(s, irq - 1 + index * 8, level);
val &= ~BIT(irq - 1);
irq = ffs(val);
}
}
static int loongarch_eiointc_readb(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
gpa_t addr, int len, void *val)
{
int index, ret = 0;
u8 data = 0;
gpa_t offset;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = offset - EIOINTC_NODETYPE_START;
data = s->nodetype.reg_u8[index];
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
index = offset - EIOINTC_IPMAP_START;
data = s->ipmap.reg_u8[index];
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = offset - EIOINTC_ENABLE_START;
data = s->enable.reg_u8[index];
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
index = offset - EIOINTC_BOUNCE_START;
data = s->bounce.reg_u8[index];
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = offset - EIOINTC_COREISR_START;
data = s->coreisr.reg_u8[vcpu->vcpu_id][index];
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
index = offset - EIOINTC_COREMAP_START;
data = s->coremap.reg_u8[index];
break;
default:
ret = -EINVAL;
break;
}
*(u8 *)val = data;
return ret;
}
static int loongarch_eiointc_readw(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
gpa_t addr, int len, void *val)
{
int index, ret = 0;
u16 data = 0;
gpa_t offset;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 1;
data = s->nodetype.reg_u16[index];
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
index = (offset - EIOINTC_IPMAP_START) >> 1;
data = s->ipmap.reg_u16[index];
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 1;
data = s->enable.reg_u16[index];
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
index = (offset - EIOINTC_BOUNCE_START) >> 1;
data = s->bounce.reg_u16[index];
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 1;
data = s->coreisr.reg_u16[vcpu->vcpu_id][index];
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
index = (offset - EIOINTC_COREMAP_START) >> 1;
data = s->coremap.reg_u16[index];
break;
default:
ret = -EINVAL;
break;
}
*(u16 *)val = data;
return ret;
}
static int loongarch_eiointc_readl(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
gpa_t addr, int len, void *val)
{
int index, ret = 0;
u32 data = 0;
gpa_t offset;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 2;
data = s->nodetype.reg_u32[index];
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
index = (offset - EIOINTC_IPMAP_START) >> 2;
data = s->ipmap.reg_u32[index];
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 2;
data = s->enable.reg_u32[index];
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
index = (offset - EIOINTC_BOUNCE_START) >> 2;
data = s->bounce.reg_u32[index];
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 2;
data = s->coreisr.reg_u32[vcpu->vcpu_id][index];
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
index = (offset - EIOINTC_COREMAP_START) >> 2;
data = s->coremap.reg_u32[index];
break;
default:
ret = -EINVAL;
break;
}
*(u32 *)val = data;
return ret;
}
static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
gpa_t addr, int len, void *val)
static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s,
gpa_t addr, unsigned long *val)
{
int index, ret = 0;
u64 data = 0;
@@ -298,7 +148,7 @@ static int loongarch_eiointc_readq(struct kvm_vcpu *vcpu, struct loongarch_eioin
ret = -EINVAL;
break;
}
*(u64 *)val = data;
*val = data;
return ret;
}
@@ -308,7 +158,7 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
gpa_t addr, int len, void *val)
{
int ret = -EINVAL;
unsigned long flags;
unsigned long flags, data, offset;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
@@ -321,355 +171,115 @@ static int kvm_eiointc_read(struct kvm_vcpu *vcpu,
return -EINVAL;
}
vcpu->kvm->stat.eiointc_read_exits++;
offset = addr & 0x7;
addr -= offset;
vcpu->stat.eiointc_read_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data);
spin_unlock_irqrestore(&eiointc->lock, flags);
if (ret)
return ret;
data = data >> (offset * 8);
switch (len) {
case 1:
ret = loongarch_eiointc_readb(vcpu, eiointc, addr, len, val);
*(long *)val = (s8)data;
break;
case 2:
ret = loongarch_eiointc_readw(vcpu, eiointc, addr, len, val);
*(long *)val = (s16)data;
break;
case 4:
ret = loongarch_eiointc_readl(vcpu, eiointc, addr, len, val);
break;
case 8:
ret = loongarch_eiointc_readq(vcpu, eiointc, addr, len, val);
*(long *)val = (s32)data;
break;
default:
WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
__func__, addr, len);
}
spin_unlock_irqrestore(&eiointc->lock, flags);
return ret;
}
static int loongarch_eiointc_writeb(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s,
gpa_t addr, int len, const void *val)
{
int index, irq, bits, ret = 0;
u8 cpu;
u8 data, old_data;
u8 coreisr, old_coreisr;
gpa_t offset;
data = *(u8 *)val;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START);
s->nodetype.reg_u8[index] = data;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
/*
* ipmap cannot be set at runtime, can be set only at the beginning
* of irqchip driver, need not update upper irq level
*/
index = (offset - EIOINTC_IPMAP_START);
s->ipmap.reg_u8[index] = data;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START);
old_data = s->enable.reg_u8[index];
s->enable.reg_u8[index] = data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index];
eiointc_enable_irq(vcpu, s, index, data, 1);
/*
* 0: disable irq.
* update irq when isr is set.
*/
data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index];
eiointc_enable_irq(vcpu, s, index, data, 0);
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = offset - EIOINTC_BOUNCE_START;
s->bounce.reg_u8[index] = data;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START);
/* use attrs to get current cpu index */
cpu = vcpu->vcpu_id;
coreisr = data;
old_coreisr = s->coreisr.reg_u8[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr;
coreisr &= old_coreisr;
bits = sizeof(data) * 8;
irq = find_first_bit((void *)&coreisr, bits);
while (irq < bits) {
eiointc_update_irq(s, irq + index * bits, 0);
bitmap_clear((void *)&coreisr, irq, 1);
irq = find_first_bit((void *)&coreisr, bits);
}
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
irq = offset - EIOINTC_COREMAP_START;
index = irq;
s->coremap.reg_u8[index] = data;
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
*(long *)val = (long)data;
break;
}
return ret;
return 0;
}
static int loongarch_eiointc_writew(struct kvm_vcpu *vcpu,
static int loongarch_eiointc_write(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s,
gpa_t addr, int len, const void *val)
gpa_t addr, u64 value, u64 field_mask)
{
int i, index, irq, bits, ret = 0;
int index, irq, ret = 0;
u8 cpu;
u16 data, old_data;
u16 coreisr, old_coreisr;
u64 data, old, mask;
gpa_t offset;
data = *(u16 *)val;
offset = addr - EIOINTC_BASE;
offset = addr & 7;
mask = field_mask << (offset * 8);
data = (value & field_mask) << (offset * 8);
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 1;
s->nodetype.reg_u16[index] = data;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
/*
* ipmap cannot be set at runtime, can be set only at the beginning
* of irqchip driver, need not update upper irq level
*/
index = (offset - EIOINTC_IPMAP_START) >> 1;
s->ipmap.reg_u16[index] = data;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 1;
old_data = s->enable.reg_u16[index];
s->enable.reg_u16[index] = data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u16[index] & ~old_data & s->isr.reg_u16[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 1);
}
/*
* 0: disable irq.
* update irq when isr is set.
*/
data = ~s->enable.reg_u16[index] & old_data & s->isr.reg_u16[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index * 2 + i, mask, 0);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EIOINTC_BOUNCE_START) >> 1;
s->bounce.reg_u16[index] = data;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 1;
/* use attrs to get current cpu index */
cpu = vcpu->vcpu_id;
coreisr = data;
old_coreisr = s->coreisr.reg_u16[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u16[cpu][index] = old_coreisr & ~coreisr;
coreisr &= old_coreisr;
bits = sizeof(data) * 8;
irq = find_first_bit((void *)&coreisr, bits);
while (irq < bits) {
eiointc_update_irq(s, irq + index * bits, 0);
bitmap_clear((void *)&coreisr, irq, 1);
irq = find_first_bit((void *)&coreisr, bits);
}
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
irq = offset - EIOINTC_COREMAP_START;
index = irq >> 1;
s->coremap.reg_u16[index] = data;
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int loongarch_eiointc_writel(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s,
gpa_t addr, int len, const void *val)
{
int i, index, irq, bits, ret = 0;
u8 cpu;
u32 data, old_data;
u32 coreisr, old_coreisr;
gpa_t offset;
data = *(u32 *)val;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 2;
s->nodetype.reg_u32[index] = data;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
/*
* ipmap cannot be set at runtime, can be set only at the beginning
* of irqchip driver, need not update upper irq level
*/
index = (offset - EIOINTC_IPMAP_START) >> 2;
s->ipmap.reg_u32[index] = data;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 2;
old_data = s->enable.reg_u32[index];
s->enable.reg_u32[index] = data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 1);
}
/*
* 0: disable irq.
* update irq when isr is set.
*/
data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index * 4 + i, mask, 0);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EIOINTC_BOUNCE_START) >> 2;
s->bounce.reg_u32[index] = data;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 2;
/* use attrs to get current cpu index */
cpu = vcpu->vcpu_id;
coreisr = data;
old_coreisr = s->coreisr.reg_u32[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr;
coreisr &= old_coreisr;
bits = sizeof(data) * 8;
irq = find_first_bit((void *)&coreisr, bits);
while (irq < bits) {
eiointc_update_irq(s, irq + index * bits, 0);
bitmap_clear((void *)&coreisr, irq, 1);
irq = find_first_bit((void *)&coreisr, bits);
}
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
irq = offset - EIOINTC_COREMAP_START;
index = irq >> 2;
s->coremap.reg_u32[index] = data;
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int loongarch_eiointc_writeq(struct kvm_vcpu *vcpu,
struct loongarch_eiointc *s,
gpa_t addr, int len, const void *val)
{
int i, index, irq, bits, ret = 0;
u8 cpu;
u64 data, old_data;
u64 coreisr, old_coreisr;
gpa_t offset;
data = *(u64 *)val;
addr -= offset;
offset = addr - EIOINTC_BASE;
switch (offset) {
case EIOINTC_NODETYPE_START ... EIOINTC_NODETYPE_END:
index = (offset - EIOINTC_NODETYPE_START) >> 3;
s->nodetype.reg_u64[index] = data;
old = s->nodetype.reg_u64[index];
s->nodetype.reg_u64[index] = (old & ~mask) | data;
break;
case EIOINTC_IPMAP_START ... EIOINTC_IPMAP_END:
/*
* ipmap cannot be set at runtime, can be set only at the beginning
* of irqchip driver, need not update upper irq level
*/
index = (offset - EIOINTC_IPMAP_START) >> 3;
s->ipmap.reg_u64 = data;
old = s->ipmap.reg_u64;
s->ipmap.reg_u64 = (old & ~mask) | data;
break;
case EIOINTC_ENABLE_START ... EIOINTC_ENABLE_END:
index = (offset - EIOINTC_ENABLE_START) >> 3;
old_data = s->enable.reg_u64[index];
s->enable.reg_u64[index] = data;
old = s->enable.reg_u64[index];
s->enable.reg_u64[index] = (old & ~mask) | data;
/*
* 1: enable irq.
* update irq when isr is set.
*/
data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 1);
data = s->enable.reg_u64[index] & ~old & s->isr.reg_u64[index];
while (data) {
irq = __ffs(data);
eiointc_update_irq(s, irq + index * 64, 1);
data &= ~BIT_ULL(irq);
}
/*
* 0: disable irq.
* update irq when isr is set.
*/
data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index];
for (i = 0; i < sizeof(data); i++) {
u8 mask = (data >> (i * 8)) & 0xff;
eiointc_enable_irq(vcpu, s, index * 8 + i, mask, 0);
data = ~s->enable.reg_u64[index] & old & s->isr.reg_u64[index];
while (data) {
irq = __ffs(data);
eiointc_update_irq(s, irq + index * 64, 0);
data &= ~BIT_ULL(irq);
}
break;
case EIOINTC_BOUNCE_START ... EIOINTC_BOUNCE_END:
/* do not emulate hw bounced irq routing */
index = (offset - EIOINTC_BOUNCE_START) >> 3;
s->bounce.reg_u64[index] = data;
old = s->bounce.reg_u64[index];
s->bounce.reg_u64[index] = (old & ~mask) | data;
break;
case EIOINTC_COREISR_START ... EIOINTC_COREISR_END:
index = (offset - EIOINTC_COREISR_START) >> 3;
/* use attrs to get current cpu index */
cpu = vcpu->vcpu_id;
coreisr = data;
old_coreisr = s->coreisr.reg_u64[cpu][index];
old = s->coreisr.reg_u64[cpu][index];
/* write 1 to clear interrupt */
s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr;
coreisr &= old_coreisr;
bits = sizeof(data) * 8;
irq = find_first_bit((void *)&coreisr, bits);
while (irq < bits) {
eiointc_update_irq(s, irq + index * bits, 0);
bitmap_clear((void *)&coreisr, irq, 1);
irq = find_first_bit((void *)&coreisr, bits);
s->coreisr.reg_u64[cpu][index] = old & ~data;
data &= old;
while (data) {
irq = __ffs(data);
eiointc_update_irq(s, irq + index * 64, 0);
data &= ~BIT_ULL(irq);
}
break;
case EIOINTC_COREMAP_START ... EIOINTC_COREMAP_END:
irq = offset - EIOINTC_COREMAP_START;
index = irq >> 3;
s->coremap.reg_u64[index] = data;
eiointc_update_sw_coremap(s, irq, data, sizeof(data), true);
index = (offset - EIOINTC_COREMAP_START) >> 3;
old = s->coremap.reg_u64[index];
s->coremap.reg_u64[index] = (old & ~mask) | data;
data = s->coremap.reg_u64[index];
eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true);
break;
default:
ret = -EINVAL;
@@ -684,7 +294,7 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
gpa_t addr, int len, const void *val)
{
int ret = -EINVAL;
unsigned long flags;
unsigned long flags, value;
struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc;
if (!eiointc) {
@@ -697,24 +307,25 @@ static int kvm_eiointc_write(struct kvm_vcpu *vcpu,
return -EINVAL;
}
vcpu->kvm->stat.eiointc_write_exits++;
vcpu->stat.eiointc_write_exits++;
spin_lock_irqsave(&eiointc->lock, flags);
switch (len) {
case 1:
ret = loongarch_eiointc_writeb(vcpu, eiointc, addr, len, val);
value = *(unsigned char *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF);
break;
case 2:
ret = loongarch_eiointc_writew(vcpu, eiointc, addr, len, val);
value = *(unsigned short *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX);
break;
case 4:
ret = loongarch_eiointc_writel(vcpu, eiointc, addr, len, val);
break;
case 8:
ret = loongarch_eiointc_writeq(vcpu, eiointc, addr, len, val);
value = *(unsigned int *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX);
break;
default:
WARN_ONCE(1, "%s: Abnormal address access: addr 0x%llx, size %d\n",
__func__, addr, len);
value = *(unsigned long *)val;
ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX);
break;
}
spin_unlock_irqrestore(&eiointc->lock, flags);
@@ -989,7 +600,7 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
{
int ret;
struct loongarch_eiointc *s;
struct kvm_io_device *device, *device1;
struct kvm_io_device *device;
struct kvm *kvm = dev->kvm;
/* eiointc has been created */
@@ -1017,10 +628,10 @@ static int kvm_eiointc_create(struct kvm_device *dev, u32 type)
return ret;
}
device1 = &s->device_vext;
kvm_iodevice_init(device1, &kvm_eiointc_virt_ops);
device = &s->device_vext;
kvm_iodevice_init(device, &kvm_eiointc_virt_ops);
ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS,
EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device1);
EIOINTC_VIRT_BASE, EIOINTC_VIRT_SIZE, device);
if (ret < 0) {
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &s->device);
kfree(s);

View File

@@ -268,36 +268,16 @@ static int kvm_ipi_read(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
int ret;
struct loongarch_ipi *ipi;
ipi = vcpu->kvm->arch.ipi;
if (!ipi) {
kvm_err("%s: ipi irqchip not valid!\n", __func__);
return -EINVAL;
}
ipi->kvm->stat.ipi_read_exits++;
ret = loongarch_ipi_readl(vcpu, addr, len, val);
return ret;
vcpu->stat.ipi_read_exits++;
return loongarch_ipi_readl(vcpu, addr, len, val);
}
static int kvm_ipi_write(struct kvm_vcpu *vcpu,
struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
int ret;
struct loongarch_ipi *ipi;
ipi = vcpu->kvm->arch.ipi;
if (!ipi) {
kvm_err("%s: ipi irqchip not valid!\n", __func__);
return -EINVAL;
}
ipi->kvm->stat.ipi_write_exits++;
ret = loongarch_ipi_writel(vcpu, addr, len, val);
return ret;
vcpu->stat.ipi_write_exits++;
return loongarch_ipi_writel(vcpu, addr, len, val);
}
static const struct kvm_io_device_ops kvm_ipi_ops = {

View File

@@ -196,7 +196,7 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
}
/* statistics of pch pic reading */
vcpu->kvm->stat.pch_pic_read_exits++;
vcpu->stat.pch_pic_read_exits++;
ret = loongarch_pch_pic_read(s, addr, len, val);
return ret;
@@ -303,7 +303,7 @@ static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
}
/* statistics of pch pic writing */
vcpu->kvm->stat.pch_pic_write_exits++;
vcpu->stat.pch_pic_write_exits++;
ret = loongarch_pch_pic_write(s, addr, len, val);
return ret;

View File

@@ -83,28 +83,11 @@ void kvm_deliver_intr(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.irq_pending;
unsigned long *pending_clr = &vcpu->arch.irq_clear;
if (!(*pending) && !(*pending_clr))
return;
for_each_set_bit(priority, pending_clr, INT_IPI + 1)
kvm_irq_clear(vcpu, priority);
if (*pending_clr) {
priority = __ffs(*pending_clr);
while (priority <= INT_IPI) {
kvm_irq_clear(vcpu, priority);
priority = find_next_bit(pending_clr,
BITS_PER_BYTE * sizeof(*pending_clr),
priority + 1);
}
}
if (*pending) {
priority = __ffs(*pending);
while (priority <= INT_IPI) {
kvm_irq_deliver(vcpu, priority);
priority = find_next_bit(pending,
BITS_PER_BYTE * sizeof(*pending),
priority + 1);
}
}
for_each_set_bit(priority, pending, INT_IPI + 1)
kvm_irq_deliver(vcpu, priority);
}
int kvm_pending_timer(struct kvm_vcpu *vcpu)

View File

@@ -46,11 +46,15 @@ DEFINE_EVENT(kvm_transition, kvm_out,
/* Further exit reasons */
#define KVM_TRACE_EXIT_IDLE 64
#define KVM_TRACE_EXIT_CACHE 65
#define KVM_TRACE_EXIT_CPUCFG 66
#define KVM_TRACE_EXIT_CSR 67
/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
{ KVM_TRACE_EXIT_IDLE, "IDLE" }, \
{ KVM_TRACE_EXIT_CACHE, "CACHE" }
{ KVM_TRACE_EXIT_CACHE, "CACHE" }, \
{ KVM_TRACE_EXIT_CPUCFG, "CPUCFG" }, \
{ KVM_TRACE_EXIT_CSR, "CSR" }
DECLARE_EVENT_CLASS(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -82,6 +86,14 @@ DEFINE_EVENT(kvm_exit, kvm_exit_cache,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
DEFINE_EVENT(kvm_exit, kvm_exit_cpucfg,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
DEFINE_EVENT(kvm_exit, kvm_exit_csr,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));
DEFINE_EVENT(kvm_exit, kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
TP_ARGS(vcpu, reason));

View File

@@ -20,7 +20,13 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
STATS_DESC_COUNTER(VCPU, idle_exits),
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
STATS_DESC_COUNTER(VCPU, signal_exits),
STATS_DESC_COUNTER(VCPU, hypercall_exits)
STATS_DESC_COUNTER(VCPU, hypercall_exits),
STATS_DESC_COUNTER(VCPU, ipi_read_exits),
STATS_DESC_COUNTER(VCPU, ipi_write_exits),
STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {