linux/drivers/hv/mshv_eventfd.c

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:
 "ARM:

   - Host driver for GICv5, the next generation interrupt controller for
     arm64, including support for interrupt routing, MSIs, interrupt
     translation and wired interrupts

   - Use FEAT_GCIE_LEGACY on GICv5 systems to virtualize GICv3 VMs on
     GICv5 hardware, leveraging the legacy VGIC interface

   - Userspace control of the 'nASSGIcap' GICv3 feature, allowing
     userspace to disable support for SGIs w/o an active state on
     hardware that previously advertised it unconditionally

   - Map supporting endpoints with cacheable memory attributes on
     systems with FEAT_S2FWB and DIC where KVM no longer needs to
     perform cache maintenance on the address range

   - Nested support for FEAT_RAS and FEAT_DoubleFault2, allowing the
     guest hypervisor to inject external aborts into an L2 VM and take
     traps of masked external aborts to the hypervisor

   - Convert more system register sanitization to the config-driven
     implementation

   - Fixes to the visibility of EL2 registers, namely making VGICv3
     system registers accessible through the VGIC device instead of the
     ONE_REG vCPU ioctls

   - Various cleanups and minor fixes

  LoongArch:

   - Add stat information for in-kernel irqchip

   - Add tracepoints for CPUCFG and CSR emulation exits

   - Enhance in-kernel irqchip emulation

   - Various cleanups

  RISC-V:

   - Enable ring-based dirty memory tracking

   - Improve perf kvm stat to report interrupt events

   - Delegate illegal instruction trap to VS-mode

   - MMU improvements related to upcoming nested virtualization

  s390x:

   - Fixes

  x86:

   - Add CONFIG_KVM_IOAPIC for x86 to allow disabling support for I/O
     APIC, PIC, and PIT emulation at compile time

   - Share device posted IRQ code between SVM and VMX and harden it
     against bugs and runtime errors

   - Use vcpu_idx, not vcpu_id, for GA log tag/metadata, to make lookups
     O(1) instead of O(n)

   - For MMIO stale data mitigation, track whether or not a vCPU has
     access to (host) MMIO based on whether the page tables have MMIO
     pfns mapped; using VFIO is prone to false negatives

   - Rework the MSR interception code so that the SVM and VMX APIs are
     more or less identical

   - Recalculate all MSR intercepts from scratch on MSR filter changes,
     instead of maintaining shadow bitmaps

   - Advertise support for LKGS (Load Kernel GS base), a new instruction
     that's loosely related to FRED, but is supported and enumerated
     independently

   - Fix a user-triggerable WARN that syzkaller found by setting the
     vCPU in INIT_RECEIVED state (aka wait-for-SIPI), and then putting
     the vCPU into VMX Root Mode (post-VMXON). Trying to detect every
     possible path leading to architecturally forbidden states is hard
     and even risks breaking userspace (if it goes from valid to valid
     state but passes through invalid states), so just wait until
     KVM_RUN to detect that the vCPU state isn't allowed

   - Add KVM_X86_DISABLE_EXITS_APERFMPERF to allow disabling
     interception of APERF/MPERF reads, so that a "properly" configured
     VM can access APERF/MPERF. This has many caveats (APERF/MPERF
     cannot be zeroed on vCPU creation or saved/restored on suspend and
     resume, or preserved over thread migration let alone VM migration)
     but can be useful whenever you're interested in letting Linux
     guests see the effective physical CPU frequency in /proc/cpuinfo

   - Reject KVM_SET_TSC_KHZ for vm file descriptors if vCPUs have been
     created, as there's no known use case for changing the default
     frequency for other VM types and it goes counter to the very reason
     why the ioctl was added to the vm file descriptor. And also, there
     would be no way to make it work for confidential VMs with a
     "secure" TSC, so kill two birds with one stone

   - Dynamically allocate the shadow MMU's hashed page list, and defer
     allocating the hashed list until it's actually needed (the TDP MMU
     doesn't use the list)

   - Extract many of KVM's helpers for accessing architectural local
     APIC state to common x86 so that they can be shared by guest-side
     code for Secure AVIC

   - Various cleanups and fixes

  x86 (Intel):

   - Preserve the host's DEBUGCTL.FREEZE_IN_SMM when running the guest.
     Failure to honor FREEZE_IN_SMM can leak host state into guests

   - Explicitly check vmcs12.GUEST_DEBUGCTL on nested VM-Enter to
     prevent L1 from running L2 with features that KVM doesn't support,
     e.g. BTF

  x86 (AMD):

   - WARN and reject loading kvm-amd.ko instead of panicking the kernel
     if the nested SVM MSRPM offsets tracker can't handle an MSR (which
     is pretty much a static condition and therefore should never
     happen, but still)

   - Fix a variety of flaws and bugs in the AVIC device posted IRQ code

   - Inhibit AVIC if a vCPU's ID is too big (relative to what hardware
     supports) instead of rejecting vCPU creation

   - Extend enable_ipiv module param support to SVM, by simply leaving
     IsRunning clear in the vCPU's physical ID table entry

   - Disable IPI virtualization, via enable_ipiv, if the CPU is affected
     by erratum #1235, to allow (safely) enabling AVIC on such CPUs

   - Request GA Log interrupts if and only if the target vCPU is
     blocking, i.e. only if KVM needs a notification in order to wake
     the vCPU

   - Intercept SPEC_CTRL on AMD if the MSR shouldn't exist according to
     the vCPU's CPUID model

   - Accept any SNP policy that is accepted by the firmware with respect
     to SMT and single-socket restrictions. An incompatible policy
     doesn't put the kernel at risk in any way, so there's no reason for
     KVM to care

   - Drop a superfluous WBINVD (on all CPUs!) when destroying a VM and
     use WBNOINVD instead of WBINVD when possible for SEV cache
     maintenance

   - When reclaiming memory from an SEV guest, only do cache flushes on
     CPUs that have ever run a vCPU for the guest, i.e. don't flush the
     caches for CPUs that can't possibly have cache lines with dirty,
     encrypted data

  Generic:

   - Rework irqbypass to track/match producers and consumers via an
     xarray instead of a linked list. Using a linked list leads to
     O(n^2) insertion times, which is hugely problematic for use cases
     that create large numbers of VMs. Such use cases typically don't
     actually use irqbypass, but eliminating the pointless registration
     is a future problem to solve as it likely requires new uAPI

   - Track irqbypass's "token" as "struct eventfd_ctx *" instead of a
     "void *", to avoid making a simple concept unnecessarily difficult
     to understand

   - Decouple device posted IRQs from VFIO device assignment, as binding
     a VM to a VFIO group is not a requirement for enabling device
     posted IRQs

   - Clean up and document/comment the irqfd assignment code

   - Disallow binding multiple irqfds to an eventfd with a priority
     waiter, i.e. ensure an eventfd is bound to at most one irqfd
     through the entire host, and add a selftest to verify eventfd:irqfd
     bindings are globally unique

   - Add a tracepoint for KVM_SET_MEMORY_ATTRIBUTES to help debug issues
     related to private <=> shared memory conversions

   - Drop guest_memfd's .getattr() implementation as the VFS layer will
     call generic_fillattr() if inode_operations.getattr is NULL

   - Fix issues with dirty ring harvesting where KVM doesn't bound the
     processing of entries in any way, which allows userspace to keep
     KVM in a tight loop indefinitely

   - Kill off kvm_arch_{start,end}_assignment() and x86's associated
     tracking, now that KVM no longer uses assigned_device_count as a
     heuristic for either irqbypass usage or MDS mitigation

  Selftests:

   - Fix a comment typo

   - Verify KVM is loaded when getting any KVM module param so that
     attempting to run a selftest without kvm.ko loaded results in a
     SKIP message about KVM not being loaded/enabled (versus some random
     parameter not existing)

   - Skip tests that hit EACCES when attempting to access a file, and
     print a "Root required?" help message. In most cases, the test just
     needs to be run with elevated permissions"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (340 commits)
  Documentation: KVM: Use unordered list for pre-init VGIC registers
  RISC-V: KVM: Avoid re-acquiring memslot in kvm_riscv_gstage_map()
  RISC-V: KVM: Use find_vma_intersection() to search for intersecting VMAs
  RISC-V: perf/kvm: Add reporting of interrupt events
  RISC-V: KVM: Enable ring-based dirty memory tracking
  RISC-V: KVM: Fix inclusion of Smnpm in the guest ISA bitmap
  RISC-V: KVM: Delegate illegal instruction fault to VS mode
  RISC-V: KVM: Pass VMID as parameter to kvm_riscv_hfence_xyz() APIs
  RISC-V: KVM: Factor-out g-stage page table management
  RISC-V: KVM: Add vmid field to struct kvm_riscv_hfence
  RISC-V: KVM: Introduce struct kvm_gstage_mapping
  RISC-V: KVM: Factor-out MMU related declarations into separate headers
  RISC-V: KVM: Use ncsr_xyz() in kvm_riscv_vcpu_trap_redirect()
  RISC-V: KVM: Implement kvm_arch_flush_remote_tlbs_range()
  RISC-V: KVM: Don't flush TLB when PTE is unchanged
  RISC-V: KVM: Replace KVM_REQ_HFENCE_GVMA_VMID_ALL with KVM_REQ_TLB_FLUSH
  RISC-V: KVM: Rename and move kvm_riscv_local_tlb_sanitize()
  RISC-V: KVM: Drop the return value of kvm_riscv_vcpu_aia_init()
  RISC-V: KVM: Check kvm_riscv_vcpu_alloc_vector_context() return value
  KVM: arm64: selftests: Add FEAT_RAS EL2 registers to get-reg-list
  ...

// SPDX-License-Identifier: GPL-2.0-only
/*
 * eventfd support for mshv
 *
 * Heavily inspired from KVM implementation of irqfd/ioeventfd. The basic
 * framework code is taken from the kvm implementation.
 *
 * All credits to kvm developers.
 */

#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/eventfd.h>

#if IS_ENABLED(CONFIG_X86_64)
#include <asm/apic.h>
#endif
#include <asm/mshyperv.h>

#include "mshv_eventfd.h"
#include "mshv.h"
#include "mshv_root.h"

static struct workqueue_struct *irqfd_cleanup_wq;

void mshv_register_irq_ack_notifier(struct mshv_partition *partition,
				    struct mshv_irq_ack_notifier *mian)
{
	mutex_lock(&partition->pt_irq_lock);
	hlist_add_head_rcu(&mian->link, &partition->irq_ack_notifier_list);
	mutex_unlock(&partition->pt_irq_lock);
}

void mshv_unregister_irq_ack_notifier(struct mshv_partition *partition,
				      struct mshv_irq_ack_notifier *mian)
{
	mutex_lock(&partition->pt_irq_lock);
	hlist_del_init_rcu(&mian->link);
	mutex_unlock(&partition->pt_irq_lock);
	synchronize_rcu();
}
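
/*
 * Run the ack notifiers registered for @gsi; returns true if at least one
 * notifier claimed the interrupt (i.e. a resampling irqfd watches this GSI).
 */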
bool mshv_notify_acked_gsi(struct mshv_partition *partition, int gsi)
{
	struct mshv_irq_ack_notifier *mian;
	bool acked = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mian, &partition->irq_ack_notifier_list,
				 link) {
		if (mian->irq_ack_gsi == gsi) {
			mian->irq_acked(mian);
			acked = true;
		}
	}
	rcu_read_unlock();

	return acked;
}
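
/*
 * Whether the hypervisor-side pending interrupt must be explicitly cleared
 * when a resampling irqfd is acked: only x86 EXTINT-type interrupts need the
 * extra hv_call_clear_virtual_interrupt() call; arm64 never does.
 */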
#if IS_ENABLED(CONFIG_ARM64)
static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
{
	return false;
}
#elif IS_ENABLED(CONFIG_X86_64)
static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
{
	return type == HV_X64_INTERRUPT_TYPE_EXTINT;
}
#endif
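
/*
 * Ack notifier callback for a resampler: the guest has acked the GSI, so
 * signal the resample eventfd of every irqfd sharing this GSI (and clear the
 * interrupt in the hypervisor where the architecture requires it).
 */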
static void mshv_irqfd_resampler_ack(struct mshv_irq_ack_notifier *mian)
{
	struct mshv_irqfd_resampler *resampler;
	struct mshv_partition *partition;
	struct mshv_irqfd *irqfd;
	int idx;

	resampler = container_of(mian, struct mshv_irqfd_resampler,
				 rsmplr_notifier);
	partition = resampler->rsmplr_partn;

	idx = srcu_read_lock(&partition->pt_irq_srcu);

	hlist_for_each_entry_rcu(irqfd, &resampler->rsmplr_irqfd_list,
				 irqfd_resampler_hnode) {
		if (hv_should_clear_interrupt(irqfd->irqfd_lapic_irq.lapic_control.interrupt_type))
			hv_call_clear_virtual_interrupt(partition->pt_id);

		eventfd_signal(irqfd->irqfd_resamplefd);
	}

	srcu_read_unlock(&partition->pt_irq_srcu, idx);
}

#if IS_ENABLED(CONFIG_X86_64)
static bool
mshv_vp_irq_vector_injected(union hv_vp_register_page_interrupt_vectors iv,
			    u32 vector)
{
	int i;

	for (i = 0; i < iv.vector_count; i++) {
		if (iv.vector[i] == vector)
			return true;
	}

	return false;
}
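
/*
 * Lock-free attempt to add @vector to the VP's shared register-page vector
 * list: read the current list, append the vector, and publish it with a
 * single cmpxchg so that a concurrent update makes us retry (-EAGAIN).
 */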
static int mshv_vp_irq_try_set_vector(struct mshv_vp *vp, u32 vector)
{
	union hv_vp_register_page_interrupt_vectors iv, new_iv;

	iv = vp->vp_register_page->interrupt_vectors;
	new_iv = iv;

	if (mshv_vp_irq_vector_injected(iv, vector))
		return 0;

	if (iv.vector_count >= HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT)
		return -ENOSPC;

	new_iv.vector[new_iv.vector_count++] = vector;

	if (cmpxchg(&vp->vp_register_page->interrupt_vectors.as_uint64,
		    iv.as_uint64, new_iv.as_uint64) != iv.as_uint64)
		return -EAGAIN;

	return 0;
}

static int mshv_vp_irq_set_vector(struct mshv_vp *vp, u32 vector)
{
	int ret;

	do {
		ret = mshv_vp_irq_try_set_vector(vp, vector);
	} while (ret == -EAGAIN && !need_resched());

	return ret;
}

/*
 * Try to raise irq for guest via shared vector array. hyp does the actual
 * inject of the interrupt.
 */
static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
{
	struct mshv_partition *partition = irqfd->irqfd_partn;
	struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
	struct mshv_vp *vp;

	if (!(ms_hyperv.ext_features &
	      HV_VP_DISPATCH_INTERRUPT_INJECTION_AVAILABLE))
		return -EOPNOTSUPP;

	if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
		return -EOPNOTSUPP;

	if (irq->lapic_control.logical_dest_mode)
		return -EOPNOTSUPP;

	vp = partition->pt_vp_array[irq->lapic_apic_id];

	if (!vp->vp_register_page)
		return -EOPNOTSUPP;

	if (mshv_vp_irq_set_vector(vp, irq->lapic_vector))
		return -EINVAL;

	if (vp->run.flags.root_sched_dispatched &&
	    vp->vp_register_page->interrupt_vectors.as_uint64)
		return -EBUSY;

	wake_up(&vp->run.vp_suspend_queue);

	return 0;
}
#else /* CONFIG_X86_64 */
static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
{
	return -EOPNOTSUPP;
}
#endif
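
/*
 * Slow path: ask the hypervisor to assert the interrupt directly. The
 * seqcount read loop waits out any in-flight routing update before the
 * cached lapic_irq fields are used for the hypercall.
 */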
static void mshv_assert_irq_slow(struct mshv_irqfd *irqfd)
{
	struct mshv_partition *partition = irqfd->irqfd_partn;
	struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
	unsigned int seq;
	int idx;

	WARN_ON(irqfd->irqfd_resampler &&
		!irq->lapic_control.level_triggered);

	idx = srcu_read_lock(&partition->pt_irq_srcu);

	if (irqfd->irqfd_girq_ent.guest_irq_num) {
		if (!irqfd->irqfd_girq_ent.girq_entry_valid) {
			srcu_read_unlock(&partition->pt_irq_srcu, idx);
			return;
		}

		do {
			seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
		} while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));
	}

	hv_call_assert_virtual_interrupt(irqfd->irqfd_partn->pt_id,
					 irq->lapic_vector, irq->lapic_apic_id,
					 irq->lapic_control);

	srcu_read_unlock(&partition->pt_irq_srcu, idx);
}
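
/*
 * Detach @irqfd from its resampler; the last irqfd to leave also unregisters
 * the resampler's ack notifier and frees it.
 */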
static void mshv_irqfd_resampler_shutdown(struct mshv_irqfd *irqfd)
{
	struct mshv_irqfd_resampler *rp = irqfd->irqfd_resampler;
	struct mshv_partition *pt = rp->rsmplr_partn;

	mutex_lock(&pt->irqfds_resampler_lock);

	hlist_del_rcu(&irqfd->irqfd_resampler_hnode);
	synchronize_srcu(&pt->pt_irq_srcu);

	if (hlist_empty(&rp->rsmplr_irqfd_list)) {
		hlist_del(&rp->rsmplr_hnode);
		mshv_unregister_irq_ack_notifier(pt, &rp->rsmplr_notifier);
		kfree(rp);
	}

	mutex_unlock(&pt->irqfds_resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void mshv_irqfd_shutdown(struct work_struct *work)
{
	struct mshv_irqfd *irqfd =
		container_of(work, struct mshv_irqfd, irqfd_shutdown);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	remove_wait_queue(irqfd->irqfd_wqh, &irqfd->irqfd_wait);

	if (irqfd->irqfd_resampler) {
		mshv_irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->irqfd_resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->irqfd_eventfd_ctx);
	kfree(irqfd);
}

/* assumes partition->pt_irqfds_lock is held */
static bool mshv_irqfd_is_active(struct mshv_irqfd *irqfd)
{
	return !hlist_unhashed(&irqfd->irqfd_hnode);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes partition->pt_irqfds_lock is held
 */
static void mshv_irqfd_deactivate(struct mshv_irqfd *irqfd)
{
	if (!mshv_irqfd_is_active(irqfd))
		return;

	hlist_del(&irqfd->irqfd_hnode);

	queue_work(irqfd_cleanup_wq, &irqfd->irqfd_shutdown);
}
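
/*
 * Wait-queue callback for the irqfd's eventfd. POLLIN means the eventfd was
 * signalled: consume the count and inject the interrupt, trying the fast
 * register-page path before falling back to the hypercall. POLLHUP means
 * the eventfd is being released: detach the irqfd from the partition.
 */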
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int mshv_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
			     int sync, void *key)
{
	struct mshv_irqfd *irqfd = container_of(wait, struct mshv_irqfd,
						irqfd_wait);
	unsigned long flags = (unsigned long)key;
	int idx;
	unsigned int seq;
	struct mshv_partition *pt = irqfd->irqfd_partn;
	int ret = 0;

	if (flags & POLLIN) {
		u64 cnt;

		eventfd_ctx_do_read(irqfd->irqfd_eventfd_ctx, &cnt);
		idx = srcu_read_lock(&pt->pt_irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
		} while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));

		/* An event has been signaled, raise an interrupt */
		ret = mshv_try_assert_irq_fast(irqfd);
		if (ret)
			mshv_assert_irq_slow(irqfd);
		srcu_read_unlock(&pt->pt_irq_srcu, idx);
		ret = 1;
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from the partition */
		unsigned long flags;

		spin_lock_irqsave(&pt->pt_irqfds_lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the pt_irqfds_lock since the item is
		 * deactivated from the mshv side before it is unhooked from
		 * the wait-queue. If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (mshv_irqfd_is_active(irqfd))
			mshv_irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&pt->pt_irqfds_lock, flags);
	}

	return ret;
}

/* Must be called under pt_irqfds_lock */
static void mshv_irqfd_update(struct mshv_partition *pt,
			      struct mshv_irqfd *irqfd)
{
	write_seqcount_begin(&irqfd->irqfd_irqe_sc);
	irqfd->irqfd_girq_ent = mshv_ret_girq_entry(pt,
						    irqfd->irqfd_irqnum);
	mshv_copy_girq_info(&irqfd->irqfd_girq_ent, &irqfd->irqfd_lapic_irq);
	write_seqcount_end(&irqfd->irqfd_irqe_sc);
}

void mshv_irqfd_routing_update(struct mshv_partition *pt)
{
	struct mshv_irqfd *irqfd;

	spin_lock_irq(&pt->pt_irqfds_lock);

	hlist_for_each_entry(irqfd, &pt->pt_irqfds_list, irqfd_hnode)
		mshv_irqfd_update(pt, irqfd);

	spin_unlock_irq(&pt->pt_irqfds_lock);
}
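
/*
 * poll_table callback invoked from vfs_poll(): record the eventfd's
 * wait-queue head and add the irqfd to it as an exclusive, priority waiter.
 */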
static void mshv_irqfd_queue_proc(struct file *file, wait_queue_head_t *wqh,
				  poll_table *polltbl)
{
	struct mshv_irqfd *irqfd =
		container_of(polltbl, struct mshv_irqfd, irqfd_polltbl);

	irqfd->irqfd_wqh = wqh;

	/*
	 * TODO: Ensure there isn't already an exclusive, priority waiter, e.g.
	 * that the irqfd isn't already bound to another partition. Only the
	 * first exclusive waiter encountered will be notified, and
	 * add_wait_queue_priority() doesn't enforce exclusivity.
	 */
	irqfd->irqfd_wait.flags |= WQ_FLAG_EXCLUSIVE;
	add_wait_queue_priority(wqh, &irqfd->irqfd_wait);
}
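
/*
 * Bind an eventfd to a partition GSI: allocate the irqfd, optionally hook it
 * up to a (possibly shared) resampler for level-triggered interrupts, reject
 * eventfds already bound to another GSI of this partition, then start
 * polling the eventfd and fire once if an event was already pending.
 */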
static int mshv_irqfd_assign(struct mshv_partition *pt,
			     struct mshv_user_irqfd *args)
{
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	struct mshv_irqfd *irqfd, *tmp;
	unsigned int events;
	int ret;
	int idx;

	CLASS(fd, f)(args->fd);

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->irqfd_partn = pt;
	irqfd->irqfd_irqnum = args->gsi;
	INIT_WORK(&irqfd->irqfd_shutdown, mshv_irqfd_shutdown);
	seqcount_spinlock_init(&irqfd->irqfd_irqe_sc, &pt->pt_irqfds_lock);

	if (fd_empty(f)) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(fd_file(f));
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->irqfd_eventfd_ctx = eventfd;

	if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE)) {
		struct mshv_irqfd_resampler *rp;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->irqfd_resamplefd = resamplefd;

		mutex_lock(&pt->irqfds_resampler_lock);

		hlist_for_each_entry(rp, &pt->irqfds_resampler_list,
				     rsmplr_hnode) {
			if (rp->rsmplr_notifier.irq_ack_gsi ==
			    irqfd->irqfd_irqnum) {
				irqfd->irqfd_resampler = rp;
				break;
			}
		}

		if (!irqfd->irqfd_resampler) {
			rp = kzalloc(sizeof(*rp), GFP_KERNEL_ACCOUNT);
			if (!rp) {
				ret = -ENOMEM;
				mutex_unlock(&pt->irqfds_resampler_lock);
				goto fail;
			}

			rp->rsmplr_partn = pt;
			INIT_HLIST_HEAD(&rp->rsmplr_irqfd_list);
			rp->rsmplr_notifier.irq_ack_gsi = irqfd->irqfd_irqnum;
			rp->rsmplr_notifier.irq_acked =
						mshv_irqfd_resampler_ack;

			hlist_add_head(&rp->rsmplr_hnode,
				       &pt->irqfds_resampler_list);
			mshv_register_irq_ack_notifier(pt,
						       &rp->rsmplr_notifier);
			irqfd->irqfd_resampler = rp;
		}

		hlist_add_head_rcu(&irqfd->irqfd_resampler_hnode,
				   &irqfd->irqfd_resampler->rsmplr_irqfd_list);

		mutex_unlock(&pt->irqfds_resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->irqfd_wait, mshv_irqfd_wakeup);
	init_poll_funcptr(&irqfd->irqfd_polltbl, mshv_irqfd_queue_proc);

	spin_lock_irq(&pt->pt_irqfds_lock);
	if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE) &&
	    !irqfd->irqfd_lapic_irq.lapic_control.level_triggered) {
		/*
		 * Resample Fd must be for level triggered interrupt
		 * Otherwise return with failure
		 */
		spin_unlock_irq(&pt->pt_irqfds_lock);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
	hlist_for_each_entry(tmp, &pt->pt_irqfds_list, irqfd_hnode) {
		if (irqfd->irqfd_eventfd_ctx != tmp->irqfd_eventfd_ctx)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&pt->pt_irqfds_lock);
		goto fail;
	}

	idx = srcu_read_lock(&pt->pt_irq_srcu);
	mshv_irqfd_update(pt, irqfd);
	hlist_add_head(&irqfd->irqfd_hnode, &pt->pt_irqfds_list);
	spin_unlock_irq(&pt->pt_irqfds_lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(fd_file(f), &irqfd->irqfd_polltbl);

	if (events & POLLIN)
		mshv_assert_irq_slow(irqfd);

	srcu_read_unlock(&pt->pt_irq_srcu, idx);
	return 0;

fail:
	if (irqfd->irqfd_resampler)
		mshv_irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

out:
	kfree(irqfd);
	return ret;
}

/*
 * shutdown any irqfd's that match fd+gsi
 */
static int mshv_irqfd_deassign(struct mshv_partition *pt,
			       struct mshv_user_irqfd *args)
{
	struct mshv_irqfd *irqfd;
	struct hlist_node *n;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list,
				  irqfd_hnode) {
		if (irqfd->irqfd_eventfd_ctx == eventfd &&
		    irqfd->irqfd_irqnum == args->gsi)
			mshv_irqfd_deactivate(irqfd);
	}

	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}
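
/*
 * Entry point for the partition irqfd ioctl: validate the flags and dispatch
 * to assign or deassign.
 */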
int mshv_set_unset_irqfd(struct mshv_partition *pt,
			 struct mshv_user_irqfd *args)
{
	if (args->flags & ~MSHV_IRQFD_FLAGS_MASK)
		return -EINVAL;

	if (args->flags & BIT(MSHV_IRQFD_BIT_DEASSIGN))
		return mshv_irqfd_deassign(pt, args);

	return mshv_irqfd_assign(pt, args);
}

/*
 * This function is called as the mshv VM fd is being released.
 * Shutdown all irqfds that still remain open
 */
static void mshv_irqfd_release(struct mshv_partition *pt)
{
	struct mshv_irqfd *irqfd;
	struct hlist_node *n;

	spin_lock_irq(&pt->pt_irqfds_lock);

	hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list, irqfd_hnode)
		mshv_irqfd_deactivate(irqfd);

	spin_unlock_irq(&pt->pt_irqfds_lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a mshv_partition* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

int mshv_irqfd_wq_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("mshv-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void mshv_irqfd_wq_cleanup(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a MMIO memory write to an eventfd signal.
 *
 * userspace can register a MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
static void ioeventfd_release(struct mshv_ioeventfd *p, u64 partition_id)
{
	if (p->iovntfd_doorbell_id > 0)
		mshv_unregister_doorbell(partition_id, p->iovntfd_doorbell_id);
	eventfd_ctx_put(p->iovntfd_eventfd);
	kfree(p);
}

/* MMIO writes trigger an event if the addr/val match */
static void ioeventfd_mmio_write(int doorbell_id, void *data)
{
	struct mshv_partition *partition = (struct mshv_partition *)data;
	struct mshv_ioeventfd *p;

	rcu_read_lock();

	hlist_for_each_entry_rcu(p, &partition->ioeventfds_list, iovntfd_hnode)
		if (p->iovntfd_doorbell_id == doorbell_id) {
			eventfd_signal(p->iovntfd_eventfd);
			break;
		}

	rcu_read_unlock();
}
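
/*
 * Two ioeventfds collide if they cover the same address and length and could
 * match the same write, i.e. either one is a wildcard or both match on the
 * same data value.
 */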
static bool ioeventfd_check_collision(struct mshv_partition *pt,
				      struct mshv_ioeventfd *p)
	__must_hold(&pt->mutex)
{
	struct mshv_ioeventfd *_p;

	hlist_for_each_entry(_p, &pt->ioeventfds_list, iovntfd_hnode)
		if (_p->iovntfd_addr == p->iovntfd_addr &&
		    _p->iovntfd_length == p->iovntfd_length &&
		    (_p->iovntfd_wildcard || p->iovntfd_wildcard ||
		     _p->iovntfd_datamatch == p->iovntfd_datamatch))
			return true;

	return false;
}

static int mshv_assign_ioeventfd(struct mshv_partition *pt,
				 struct mshv_user_ioeventfd *args)
	__must_hold(&pt->mutex)
{
	struct mshv_ioeventfd *p;
	struct eventfd_ctx *eventfd;
	u64 doorbell_flags = 0;
	int ret;

	/* This mutex is currently protecting ioeventfd.items list */
	WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));

	if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
		return -EOPNOTSUPP;

	/* must be natural-word sized */
	switch (args->len) {
	case 0:
		doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY;
		break;
	case 1:
		doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE;
		break;
	case 2:
		doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD;
		break;
	case 4:
		doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD;
		break;
	case 8:
		doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD;
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK)
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	p->iovntfd_addr = args->addr;
	p->iovntfd_length = args->len;
	p->iovntfd_eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH)) {
		p->iovntfd_datamatch = args->datamatch;
	} else {
		p->iovntfd_wildcard = true;
		doorbell_flags |= HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE;
	}

	if (ioeventfd_check_collision(pt, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	ret = mshv_register_doorbell(pt->pt_id, ioeventfd_mmio_write,
				     (void *)pt, p->iovntfd_addr,
				     p->iovntfd_datamatch, doorbell_flags);
	if (ret < 0)
		goto unlock_fail;

	p->iovntfd_doorbell_id = ret;

	hlist_add_head_rcu(&p->iovntfd_hnode, &pt->ioeventfds_list);

	return 0;

unlock_fail:
	kfree(p);

fail:
	eventfd_ctx_put(eventfd);

	return ret;
}

static int mshv_deassign_ioeventfd(struct mshv_partition *pt,
				   struct mshv_user_ioeventfd *args)
	__must_hold(&pt->mutex)
{
	struct mshv_ioeventfd *p;
	struct eventfd_ctx *eventfd;
	struct hlist_node *n;
	int ret = -ENOENT;

	/* This mutex is currently protecting ioeventfd.items list */
	WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	hlist_for_each_entry_safe(p, n, &pt->ioeventfds_list, iovntfd_hnode) {
		bool wildcard = !(args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH));

		if (p->iovntfd_eventfd != eventfd ||
		    p->iovntfd_addr != args->addr ||
		    p->iovntfd_length != args->len ||
		    p->iovntfd_wildcard != wildcard)
			continue;

		if (!p->iovntfd_wildcard &&
		    p->iovntfd_datamatch != args->datamatch)
			continue;

		hlist_del_rcu(&p->iovntfd_hnode);
		synchronize_rcu();
		ioeventfd_release(p, pt->pt_id);
		ret = 0;
		break;
	}

	eventfd_ctx_put(eventfd);

	return ret;
}

int mshv_set_unset_ioeventfd(struct mshv_partition *pt,
			     struct mshv_user_ioeventfd *args)
	__must_hold(&pt->mutex)
{
	if ((args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK) ||
	    mshv_field_nonzero(*args, rsvd))
		return -EINVAL;

	/* PIO not yet implemented */
	if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
		return -EOPNOTSUPP;

	if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DEASSIGN))
		return mshv_deassign_ioeventfd(pt, args);

	return mshv_assign_ioeventfd(pt, args);
}

void mshv_eventfd_init(struct mshv_partition *pt)
{
	spin_lock_init(&pt->pt_irqfds_lock);
	INIT_HLIST_HEAD(&pt->pt_irqfds_list);

	INIT_HLIST_HEAD(&pt->irqfds_resampler_list);
	mutex_init(&pt->irqfds_resampler_lock);

	INIT_HLIST_HEAD(&pt->ioeventfds_list);
}
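
/*
 * Called on partition teardown: release all ioeventfds (after moving them
 * off the live list and letting RCU readers drain), then shut down any
 * remaining irqfds.
 */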
void mshv_eventfd_release(struct mshv_partition *pt)
{
	struct hlist_head items;
	struct hlist_node *n;
	struct mshv_ioeventfd *p;

	hlist_move_list(&pt->ioeventfds_list, &items);
	synchronize_rcu();

	hlist_for_each_entry_safe(p, n, &items, iovntfd_hnode) {
		hlist_del(&p->iovntfd_hnode);
		ioeventfd_release(p, pt->pt_id);
	}

	mshv_irqfd_release(pt);
}