mirror of
				https://github.com/qemu/qemu.git
				synced 2025-10-31 20:44:16 +00:00 
			
		
		
		
	 9277d81f5c
			
		
	
	
		9277d81f5c
		
	
	
	
	
		
			
			Signed-off-by: Ville Skyttä <ville.skytta@iki.fi> Reviewed-by: Peter Maydell <peter.maydell@linaro.org> Reviewed-by: Eric Blake <eblake@redhat.com> Message-id: 20180612065150.21110-1-ville.skytta@iki.fi Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
		
			
				
	
	
		
			282 lines
		
	
	
		
			7.1 KiB
		
	
	
	
		
			INI
		
	
	
	
	
	
			
		
		
	
	
			282 lines
		
	
	
		
			7.1 KiB
		
	
	
	
		
			INI
		
	
	
	
	
	
# mach-virt - VirtIO guest (graphical console)
# =========================================================
#
# Usage:
#
#   $ qemu-system-aarch64 \
#     -nodefaults \
#     -readconfig mach-virt-graphical.cfg \
#     -cpu host
#
# You will probably need to tweak the lines marked as
# CHANGE ME before being able to use this configuration!
#
# The guest will have a selection of VirtIO devices
# tailored towards optimal performance with modern guests,
# and will be accessed through a graphical console.
#
# ---------------------------------------------------------
#
# Using -nodefaults is required to have full control over
# the virtual hardware: when it's specified, QEMU will
# populate the board with only the builtin peripherals,
# such as the PL011 UART, plus a PCI Express Root Bus; the
# user will then have to explicitly add further devices.
#
# The PCI Express Root Bus shows up in the guest as:
#
#   00:00.0 Host bridge
#
# This configuration file adds a number of other useful
# devices, more specifically:
#
#   00:01.0 Display controller
#   00:1c.* PCI bridge (PCI Express Root Ports)
#   01:00.0 SCSI storage controller
#   02:00.0 Ethernet controller
#   03:00.0 USB controller
#
# More information about these devices is available below.
| 
 | |
| 
 | |
# Machine options
# =========================================================
#
# We use the virt machine type and enable KVM acceleration
# for better performance.
#
# Using less than 1 GiB of memory is probably not going to
# yield good performance in the guest, and might even lead
# to obscure boot issues in some cases.
#
# Unfortunately, there is no way to configure the CPU model
# in this file, so it will have to be provided on the
# command line, but we can configure the guest to use the
# same GIC version as the host.
| 
 | |
[machine]
  type = "virt"
  accel = "kvm"
  gic-version = "host"

[memory]
  size = "1024"
| 
 | |
| 
 | |
# Firmware configuration
# =========================================================
#
# There are two parts to the firmware: a read-only image
# containing the executable code, which is shared between
# guests, and a read/write variable store that is owned
# by one specific guest, exclusively, and is used to
# record information such as the UEFI boot order.
#
# For any new guest, its permanent, private variable store
# should initially be copied from the template file
# provided along with the firmware binary.
#
# Depending on the OS distribution you're using on the
# host, the name of the package containing the firmware
# binary and variable store template, as well as the paths
# to the files themselves, will be different. For example:
#
# Fedora
#   edk2-aarch64                                      (pkg)
#   /usr/share/edk2/aarch64/QEMU_EFI-pflash.raw       (bin)
#   /usr/share/edk2/aarch64/vars-template-pflash.raw  (var)
#
# RHEL
#   AAVMF                                             (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd                    (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd                    (var)
#
# Debian/Ubuntu
#   qemu-efi                                          (pkg)
#   /usr/share/AAVMF/AAVMF_CODE.fd                    (bin)
#   /usr/share/AAVMF/AAVMF_VARS.fd                    (var)
| 
 | |
[drive "uefi-binary"]
  file = "/usr/share/AAVMF/AAVMF_CODE.fd"       # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "0"
  readonly = "on"

[drive "uefi-varstore"]
  file = "guest_VARS.fd"                        # CHANGE ME
  format = "raw"
  if = "pflash"
  unit = "1"
| 
 | |
| 
 | |
# PCI bridge (PCI Express Root Ports)
# =========================================================
#
# We create eight PCI Express Root Ports, and we plug them
# all into separate functions of the same slot. Some of
# them will be used by devices, the rest will remain
# available for hotplug.
| 
 | |
[device "pcie.1"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.0"
  port = "1"
  chassis = "1"
  multifunction = "on"

[device "pcie.2"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.1"
  port = "2"
  chassis = "2"

[device "pcie.3"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.2"
  port = "3"
  chassis = "3"

[device "pcie.4"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.3"
  port = "4"
  chassis = "4"

[device "pcie.5"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.4"
  port = "5"
  chassis = "5"

[device "pcie.6"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.5"
  port = "6"
  chassis = "6"

[device "pcie.7"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.6"
  port = "7"
  chassis = "7"

[device "pcie.8"]
  driver = "pcie-root-port"
  bus = "pcie.0"
  addr = "1c.7"
  port = "8"
  chassis = "8"
| 
 | |
| 
 | |
# SCSI storage controller (and storage)
# =========================================================
#
# We use virtio-scsi here so that we can (hot)plug a large
# number of disks without running into issues; a SCSI disk,
# backed by a qcow2 disk image on the host's filesystem, is
# attached to it.
#
# We also create an optical disk, mostly for installation
# purposes: once the guest OS has been successfully
# installed, the guest will no longer boot from optical
# media. If you don't want, or no longer want, to have an
# optical disk in the guest you can safely comment out
# all relevant sections below.
| 
 | |
[device "scsi"]
  driver = "virtio-scsi-pci"
  bus = "pcie.1"
  addr = "00.0"

[device "scsi-disk"]
  driver = "scsi-hd"
  bus = "scsi.0"
  drive = "disk"
  bootindex = "1"

[drive "disk"]
  file = "guest.qcow2"                          # CHANGE ME
  format = "qcow2"
  if = "none"

[device "scsi-optical-disk"]
  driver = "scsi-cd"
  bus = "scsi.0"
  drive = "optical-disk"
  bootindex = "2"

[drive "optical-disk"]
  file = "install.iso"                          # CHANGE ME
  format = "raw"
  if = "none"
| 
 | |
| 
 | |
# Ethernet controller
# =========================================================
#
# We use virtio-net for improved performance over emulated
# hardware; on the host side, we take advantage of user
# networking so that the QEMU process doesn't require any
# additional privileges.
| 
 | |
[netdev "hostnet"]
  type = "user"

[device "net"]
  driver = "virtio-net-pci"
  netdev = "hostnet"
  bus = "pcie.2"
  addr = "00.0"
| 
 | |
| 
 | |
# USB controller (and input devices)
# =========================================================
#
# We add a virtualization-friendly USB 3.0 controller and
# a USB keyboard / USB tablet combo so that graphical
# guests can be controlled appropriately.
| 
 | |
[device "usb"]
  driver = "nec-usb-xhci"
  bus = "pcie.3"
  addr = "00.0"

[device "keyboard"]
  driver = "usb-kbd"
  bus = "usb.0"

[device "tablet"]
  driver = "usb-tablet"
  bus = "usb.0"
| 
 | |
| 
 | |
# Display controller
# =========================================================
#
# We use virtio-gpu because the legacy VGA framebuffer is
# very troublesome on aarch64, and virtio-gpu is the only
# video device that doesn't implement it.
#
# If you're running the guest on a remote, potentially
# headless host, you will probably want to append something
# like
#
#   -display vnc=127.0.0.1:0
#
# to the command line in order to prevent QEMU from
# creating a graphical display window on the host and
# enable remote access instead.
| 
 | |
[device "video"]
  driver = "virtio-gpu"
  bus = "pcie.0"
  addr = "01.0"