Mirror of https://github.com/armbian/linux-rockchip.git
Merge 5.10.154 into android12-5.10-lts
Changes in 5.10.154
serial: 8250: Let drivers request full 16550A feature probing
serial: ar933x: Deassert Transmit Enable on ->rs485_config()
KVM: nVMX: Pull KVM L0's desired controls directly from vmcs01
KVM: nVMX: Don't propagate vmcs12's PERF_GLOBAL_CTRL settings to vmcs02
KVM: x86: Trace re-injected exceptions
KVM: x86: Treat #DBs from the emulator as fault-like (code and DR7.GD=1)
x86/topology: Set cpu_die_id only if DIE_TYPE found
x86/topology: Fix multiple packages shown on a single-package system
x86/topology: Fix duplicated core ID within a package
KVM: x86: Protect the unused bits in MSR exiting flags
KVM: x86: Copy filter arg outside kvm_vm_ioctl_set_msr_filter()
KVM: x86: Add compat handler for KVM_X86_SET_MSR_FILTER
RDMA/cma: Use output interface for net_dev check
IB/hfi1: Correctly move list in sc_disable()
NFSv4: Fix a potential state reclaim deadlock
NFSv4.1: Handle RECLAIM_COMPLETE trunking errors
NFSv4.1: We must always send RECLAIM_COMPLETE after a reboot
nfs4: Fix kmemleak when allocate slot failed
net: dsa: Fix possible memory leaks in dsa_loop_init()
RDMA/core: Fix null-ptr-deref in ib_core_cleanup()
RDMA/qedr: clean up work queue on failure in qedr_alloc_resources()
nfc: fdp: drop ftrace-like debugging messages
nfc: fdp: Fix potential memory leak in fdp_nci_send()
NFC: nxp-nci: remove unnecessary labels
nfc: nxp-nci: Fix potential memory leak in nxp_nci_send()
nfc: s3fwrn5: Fix potential memory leak in s3fwrn5_nci_send()
nfc: nfcmrvl: Fix potential memory leak in nfcmrvl_i2c_nci_send()
net: fec: fix improper use of NETDEV_TX_BUSY
ata: pata_legacy: fix pdc20230_set_piomode()
net: sched: Fix use after free in red_enqueue()
net: tun: fix bugs for oversize packet when napi frags enabled
netfilter: nf_tables: release flow rule object from commit path
ipvs: use explicitly signed chars
ipvs: fix WARNING in __ip_vs_cleanup_batch()
ipvs: fix WARNING in ip_vs_app_net_cleanup()
rose: Fix NULL pointer dereference in rose_send_frame()
mISDN: fix possible memory leak in mISDN_register_device()
isdn: mISDN: netjet: fix wrong check of device registration
btrfs: fix inode list leak during backref walking at resolve_indirect_refs()
btrfs: fix inode list leak during backref walking at find_parent_nodes()
btrfs: fix ulist leaks in error paths of qgroup self tests
Bluetooth: L2CAP: Fix use-after-free caused by l2cap_reassemble_sdu
Bluetooth: L2CAP: fix use-after-free in l2cap_conn_del()
net: mdio: fix undefined behavior in bit shift for __mdiobus_register
net, neigh: Fix null-ptr-deref in neigh_table_clear()
ipv6: fix WARNING in ip6_route_net_exit_late()
drm/msm/hdmi: Remove spurious IRQF_ONESHOT flag
drm/msm/hdmi: fix IRQ lifetime
mmc: sdhci-esdhc-imx: Propagate ESDHC_FLAG_HS400* only on 8bit bus
mmc: sdhci-pci: Avoid comma separated statements
mmc: sdhci-pci-core: Disable ES for ASUS BIOS on Jasper Lake
video/fbdev/stifb: Implement the stifb_fillrect() function
fbdev: stifb: Fall back to cfb_fillrect() on 32-bit HCRX cards
mtd: parsers: bcm47xxpart: print correct offset on read error
mtd: parsers: bcm47xxpart: Fix halfblock reads
xhci-pci: Set runtime PM as default policy on all xHC 1.2 or later devices
s390/boot: add secure boot trailer
media: rkisp1: Initialize color space on resizer sink and source pads
media: rkisp1: Zero v4l2_subdev_format fields in when validating links
media: s5p_cec: limit msg.len to CEC_MAX_MSG_SIZE
media: cros-ec-cec: limit msg.len to CEC_MAX_MSG_SIZE
media: dvb-frontends/drxk: initialize err to 0
media: meson: vdec: fix possible refcount leak in vdec_probe()
ACPI: APEI: Fix integer overflow in ghes_estatus_pool_init()
scsi: core: Restrict legal sdev_state transitions via sysfs
HID: saitek: add madcatz variant of MMO7 mouse device ID
drm/amdgpu: set vm_update_mode=0 as default for Sienna Cichlid in SRIOV case
i2c: xiic: Add platform module alias
efi/tpm: Pass correct address to memblock_reserve
ARM: dts: imx6qdl-gw59{10,13}: fix user pushbutton GPIO offset
firmware: arm_scmi: Suppress the driver's bind attributes
firmware: arm_scmi: Make Rx chan_setup fail on memory errors
arm64: dts: juno: Add thermal critical trip points
i2c: piix4: Fix adapter not be removed in piix4_remove()
Bluetooth: L2CAP: Fix accepting connection request for invalid SPSM
Bluetooth: L2CAP: Fix attempting to access uninitialized memory
block, bfq: protect 'bfqd->queued' by 'bfqd->lock'
ALSA: usb-audio: Add quirks for MacroSilicon MS2100/MS2106 devices
fscrypt: simplify master key locking
fscrypt: stop using keyrings subsystem for fscrypt_master_key
fscrypt: fix keyring memory leak on mount failure
tcp/udp: Fix memory leak in ipv6_renew_options().
mtd: rawnand: gpmi: Set WAIT_FOR_READY timeout based on program/erase times
memcg: enable accounting of ipc resources
binder: fix UAF of alloc->vma in race with munmap()
coresight: cti: Fix hang in cti_disable_hw()
btrfs: fix type of parameter generation in btrfs_get_dentry
ftrace: Fix use-after-free for dynamic ftrace_ops
tcp/udp: Make early_demux back namespacified.
tracing: kprobe: Fix memory leak in test_gen_kprobe/kretprobe_cmd()
kprobe: reverse kp->flags when arm_kprobe failed
tools/nolibc/string: Fix memcmp() implementation
tracing/histogram: Update document for KEYS_MAX size
capabilities: fix potential memleak on error path from vfs_getxattr_alloc()
fuse: add file_modified() to fallocate
efi: random: reduce seed size to 32 bytes
efi: random: Use 'ACPI reclaim' memory for random seed
perf/x86/intel: Fix pebs event constraints for ICL
perf/x86/intel: Add Cooper Lake stepping to isolation_ucodes[]
parisc: Make 8250_gsc driver dependend on CONFIG_PARISC
parisc: Export iosapic_serial_irq() symbol for serial port driver
parisc: Avoid printing the hardware path twice
ext4: fix warning in 'ext4_da_release_space'
ext4: fix BUG_ON() when directory entry has invalid rec_len
KVM: x86: Mask off reserved bits in CPUID.80000006H
KVM: x86: Mask off reserved bits in CPUID.8000001AH
KVM: x86: Mask off reserved bits in CPUID.80000008H
KVM: x86: Mask off reserved bits in CPUID.80000001H
KVM: x86: emulator: em_sysexit should update ctxt->mode
KVM: x86: emulator: introduce emulator_recalc_and_set_mode
KVM: x86: emulator: update the emulation mode after CR0 write
ext4,f2fs: fix readahead of verity data
drm/rockchip: dsi: Force synchronous probe
drm/i915/sdvo: Filter out invalid outputs more sensibly
drm/i915/sdvo: Setup DDC fully before output init
wifi: brcmfmac: Fix potential buffer overflow in brcmf_fweh_event_worker()
ipc: remove memcg accounting for sops objects in do_semtimedop()
Linux 5.10.154
Change-Id: I6965878bf3bad857fbdbcdeb7dd066cc280aa026
Signed-off-by: Eric Biggers <ebiggers@google.com>
Documentation/trace/histogram.rst
@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
 will use the event's kernel stacktrace as the key. The keywords
 'keys' or 'key' can be used to specify keys, and the keywords
 'values', 'vals', or 'val' can be used to specify values. Compound
-keys consisting of up to two fields can be specified by the 'keys'
+keys consisting of up to three fields can be specified by the 'keys'
 keyword. Hashing a compound key produces a unique entry in the
 table for each unique combination of component keys, and can be
 useful for providing more fine-grained summaries of event data.
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 153
+SUBLEVEL = 154
 EXTRAVERSION =
 NAME = Dare mighty things
 
arch/arm/boot/dts/imx6qdl-gw5910.dtsi
@@ -31,7 +31,7 @@
 
 	user-pb {
 		label = "user_pb";
-		gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
+		gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
 		linux,code = <BTN_0>;
 	};
 
arch/arm/boot/dts/imx6qdl-gw5913.dtsi
@@ -28,7 +28,7 @@
 
 	user-pb {
 		label = "user_pb";
-		gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>;
+		gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>;
 		linux,code = <BTN_0>;
 	};
 
arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -595,12 +595,26 @@
 			polling-delay = <1000>;
 			polling-delay-passive = <100>;
 			thermal-sensors = <&scpi_sensors0 0>;
+			trips {
+				pmic_crit0: trip0 {
+					temperature = <90000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
 		};
 
 		soc {
 			polling-delay = <1000>;
 			polling-delay-passive = <100>;
 			thermal-sensors = <&scpi_sensors0 3>;
+			trips {
+				soc_crit0: trip0 {
+					temperature = <80000>;
+					hysteresis = <2000>;
+					type = "critical";
+				};
+			};
 		};
 
 		big_cluster_thermal_zone: big-cluster {
arch/parisc/include/asm/hardware.h
@@ -10,12 +10,12 @@
 #define SVERSION_ANY_ID		PA_SVERSION_ANY_ID
 
 struct hp_hardware {
-	unsigned short	hw_type:5;	/* HPHW_xxx */
-	unsigned short	hversion;
-	unsigned long	sversion:28;
-	unsigned short	opt;
-	const char	name[80];	/* The hardware description */
-};
+	unsigned int	hw_type:8;	/* HPHW_xxx */
+	unsigned int	hversion:12;
+	unsigned int	sversion:12;
+	unsigned char	opt;
+	unsigned char	name[59];	/* The hardware description */
+} __packed;
 
 struct parisc_device;
 
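Aside (illustration only, not part of the patch): the size saving from the tighter layout above is easy to check in userspace. A minimal standalone sketch; the exact old size depends on the host ABI's bitfield rules, which is part of what the program computes:

    /* Compare the two struct hp_hardware layouts from the hunk above. */
    #include <stdio.h>

    struct hp_hardware_old {
    	unsigned short hw_type:5;	/* HPHW_xxx */
    	unsigned short hversion;
    	unsigned long sversion:28;
    	unsigned short opt;
    	const char name[80];		/* The hardware description */
    };

    struct hp_hardware_new {
    	unsigned int hw_type:8;		/* HPHW_xxx */
    	unsigned int hversion:12;
    	unsigned int sversion:12;
    	unsigned char opt;
    	unsigned char name[59];		/* The hardware description */
    } __attribute__((packed));		/* __packed, in kernel spelling */

    int main(void)
    {
    	printf("old: %zu bytes, new: %zu bytes\n",
    	       sizeof(struct hp_hardware_old), sizeof(struct hp_hardware_new));
    	return 0;
    }

The new layout packs the three version fields into one 32-bit unit and shrinks the name buffer, which is what allows the struct (and the tables built from it) to move into __init memory cheaply.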
arch/parisc/kernel/drivers.c
@@ -883,15 +883,13 @@ void __init walk_central_bus(void)
 			&root);
 }
 
-static void print_parisc_device(struct parisc_device *dev)
+static __init void print_parisc_device(struct parisc_device *dev)
 {
-	char hw_path[64];
-	static int count;
+	static int count __initdata;
 
-	print_pa_hwpath(dev, hw_path);
-	pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
-		++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
-		dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
+	pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
+		++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
+		dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
 
 	if (dev->num_addrs) {
 		int k;
@@ -1080,7 +1078,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
 }
 
 
-static int print_one_device(struct device * dev, void * data)
+static __init int print_one_device(struct device * dev, void * data)
 {
 	struct parisc_device * pdev = to_parisc_device(dev);
 
arch/s390/boot/compressed/vmlinux.lds.S
@@ -91,8 +91,17 @@ SECTIONS
 		_compressed_start = .;
 		*(.vmlinux.bin.compressed)
 		_compressed_end = .;
-		FILL(0xff);
-		. = ALIGN(4096);
 	}
+
+#define SB_TRAILER_SIZE 32
+	/* Trailer needed for Secure Boot */
+	. += SB_TRAILER_SIZE;	/* make sure .sb.trailer does not overwrite the previous section */
+	. = ALIGN(4096) - SB_TRAILER_SIZE;
+	.sb.trailer : {
+		QUAD(0)
+		QUAD(0)
+		QUAD(0)
+		QUAD(0x000000207a49504c)
+	}
 	_end = .;
 
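Aside (illustration only, not part of the patch): the magic QUAD closing the trailer is ASCII when read in s390's big-endian byte order; decoding it is a one-liner. The "zIPL" reading matches the name of the s390 boot loader tooling, though the hunk itself does not document the constant:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t magic = 0x000000207a49504cULL;

    	/* Emit the quad MSB-first, as a big-endian s390 stores it. */
    	for (int shift = 56; shift >= 0; shift -= 8)
    		printf("%02x%c", (unsigned)((magic >> shift) & 0xff),
    		       shift ? ' ' : '\n');

    	/* The low five bytes are printable: prints " zIPL". */
    	for (int shift = 32; shift >= 0; shift -= 8)
    		putchar((int)((magic >> shift) & 0xff));
    	putchar('\n');
    	return 0;
    }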
arch/x86/events/intel/core.c
@@ -4412,6 +4412,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 5, 0x00000000),
 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 6, 0x00000000),
 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		 7, 0x00000000),
+	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,		11, 0x00000000),
 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L,		 3, 0x0000007c),
 	INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE,		 3, 0x0000007c),
 	INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE,		 9, 0x0000004e),
arch/x86/events/intel/ds.c
@@ -855,8 +855,13 @@ struct event_constraint intel_icl_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),	/* SLOTS */
 
 	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),			/* MEM_TRANS_RETIRED.LOAD_LATENCY */
-	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf),	/* MEM_INST_RETIRED.LOAD */
-	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf),	/* MEM_INST_RETIRED.STORE */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_STORES */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf),	/* MEM_INST_RETIRED.LOCK_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_STORES */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf),	/* MEM_INST_RETIRED.ALL_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf),	/* MEM_INST_RETIRED.ALL_STORES */
 
 	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),	/* MEM_LOAD_*_RETIRED.* */
 
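Aside (illustration only, not part of the patch): in these constraint macros the low byte of the code is the event select and the next byte the unit mask, so the replacement entries above still target event 0xd0, just with the per-umask encodings spelled out. A quick standalone decode:

    /* Decode the constraint codes above into event select and umask. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned int codes[] = { 0x1d0, 0x2d0, 0x11d0, 0x12d0, 0x81d0, 0x82d0 };

    	for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
    		printf("0x%04x -> event=0x%02x umask=0x%02x\n",
    		       codes[i], codes[i] & 0xff, (codes[i] >> 8) & 0xff);
    	return 0;
    }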
arch/x86/kernel/cpu/topology.c
@@ -96,6 +96,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 	unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
 	unsigned int die_select_mask, die_level_siblings;
+	unsigned int pkg_mask_width;
+	bool die_level_present = false;
 	int leaf;
 
 	leaf = detect_extended_topology_leaf(c);
@@ -110,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
 	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 	die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
-	die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+	pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 
 	sub_index = 1;
-	do {
+	while (true) {
 		cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);
 
 		/*
@@ -126,23 +128,33 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 			die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 		}
 		if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) {
+			die_level_present = true;
 			die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
 			die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 		}
 
-		sub_index++;
-	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+		if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
+			pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+		else
+			break;
 
-	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+		sub_index++;
+	}
+
+	core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
 	die_select_mask = (~(-1 << die_plus_mask_width)) >>
 				core_plus_mask_width;
 
 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid,
 				ht_mask_width) & core_select_mask;
-	c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
-				core_plus_mask_width) & die_select_mask;
+
+	if (die_level_present) {
+		c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid,
+					core_plus_mask_width) & die_select_mask;
+	}
 
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
-				die_plus_mask_width);
+				pkg_mask_width);
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
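Aside (illustration only, not part of the patch): the mask arithmetic above is compact, so here is a standalone sketch with made-up example widths, not values read from any real CPUID leaf. Loosely, apic->phys_pkg_id() reduces to the right shift of the APIC ID used here; the sketch uses an unsigned rendition of the kernel's (~(-1 << w)) idiom:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int ht_mask_width  = 1;	/* SMT: 2 threads per core (example) */
    	unsigned int pkg_mask_width = 6;	/* bits below the package ID (example) */
    	unsigned int apicid         = 0x2b;	/* example initial APIC ID */

    	/* bits [pkg_mask_width-1 : ht_mask_width] select the core */
    	unsigned int core_select_mask = (~(~0u << pkg_mask_width)) >> ht_mask_width;

    	unsigned int core_id = (apicid >> ht_mask_width) & core_select_mask;
    	unsigned int pkg_id  = apicid >> pkg_mask_width;

    	printf("core_select_mask=%#x core_id=%u pkg_id=%u\n",
    	       core_select_mask, core_id, pkg_id);
    	return 0;
    }

Using pkg_mask_width (the width of everything below the package, tracked to the last valid leaf subtype) instead of core_plus_mask_width is what fixes the duplicated core IDs and the phantom extra package on hybrid parts.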
arch/x86/kvm/cpuid.c
@@ -813,11 +813,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		entry->eax = min(entry->eax, 0x8000001f);
 		break;
 	case 0x80000001:
+		entry->ebx &= ~GENMASK(27, 16);
 		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
 		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
 		break;
 	case 0x80000006:
-		/* L2 cache and TLB: pass through host info. */
+		/* Drop reserved bits, pass host L2 cache and TLB info. */
+		entry->edx &= ~GENMASK(17, 16);
 		break;
 	case 0x80000007: /* Advanced power management */
 		/* invariant TSC is CPUID.80000007H:EDX[8] */
@@ -840,6 +842,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		g_phys_as = phys_as;
 
 	entry->eax = g_phys_as | (virt_as << 8);
+	entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
 	entry->edx = 0;
 	cpuid_entry_override(entry, CPUID_8000_0008_EBX);
 	break;
@@ -859,6 +862,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		entry->ecx = entry->edx = 0;
 		break;
 	case 0x8000001a:
+		entry->eax &= GENMASK(2, 0);
+		entry->ebx = entry->ecx = entry->edx = 0;
+		break;
 	case 0x8000001e:
 		break;
 	/* Support memory encryption cpuid if host supports it */
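Aside (illustration only, not part of the patch): GENMASK(h, l) produces a mask covering bits h down to l inclusive, so entry->ebx &= ~GENMASK(27, 16) clears reserved bits 27:16 while leaving the rest intact. A userspace rendition; the kernel's definition lives in include/linux/bits.h, and this 32-bit variant is a simplification:

    #include <stdio.h>

    /* Simplified 32-bit rendition of the kernel's GENMASK(h, l). */
    #define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
    	unsigned int ebx = 0xdeadbeef;

    	printf("GENMASK(27, 16)        = %#x\n", GENMASK(27, 16)); /* 0x0fff0000 */
    	printf("ebx & ~GENMASK(27, 16) = %#x\n", ebx & ~GENMASK(27, 16));
    	return 0;
    }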
arch/x86/kvm/emulate.c
@@ -796,8 +796,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 			   ctxt->mode, linear);
 }
 
-static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
-			     enum x86emul_mode mode)
+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 {
 	ulong linear;
 	int rc;
@@ -807,41 +806,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
 
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->_eip = addr.ea;
 	return rc;
 }
 
-static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
 {
-	return assign_eip(ctxt, dst, ctxt->mode);
+	u64 efer;
+	struct desc_struct cs;
+	u16 selector;
+	u32 base3;
+
+	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+
+	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
+		/* Real mode. cpu must not have long mode active */
+		if (efer & EFER_LMA)
+			return X86EMUL_UNHANDLEABLE;
+		ctxt->mode = X86EMUL_MODE_REAL;
+		return X86EMUL_CONTINUE;
+	}
+
+	if (ctxt->eflags & X86_EFLAGS_VM) {
+		/* Protected/VM86 mode. cpu must not have long mode active */
+		if (efer & EFER_LMA)
+			return X86EMUL_UNHANDLEABLE;
+		ctxt->mode = X86EMUL_MODE_VM86;
+		return X86EMUL_CONTINUE;
+	}
+
+	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
+		return X86EMUL_UNHANDLEABLE;
+
+	if (efer & EFER_LMA) {
+		if (cs.l) {
+			/* Proper long mode */
+			ctxt->mode = X86EMUL_MODE_PROT64;
+		} else if (cs.d) {
+			/* 32 bit compatibility mode*/
+			ctxt->mode = X86EMUL_MODE_PROT32;
+		} else {
+			ctxt->mode = X86EMUL_MODE_PROT16;
+		}
+	} else {
+		/* Legacy 32 bit / 16 bit mode */
+		ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+	}
+
+	return X86EMUL_CONTINUE;
+}
+
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+	return assign_eip(ctxt, dst);
 }
 
-static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
-			  const struct desc_struct *cs_desc)
+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
 {
-	enum x86emul_mode mode = ctxt->mode;
-	int rc;
-
-#ifdef CONFIG_X86_64
-	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
-		if (cs_desc->l) {
-			u64 efer = 0;
-
-			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-			if (efer & EFER_LMA)
-				mode = X86EMUL_MODE_PROT64;
-		} else
-			mode = X86EMUL_MODE_PROT32; /* temporary value */
-	}
-#endif
-	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
-		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-	rc = assign_eip(ctxt, dst, mode);
-	if (rc == X86EMUL_CONTINUE)
-		ctxt->mode = mode;
-	return rc;
+	int rc = emulator_recalc_and_set_mode(ctxt);
+
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	return assign_eip(ctxt, dst);
 }
 
 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -2256,7 +2285,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
+	rc = assign_eip_far(ctxt, ctxt->src.val);
 	/* Error handling is not implemented. */
 	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
@@ -2337,7 +2366,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = assign_eip_far(ctxt, eip, &new_desc);
+	rc = assign_eip_far(ctxt, eip);
 	/* Error handling is not implemented. */
 	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
@@ -2957,6 +2986,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
 	ctxt->_eip = rdx;
+	ctxt->mode = usermode;
 	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
 
 	return X86EMUL_CONTINUE;
@@ -3553,7 +3583,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
+	rc = assign_eip_far(ctxt, ctxt->src.val);
 	if (rc != X86EMUL_CONTINUE)
 		goto fail;
 
@@ -3695,11 +3725,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
 
 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
 {
-	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
+	int cr_num = ctxt->modrm_reg;
+	int r;
+
+	if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
 		return emulate_gp(ctxt, 0);
 
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
+
+	if (cr_num == 0) {
+		/*
+		 * CR0 write might have updated CR0.PE and/or CR0.PG
+		 * which can affect the cpu's execution mode.
+		 */
+		r = emulator_recalc_and_set_mode(ctxt);
+		if (r != X86EMUL_CONTINUE)
+			return r;
+	}
+
 	return X86EMUL_CONTINUE;
 }
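Aside (illustration only, not part of the patch): the mode selection that emulator_recalc_and_set_mode() introduces is a pure decision over five CPU state bits. A minimal standalone sketch of that decision, with the booleans standing in for CR0.PE, EFLAGS.VM, EFER.LMA and the CS descriptor's L and D bits:

    #include <stdbool.h>
    #include <stdio.h>

    static const char *recalc_mode(bool cr0_pe, bool eflags_vm, bool efer_lma,
    			       bool cs_l, bool cs_d)
    {
    	if (!cr0_pe)
    		return efer_lma ? "unhandleable" : "REAL";
    	if (eflags_vm)
    		return efer_lma ? "unhandleable" : "VM86";
    	if (efer_lma)
    		return cs_l ? "PROT64" : (cs_d ? "PROT32" : "PROT16");
    	return cs_d ? "PROT32" : "PROT16";	/* legacy protected mode */
    }

    int main(void)
    {
    	/* e.g. 64-bit long mode: PE=1, VM=0, LMA=1, CS.L=1 */
    	printf("%s\n", recalc_mode(true, false, true, true, false));
    	return 0;
    }

Centralizing this in one helper is what lets em_sysexit and CR0 writes keep ctxt->mode in sync instead of each call site recomputing it ad hoc.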
arch/x86/kvm/trace.h
@@ -304,25 +304,29 @@ TRACE_EVENT(kvm_inj_virq,
  * Tracepoint for kvm interrupt injection:
  */
 TRACE_EVENT(kvm_inj_exception,
-	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
-	TP_ARGS(exception, has_error, error_code),
+	TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
+		 bool reinjected),
+	TP_ARGS(exception, has_error, error_code, reinjected),
 
 	TP_STRUCT__entry(
 		__field(	u8,	exception	)
 		__field(	u8,	has_error	)
 		__field(	u32,	error_code	)
+		__field(	bool,	reinjected	)
 	),
 
 	TP_fast_assign(
 		__entry->exception = exception;
 		__entry->has_error = has_error;
 		__entry->error_code = error_code;
+		__entry->reinjected = reinjected;
 	),
 
-	TP_printk("%s (0x%x)",
+	TP_printk("%s (0x%x)%s",
 		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
 		  /* FIXME: don't print error_code if not present */
-		  __entry->has_error ? __entry->error_code : 0)
+		  __entry->has_error ? __entry->error_code : 0,
+		  __entry->reinjected ? " [reinjected]" : "")
 );
 
 /*
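Aside (illustration only, not part of the patch): with the extra field, a re-delivered page fault would render in the trace buffer roughly as

    kvm_inj_exception: #PF (0x2) [reinjected]

where the error code 0x2 is just an example value and the trailing tag appears only when the reinjected field is set, distinguishing retries from first-time injections.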
arch/x86/kvm/vmx/nested.c
@@ -2232,7 +2232,8 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
 	}
 }
 
-static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
+				 struct vmcs12 *vmcs12)
 {
 	u32 exec_control, vmcs12_exec_ctrl;
 	u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
@@ -2243,7 +2244,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	/*
 	 * PIN CONTROLS
 	 */
-	exec_control = vmx_pin_based_exec_ctrl(vmx);
+	exec_control = __pin_controls_get(vmcs01);
 	exec_control |= (vmcs12->pin_based_vm_exec_control &
 			 ~PIN_BASED_VMX_PREEMPTION_TIMER);
 
@@ -2258,7 +2259,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	/*
 	 * EXEC CONTROLS
 	 */
-	exec_control = vmx_exec_control(vmx); /* L0's desires */
+	exec_control = __exec_controls_get(vmcs01); /* L0's desires */
 	exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
 	exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
 	exec_control &= ~CPU_BASED_TPR_SHADOW;
@@ -2295,17 +2296,20 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	 * SECONDARY EXEC CONTROLS
 	 */
 	if (cpu_has_secondary_exec_ctrls()) {
-		exec_control = vmx->secondary_exec_control;
+		exec_control = __secondary_exec_controls_get(vmcs01);
 
 		/* Take the following fields only from vmcs12 */
 		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+				  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 				  SECONDARY_EXEC_ENABLE_INVPCID |
 				  SECONDARY_EXEC_ENABLE_RDTSCP |
 				  SECONDARY_EXEC_XSAVES |
+				  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
 				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 				  SECONDARY_EXEC_APIC_REGISTER_VIRT |
-				  SECONDARY_EXEC_ENABLE_VMFUNC);
+				  SECONDARY_EXEC_ENABLE_VMFUNC |
+				  SECONDARY_EXEC_DESC);
 
 		if (nested_cpu_has(vmcs12,
 				   CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
 			vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
@@ -2341,9 +2345,15 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
 	 * on the related bits (if supported by the CPU) in the hope that
 	 * we can avoid VMWrites during vmx_set_efer().
+	 *
+	 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
+	 * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
+	 * do the same for L2.
 	 */
-	exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
-		~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
+	exec_control = __vm_entry_controls_get(vmcs01);
+	exec_control |= (vmcs12->vm_entry_controls &
+			 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
+	exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
 	if (cpu_has_load_ia32_efer()) {
 		if (guest_efer & EFER_LMA)
 			exec_control |= VM_ENTRY_IA32E_MODE;
@@ -2359,9 +2369,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
 	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
 	 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
 	 */
-	exec_control = vmx_vmexit_ctrl();
+	exec_control = __vm_exit_controls_get(vmcs01);
 	if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
 		exec_control |= VM_EXIT_LOAD_IA32_EFER;
+	else
+		exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
 	vm_exit_controls_set(vmx, exec_control);
 
 	/*
@@ -3370,7 +3382,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 
 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
 
-	prepare_vmcs02_early(vmx, vmcs12);
+	prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
 
 	if (from_vmentry) {
 		if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
arch/x86/kvm/vmx/vmx.h
@@ -386,9 +386,13 @@ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
 		vmx->loaded_vmcs->controls_shadow.lname = val; \
 	} \
 } \
+static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs) \
+{ \
+	return vmcs->controls_shadow.lname; \
+} \
 static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
 { \
-	return vmx->loaded_vmcs->controls_shadow.lname; \
+	return __##lname##_controls_get(vmx->loaded_vmcs); \
 } \
 static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
 { \
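Aside (illustration only, not part of the patch): the point of the new double-underscore variant is that it takes an explicit loaded_vmcs, so nested.c can read vmcs01's cached controls even while vmcs02 is the loaded one. A minimal userspace rendition of the token-pasting pattern, with all names invented for the sketch:

    #include <stdio.h>

    struct shadow { unsigned int pin, exec; };
    struct vm { struct shadow controls_shadow; };

    /* One macro stamps out __<name>_controls_get() and <name>_controls_get(). */
    #define BUILD_CONTROLS_ACCESSORS(lname)                                \
    static inline unsigned int __##lname##_controls_get(struct shadow *s) \
    {                                                                      \
    	return s->lname;                                               \
    }                                                                      \
    static inline unsigned int lname##_controls_get(struct vm *v)         \
    {                                                                      \
    	return __##lname##_controls_get(&v->controls_shadow);          \
    }

    BUILD_CONTROLS_ACCESSORS(pin)
    BUILD_CONTROLS_ACCESSORS(exec)

    int main(void)
    {
    	struct vm v = { .controls_shadow = { .pin = 0x16, .exec = 0x84006172 } };

    	printf("pin=%#x exec=%#x\n", pin_controls_get(&v), exec_controls_get(&v));
    	return 0;
    }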
arch/x86/kvm/x86.c
@@ -459,6 +459,7 @@ static int exception_class(int vector)
 #define EXCPT_TRAP		1
 #define EXCPT_ABORT		2
 #define EXCPT_INTERRUPT		3
+#define EXCPT_DB		4
 
 static int exception_type(int vector)
 {
@@ -469,8 +470,14 @@ static int exception_type(int vector)
 
 	mask = 1 << vector;
 
-	/* #DB is trap, as instruction watchpoints are handled elsewhere */
-	if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
+	/*
+	 * #DBs can be trap-like or fault-like, the caller must check other CPU
+	 * state, e.g. DR6, to determine whether a #DB is a trap or fault.
+	 */
+	if (mask & (1 << DB_VECTOR))
+		return EXCPT_DB;
+
+	if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
 		return EXCPT_TRAP;
 
 	if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
@@ -5353,6 +5360,11 @@ split_irqchip_unlock:
 		r = 0;
 		break;
 	case KVM_CAP_X86_USER_SPACE_MSR:
+		r = -EINVAL;
+		if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
+				     KVM_MSR_EXIT_REASON_UNKNOWN |
+				     KVM_MSR_EXIT_REASON_FILTER))
+			break;
 		kvm->arch.user_space_msr_mask = cap->args[0];
 		r = 0;
 		break;
@@ -5434,23 +5446,22 @@ err:
 	return r;
 }
 
-static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
+static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
+				       struct kvm_msr_filter *filter)
 {
-	struct kvm_msr_filter __user *user_msr_filter = argp;
 	struct kvm_x86_msr_filter *new_filter, *old_filter;
-	struct kvm_msr_filter filter;
 	bool default_allow;
 	bool empty = true;
 	int r = 0;
 	u32 i;
 
-	if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
-		return -EFAULT;
+	if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
-		empty &= !filter.ranges[i].nmsrs;
+	for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
+		empty &= !filter->ranges[i].nmsrs;
 
-	default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
+	default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
 	if (empty && !default_allow)
 		return -EINVAL;
 
@@ -5458,8 +5469,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 	if (!new_filter)
 		return -ENOMEM;
 
-	for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
-		r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+	for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
+		r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
 		if (r) {
 			kvm_free_msr_filter(new_filter);
 			return r;
@@ -5482,6 +5493,62 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 	return 0;
 }
 
+#ifdef CONFIG_KVM_COMPAT
+/* for KVM_X86_SET_MSR_FILTER */
+struct kvm_msr_filter_range_compat {
+	__u32 flags;
+	__u32 nmsrs;
+	__u32 base;
+	__u32 bitmap;
+};
+
+struct kvm_msr_filter_compat {
+	__u32 flags;
+	struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
+};
+
+#define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
+
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+			      unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct kvm *kvm = filp->private_data;
+	long r = -ENOTTY;
+
+	switch (ioctl) {
+	case KVM_X86_SET_MSR_FILTER_COMPAT: {
+		struct kvm_msr_filter __user *user_msr_filter = argp;
+		struct kvm_msr_filter_compat filter_compat;
+		struct kvm_msr_filter filter;
+		int i;
+
+		if (copy_from_user(&filter_compat, user_msr_filter,
+				   sizeof(filter_compat)))
+			return -EFAULT;
+
+		filter.flags = filter_compat.flags;
+		for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
+			struct kvm_msr_filter_range_compat *cr;
+
+			cr = &filter_compat.ranges[i];
+			filter.ranges[i] = (struct kvm_msr_filter_range) {
+				.flags = cr->flags,
+				.nmsrs = cr->nmsrs,
+				.base = cr->base,
+				.bitmap = (__u8 *)(ulong)cr->bitmap,
+			};
+		}
+
+		r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
+		break;
+	}
+	}
+
+	return r;
+}
+#endif
+
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg)
 {
@@ -5788,9 +5855,16 @@ set_pit2_out:
 	case KVM_SET_PMU_EVENT_FILTER:
 		r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
 		break;
-	case KVM_X86_SET_MSR_FILTER:
-		r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
+	case KVM_X86_SET_MSR_FILTER: {
+		struct kvm_msr_filter __user *user_msr_filter = argp;
+		struct kvm_msr_filter filter;
+
+		if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
+			return -EFAULT;
+
+		r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
 		break;
+	}
 	default:
 		r = -ENOTTY;
 	}
@@ -7560,6 +7634,12 @@ restart:
 		unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
+
+		/*
+		 * Note, EXCPT_DB is assumed to be fault-like as the emulator
+		 * only supports code breakpoints and general detect #DB, both
+		 * of which are fault-like.
+		 */
 		if (!ctxt->have_exception ||
 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
 			kvm_rip_write(vcpu, ctxt->eip);
@@ -8347,6 +8427,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 
 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
+	trace_kvm_inj_exception(vcpu->arch.exception.nr,
+				vcpu->arch.exception.has_error_code,
+				vcpu->arch.exception.error_code,
+				vcpu->arch.exception.injected);
+
 	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
 		vcpu->arch.exception.error_code = false;
 	kvm_x86_ops.queue_exception(vcpu);
@@ -8404,13 +8489,16 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
 
 	/* try to inject new event if pending */
 	if (vcpu->arch.exception.pending) {
-		trace_kvm_inj_exception(vcpu->arch.exception.nr,
-					vcpu->arch.exception.has_error_code,
-					vcpu->arch.exception.error_code);
-
-		vcpu->arch.exception.pending = false;
-		vcpu->arch.exception.injected = true;
-
+		/*
+		 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
+		 * value pushed on the stack.  Trap-like exception and all #DBs
+		 * leave RF as-is (KVM follows Intel's behavior in this regard;
+		 * AMD states that code breakpoint #DBs excplitly clear RF=0).
+		 *
+		 * Note, most versions of Intel's SDM and AMD's APM incorrectly
+		 * describe the behavior of General Detect #DBs, which are
+		 * fault-like.  They do _not_ set RF, a la code breakpoints.
+		 */
 		if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
 					     X86_EFLAGS_RF);
@@ -8424,6 +8512,10 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
 		}
 
 		kvm_inject_exception(vcpu);
+
+		vcpu->arch.exception.pending = false;
+		vcpu->arch.exception.injected = true;
+
 		can_inject = false;
 	}
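Aside (illustration only, not part of the patch): the compat handler exists because struct kvm_msr_filter_range carries a pointer (__u8 *bitmap), which is 4 bytes for 32-bit userspace but 8 bytes inside a 64-bit kernel, so the two sides disagree on the struct layout and on the ioctl number derived from its size. A host-side sketch of the mismatch, with the range struct approximated using stdint types:

    #include <stdio.h>
    #include <stdint.h>

    struct range_native {		/* as a 64-bit kernel sees it */
    	uint32_t flags;
    	uint32_t nmsrs;
    	uint32_t base;
    	uint8_t *bitmap;		/* 8 bytes on LP64, plus padding before it */
    };

    struct range_compat {		/* as 32-bit userspace laid it out */
    	uint32_t flags;
    	uint32_t nmsrs;
    	uint32_t base;
    	uint32_t bitmap;		/* 4-byte pointer, carried as a u32 */
    };

    int main(void)
    {
    	printf("native: %zu bytes, compat: %zu bytes\n",
    	       sizeof(struct range_native), sizeof(struct range_compat));
    	return 0;
    }

The compat ioctl widens each 32-bit bitmap value back into a real pointer before handing the filter to the common kvm_vm_ioctl_set_msr_filter().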
block/bfq-iosched.c
@@ -421,6 +421,8 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
  */
 void bfq_schedule_dispatch(struct bfq_data *bfqd)
 {
+	lockdep_assert_held(&bfqd->lock);
+
 	if (bfqd->queued != 0) {
 		bfq_log(bfqd, "schedule dispatch");
 		blk_mq_run_hw_queues(bfqd->queue, true);
@@ -6264,8 +6266,8 @@ bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 	bfq_bfqq_expire(bfqd, bfqq, true, reason);
 
 schedule_dispatch:
-	spin_unlock_irqrestore(&bfqd->lock, flags);
 	bfq_schedule_dispatch(bfqd);
+	spin_unlock_irqrestore(&bfqd->lock, flags);
 }
 
 /*
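Aside (illustration only, not part of the patch): the change encodes an invariant, bfq_schedule_dispatch() now reads bfqd->queued only while the caller holds bfqd->lock, so the timer path must call it before dropping the lock. A loose pthread analogy of the assert-plus-ordering pattern, with all names invented for the sketch:

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool lock_held;		/* stand-in for lockdep tracking */
    static int queued = 1;

    static void schedule_dispatch(void)
    {
    	assert(lock_held);	/* mirrors lockdep_assert_held(&bfqd->lock) */
    	if (queued)
    		printf("schedule dispatch\n");
    }

    int main(void)
    {
    	pthread_mutex_lock(&lock);
    	lock_held = true;
    	schedule_dispatch();	/* called before the unlock, as in the fix */
    	lock_held = false;
    	pthread_mutex_unlock(&lock);
    	return 0;
    }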
drivers/acpi/apei/ghes.c
@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
 	clear_fixmap(fixmap_idx);
 }
 
-int ghes_estatus_pool_init(int num_ghes)
+int ghes_estatus_pool_init(unsigned int num_ghes)
 {
 	unsigned long addr, len;
 	int rc;
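Aside (illustration only, not part of the patch): the signedness change matters because the pool length is derived from a multiplication on num_ghes that can exceed what 32-bit arithmetic holds. The constants below are hypothetical, chosen only to make the truncation visible:

    /* Demonstrate 32-bit product truncation of the kind the ghes fix targets. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned int num_ghes = 70000;
    	unsigned int prealloc = 65536;			/* bytes per source (example) */
    	unsigned int len32 = num_ghes * prealloc;	/* wraps modulo 2^32 */
    	unsigned long long len64 = (unsigned long long)num_ghes * prealloc;

    	printf("32-bit: %u, 64-bit: %llu\n", len32, len64);
    	return 0;
    }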
drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	mm = alloc->vma_vm_mm;
 
 	if (mm) {
-		mmap_read_lock(mm);
+		mmap_write_lock(mm);
 		vma = alloc->vma;
 	}
 
@@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 		trace_binder_alloc_page_end(alloc, index);
 	}
 	if (mm) {
-		mmap_read_unlock(mm);
+		mmap_write_unlock(mm);
 		mmput(mm);
 	}
 	return 0;
@@ -304,7 +304,7 @@ err_page_ptr_cleared:
 	}
 err_no_vma:
 	if (mm) {
-		mmap_read_unlock(mm);
+		mmap_write_unlock(mm);
 		mmput(mm);
 	}
 	return vma ? -ENOMEM : -ESRCH;
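Aside (illustration only, not part of the patch): the read-to-write flip is the whole fix, holding only the shared side of the mmap lock lets a concurrent munmap() tear down the vma underneath the allocator, which is the use-after-free the changelog names. A loose pthread analogy, names invented for the sketch:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void update_page_range(void)
    {
    	pthread_rwlock_wrlock(&mmap_lock);	/* was the read side pre-fix */
    	printf("touch vma and pages with teardown excluded\n");
    	pthread_rwlock_unlock(&mmap_lock);
    }

    int main(void)
    {
    	update_page_range();
    	return 0;
    }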
Some files were not shown because too many files have changed in this diff.