diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt index a4873e5e3e36..e30e184f50c7 100644 --- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt @@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 { 80 81 68 69 70 71 72 73 74 75 76 77>; - interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", + interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", "saif0", "saif1", "i2c0", "i2c1", "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; diff --git a/Makefile b/Makefile index b9d850d86366..ae5f1e62812f 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 14 -SUBLEVEL = 42 +SUBLEVEL = 43 EXTRAVERSION = NAME = Remembering Coco diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts index 1c6bd83bde5e..2ade35703d72 100644 --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts @@ -69,6 +69,10 @@ }; internal-regs { + rtc@10300 { + /* No crystal connected to the internal RTC */ + status = "disabled"; + }; serial@12000 { clock-frequency = <250000000>; status = "okay"; diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts index 526bfdbd87f9..f8922fb9bf50 100644 --- a/arch/arm/boot/dts/imx23-olinuxino.dts +++ b/arch/arm/boot/dts/imx23-olinuxino.dts @@ -12,6 +12,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include "imx23.dtsi" / { @@ -93,6 +94,7 @@ ahb@80080000 { usb0: usb@80080000 { + dr_mode = "host"; vbus-supply = <®_usb0_vbus>; status = "okay"; }; @@ -119,7 +121,7 @@ user { label = "green"; - gpios = <&gpio2 1 1>; + gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; }; }; }; diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi index cf3300a3071d..bfc327ff70af 100644 --- a/arch/arm/boot/dts/imx25.dtsi +++ b/arch/arm/boot/dts/imx25.dtsi @@ -411,6 +411,7 @@ pwm4: pwm@53fc8000 { compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; + #pwm-cells = <2>; reg = <0x53fc8000 0x4000>; clocks = <&clks 108>, <&clks 52>; clock-names = "ipg", "per"; diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index f8e9b20f6982..ee1a4da35cc4 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -803,7 +803,7 @@ 80 81 68 69 70 71 72 73 74 75 76 77>; - interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", + interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", "saif0", "saif1", "i2c0", "i2c1", "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index e0853ea02df2..75e748efa9c2 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -985,23 +985,6 @@ status = "disabled"; }; - vmmci: regulator-gpio { - compatible = "regulator-gpio"; - - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <2900000>; - regulator-name = "mmci-reg"; - regulator-type = "voltage"; - - startup-delay-us = <100>; - enable-active-high; - - states = <1800000 0x1 - 2900000 0x0>; - - status = "disabled"; - }; - mcde@a0350000 { compatible = "stericsson,mcde"; reg = <0xa0350000 0x1000>, /* MCDE */ diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index 6cb9b68e2188..0b668f87dd40 100644 --- 
a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -111,6 +111,21 @@ pinctrl-1 = <&i2c3_sleep_mode>; }; + vmmci: regulator-gpio { + compatible = "regulator-gpio"; + + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2900000>; + regulator-name = "mmci-reg"; + regulator-type = "voltage"; + + startup-delay-us = <100>; + enable-active-high; + + states = <1800000 0x1 + 2900000 0x0>; + }; + // External Micro SD slot sdi0_per1@80126000 { arm,primecell-periphid = <0x10480180>; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 97d5d21b7db7..5deaf3c1292f 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -146,8 +146,21 @@ }; vmmci: regulator-gpio { + compatible = "regulator-gpio"; + gpios = <&gpio7 4 0x4>; enable-gpio = <&gpio6 25 0x4>; + + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2900000>; + regulator-name = "mmci-reg"; + regulator-type = "voltage"; + + startup-delay-us = <100>; + enable-active-high; + + states = <1800000 0x1 + 2900000 0x0>; }; // External Micro SD slot diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 1d3153c7eb41..816db0bf2dd8 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -55,6 +55,7 @@ * The bits we set in HCR: * TAC: Trap ACTLR * TSC: Trap SMC + * TVM: Trap VM ops (until MMU and caches are on) * TSW: Trap cache operations by set/way * TWI: Trap WFI * TWE: Trap WFE @@ -68,8 +69,7 @@ */ #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ - HCR_TWE | HCR_SWIO | HCR_TIDCP) -#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) + HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP) /* System Control Register (SCTLR) bits */ #define SCTLR_TE (1 << 30) diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 661da11f76f4..53b3c4a50d5c 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -48,7 +48,9 @@ #define c13_TID_URO 26 /* Thread ID, User R/O */ #define c13_TID_PRIV 27 /* Thread ID, Privileged */ #define c14_CNTKCTL 28 /* Timer Control Register (PL1) */ -#define NR_CP15_REGS 29 /* Number of regs (incl. invalid) */ +#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */ +#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */ +#define NR_CP15_REGS 31 /* Number of regs (incl. 
invalid) */ #define ARM_EXCEPTION_RESET 0 #define ARM_EXCEPTION_UNDEFINED 1 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 098f7dd6d564..09af14999c9b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -101,6 +101,12 @@ struct kvm_vcpu_arch { /* The CPU type we expose to the VM */ u32 midr; + /* HYP trapping configuration */ + u32 hcr; + + /* Interrupt related fields */ + u32 irq_lines; /* IRQ and FIQ levels */ + /* Exception Information */ struct kvm_vcpu_fault_info fault; @@ -128,9 +134,6 @@ struct kvm_vcpu_arch { /* IO related fields */ struct kvm_decode mmio_decode; - /* Interrupt related fields */ - u32 irq_lines; /* IRQ and FIQ levels */ - /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 2d122adcdb22..7b362bc9c09a 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -114,11 +114,34 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) pmd_val(*pmd) |= L_PMD_S2_RDWR; } +/* Open coded p*d_addr_end that can deal with 64bit addresses */ +#define kvm_pgd_addr_end(addr, end) \ +({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#define kvm_pud_addr_end(addr,end) (end) + +#define kvm_pmd_addr_end(addr, end) \ +({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, - unsigned long size) +#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) + +static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { + return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; +} + +static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, + unsigned long size) +{ + if (!vcpu_has_cache_enabled(vcpu)) + kvm_flush_dcache_to_poc((void *)hva, size); + /* * If we are going to insert an instruction page and the icache is * either VIPT or PIPT, there is a potential problem where the host @@ -139,9 +162,10 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, } } -#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) #define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x)) +void stage2_flush_vm(struct kvm *kvm); + #endif /* !__ASSEMBLY__ */ #endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index ded041711beb..85598b5d1efd 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -174,6 +174,7 @@ int main(void) DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); + DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 78c0885d6501..c58a35116f63 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -23,6 +23,7 @@ #include <asm/kvm_host.h> #include <asm/kvm_emulate.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_mmu.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include 
<trace/events/kvm.h> @@ -205,6 +206,44 @@ done: } /* + * Generic accessor for VM registers. Only called as long as HCR_TVM + * is set. + */ +static bool access_vm_reg(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r) +{ + BUG_ON(!p->is_write); + + vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); + if (p->is_64bit) + vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); + + return true; +} + +/* + * SCTLR accessor. Only called as long as HCR_TVM is set. If the + * guest enables the MMU, we stop trapping the VM sys_regs and leave + * it in complete control of the caches. + * + * Used by the cpu-specific code. + */ +bool access_sctlr(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r) +{ + access_vm_reg(vcpu, p, r); + + if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ + vcpu->arch.hcr &= ~HCR_TVM; + stage2_flush_vm(vcpu->kvm); + } + + return true; +} + +/* * We could trap ID_DFR0 and tell the guest we don't support performance * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was * NAKed, so it will read the PMCR anyway. @@ -261,33 +300,36 @@ static const struct coproc_reg cp15_regs[] = { { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, NULL, reset_val, c1_CPACR, 0x00000000 }, - /* TTBR0/TTBR1: swapped by interrupt.S. */ - { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, - { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, - - /* TTBCR: swapped by interrupt.S. */ + /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */ + { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 }, + { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32, + access_vm_reg, reset_unknown, c2_TTBR0 }, + { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32, + access_vm_reg, reset_unknown, c2_TTBR1 }, { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_val, c2_TTBCR, 0x00000000 }, + access_vm_reg, reset_val, c2_TTBCR, 0x00000000 }, + { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 }, + /* DACR: swapped by interrupt.S. */ { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c3_DACR }, + access_vm_reg, reset_unknown, c3_DACR }, /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c5_DFSR }, + access_vm_reg, reset_unknown, c5_DFSR }, { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c5_IFSR }, + access_vm_reg, reset_unknown, c5_IFSR }, { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c5_ADFSR }, + access_vm_reg, reset_unknown, c5_ADFSR }, { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c5_AIFSR }, + access_vm_reg, reset_unknown, c5_AIFSR }, /* DFAR/IFAR: swapped by interrupt.S. */ { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c6_DFAR }, + access_vm_reg, reset_unknown, c6_DFAR }, { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_unknown, c6_IFAR }, + access_vm_reg, reset_unknown, c6_IFAR }, /* PAR swapped by interrupt.S */ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, @@ -324,9 +366,15 @@ static const struct coproc_reg cp15_regs[] = { /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c10_PRRR}, + access_vm_reg, reset_unknown, c10_PRRR}, { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c10_NMRR}, + access_vm_reg, reset_unknown, c10_NMRR}, + + /* AMAIR0/AMAIR1: swapped by interrupt.S. 
*/ + { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32, + access_vm_reg, reset_unknown, c10_AMAIR0}, + { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32, + access_vm_reg, reset_unknown, c10_AMAIR1}, /* VBAR: swapped by interrupt.S. */ { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, @@ -334,7 +382,7 @@ static const struct coproc_reg cp15_regs[] = { /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, - NULL, reset_val, c13_CID, 0x00000000 }, + access_vm_reg, reset_val, c13_CID, 0x00000000 }, { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, NULL, reset_unknown, c13_TID_URW }, { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, @@ -443,7 +491,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct coproc_params params; - params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; + params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); params.is_64bit = true; @@ -451,7 +499,7 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf; params.Op2 = 0; params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; - params.CRn = 0; + params.CRm = 0; return emulate_cp15(vcpu, ¶ms); } diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 0461d5c8d3de..1a44bbe39643 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -58,8 +58,8 @@ static inline void print_cp_instr(const struct coproc_params *p) { /* Look, we even formatted it for you to paste into the table! */ if (p->is_64bit) { - kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n", - p->CRm, p->Op1, p->is_write ? "write" : "read"); + kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n", + p->CRn, p->Op1, p->is_write ? "write" : "read"); } else { kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," " func_%s },\n", @@ -135,13 +135,13 @@ static inline int cmp_reg(const struct coproc_reg *i1, return -1; if (i1->CRn != i2->CRn) return i1->CRn - i2->CRn; - if (i1->is_64 != i2->is_64) - return i2->is_64 - i1->is_64; if (i1->CRm != i2->CRm) return i1->CRm - i2->CRm; if (i1->Op1 != i2->Op1) return i1->Op1 - i2->Op1; - return i1->Op2 - i2->Op2; + if (i1->Op2 != i2->Op2) + return i1->Op2 - i2->Op2; + return i2->is_64 - i1->is_64; } @@ -153,4 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, #define is64 .is_64 = true #define is32 .is_64 = false +bool access_sctlr(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r); + #endif /* __ARM_KVM_COPROC_LOCAL_H__ */ diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index bb0cac1410cc..e6f4ae48bda9 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c @@ -34,7 +34,7 @@ static const struct coproc_reg a15_regs[] = { /* SCTLR: swapped by interrupt.S. */ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_val, c1_SCTLR, 0x00C50078 }, + access_sctlr, reset_val, c1_SCTLR, 0x00C50078 }, }; static struct kvm_coproc_target_table a15_target_table = { diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c index 1df767331588..17fc7cd479d3 100644 --- a/arch/arm/kvm/coproc_a7.c +++ b/arch/arm/kvm/coproc_a7.c @@ -37,7 +37,7 @@ static const struct coproc_reg a7_regs[] = { /* SCTLR: swapped by interrupt.S. 
*/ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_val, c1_SCTLR, 0x00C50878 }, + access_sctlr, reset_val, c1_SCTLR, 0x00C50878 }, }; static struct kvm_coproc_target_table a7_target_table = { diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 2786eae10c0d..b23a59c1c522 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -38,6 +38,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { + vcpu->arch.hcr = HCR_GUEST_MASK; return 0; } diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 6f18695a09cb..76af93025574 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -303,13 +303,17 @@ vcpu .req r0 @ vcpu pointer always in r0 mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL mrrc p15, 0, r4, r5, c7 @ PAR + mrc p15, 0, r6, c10, c3, 0 @ AMAIR0 + mrc p15, 0, r7, c10, c3, 1 @ AMAIR1 .if \store_to_vcpu == 0 - push {r2,r4-r5} + push {r2,r4-r7} .else str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] add r12, vcpu, #CP15_OFFSET(c7_PAR) strd r4, r5, [r12] + str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)] + str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] .endif .endm @@ -322,15 +326,19 @@ vcpu .req r0 @ vcpu pointer always in r0 */ .macro write_cp15_state read_from_vcpu .if \read_from_vcpu == 0 - pop {r2,r4-r5} + pop {r2,r4-r7} .else ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] add r12, vcpu, #CP15_OFFSET(c7_PAR) ldrd r4, r5, [r12] + ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)] + ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] .endif mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL mcrr p15, 0, r4, r5, c7 @ PAR + mcr p15, 0, r6, c10, c3, 0 @ AMAIR0 + mcr p15, 0, r7, c10, c3, 1 @ AMAIR1 .if \read_from_vcpu == 0 pop {r2-r12} @@ -597,17 +605,14 @@ vcpu .req r0 @ vcpu pointer always in r0 /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ .macro configure_hyp_role operation - mrc p15, 4, r2, c1, c1, 0 @ HCR - bic r2, r2, #HCR_VIRT_EXCP_MASK - ldr r3, =HCR_GUEST_MASK .if \operation == vmentry - orr r2, r2, r3 + ldr r2, [vcpu, #VCPU_HCR] ldr r3, [vcpu, #VCPU_IRQ_LINES] orr r2, r2, r3 .else - bic r2, r2, r3 + mov r2, #0 .endif - mcr p15, 4, r2, c1, c1, 0 + mcr p15, 4, r2, c1, c1, 0 @ HCR .endm .macro load_vcpu diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 575d7904305b..c93ef38f9cb0 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -147,7 +147,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, pgd = pgdp + pgd_index(addr); pud = pud_offset(pgd, addr); if (pud_none(*pud)) { - addr = pud_addr_end(addr, end); + addr = kvm_pud_addr_end(addr, end); continue; } @@ -157,13 +157,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, * move on. 
*/ clear_pud_entry(kvm, pud, addr); - addr = pud_addr_end(addr, end); + addr = kvm_pud_addr_end(addr, end); continue; } pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { - addr = pmd_addr_end(addr, end); + addr = kvm_pmd_addr_end(addr, end); continue; } @@ -178,10 +178,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, */ if (kvm_pmd_huge(*pmd) || page_empty(pte)) { clear_pmd_entry(kvm, pmd, addr); - next = pmd_addr_end(addr, end); + next = kvm_pmd_addr_end(addr, end); if (page_empty(pmd) && !page_empty(pud)) { clear_pud_entry(kvm, pud, addr); - next = pud_addr_end(addr, end); + next = kvm_pud_addr_end(addr, end); } } @@ -189,6 +189,99 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, } } +static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, + phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); + kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); + } + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, + phys_addr_t addr, phys_addr_t end) +{ + pmd_t *pmd; + phys_addr_t next; + + pmd = pmd_offset(pud, addr); + do { + next = kvm_pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (kvm_pmd_huge(*pmd)) { + hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); + kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); + } else { + stage2_flush_ptes(kvm, pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); +} + +static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, + phys_addr_t addr, phys_addr_t end) +{ + pud_t *pud; + phys_addr_t next; + + pud = pud_offset(pgd, addr); + do { + next = kvm_pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); + kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); + } else { + stage2_flush_pmds(kvm, pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); +} + +static void stage2_flush_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t end = addr + PAGE_SIZE * memslot->npages; + phys_addr_t next; + pgd_t *pgd; + + pgd = kvm->arch.pgd + pgd_index(addr); + do { + next = kvm_pgd_addr_end(addr, end); + stage2_flush_puds(kvm, pgd, addr, next); + } while (pgd++, addr = next, addr != end); +} + +/** + * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 + * @kvm: The struct kvm pointer + * + * Go through the stage 2 page tables and invalidate any cache lines + * backing memory already mapped to the VM. 
+ */ +void stage2_flush_vm(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) + stage2_flush_memslot(kvm, memslot); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); +} + /** * free_boot_hyp_pgd - free HYP boot page tables * @@ -717,7 +810,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_set_s2pmd_writable(&new_pmd); kvm_set_pfn_dirty(pfn); } - coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE); + coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE); ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); } else { pte_t new_pte = pfn_pte(pfn, PAGE_S2); @@ -725,7 +818,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, kvm_set_s2pte_writable(&new_pte); kvm_set_pfn_dirty(pfn); } - coherent_icache_guest_page(kvm, hva, PAGE_SIZE); + coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false); } diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 271b5e971568..6adf5913a7ac 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -449,10 +449,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx) return; } #endif - if (rm != ARM_R0) - emit(ARM_MOV_R(ARM_R0, rm), ctx); + + /* + * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4 + * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into + * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm + * before using it as a source for ARM_R1. + * + * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is + * ARM_R5 (r_X) so there is no particular register overlap + * issues. 
+ */ if (rn != ARM_R1) emit(ARM_MOV_R(ARM_R1, rn), ctx); + if (rm != ARM_R0) + emit(ARM_MOV_R(ARM_R0, rm), ctx); ctx->seen |= SEEN_CALL; emit_mov_i(ARM_R3, (u32)jit_udiv, ctx); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 0eb398655378..00fbaa75dc7b 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -62,6 +62,7 @@ * RW: 64bit by default, can be overriden for 32bit VMs * TAC: Trap ACTLR * TSC: Trap SMC + * TVM: Trap VM ops (until M+C set in SCTLR_EL1) * TSW: Trap cache operations by set/way * TWE: Trap WFE * TWI: Trap WFI @@ -74,7 +75,7 @@ * SWIO: Turn set/way invalidates into set/way clean+invalidate */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ - HCR_BSU_IS | HCR_FB | HCR_TAC | \ + HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_IMO | HCR_FMO | \ HCR_SWIO | HCR_TIDCP | HCR_RW) #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index b25763bc0ec4..9fcd54b1e16d 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -79,7 +79,8 @@ #define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ #define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ #define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ -#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ #define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ #define NR_CP15_REGS (NR_SYS_REGS * 2) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 7f1f9408ff66..7d29847a893b 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -106,7 +106,6 @@ static inline bool kvm_is_write_fault(unsigned long esr) return true; } -static inline void kvm_clean_dcache_area(void *addr, size_t size) {} static inline void kvm_clean_pgd(pgd_t *pgd) {} static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} static inline void kvm_clean_pte(pte_t *pte) {} @@ -122,11 +121,25 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) pmd_val(*pmd) |= PMD_S2_RDWR; } +#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end) +#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) +#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) + struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, - unsigned long size) +#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) + +static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { + return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; +} + +static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, + unsigned long size) +{ + if (!vcpu_has_cache_enabled(vcpu)) + kvm_flush_dcache_to_poc((void *)hva, size); + if (!icache_is_aliasing()) { /* PIPT */ flush_icache_range(hva, hva + size); } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ @@ -135,8 +148,9 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, } } -#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) +void stage2_flush_vm(struct kvm *kvm); + #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 
2c56012cb2d2..b0d1512acf08 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -630,9 +630,15 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) * whole of Stage-1. Weep... */ tlbi ipas2e1is, x1 - dsb sy + /* + * We have to ensure completion of the invalidation at Stage-2, + * since a table walk on another CPU could refill a TLB with a + * complete (S1 + S2) walk based on the old Stage-2 mapping if + * the Stage-1 invalidation happened first. + */ + dsb ish tlbi vmalle1is - dsb sy + dsb ish isb msr vttbr_el2, xzr @@ -643,7 +649,7 @@ ENTRY(__kvm_flush_vm_context) dsb ishst tlbi alle1is ic ialluis - dsb sy + dsb ish ret ENDPROC(__kvm_flush_vm_context) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 02e9d09e1d80..03244582bc55 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -27,6 +27,7 @@ #include <asm/kvm_host.h> #include <asm/kvm_emulate.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_mmu.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <trace/events/kvm.h> @@ -121,6 +122,48 @@ done: } /* + * Generic accessor for VM registers. Only called as long as HCR_TVM + * is set. + */ +static bool access_vm_reg(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + unsigned long val; + + BUG_ON(!p->is_write); + + val = *vcpu_reg(vcpu, p->Rt); + if (!p->is_aarch32) { + vcpu_sys_reg(vcpu, r->reg) = val; + } else { + vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL; + if (!p->is_32bit) + vcpu_cp15(vcpu, r->reg + 1) = val >> 32; + } + return true; +} + +/* + * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the + * guest enables the MMU, we stop trapping the VM sys_regs and leave + * it in complete control of the caches. + */ +static bool access_sctlr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + access_vm_reg(vcpu, p, r); + + if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ + vcpu->arch.hcr_el2 &= ~HCR_TVM; + stage2_flush_vm(vcpu->kvm); + } + + return true; +} + +/* * We could trap ID_DFR0 and tell the guest we don't support performance * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was * NAKed, so it will read the PMCR anyway. 
@@ -185,32 +228,32 @@ static const struct sys_reg_desc sys_reg_descs[] = { NULL, reset_mpidr, MPIDR_EL1 }, /* SCTLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), - NULL, reset_val, SCTLR_EL1, 0x00C50078 }, + access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, /* CPACR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), NULL, reset_val, CPACR_EL1, 0 }, /* TTBR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), - NULL, reset_unknown, TTBR0_EL1 }, + access_vm_reg, reset_unknown, TTBR0_EL1 }, /* TTBR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), - NULL, reset_unknown, TTBR1_EL1 }, + access_vm_reg, reset_unknown, TTBR1_EL1 }, /* TCR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), - NULL, reset_val, TCR_EL1, 0 }, + access_vm_reg, reset_val, TCR_EL1, 0 }, /* AFSR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), - NULL, reset_unknown, AFSR0_EL1 }, + access_vm_reg, reset_unknown, AFSR0_EL1 }, /* AFSR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), - NULL, reset_unknown, AFSR1_EL1 }, + access_vm_reg, reset_unknown, AFSR1_EL1 }, /* ESR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), - NULL, reset_unknown, ESR_EL1 }, + access_vm_reg, reset_unknown, ESR_EL1 }, /* FAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), - NULL, reset_unknown, FAR_EL1 }, + access_vm_reg, reset_unknown, FAR_EL1 }, /* PAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), NULL, reset_unknown, PAR_EL1 }, @@ -224,17 +267,17 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* MAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), - NULL, reset_unknown, MAIR_EL1 }, + access_vm_reg, reset_unknown, MAIR_EL1 }, /* AMAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), - NULL, reset_amair_el1, AMAIR_EL1 }, + access_vm_reg, reset_amair_el1, AMAIR_EL1 }, /* VBAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), NULL, reset_val, VBAR_EL1, 0 }, /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), - NULL, reset_val, CONTEXTIDR_EL1, 0 }, + access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, /* TPIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), NULL, reset_unknown, TPIDR_EL1 }, @@ -305,14 +348,32 @@ static const struct sys_reg_desc sys_reg_descs[] = { NULL, reset_val, FPEXC32_EL2, 0x70 }, }; -/* Trapped cp15 registers */ +/* + * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, + * depending on the way they are accessed (as a 32bit or a 64bit + * register). 
+ */ static const struct sys_reg_desc cp15_regs[] = { + { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, + { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, + { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, + { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, + { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, + { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, + /* * DC{C,I,CI}SW operations: */ { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, + { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake }, @@ -326,6 +387,14 @@ static const struct sys_reg_desc cp15_regs[] = { { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake }, + + { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, + { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, + { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, + { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, + { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, + + { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, }; /* Target specific emulation tables */ @@ -437,6 +506,8 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) u32 hsr = kvm_vcpu_get_hsr(vcpu); int Rt2 = (hsr >> 10) & 0xf; + params.is_aarch32 = true; + params.is_32bit = false; params.CRm = (hsr >> 1) & 0xf; params.Rt = (hsr >> 5) & 0xf; params.is_write = ((hsr & 1) == 0); @@ -480,6 +551,8 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); + params.is_aarch32 = true; + params.is_32bit = true; params.CRm = (hsr >> 1) & 0xf; params.Rt = (hsr >> 5) & 0xf; params.is_write = ((hsr & 1) == 0); @@ -549,6 +622,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) struct sys_reg_params params; unsigned long esr = kvm_vcpu_get_hsr(vcpu); + params.is_aarch32 = false; + params.is_32bit = false; params.Op0 = (esr >> 20) & 3; params.Op1 = (esr >> 14) & 0x7; params.CRn = (esr >> 10) & 0xf; diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index d50d3722998e..d411e251412c 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -30,6 +30,8 @@ struct sys_reg_params { u8 Op2; u8 Rt; bool is_write; + bool is_aarch32; + bool is_32bit; /* Only valid if is_aarch32 is true */ }; struct sys_reg_desc { diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 2a86c65d873b..97c7a52dfb4a 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -63,19 +63,15 @@ #define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val)) /* - * printf() format helpers + * printf() format 
helper. This macros is a workaround for the difficulties + * with emitting 64-bit integers and 64-bit pointers with the same code + * for both 32-bit and 64-bit hosts. */ /* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */ #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i) -#if ACPI_MACHINE_WIDTH == 64 -#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i) -#else -#define ACPI_FORMAT_NATIVE_UINT(i) 0, (i) -#endif - /* * Macros for moving data around to/from buffers that are possibly unaligned. * If the hardware supports the transfer of unaligned data, just do the store. diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 5205edcf2c01..fe79296fdd9e 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n", obj_desc, - ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address), + ACPI_FORMAT_UINT64(obj_desc->region.address), obj_desc->region.length)); /* Now the address and length are valid for this opregion */ @@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_NOT_EXIST); } - obj_desc->region.address = - (acpi_physical_address) ACPI_TO_INTEGER(table); + obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table); obj_desc->region.length = table->length; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n", obj_desc, - ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address), + ACPI_FORMAT_UINT64(obj_desc->region.address), obj_desc->region.length)); /* Now the address and length are valid for this opregion */ diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index cd4b231ae760..ee8ec4bf0611 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -272,7 +272,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", ®ion_obj->region.handler->address_space, handler, - ACPI_FORMAT_NATIVE_UINT(address), + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(region_obj->region. space_id))); diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 4d046faac48c..b64fb68aa5d3 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -622,8 +622,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) acpi_os_printf("\n"); } else { acpi_os_printf(" base %8.8X%8.8X Length %X\n", - ACPI_FORMAT_NATIVE_UINT(obj_desc->region. - address), + ACPI_FORMAT_UINT64(obj_desc->region. + address), obj_desc->region.length); } break; diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 49fb742d61b9..98af39f0d677 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, } ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD, - " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n", + " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n", acpi_ut_get_region_name(rgn_desc->region. space_id), rgn_desc->region.space_id, obj_desc->common_field.access_byte_width, obj_desc->common_field.base_byte_offset, - field_datum_byte_offset, ACPI_CAST_PTR(void, - (rgn_desc-> - region. 
- address + - region_offset)))); + field_datum_byte_offset, + ACPI_FORMAT_UINT64(rgn_desc->region.address + + region_offset))); /* Invoke the appropriate address_space/op_region handler */ diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 9d28867e60dc..cf4022359dc7 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -181,7 +181,7 @@ acpi_ex_system_memory_space_handler(u32 function, if (!mem_info->mapped_logical_address) { ACPI_ERROR((AE_INFO, "Could not map memory at 0x%8.8X%8.8X, size %u", - ACPI_FORMAT_NATIVE_UINT(address), + ACPI_FORMAT_UINT64(address), (u32) map_length)); mem_info->mapped_length = 0; return_ACPI_STATUS(AE_NO_MEMORY); @@ -202,8 +202,7 @@ acpi_ex_system_memory_space_handler(u32 function, ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n", - bit_width, function, - ACPI_FORMAT_NATIVE_UINT(address))); + bit_width, function, ACPI_FORMAT_UINT64(address))); /* * Perform the memory read or write @@ -318,8 +317,7 @@ acpi_ex_system_io_space_handler(u32 function, ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n", - bit_width, function, - ACPI_FORMAT_NATIVE_UINT(address))); + bit_width, function, ACPI_FORMAT_UINT64(address))); /* Decode the function parameter */ diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index eab70d58852a..fae57584a182 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) byte_width = ACPI_DIV_8(bit_width); last_address = address + byte_width - 1; - ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X", - ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void, - last_address), - byte_width)); + ACPI_DEBUG_PRINT((ACPI_DB_IO, + "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X", + ACPI_FORMAT_UINT64(address), + ACPI_FORMAT_UINT64(last_address), byte_width)); /* Maximum 16-bit address in I/O space */ if (last_address > ACPI_UINT16_MAX) { ACPI_ERROR((AE_INFO, - "Illegal I/O port address/length above 64K: %p/0x%X", - ACPI_CAST_PTR(void, address), byte_width)); + "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X", + ACPI_FORMAT_UINT64(address), byte_width)); return_ACPI_STATUS(AE_LIMIT); } @@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) if (acpi_gbl_osi_data >= port_info->osi_dependency) { ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)", - ACPI_CAST_PTR(void, address), + "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)", + ACPI_FORMAT_UINT64(address), byte_width, port_info->name, port_info->start, port_info->end)); diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 48b9c6f12643..fc82c532090f 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, switch (type) { case ACPI_TYPE_PROCESSOR: - acpi_os_printf("ID %02X Len %02X Addr %p\n", + acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n", obj_desc->processor.proc_id, obj_desc->processor.length, - ACPI_CAST_PTR(void, - obj_desc->processor. - address)); + ACPI_FORMAT_UINT64(obj_desc->processor. 
+ address)); break; case ACPI_TYPE_DEVICE: @@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, space_id)); if (obj_desc->region.flags & AOPOBJ_DATA_VALID) { acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n", - ACPI_FORMAT_NATIVE_UINT - (obj_desc->region.address), + ACPI_FORMAT_UINT64(obj_desc-> + region. + address), obj_desc->region.length); } else { acpi_os_printf diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 634357d51fe9..c4d097700a86 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -294,8 +294,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "%4.4s %p Attempted physical table override failed", table_header->signature, - ACPI_CAST_PTR(void, - table_desc->address))); + ACPI_PHYSADDR_TO_PTR(table_desc->address))); return (NULL); } @@ -311,7 +310,7 @@ finish_override: ACPI_INFO((AE_INFO, "%4.4s %p %s table override, new table: %p", table_header->signature, - ACPI_CAST_PTR(void, table_desc->address), + ACPI_PHYSADDR_TO_PTR(table_desc->address), override_type, new_table)); /* We can now unmap/delete the original table (if fully mapped) */ diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index 6866e767ba90..2957ed50b552 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -127,16 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address, { struct acpi_table_header local_header; - /* - * The reason that the Address is cast to a void pointer is so that we - * can use %p which will work properly on both 32-bit and 64-bit hosts. - */ if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) { /* FACS only has signature and length fields */ - ACPI_INFO((AE_INFO, "%4.4s %p %06X", - header->signature, ACPI_CAST_PTR(void, address), + ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X", + header->signature, ACPI_FORMAT_UINT64(address), header->length)); } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) { @@ -147,8 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address, header)->oem_id, ACPI_OEM_ID_SIZE); acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE); - ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)", - ACPI_CAST_PTR(void, address), + ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)", + ACPI_FORMAT_UINT64(address), (ACPI_CAST_PTR(struct acpi_table_rsdp, header)-> revision > 0) ? 
ACPI_CAST_PTR(struct acpi_table_rsdp, @@ -162,8 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address, acpi_tb_cleanup_table_header(&local_header, header); ACPI_INFO((AE_INFO, - "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)", - local_header.signature, ACPI_CAST_PTR(void, address), + "%-4.4s 0x%8.8X%8.8X" + " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)", + local_header.signature, ACPI_FORMAT_UINT64(address), local_header.length, local_header.revision, local_header.oem_id, local_header.oem_table_id, local_header.oem_revision, diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 1bc879ec83d4..4cca6b797808 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -227,8 +227,8 @@ acpi_tb_install_table(acpi_physical_address address, table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); if (!table) { ACPI_ERROR((AE_INFO, - "Could not map memory for table [%s] at %p", - signature, ACPI_CAST_PTR(void, address))); + "Could not map memory for table [%s] at %8.8X%8.8X", + signature, ACPI_FORMAT_UINT64(address))); return; } diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 60b5a871833c..daad59d76e67 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -184,11 +184,10 @@ static acpi_status acpi_tb_load_namespace(void) * be useful for debugging ACPI problems on some machines. */ if (acpi_gbl_disable_ssdt_table_load) { - ACPI_INFO((AE_INFO, "Ignoring %4.4s at %p", + ACPI_INFO((AE_INFO, "Ignoring %4.4s at %8.8X%8.8X", acpi_gbl_root_table_list.tables[i].signature. - ascii, ACPI_CAST_PTR(void, - acpi_gbl_root_table_list. - tables[i].address))); + ascii, ACPI_FORMAT_UINT64(acpi_gbl_root_table_list. + tables[i].address))); continue; } diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index e4e1468877c3..01bf7ebef7a4 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -111,7 +111,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) * ******************************************************************************/ -acpi_status __init acpi_find_root_pointer(acpi_size *table_address) +acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address) { u8 *table_ptr; u8 *mem_rover; @@ -169,7 +169,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address) physical_address += (u32) ACPI_PTR_DIFF(mem_rover, table_ptr); - *table_address = physical_address; + *table_address = + (acpi_physical_address) physical_address; return_ACPI_STATUS(AE_OK); } } @@ -202,7 +203,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address) (ACPI_HI_RSDP_WINDOW_BASE + ACPI_PTR_DIFF(mem_rover, table_ptr)); - *table_address = physical_address; + *table_address = (acpi_physical_address) physical_address; return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index 2c2b6ae5dfc4..3a02b65a1d9e 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id, acpi_gbl_address_range_list[space_id] = range_info; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, - "\nAdded [%4.4s] address range: 0x%p-0x%p\n", + "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n", acpi_ut_get_node_name(range_info->region_node), - ACPI_CAST_PTR(void, address), - ACPI_CAST_PTR(void, range_info->end_address))); + ACPI_FORMAT_UINT64(address), + 
ACPI_FORMAT_UINT64(range_info->end_address))); (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(AE_OK); @@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id, } ACPI_DEBUG_PRINT((ACPI_DB_NAMES, - "\nRemoved [%4.4s] address range: 0x%p-0x%p\n", + "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n", acpi_ut_get_node_name(range_info-> region_node), - ACPI_CAST_PTR(void, - range_info-> - start_address), - ACPI_CAST_PTR(void, - range_info-> - end_address))); + ACPI_FORMAT_UINT64(range_info-> + start_address), + ACPI_FORMAT_UINT64(range_info-> + end_address))); ACPI_FREE(range_info); return_VOID; @@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id, region_node); ACPI_WARNING((AE_INFO, - "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)", + "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)", acpi_ut_get_region_name(space_id), - ACPI_CAST_PTR(void, address), - ACPI_CAST_PTR(void, end_address), - ACPI_CAST_PTR(void, - range_info-> - start_address), - ACPI_CAST_PTR(void, - range_info-> - end_address), + ACPI_FORMAT_UINT64(address), + ACPI_FORMAT_UINT64(end_address), + ACPI_FORMAT_UINT64(range_info-> + start_address), + ACPI_FORMAT_UINT64(range_info-> + end_address), pathname)); ACPI_FREE(pathname); } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f1fc14c33be5..a03e18f5e562 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -800,6 +800,7 @@ static struct class gpio_class = { */ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { + struct gpio_chip *chip; unsigned long flags; int status; const char *ioname = NULL; @@ -817,8 +818,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) return -EINVAL; } + chip = desc->chip; + mutex_lock(&sysfs_lock); + /* check if chip is being removed */ + if (!chip || !chip->exported) { + status = -ENODEV; + goto fail_unlock; + } + spin_lock_irqsave(&gpio_lock, flags); if (!test_bit(FLAG_REQUESTED, &desc->flags) || test_bit(FLAG_EXPORT, &desc->flags)) { @@ -1057,12 +1066,15 @@ static void gpiochip_unexport(struct gpio_chip *chip) { int status; struct device *dev; + struct gpio_desc *desc; + unsigned int i; mutex_lock(&sysfs_lock); dev = class_find_device(&gpio_class, NULL, chip, match_export); if (dev) { put_device(dev); device_unregister(dev); + /* prevent further gpiod exports */ chip->exported = false; status = 0; } else @@ -1071,6 +1083,13 @@ static void gpiochip_unexport(struct gpio_chip *chip) if (status) chip_dbg(chip, "%s: status %d\n", __func__, status); + + /* unregister gpiod class devices owned by sysfs */ + for (i = 0; i < chip->ngpio; i++) { + desc = &chip->desc[i]; + if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) + gpiod_free(desc); + } } static int __init gpiolib_sysfs_init(void) @@ -1265,6 +1284,8 @@ int gpiochip_remove(struct gpio_chip *chip) int status = 0; unsigned id; + gpiochip_unexport(chip); + spin_lock_irqsave(&gpio_lock, flags); gpiochip_remove_pin_ranges(chip); @@ -1286,9 +1307,6 @@ int gpiochip_remove(struct gpio_chip *chip) spin_unlock_irqrestore(&gpio_lock, flags); - if (status == 0) - gpiochip_unexport(chip); - return status; } EXPORT_SYMBOL_GPL(gpiochip_remove); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index af49b24d14cb..1eb49941faff 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -815,12 +815,28 @@ static int intel_dual_link_lvds_callback(const 
struct dmi_system_id *id) static const struct dmi_system_id intel_dual_link_lvds[] = { { .callback = intel_dual_link_lvds_callback, - .ident = "Apple MacBook Pro (Core i5/i7 Series)", + .ident = "Apple MacBook Pro 15\" (2010)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"), + }, + }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2011)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), }, }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2012)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 64d6cfba9952..d625a14ec038 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1173,7 +1173,7 @@ static struct radeon_asic rs780_asic = { static struct radeon_asic_ring rv770_uvd_ring = { .ib_execute = &uvd_v1_0_ib_execute, .emit_fence = &uvd_v2_2_fence_emit, - .emit_semaphore = &uvd_v1_0_semaphore_emit, + .emit_semaphore = &uvd_v2_2_semaphore_emit, .cs_parse = &radeon_uvd_cs_parse, .ring_test = &uvd_v1_0_ring_test, .ib_test = &uvd_v1_0_ib_test, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index ae637cfda783..f6e19edb1166 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -853,6 +853,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int uvd_v2_2_resume(struct radeon_device *rdev); void uvd_v2_2_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); /* uvd v3.1 */ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 414e07928693..bcfac7624550 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -350,6 +350,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) return 0; } +static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, + unsigned stream_type) +{ + switch (stream_type) { + case 0: /* H264 */ + case 1: /* VC1 */ + /* always supported */ + return 0; + + case 3: /* MPEG2 */ + case 4: /* MPEG4 */ + /* only since UVD 3 */ + if (p->rdev->family >= CHIP_PALM) + return 0; + + /* fall through */ + default: + DRM_ERROR("UVD codec not supported by hardware %d!\n", + stream_type); + return -EINVAL; + } +} + static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, unsigned offset, unsigned buf_sizes[]) { @@ -388,50 +411,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - if (msg_type == 1) { - /* it's a decode msg, calc buffer sizes */ - r = radeon_uvd_cs_msg_decode(msg, buf_sizes); - /* calc image size (width * height) */ - img_size = msg[6] * msg[7]; + switch (msg_type) { + case 0: + /* it's a create msg, calc image size (width * height) */ + img_size = msg[7] * msg[8]; + + r = radeon_uvd_validate_codec(p, msg[4]); + radeon_bo_kunmap(bo); + if (r) + return r; + + /* try to alloc a new handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if 
(atomic_read(&p->rdev->uvd.handles[i]) == handle) { + DRM_ERROR("Handle 0x%x already in use!\n", handle); + return -EINVAL; + } + + if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { + p->rdev->uvd.filp[i] = p->filp; + p->rdev->uvd.img_size[i] = img_size; + return 0; + } + } + + DRM_ERROR("No more free UVD handles!\n"); + return -EINVAL; + + case 1: + /* it's a decode msg, validate codec and calc buffer sizes */ + r = radeon_uvd_validate_codec(p, msg[4]); + if (!r) + r = radeon_uvd_cs_msg_decode(msg, buf_sizes); radeon_bo_kunmap(bo); if (r) return r; - } else if (msg_type == 2) { + /* validate the handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { + if (p->rdev->uvd.filp[i] != p->filp) { + DRM_ERROR("UVD handle collision detected!\n"); + return -EINVAL; + } + return 0; + } + } + + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + return -ENOENT; + + case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); radeon_bo_kunmap(bo); return 0; - } else { - /* it's a create msg, calc image size (width * height) */ - img_size = msg[7] * msg[8]; - radeon_bo_kunmap(bo); - if (msg_type != 0) { - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; - } - - /* it's a create msg, no special handling needed */ - } - - /* create or decode, validate the handle */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (atomic_read(&p->rdev->uvd.handles[i]) == handle) - return 0; - } + default: - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { - p->rdev->uvd.filp[i] = p->filp; - p->rdev->uvd.img_size[i] = img_size; - return 0; - } + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; } - DRM_ERROR("No more free UVD handles!\n"); + BUG(); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 3cf1e2921545..9ef2064b1c9c 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -989,6 +989,9 @@ ((n) & 0x3FFF) << 16) /* UVD */ +#define UVD_SEMA_ADDR_LOW 0xef00 +#define UVD_SEMA_ADDR_HIGH 0xef04 +#define UVD_SEMA_CMD 0xef08 #define UVD_GPCOM_VCPU_CMD 0xef0c #define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA1 0xef14 diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index c310a0aeebb9..0fa9009fef64 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -365,18 +365,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_semaphore *semaphore, bool emit_wait) { - uint64_t addr = semaphore->gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); - radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); - radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); - radeon_ring_write(ring, emit_wait ? 
1 : 0); - - return true; + /* disable semaphores for UVD V1 hardware */ + return false; } /** diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index d1771004cb52..19ccb2ae7a5d 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, } /** + * uvd_v2_2_semaphore_emit - emit semaphore command + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * @semaphore: semaphore to emit commands for + * @emit_wait: true if we should emit a wait command + * + * Emit a semaphore command (either wait or signal) to the UVD ring. + */ +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); + radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); + radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); + radeon_ring_write(ring, emit_wait ? 1 : 0); + + return true; +} + +/** * uvd_v2_2_resume - memory controller programming * * @rdev: radeon_device pointer diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 42c3058e6e9c..dbd7d66977c9 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id memcpy(&ib->sib_addr, &path->dgid, 16); } +static __be16 ss_get_port(const struct sockaddr_storage *ss) +{ + if (ss->ss_family == AF_INET) + return ((struct sockaddr_in *)ss)->sin_port; + else if (ss->ss_family == AF_INET6) + return ((struct sockaddr_in6 *)ss)->sin6_port; + BUG(); +} + static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct cma_hdr *hdr) { - struct sockaddr_in *listen4, *ip4; + struct sockaddr_in *ip4; - listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr; ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; - ip4->sin_family = listen4->sin_family; + ip4->sin_family = AF_INET; ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; - ip4->sin_port = listen4->sin_port; + ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr); ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; - ip4->sin_family = listen4->sin_family; + ip4->sin_family = AF_INET; ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; ip4->sin_port = hdr->port; } @@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct cma_hdr *hdr) { - struct sockaddr_in6 *listen6, *ip6; + struct sockaddr_in6 *ip6; - listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr; ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr; - ip6->sin6_family = listen6->sin6_family; + ip6->sin6_family = AF_INET6; ip6->sin6_addr = hdr->dst_addr.ip6; - ip6->sin6_port = listen6->sin6_port; + ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr); ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr; - ip6->sin6_family = listen6->sin6_family; + ip6->sin6_family = AF_INET6; ip6->sin6_addr = hdr->src_addr.ip6; ip6->sin6_port = hdr->port; } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 5a4cda24e302..4a8d19d0a5a4 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c 
@@ -915,10 +915,11 @@ static int crypt_convert(struct crypt_config *cc, switch (r) { /* async */ - case -EINPROGRESS: case -EBUSY: wait_for_completion(&ctx->restart); reinit_completion(&ctx->restart); + /* fall through*/ + case -EINPROGRESS: ctx->req = NULL; ctx->cc_sector++; continue; @@ -1313,8 +1314,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); struct crypt_config *cc = io->cc; - if (error == -EINPROGRESS) + if (error == -EINPROGRESS) { + complete(&ctx->restart); return; + } if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); @@ -1325,15 +1328,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); if (!atomic_dec_and_test(&ctx->cc_pending)) - goto done; + return; if (bio_data_dir(io->base_bio) == READ) kcryptd_crypt_read_done(io); else kcryptd_crypt_write_io_submit(io, 1); -done: - if (!completion_done(&ctx->restart)) - complete(&ctx->restart); } static void kcryptd_crypt(struct work_struct *work) diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index df72c478c5a2..b1e21fc869c3 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -951,6 +951,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } +int mmc_access_rpmb(struct mmc_queue *mq) +{ + struct mmc_blk_data *md = mq->data; + /* + * If this is a RPMB partition access, return ture + */ + if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) + return true; + + return false; +} + static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 3e049c13429c..6ceede0a0bf7 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) return BLKPREP_KILL; } - if (mq && mmc_card_removed(mq->card)) + if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) return BLKPREP_KILL; req->cmd_flags |= REQ_DONTPREP; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 5752d50049a3..99e6521e6169 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *); extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); extern void mmc_packed_clean(struct mmc_queue *); +extern int mmc_access_rpmb(struct mmc_queue *); + #endif diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 098374b1ab2b..6a881ebe5b02 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2657,6 +2657,7 @@ int mmc_pm_notify(struct notifier_block *notify_block, switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 1; spin_unlock_irqrestore(&host->lock, flags); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 54730f4aac87..9c208fdf43ab 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1401,7 +1401,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) host = mmc_priv(mmc); host->mmc = mmc; host->addr = reg; - host->timeout = msecs_to_jiffies(1000); + host->timeout = msecs_to_jiffies(10000); host->ccs_enable = !pd || 
!pd->ccs_unsupported; host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 988f5e18763a..a2a79c737b1b 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1121,7 +1121,7 @@ void devm_pinctrl_put(struct pinctrl *p) EXPORT_SYMBOL_GPL(devm_pinctrl_put); int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, - bool dup, bool locked) + bool dup) { int i, ret; struct pinctrl_maps *maps_node; @@ -1189,11 +1189,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, maps_node->maps = maps; } - if (!locked) - mutex_lock(&pinctrl_maps_mutex); + mutex_lock(&pinctrl_maps_mutex); list_add_tail(&maps_node->node, &pinctrl_maps); - if (!locked) - mutex_unlock(&pinctrl_maps_mutex); + mutex_unlock(&pinctrl_maps_mutex); return 0; } @@ -1208,7 +1206,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, int pinctrl_register_mappings(struct pinctrl_map const *maps, unsigned num_maps) { - return pinctrl_register_map(maps, num_maps, true, false); + return pinctrl_register_map(maps, num_maps, true); } void pinctrl_unregister_map(struct pinctrl_map const *map) diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 75476b3d87da..b24ea846c867 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, } int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, - bool dup, bool locked); + bool dup); void pinctrl_unregister_map(struct pinctrl_map const *map); extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 340fb4e6c600..fd91c4c31f6b 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, dt_map->num_maps = num_maps; list_add_tail(&dt_map->node, &p->dt_maps); - return pinctrl_register_map(map, num_maps, false, true); + return pinctrl_register_map(map, num_maps, false); } struct pinctrl_dev *of_pinctrl_get(struct device_node *np) diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 2dc2831840ca..8eb65f26fcae 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void) return 0; } +static void xen_console_update_evtchn(struct xencons_info *info) +{ + if (xen_hvm_domain()) { + uint64_t v; + int err; + + err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); + if (!err && v) + info->evtchn = v; + } else + info->evtchn = xen_start_info->console.domU.evtchn; +} + void xen_console_resume(void) { struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); - if (info != NULL && info->irq) + if (info != NULL && info->irq) { + if (!xen_initial_domain()) + xen_console_update_evtchn(info); rebind_evtchn_irq(info->evtchn, info->irq); + } } static void xencons_disconnect_backend(struct xencons_info *info) diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index d7ff91757307..843e5d8538bd 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -352,6 +352,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static void evtchn_2l_resume(void) +{ + int i; + + for_each_online_cpu(i) + memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * + 
EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); +} + static const struct evtchn_ops evtchn_ops_2l = { .max_channels = evtchn_2l_max_channels, .nr_channels = evtchn_2l_max_channels, @@ -363,6 +372,7 @@ static const struct evtchn_ops evtchn_ops_2l = { .mask = evtchn_2l_mask, .unmask = evtchn_2l_unmask, .handle_events = evtchn_2l_handle_events, + .resume = evtchn_2l_resume, }; void __init xen_evtchn_2l_init(void) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index c8860a8757ac..a5cc476256f1 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -550,8 +550,8 @@ static unsigned int __startup_pirq(unsigned int irq) if (rc) goto err; - bind_evtchn_to_cpu(evtchn, 0); info->evtchn = evtchn; + bind_evtchn_to_cpu(evtchn, 0); rc = xen_evtchn_port_setup(info); if (rc) @@ -1294,8 +1294,9 @@ void rebind_evtchn_irq(int evtchn, int irq) mutex_unlock(&irq_mapping_update_lock); - /* new event channels are always bound to cpu 0 */ - irq_set_affinity(irq, cpumask_of(0)); + bind_evtchn_to_cpu(evtchn, info->cpu); + /* This will be deferred until interrupt is processed */ + irq_set_affinity(irq, cpumask_of(info->cpu)); /* Unmask the event channel. */ enable_irq(irq); diff --git a/fs/dcache.c b/fs/dcache.c index c345f5f2b508..a9231c872342 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -435,7 +435,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) __releases(parent->d_lock) __releases(dentry->d_inode->i_lock) { - list_del(&dentry->d_child); + __list_del_entry(&dentry->d_child); /* * Inform d_walk() that we are no longer attached to the * dentry tree @@ -1123,33 +1123,31 @@ resume: /* * All done at this level ... ascend and resume the search. */ + rcu_read_lock(); +ascend: if (this_parent != parent) { struct dentry *child = this_parent; this_parent = child->d_parent; - rcu_read_lock(); spin_unlock(&child->d_lock); spin_lock(&this_parent->d_lock); - /* - * might go back up the wrong parent if we have had a rename - * or deletion - */ - if (this_parent != child->d_parent || - (child->d_flags & DCACHE_DENTRY_KILLED) || - need_seqretry(&rename_lock, seq)) { - spin_unlock(&this_parent->d_lock); - rcu_read_unlock(); + /* might go back up the wrong parent if we have had a rename. */ + if (need_seqretry(&rename_lock, seq)) goto rename_retry; + next = child->d_child.next; + while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { + if (next == &this_parent->d_subdirs) + goto ascend; + child = list_entry(next, struct dentry, d_child); + next = next->next; } rcu_read_unlock(); - next = child->d_child.next; goto resume; } - if (need_seqretry(&rename_lock, seq)) { - spin_unlock(&this_parent->d_lock); + if (need_seqretry(&rename_lock, seq)) goto rename_retry; - } + rcu_read_unlock(); if (finish) finish(data); @@ -1159,6 +1157,9 @@ out_unlock: return; rename_retry: + spin_unlock(&this_parent->d_lock); + rcu_read_unlock(); + BUG_ON(seq & 1); if (!retry) return; seq = 1; diff --git a/fs/namespace.c b/fs/namespace.c index 039f3802d70e..2faa7eacb62b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -3025,6 +3025,12 @@ bool fs_fully_visible(struct file_system_type *type) if (mnt->mnt.mnt_sb->s_type != type) continue; + /* This mount is not fully visible if it's root directory + * is not the root directory of the filesystem. + */ + if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) + continue; + /* This mount is not fully visible if there are any child mounts * that cover anything except for empty directories. 
*/ diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index ecdbae19a766..090d8ce25bd1 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, nchildren = nilfs_btree_node_get_nchildren(node); if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || - level > NILFS_BTREE_LEVEL_MAX || + level >= NILFS_BTREE_LEVEL_MAX || nchildren < 0 || nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 1be3398c96f6..1dd0bcc75536 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -726,6 +726,19 @@ lookup: if (tmpres) { spin_unlock(&dlm->spinlock); spin_lock(&tmpres->spinlock); + + /* + * Right after dlm spinlock was released, dlm_thread could have + * purged the lockres. Check if lockres got unhashed. If so + * start over. + */ + if (hlist_unhashed(&tmpres->hash_node)) { + spin_unlock(&tmpres->spinlock); + dlm_lockres_put(tmpres); + tmpres = NULL; + goto lookup; + } + /* Wait on the thread that is mastering the resource */ if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { __dlm_wait_on_lockres(tmpres); diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index fea6773f87fc..5d51f27549cc 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -175,7 +175,7 @@ acpi_status __init acpi_load_tables(void); */ acpi_status __init acpi_reallocate_root_table(void); -acpi_status __init acpi_find_root_pointer(acpi_size *rsdp_address); +acpi_status __init acpi_find_root_pointer(acpi_physical_address * rsdp_address); acpi_status acpi_unload_table_id(acpi_owner_id id); diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 98755767c7b0..1108acaacfc6 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -458,7 +458,7 @@ struct nilfs_btree_node { /* level */ #define NILFS_BTREE_LEVEL_DATA 0 #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) -#define NILFS_BTREE_LEVEL_MAX 14 +#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */ /** * struct nilfs_palloc_group_desc - block group descriptor diff --git a/mm/memory-failure.c b/mm/memory-failure.c index ffc7bf0458fb..9502057c3c54 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1149,10 +1149,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) * The check (unnecessarily) ignores LRU pages being isolated and * walked by the page reclaim code, however that's not a big loss. */ - if (!PageHuge(p) && !PageTransTail(p)) { - if (!PageLRU(p)) - shake_page(p, 0); - if (!PageLRU(p)) { + if (!PageHuge(p)) { + if (!PageLRU(hpage)) + shake_page(hpage, 0); + if (!PageLRU(hpage)) { /* * shake_page could have turned it free. 
*/ @@ -1723,12 +1723,12 @@ int soft_offline_page(struct page *page, int flags) } else if (ret == 0) { /* for free pages */ if (PageHuge(page)) { set_page_hwpoison_huge_page(hpage); - dequeue_hwpoisoned_huge_page(hpage); - atomic_long_add(1 << compound_order(hpage), + if (!dequeue_hwpoisoned_huge_page(hpage)) + atomic_long_add(1 << compound_order(hpage), &num_poisoned_pages); } else { - SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + if (!TestSetPageHWPoison(page)) + atomic_long_inc(&num_poisoned_pages); } } unset_migratetype_isolate(page, MIGRATE_MOVABLE); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 51d8d15f48d7..656a5490f693 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -601,7 +601,7 @@ static long long pos_ratio_polynom(unsigned long setpoint, long x; x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, - limit - setpoint + 1); + (limit - setpoint) | 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; @@ -828,7 +828,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, * scale global setpoint to bdi's: * bdi_setpoint = setpoint * bdi_thresh / thresh */ - x = div_u64((u64)bdi_thresh << 16, thresh + 1); + x = div_u64((u64)bdi_thresh << 16, thresh | 1); bdi_setpoint = setpoint * (u64)x >> 16; /* * Use span=(8*write_bw) in single bdi case as indicated by @@ -843,7 +843,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, if (bdi_dirty < x_intercept - span / 4) { pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), - x_intercept - bdi_setpoint + 1); + (x_intercept - bdi_setpoint) | 1); } else pos_ratio /= 4; diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index 9b9f7d385134..1010ca1c9994 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c @@ -683,13 +683,8 @@ static int seq_timing_event(unsigned char *event_rec) break; case TMR_ECHO: - if (seq_mode == SEQ_2) - seq_copy_to_input(event_rec, 8); - else - { - parm = (parm << 8 | SEQ_ECHO); - seq_copy_to_input((unsigned char *) &parm, 4); - } + parm = (parm << 8 | SEQ_ECHO); + seq_copy_to_input((unsigned char *) &parm, 4); break; default:; @@ -1330,7 +1325,6 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a int mode = translate_mode(file); struct synth_info inf; struct seq_event_rec event_rec; - unsigned long flags; int __user *p = arg; orig_dev = dev = dev >> 4; @@ -1485,9 +1479,7 @@ int sequencer_ioctl(int dev, struct file *file, unsigned int cmd, void __user *a case SNDCTL_SEQ_OUTOFBAND: if (copy_from_user(&event_rec, arg, sizeof(event_rec))) return -EFAULT; - spin_lock_irqsave(&lock,flags); play_event(event_rec.arr); - spin_unlock_irqrestore(&lock,flags); return 0; case SNDCTL_MIDI_INFO: diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 26954a7d9b03..4eec2d436109 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, u32 val; u32 *reg; - offset >>= 1; reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg, - vcpu->vcpu_id, offset); + vcpu->vcpu_id, offset >> 1); - if (offset & 2) + if (offset & 4) val = *reg >> 16; else val = *reg & 0xffff; @@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu, vgic_reg_access(mmio, &val, offset, ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); if (mmio->is_write) { - if (offset < 4) { + if (offset < 8) { *reg = ~0U; /* Force PPIs/SGIs to 1 */ return false; } val = 
vgic_cfg_compress(val); - if (offset & 2) { + if (offset & 4) { *reg &= 0xffff; *reg |= val << 16; } else { @@ -1527,17 +1526,33 @@ int kvm_vgic_hyp_init(void) goto out_unmap; } - kvm_info("%s@%llx IRQ%d\n", vgic_node->name, - vctrl_res.start, vgic_maint_irq); - on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); - if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { kvm_err("Cannot obtain VCPU resource\n"); ret = -ENXIO; goto out_unmap; } + + if (!PAGE_ALIGNED(vcpu_res.start)) { + kvm_err("GICV physical address 0x%llx not page aligned\n", + (unsigned long long)vcpu_res.start); + ret = -ENXIO; + goto out_unmap; + } + + if (!PAGE_ALIGNED(resource_size(&vcpu_res))) { + kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n", + (unsigned long long)resource_size(&vcpu_res), + PAGE_SIZE); + ret = -ENXIO; + goto out_unmap; + } + vgic_vcpu_base = vcpu_res.start; + kvm_info("%s@%llx IRQ%d\n", vgic_node->name, + vctrl_res.start, vgic_maint_irq); + on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); + goto out; out_unmap: @@ -1668,10 +1683,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr, if (addr + size < addr) return -EINVAL; + *ioaddr = addr; ret = vgic_ioaddr_overlap(kvm); if (ret) - return ret; - *ioaddr = addr; + *ioaddr = VGIC_ADDR_UNDEF; + return ret; }