diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt index a4873e5e3e36..e30e184f50c7 100644 --- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt @@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 { 80 81 68 69 70 71 72 73 74 75 76 77>; - interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", + interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", "saif0", "saif1", "i2c0", "i2c1", "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c85942532dd5..f6ca2e530727 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3462,6 +3462,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. READ_CAPACITY_16 command); f = NO_REPORT_OPCODES (don't use report opcodes command, uas only); + g = MAX_SECTORS_240 (don't transfer more than + 240 sectors at a time, uas only); h = CAPACITY_HEURISTICS (decrease the reported device capacity by one sector if the number is odd); diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 612e6e99d1e5..769c2cb7f9b3 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2382,7 +2382,8 @@ should be created before this ioctl is invoked. Possible features: - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state. - Depends on KVM_CAP_ARM_PSCI. + Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on + and execute guest code when KVM_RUN is called. - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode. Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. diff --git a/Makefile b/Makefile index d5de7dccbe7a..c2c3ab88cea7 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 16 SUBLEVEL = 7 -EXTRAVERSION =-ckt11 +EXTRAVERSION =-ckt12 NAME = Museum of Fishiegoodies # *DOCUMENTATION* diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index d68b410595c8..a0c63fc48457 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn) /* Don't restart from sigreturn */ syscall_wont_restart(regs); + /* + * Ensure that sigreturn always returns to user mode (in case the + * regs saved on user stack got fudged between save and sigreturn) + * Otherwise it is easy to panic the kernel with a custom + * signal handler and/or restorer which clobberes the status32/ret + * to return to a bogus location in kernel mode. 
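[Editor's note] The assignment added just below ("regs->status32 |= STATUS_U_MASK;") is the whole fix: whatever a hostile handler or restorer wrote into the saved frame, the restored context can only resume in user mode. A minimal standalone sketch of that idea follows; it is not kernel code, and the bit position is an assumption used only for illustration.

#include <stdint.h>
#include <stdio.h>

#define STATUS_U_MASK (1u << 7)   /* assumed bit position, for illustration only */

/* Sanitize a status word restored from a user-controlled signal frame. */
static uint32_t sanitize_status32(uint32_t saved)
{
	return saved | STATUS_U_MASK;   /* force the user-mode bit back on */
}

int main(void)
{
	uint32_t forged = 0;            /* handler cleared U to "return" in kernel mode */
	printf("restored status32 = 0x%08x\n", sanitize_status32(forged));
	return 0;
}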
+ */ + regs->status32 |= STATUS_U_MASK; + return regs->r0; badframe: @@ -234,8 +243,11 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info, /* * handler returns using sigreturn stub provided already by userpsace + * If not, nuke the process right away */ - BUG_ON(!(ka->sa.sa_flags & SA_RESTORER)); + if(!(ka->sa.sa_flags & SA_RESTORER)) + return 1; + regs->blink = (unsigned long)ka->sa.sa_restorer; /* User Stack for signal handler will be above the frame just carved */ @@ -302,12 +314,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); - int ret; + int failed; /* Set up the stack frame */ - ret = setup_rt_frame(sig, ka, info, oldset, regs); + failed = setup_rt_frame(sig, ka, info, oldset, regs); - if (ret) + if (failed) force_sigsegv(sig, current); else signal_delivered(sig, info, ka, regs, 0); diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts index 4e5a59ee1501..db06fa397f79 100644 --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts @@ -71,6 +71,10 @@ }; internal-regs { + rtc@10300 { + /* No crystal connected to the internal RTC */ + status = "disabled"; + }; serial@12000 { status = "okay"; }; diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts index 7e6eef2488e8..82045398bf1f 100644 --- a/arch/arm/boot/dts/imx23-olinuxino.dts +++ b/arch/arm/boot/dts/imx23-olinuxino.dts @@ -12,6 +12,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include "imx23.dtsi" / { @@ -93,6 +94,7 @@ ahb@80080000 { usb0: usb@80080000 { + dr_mode = "host"; vbus-supply = <®_usb0_vbus>; status = "okay"; }; @@ -122,7 +124,7 @@ user { label = "green"; - gpios = <&gpio2 1 1>; + gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; }; }; }; diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi index 3073469cf808..ce2f47c25efe 100644 --- a/arch/arm/boot/dts/imx25.dtsi +++ b/arch/arm/boot/dts/imx25.dtsi @@ -426,6 +426,7 @@ pwm4: pwm@53fc8000 { compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; + #pwm-cells = <2>; reg = <0x53fc8000 0x4000>; clocks = <&clks 108>, <&clks 52>; clock-names = "ipg", "per"; diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index a95cc5358ff4..e40f3ea23507 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -857,7 +857,7 @@ 80 81 68 69 70 71 72 73 74 75 76 77>; - interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", + interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", "saif0", "saif1", "i2c0", "i2c1", "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index faa3494a69d4..50c7718cb84e 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -31,6 +31,7 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; gpio = <&gpio4 15 0>; + enable-active-high; }; reg_usb_h1_vbus: regulator@1 { @@ -40,6 +41,7 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; gpio = <&gpio1 0 0>; + enable-active-high; }; }; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 1fe45d1f75ec..2b061fee8a20 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -437,6 
+437,8 @@ DRVDD-supply = <&vmmc2>; IOVDD-supply = <&vio>; DVDD-supply = <&vio>; + + ai3x-micbias-vg = <1>; }; tlv320aic3x_aux: tlv320aic3x@19 { @@ -448,6 +450,8 @@ DRVDD-supply = <&vmmc2>; IOVDD-supply = <&vio>; DVDD-supply = <&vio>; + + ai3x-micbias-vg = <2>; }; tsl2563: tsl2563@29 { diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index e41eedca3ce3..55e4f2d30f8f 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -985,23 +985,6 @@ status = "disabled"; }; - vmmci: regulator-gpio { - compatible = "regulator-gpio"; - - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <2900000>; - regulator-name = "mmci-reg"; - regulator-type = "voltage"; - - startup-delay-us = <100>; - enable-active-high; - - states = <1800000 0x1 - 2900000 0x0>; - - status = "disabled"; - }; - mcde@a0350000 { compatible = "stericsson,mcde"; reg = <0xa0350000 0x1000>, /* MCDE */ diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index bf8f0eddc2c0..744c1e3a744d 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -111,6 +111,21 @@ pinctrl-1 = <&i2c3_sleep_mode>; }; + vmmci: regulator-gpio { + compatible = "regulator-gpio"; + + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2900000>; + regulator-name = "mmci-reg"; + regulator-type = "voltage"; + + startup-delay-us = <100>; + enable-active-high; + + states = <1800000 0x1 + 2900000 0x0>; + }; + // External Micro SD slot sdi0_per1@80126000 { arm,primecell-periphid = <0x10480180>; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 474ef83229cd..ae435e94ef9b 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -147,8 +147,21 @@ }; vmmci: regulator-gpio { + compatible = "regulator-gpio"; + gpios = <&gpio7 4 0x4>; enable-gpio = <&gpio6 25 0x4>; + + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <2900000>; + regulator-name = "mmci-reg"; + regulator-type = "voltage"; + + startup-delay-us = <100>; + enable-active-high; + + states = <1800000 0x1 + 2900000 0x0>; }; // External Micro SD slot diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 0fa90c962ac8..853e2becad18 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); +static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr = HCR_GUEST_MASK; +} + static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) { return 1; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 193ceaf01bfd..f10a24e77134 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -42,7 +42,7 @@ struct kvm_vcpu; u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); -int kvm_target_cpu(void); +int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); void kvm_reset_coprocs(struct kvm_vcpu *vcpu); diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 5c7aa3c1519f..8618badbbbbe 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -47,6 +47,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t); void free_boot_hyp_pgd(void); void 
free_hyp_pgds(void); +void stage2_unmap_vm(struct kvm *kvm); int kvm_alloc_stage2_pgd(struct kvm *kvm); void kvm_free_stage2_pgd(struct kvm *kvm); int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, @@ -78,17 +79,6 @@ static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) flush_pmd_entry(pte); } -static inline bool kvm_is_write_fault(unsigned long hsr) -{ - unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; - if (hsr_ec == HSR_EC_IABT) - return false; - else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) - return false; - else - return true; -} - static inline void kvm_clean_pgd(pgd_t *pgd) { clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); @@ -127,6 +117,19 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) +#define kvm_pgd_index(addr) pgd_index(addr) + +static inline bool kvm_page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} + +#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep) +#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp) +#define kvm_pud_table_empty(pudp) (0) + + struct kvm; #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 797b1a6a4906..7e666cfda634 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -134,9 +134,7 @@ ENTRY(__hyp_stub_install_secondary) mcr p15, 4, r7, c1, c1, 3 @ HSTR THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE -#ifdef CONFIG_CPU_BIG_ENDIAN - orr r7, #(1 << 9) @ HSCTLR.EE -#endif +ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE mcr p15, 4, r7, c1, c0, 0 @ HSCTLR mrc p15, 4, r7, c1, c1, 1 @ HDCR diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 3c82b37c0f9e..17c7ac58f887 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -82,7 +82,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void) /** * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus. 
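[Editor's note] An aside on the kvm_page_empty()/kvm_*_table_empty() helpers added to kvm_mmu.h above: stage-2 table pages are allocated with a reference count of 1, and each entry installed in them takes an extra reference (get_page()) that the unmap path drops again (put_page()), so a count of exactly 1 means the table holds no entries and may be torn down. A standalone model of that convention, using a plain struct as a stand-in for the kernel's struct page:

#include <stdio.h>

struct page { int refcount; };                   /* stand-in for struct page */

static int page_count(const struct page *p)      { return p->refcount; }
static int kvm_page_empty(const struct page *p)  { return page_count(p) == 1; }

int main(void)
{
	struct page table = { .refcount = 1 };       /* freshly allocated table page */

	table.refcount++;                            /* install one entry: get_page() */
	printf("after map:   empty=%d\n", kvm_page_empty(&table));

	table.refcount--;                            /* clear the entry:  put_page() */
	printf("after unmap: empty=%d\n", kvm_page_empty(&table));
	return 0;
}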
*/ -struct kvm_vcpu __percpu **kvm_get_running_vcpus(void) +struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) { return &kvm_arm_running_vcpu; } @@ -155,16 +155,6 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) -{ -} - -int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, - unsigned long npages) -{ - return 0; -} /** * kvm_arch_destroy_vm - destroy the VM data structure @@ -225,39 +215,17 @@ long kvm_arch_dev_ioctl(struct file *filp, return -EINVAL; } -void kvm_arch_memslots_updated(struct kvm *kvm) -{ -} - -int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_userspace_memory_region *mem, - enum kvm_mr_change change) -{ - return 0; -} - -void kvm_arch_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old, - enum kvm_mr_change change) -{ -} - -void kvm_arch_flush_shadow_all(struct kvm *kvm) -{ -} - -void kvm_arch_flush_shadow_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ -} struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err; struct kvm_vcpu *vcpu; + if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) { + err = -EBUSY; + goto out; + } + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); if (!vcpu) { err = -ENOMEM; @@ -465,15 +433,16 @@ static void update_vttbr(struct kvm *kvm) /* update vttbr to be used with the new vmid */ pgd_phys = virt_to_phys(kvm->arch.pgd); + BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; - kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; - kvm->arch.vttbr |= vmid; + kvm->arch.vttbr = pgd_phys | vmid; spin_unlock(&kvm_vmid_lock); } static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; int ret; if (likely(vcpu->arch.has_run_once)) @@ -485,12 +454,20 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) * Initialize the VGIC before running a vcpu the first time on * this VM. */ - if (unlikely(!vgic_initialized(vcpu->kvm))) { - ret = kvm_vgic_init(vcpu->kvm); + if (unlikely(!vgic_initialized(kvm))) { + ret = kvm_vgic_init(kvm); if (ret) return ret; } + /* + * Enable the arch timers only if we have an in-kernel VGIC + * and it has been properly initialized, since we cannot handle + * interrupts from the virtual timer with a userspace gic. + */ + if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) + kvm_timer_enable(kvm); + return 0; } @@ -714,10 +691,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, return ret; /* + * Ensure a rebooted VM will fault in RAM pages and detect if the + * guest MMU is turned off and flush the caches as needed. + */ + if (vcpu->arch.has_run_once) + stage2_unmap_vm(vcpu->kvm); + + vcpu_reset_hcr(vcpu); + + /* * Handle the "start in power-off" case by marking the VCPU as paused. 
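[Editor's note] The hunk just below replaces __test_and_clear_bit() with test_bit() and adds an explicit else branch. The point is that KVM_ARM_VCPU_INIT may now be issued more than once (userspace uses it to reset a rebooted guest, together with the stage2_unmap_vm()/vcpu_reset_hcr() calls above), so the POWER_OFF feature bit must be read without being consumed. A small standalone model of the behavioural difference, with the bit handling reduced to plain masks:

#include <stdbool.h>
#include <stdio.h>

#define VCPU_POWER_OFF 0x1u

static bool pause_old(unsigned *features)        /* __test_and_clear_bit() */
{
	bool off = *features & VCPU_POWER_OFF;
	*features &= ~VCPU_POWER_OFF;
	return off;
}

static bool pause_new(unsigned features)         /* test_bit(): state survives */
{
	return features & VCPU_POWER_OFF;
}

int main(void)
{
	unsigned f_old = VCPU_POWER_OFF, f_new = VCPU_POWER_OFF;
	bool first, second;

	first  = pause_old(&f_old);
	second = pause_old(&f_old);                  /* re-init: bit already gone */
	printf("old: first=%d second=%d\n", first, second);

	first  = pause_new(f_new);
	second = pause_new(f_new);                   /* re-init: same answer */
	printf("new: first=%d second=%d\n", first, second);
	return 0;
}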
*/ - if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) + if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) vcpu->arch.pause = true; + else + vcpu->arch.pause = false; return 0; } @@ -863,7 +851,8 @@ static int hyp_init_cpu_notify(struct notifier_block *self, switch (action) { case CPU_STARTING: case CPU_STARTING_FROZEN: - cpu_init_hyp_mode(NULL); + if (__hyp_get_vectors() == hyp_default_vectors) + cpu_init_hyp_mode(NULL); break; } diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index c58a35116f63..7c732908f1df 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -742,7 +742,7 @@ static bool is_valid_cache(u32 val) u32 level, ctype; if (val >= CSSELR_MAX) - return -ENOENT; + return false; /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ level = (val >> 1); diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index b23a59c1c522..2786eae10c0d 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - vcpu->arch.hcr = HCR_GUEST_MASK; return 0; } diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 16f804938b8f..c65db5d7ea92 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -90,104 +90,116 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) return p; } -static bool page_empty(void *ptr) +static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) { - struct page *ptr_page = virt_to_page(ptr); - return page_count(ptr_page) == 1; + pud_t *pud_table __maybe_unused = pud_offset(pgd, 0); + pgd_clear(pgd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + pud_free(NULL, pud_table); + put_page(virt_to_page(pgd)); } static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) { - if (pud_huge(*pud)) { - pud_clear(pud); - kvm_tlb_flush_vmid_ipa(kvm, addr); - } else { - pmd_t *pmd_table = pmd_offset(pud, 0); - pud_clear(pud); - kvm_tlb_flush_vmid_ipa(kvm, addr); - pmd_free(NULL, pmd_table); - } + pmd_t *pmd_table = pmd_offset(pud, 0); + VM_BUG_ON(pud_huge(*pud)); + pud_clear(pud); + kvm_tlb_flush_vmid_ipa(kvm, addr); + pmd_free(NULL, pmd_table); put_page(virt_to_page(pud)); } static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) { - if (kvm_pmd_huge(*pmd)) { - pmd_clear(pmd); - kvm_tlb_flush_vmid_ipa(kvm, addr); - } else { - pte_t *pte_table = pte_offset_kernel(pmd, 0); - pmd_clear(pmd); - kvm_tlb_flush_vmid_ipa(kvm, addr); - pte_free_kernel(NULL, pte_table); - } + pte_t *pte_table = pte_offset_kernel(pmd, 0); + VM_BUG_ON(kvm_pmd_huge(*pmd)); + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + pte_free_kernel(NULL, pte_table); put_page(virt_to_page(pmd)); } -static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) +static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, + phys_addr_t addr, phys_addr_t end) { - if (pte_present(*pte)) { - kvm_set_pte(pte, __pte(0)); - put_page(virt_to_page(pte)); - kvm_tlb_flush_vmid_ipa(kvm, addr); - } + phys_addr_t start_addr = addr; + pte_t *pte, *start_pte; + + start_pte = pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + kvm_set_pte(pte, __pte(0)); + put_page(virt_to_page(pte)); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } + } while (pte++, addr += PAGE_SIZE, addr != end); + + if (kvm_pte_table_empty(start_pte)) + clear_pmd_entry(kvm, pmd, start_addr); } -static void unmap_range(struct kvm *kvm, pgd_t *pgdp, - unsigned long long start, u64 size) +static void 
unmap_pmds(struct kvm *kvm, pud_t *pud, + phys_addr_t addr, phys_addr_t end) { - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - unsigned long long addr = start, end = start + size; - u64 next; + phys_addr_t next, start_addr = addr; + pmd_t *pmd, *start_pmd; - while (addr < end) { - pgd = pgdp + pgd_index(addr); - pud = pud_offset(pgd, addr); - pte = NULL; - if (pud_none(*pud)) { - addr = kvm_pud_addr_end(addr, end); - continue; - } - - if (pud_huge(*pud)) { - /* - * If we are dealing with a huge pud, just clear it and - * move on. - */ - clear_pud_entry(kvm, pud, addr); - addr = kvm_pud_addr_end(addr, end); - continue; + start_pmd = pmd = pmd_offset(pud, addr); + do { + next = kvm_pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (kvm_pmd_huge(*pmd)) { + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + put_page(virt_to_page(pmd)); + } else { + unmap_ptes(kvm, pmd, addr, next); + } } + } while (pmd++, addr = next, addr != end); - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) { - addr = kvm_pmd_addr_end(addr, end); - continue; - } + if (kvm_pmd_table_empty(start_pmd)) + clear_pud_entry(kvm, pud, start_addr); +} - if (!kvm_pmd_huge(*pmd)) { - pte = pte_offset_kernel(pmd, addr); - clear_pte_entry(kvm, pte, addr); - next = addr + PAGE_SIZE; - } +static void unmap_puds(struct kvm *kvm, pgd_t *pgd, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t next, start_addr = addr; + pud_t *pud, *start_pud; - /* - * If the pmd entry is to be cleared, walk back up the ladder - */ - if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) { - clear_pmd_entry(kvm, pmd, addr); - next = kvm_pmd_addr_end(addr, end); - if (page_empty(pmd) && !page_empty(pud)) { - clear_pud_entry(kvm, pud, addr); - next = kvm_pud_addr_end(addr, end); + start_pud = pud = pud_offset(pgd, addr); + do { + next = kvm_pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + pud_clear(pud); + kvm_tlb_flush_vmid_ipa(kvm, addr); + put_page(virt_to_page(pud)); + } else { + unmap_pmds(kvm, pud, addr, next); } } + } while (pud++, addr = next, addr != end); - addr = next; - } + if (kvm_pud_table_empty(start_pud)) + clear_pgd_entry(kvm, pgd, start_addr); +} + + +static void unmap_range(struct kvm *kvm, pgd_t *pgdp, + phys_addr_t start, u64 size) +{ + pgd_t *pgd; + phys_addr_t addr = start, end = start + size; + phys_addr_t next; + + pgd = pgdp + kvm_pgd_index(addr); + do { + next = kvm_pgd_addr_end(addr, end); + if (!pgd_none(*pgd)) + unmap_puds(kvm, pgd, addr, next); + } while (pgd++, addr = next, addr != end); } static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, @@ -252,7 +264,7 @@ static void stage2_flush_memslot(struct kvm *kvm, phys_addr_t next; pgd_t *pgd; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); stage2_flush_puds(kvm, pgd, addr, next); @@ -544,6 +556,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) unmap_range(kvm, kvm->arch.pgd, start, size); } +static void stage2_unmap_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + hva_t hva = memslot->userspace_addr; + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t size = PAGE_SIZE * memslot->npages; + hva_t reg_end = hva + size; + + /* + * A memory region could potentially cover multiple VMAs, and any holes + * between them, so iterate over all of them to find out if we should + * unmap any of them. 
+ * + * +--------------------------------------------+ + * +---------------+----------------+ +----------------+ + * | : VMA 1 | VMA 2 | | VMA 3 : | + * +---------------+----------------+ +----------------+ + * | memory region | + * +--------------------------------------------+ + */ + do { + struct vm_area_struct *vma = find_vma(current->mm, hva); + hva_t vm_start, vm_end; + + if (!vma || vma->vm_start >= reg_end) + break; + + /* + * Take the intersection of this VMA with the memory region + */ + vm_start = max(hva, vma->vm_start); + vm_end = min(reg_end, vma->vm_end); + + if (!(vma->vm_flags & VM_PFNMAP)) { + gpa_t gpa = addr + (vm_start - memslot->userspace_addr); + unmap_stage2_range(kvm, gpa, vm_end - vm_start); + } + hva = vm_end; + } while (hva < reg_end); +} + +/** + * stage2_unmap_vm - Unmap Stage-2 RAM mappings + * @kvm: The struct kvm pointer + * + * Go through the memregions and unmap any reguler RAM + * backing memory already mapped to the VM. + */ +void stage2_unmap_vm(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) + stage2_unmap_memslot(kvm, memslot); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); +} + /** * kvm_free_stage2_pgd - free all stage-2 tables * @kvm: The KVM struct pointer for the VM. @@ -572,7 +649,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pud_t *pud; pmd_t *pmd; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); pud = pud_offset(pgd, addr); if (pud_none(*pud)) { if (!cache) @@ -735,6 +812,19 @@ static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap) return false; } +static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) +{ + if (kvm_vcpu_trap_is_iabt(vcpu)) + return false; + + return kvm_vcpu_dabt_iswrite(vcpu); +} + +static bool kvm_is_device_pfn(unsigned long pfn) +{ + return !pfn_valid(pfn); +} + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long fault_status) @@ -748,8 +838,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; struct vm_area_struct *vma; pfn_t pfn; + pgprot_t mem_type = PAGE_S2; - write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); + write_fault = kvm_is_write_fault(vcpu); if (fault_status == FSC_PERM && !write_fault) { kvm_err("Unexpected L2 read permission error\n"); return -EFAULT; @@ -758,6 +849,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, /* Let's check if we will get back a huge page backed by hugetlbfs */ down_read(¤t->mm->mmap_sem); vma = find_vma_intersection(current->mm, hva, hva + 1); + if (unlikely(!vma)) { + kvm_err("Failed to find VMA for hva 0x%lx\n", hva); + up_read(¤t->mm->mmap_sem); + return -EFAULT; + } + if (is_vm_hugetlb_page(vma)) { hugetlb = true; gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; @@ -798,6 +895,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (is_error_pfn(pfn)) return -EFAULT; + if (kvm_is_device_pfn(pfn)) + mem_type = PAGE_S2_DEVICE; + spin_lock(&kvm->mmu_lock); if (mmu_notifier_retry(kvm, mmu_seq)) goto out_unlock; @@ -805,7 +905,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); if (hugetlb) { - pmd_t 
new_pmd = pfn_pmd(pfn, PAGE_S2); + pmd_t new_pmd = pfn_pmd(pfn, mem_type); new_pmd = pmd_mkhuge(new_pmd); if (writable) { kvm_set_s2pmd_writable(&new_pmd); @@ -814,13 +914,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE); ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); } else { - pte_t new_pte = pfn_pte(pfn, PAGE_S2); + pte_t new_pte = pfn_pte(pfn, mem_type); if (writable) { kvm_set_s2pte_writable(&new_pte); kvm_set_pfn_dirty(pfn); } coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); - ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false); + ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, + pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); } @@ -896,6 +997,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) memslot = gfn_to_memslot(vcpu->kvm, gfn); + /* Userspace should not be able to register out-of-bounds IPAs */ + VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); + ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status); if (ret == 0) ret = 1; @@ -1100,3 +1204,57 @@ out: free_hyp_pgds(); return err; } + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + enum kvm_mr_change change) +{ + gpa_t gpa = old->base_gfn << PAGE_SHIFT; + phys_addr_t size = old->npages << PAGE_SHIFT; + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { + spin_lock(&kvm->mmu_lock); + unmap_stage2_range(kvm, gpa, size); + spin_unlock(&kvm->mmu_lock); + } +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) +{ + /* + * Prevent userspace from creating a memory region outside of the IPA + * space addressable by the KVM guest IPA space. 
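[Editor's note] The check added right after this comment rejects any memslot whose end, base_gfn + npages, reaches the top of the guest IPA space. With the 40-bit KVM_PHYS_SHIFT fixed elsewhere in this series and an assumed 4 KiB page size, that limit is 2^28 guest frame numbers. A standalone sketch of the same arithmetic (not kernel code):

#include <stdint.h>
#include <stdio.h>

#define KVM_PHYS_SHIFT 40                        /* 40-bit guest IPA space */
#define PAGE_SHIFT     12                        /* assumed 4 KiB pages */
#define KVM_PHYS_SIZE  (1ULL << KVM_PHYS_SHIFT)

static int memslot_ok(uint64_t base_gfn, uint64_t npages)
{
	/* Same expression as the hunk below: reject when the end reaches the top. */
	return !(base_gfn + npages >= (KVM_PHYS_SIZE >> PAGE_SHIFT));
}

int main(void)
{
	uint64_t gib = 1ULL << 30;

	/* 1 GiB of guest RAM at IPA 2 GiB: well inside the 1 TiB IPA space. */
	printf("1 GiB @ 2 GiB -> %s\n",
	       memslot_ok((2 * gib) >> PAGE_SHIFT, gib >> PAGE_SHIFT) ? "ok" : "-EFAULT");

	/* A single page placed at the 1 TiB boundary: rejected. */
	printf("4 KiB @ 1 TiB -> %s\n",
	       memslot_ok(KVM_PHYS_SIZE >> PAGE_SHIFT, 1) ? "ok" : "-EFAULT");
	return 0;
}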
+ */ + if (memslot->base_gfn + memslot->npages >= + (KVM_PHYS_SIZE >> PAGE_SHIFT)) + return -EFAULT; + + return 0; +} + +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) +{ + return 0; +} + +void kvm_arch_memslots_updated(struct kvm *kvm) +{ +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h index cbefbd7cfdb5..661d753df584 100644 --- a/arch/arm/mach-omap2/prm-regbits-34xx.h +++ b/arch/arm/mach-omap2/prm-regbits-34xx.h @@ -112,6 +112,7 @@ #define OMAP3430_VC_CMD_ONLP_SHIFT 16 #define OMAP3430_VC_CMD_RET_SHIFT 8 #define OMAP3430_VC_CMD_OFF_SHIFT 0 +#define OMAP3430_SREN_MASK (1 << 4) #define OMAP3430_HSEN_MASK (1 << 3) #define OMAP3430_MCODE_MASK (0x7 << 0) #define OMAP3430_VALID_MASK (1 << 24) diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h index b1c7a33e00e7..e794828dee55 100644 --- a/arch/arm/mach-omap2/prm-regbits-44xx.h +++ b/arch/arm/mach-omap2/prm-regbits-44xx.h @@ -35,6 +35,7 @@ #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1 #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16) #define OMAP4430_HSMCODE_MASK (0x7 << 0) +#define OMAP4430_SRMODEEN_MASK (1 << 4) #define OMAP4430_HSMODEEN_MASK (1 << 3) #define OMAP4430_HSSCLL_SHIFT 24 #define OMAP4430_ICEPICK_RST_SHIFT 9 diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c index a4628a9e760c..b36db175d4a5 100644 --- a/arch/arm/mach-omap2/vc.c +++ b/arch/arm/mach-omap2/vc.c @@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm) * idle. And we can also scale voltages to zero for off-idle. * Note that no actual voltage scaling during off-idle will * happen unless the board specific twl4030 PMIC scripts are - * loaded. + * loaded. See also omap_vc_i2c_init for comments regarding + * erratum i531. */ val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET); if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) { @@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) return; } + /* + * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around + * erratum i531 "Extra Power Consumed When Repeated Start Operation + * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)". + * Otherwise I2C4 eventually leads into about 23mW extra power being + * consumed even during off idle using VMODE. 
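[Editor's note] The call rewritten just below passes the new i2c_cfg_clear_mask (SREN | HSEN) as the bits to clear while still only setting HSEN, so the repeated-start mode implicated in erratum i531 is always switched off whether or not high-speed mode is used. A standalone model of that read-modify-write, using the OMAP3 bit values added earlier in this series; the helper is a deliberate simplification of what voltdm->rmw() does to the register:

#include <stdint.h>
#include <stdio.h>

#define OMAP3430_SREN_MASK (1 << 4)
#define OMAP3430_HSEN_MASK (1 << 3)

/* Simplified read-modify-write: clear 'mask', then OR in 'bits'. */
static uint32_t rmw(uint32_t mask, uint32_t bits, uint32_t reg)
{
	return (reg & ~mask) | bits;
}

int main(void)
{
	uint32_t reg = OMAP3430_SREN_MASK;           /* SREN left enabled by the bootloader */

	uint32_t before = rmw(OMAP3430_HSEN_MASK, OMAP3430_HSEN_MASK, reg);
	uint32_t after  = rmw(OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
			      OMAP3430_HSEN_MASK, reg);

	printf("old call: SREN still set = %d\n", !!(before & OMAP3430_SREN_MASK));
	printf("new call: SREN still set = %d\n", !!(after  & OMAP3430_SREN_MASK));
	return 0;
}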
+ */ i2c_high_speed = voltdm->pmic->i2c_high_speed; if (i2c_high_speed) - voltdm->rmw(vc->common->i2c_cfg_hsen_mask, + voltdm->rmw(vc->common->i2c_cfg_clear_mask, vc->common->i2c_cfg_hsen_mask, vc->common->i2c_cfg_reg); diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h index cdbdd78e755e..89b83b7ff3ec 100644 --- a/arch/arm/mach-omap2/vc.h +++ b/arch/arm/mach-omap2/vc.h @@ -34,6 +34,7 @@ struct voltagedomain; * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register * @i2c_cfg_reg: I2C configuration register offset + * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register * @i2c_mcode_mask: MCODE field mask for I2C config register * @@ -52,6 +53,7 @@ struct omap_vc_common { u8 cmd_ret_shift; u8 cmd_off_shift; u8 i2c_cfg_reg; + u8 i2c_cfg_clear_mask; u8 i2c_cfg_hsen_mask; u8 i2c_mcode_mask; }; diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c index 75bc4aa22b3a..71d74c9172c1 100644 --- a/arch/arm/mach-omap2/vc3xxx_data.c +++ b/arch/arm/mach-omap2/vc3xxx_data.c @@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = { .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT, .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT, .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT, + .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK, .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK, .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET, .i2c_mcode_mask = OMAP3430_MCODE_MASK, diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c index 085e5d6a04fd..2abd5fa8a697 100644 --- a/arch/arm/mach-omap2/vc44xx_data.c +++ b/arch/arm/mach-omap2/vc44xx_data.c @@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = { .cmd_ret_shift = OMAP4430_RET_SHIFT, .cmd_off_shift = OMAP4430_OFF_SHIFT, .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET, + .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK, .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK, .i2c_mcode_mask = OMAP4430_HSMCODE_MASK, }; diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 3d6903006a8a..c33a96bac824 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -18,6 +18,7 @@ #ifndef __ARM64_KVM_ARM_H__ #define __ARM64_KVM_ARM_H__ +#include <asm/memory.h> #include <asm/types.h> /* Hyp Configuration Register (HCR) bits */ @@ -121,6 +122,17 @@ #define VTCR_EL2_T0SZ_MASK 0x3f #define VTCR_EL2_T0SZ_40B 24 +/* + * We configure the Stage-2 page tables to always restrict the IPA space to be + * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are + * not known to exist and will break with this configuration. + * + * Note that when using 4K pages, we concatenate two first level page tables + * together. + * + * The magic numbers used for VTTBR_X in this patch can be found in Tables + * D4-23 and D4-25 in ARM DDI 0487A.b. 
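[Editor's note] The numbers in the comment above can be checked directly: T0SZ = 24 leaves a 40-bit input address, and with a 4 KiB granule (9 translation bits per level) the first-level stage-2 table has to resolve bits 39:30, i.e. 1024 entries, which is why two 4 KiB first-level tables are concatenated. A standalone arithmetic sketch (not kernel code):

#include <stdio.h>

int main(void)
{
	int t0sz = 24;
	int ipa_bits = 64 - t0sz;                          /* 40 */
	int page_shift = 12;                               /* 4 KiB granule */
	int l1_shift = page_shift + 2 * (page_shift - 3);  /* 30: lower levels resolve 9 bits each */
	long l1_entries = 1L << (ipa_bits - l1_shift);     /* 1024 */
	long l1_bytes = l1_entries * 8;                    /* 8 KiB */

	printf("IPA bits        : %d\n", ipa_bits);
	printf("level-1 entries : %ld\n", l1_entries);
	printf("level-1 size    : %ld bytes (%ld concatenated 4 KiB tables)\n",
	       l1_bytes, l1_bytes / (1L << page_shift));
	return 0;
}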
+ */ #ifdef CONFIG_ARM64_64K_PAGES /* * Stage2 translation configuration: @@ -148,9 +160,9 @@ #endif #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) -#define VTTBR_VMID_SHIFT (48LLU) -#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) +#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_VMID_SHIFT (UL(48)) +#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) /* Hyp System Trap Register */ #define HSTR_EL2_TTEE (1 << 16) @@ -173,13 +185,13 @@ /* Exception Syndrome Register (ESR) bits */ #define ESR_EL2_EC_SHIFT (26) -#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT) -#define ESR_EL2_IL (1U << 25) +#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT) +#define ESR_EL2_IL (UL(1) << 25) #define ESR_EL2_ISS (ESR_EL2_IL - 1) #define ESR_EL2_ISV_SHIFT (24) -#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT) +#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT) #define ESR_EL2_SAS_SHIFT (22) -#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT) +#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT) #define ESR_EL2_SSE (1 << 21) #define ESR_EL2_SRT_SHIFT (16) #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT) @@ -193,16 +205,16 @@ #define ESR_EL2_FSC_TYPE (0x3c) #define ESR_EL2_CV_SHIFT (24) -#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT) +#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT) #define ESR_EL2_COND_SHIFT (20) -#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT) +#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT) #define FSC_FAULT (0x04) #define FSC_PERM (0x0c) /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ -#define HPFAR_MASK (~0xFUL) +#define HPFAR_MASK (~UL(0xf)) #define ESR_EL2_EC_UNKNOWN (0x00) #define ESR_EL2_EC_WFI (0x01) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index dd8ecfc3f995..91f33c2051f2 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -38,6 +38,13 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu); void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); +static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) +{ + vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; + if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) + vcpu->arch.hcr_el2 &= ~HCR_RW; +} + static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) { return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 92242ce06309..fb6075dfac5b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -42,7 +42,7 @@ #define KVM_VCPU_MAX_FEATURES 3 struct kvm_vcpu; -int kvm_target_cpu(void); +int __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); int kvm_arch_dev_ioctl_check_extension(long ext); @@ -177,7 +177,7 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) } struct kvm_vcpu *kvm_arm_get_running_vcpu(void); -struct kvm_vcpu __percpu **kvm_get_running_vcpus(void); +struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); u64 kvm_call_hyp(void *hypfn, ...); diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 7d29847a893b..2067f4b75d13 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -59,10 +59,9 @@ #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) /* - * Align KVM with 
the kernel's view of physical memory. Should be - * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration. + * We currently only support a 40bit IPA. */ -#define KVM_PHYS_SHIFT PHYS_MASK_SHIFT +#define KVM_PHYS_SHIFT (40) #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) @@ -70,11 +69,14 @@ #define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT)) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) + int create_hyp_mappings(void *from, void *to); int create_hyp_io_mappings(void *from, void *to, phys_addr_t); void free_boot_hyp_pgd(void); void free_hyp_pgds(void); +void stage2_unmap_vm(struct kvm *kvm); int kvm_alloc_stage2_pgd(struct kvm *kvm); void kvm_free_stage2_pgd(struct kvm *kvm); int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, @@ -93,19 +95,6 @@ void kvm_clear_hyp_idmap(void); #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) -static inline bool kvm_is_write_fault(unsigned long esr) -{ - unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT; - - if (esr_ec == ESR_EL2_EC_IABT) - return false; - - if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR)) - return false; - - return true; -} - static inline void kvm_clean_pgd(pgd_t *pgd) {} static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} static inline void kvm_clean_pte(pte_t *pte) {} @@ -125,6 +114,21 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd) #define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) #define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) +static inline bool kvm_page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} + +#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep) +#ifndef CONFIG_ARM64_64K_PAGES +#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp) +#else +#define kvm_pmd_table_empty(pmdp) (0) +#endif +#define kvm_pud_table_empty(pudp) (0) + + struct kvm; #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 60b5c31f3c10..3e610f60f031 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; return 0; } diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index b0d1512acf08..3aaf3bc4ad8a 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -629,6 +629,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) * Instead, we invalidate Stage-2 for this IPA, and the * whole of Stage-1. Weep... 
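[Editor's note] The "lsr x1, x1, #12" added just below is the actual fix: TLBI IPAS2E1IS takes the IPA as a page number in the low bits of Xt, not as a byte address, so the register has to be shifted right by the page shift first. A standalone sketch of the operand conversion; the exact field width is an assumption taken from the ARMv8 architecture manual rather than from this patch:

#include <stdint.h>
#include <stdio.h>

/* Build the Xt operand for TLBI IPAS2E1IS from a byte-granular IPA. */
static uint64_t ipas2e1is_operand(uint64_t ipa)
{
	return (ipa >> 12) & ((1ULL << 36) - 1);     /* Xt[35:0] = IPA >> 12 */
}

int main(void)
{
	uint64_t fault_ipa = 0x8000a000ULL;
	printf("tlbi ipas2e1is operand: 0x%llx\n",
	       (unsigned long long)ipas2e1is_operand(fault_ipa));
	return 0;
}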
*/ + lsr x1, x1, #12 tlbi ipas2e1is, x1 /* * We have to ensure completion of the invalidation at Stage-2, @@ -830,7 +831,7 @@ el1_trap: mrs x2, far_el2 2: mrs x0, tpidr_el2 - str x1, [x0, #VCPU_ESR_EL2] + str w1, [x0, #VCPU_ESR_EL2] str x2, [x0, #VCPU_FAR_EL2] str x3, [x0, #VCPU_HPFAR_EL2] diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 70a7816535cd..0b4326578985 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) if (!cpu_has_32bit_el1()) return -EINVAL; cpu_reset = &default_regs_reset32; - vcpu->arch.hcr_el2 &= ~HCR_RW; } else { cpu_reset = &default_regs_reset; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index c59a1bdab5eb..444e8edd71b2 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -836,7 +836,7 @@ static bool is_valid_cache(u32 val) u32 level, ctype; if (val >= CSSELR_MAX) - return -ENOENT; + return false; /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ level = (val >> 1); @@ -962,7 +962,7 @@ static unsigned int num_demux_regs(void) static int write_demux_regids(u64 __user *uindices) { - u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; + u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; unsigned int i; val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index de3abbe6c59f..893802645efe 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -65,8 +65,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, *dma_handle = phys_to_dma(dev, page_to_phys(page)); addr = page_address(page); - if (flags & __GFP_ZERO) - memset(addr, 0, size); + memset(addr, 0, size); return addr; } else { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 8ed6cb1a900f..8f7ffffc63e9 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -47,22 +47,14 @@ static int mmap_is_legacy(void) return sysctl_legacy_va_layout; } -/* - * Since get_random_int() returns the same value within a 1 jiffy window, we - * will almost always get the same randomisation for the stack and mmap - * region. This will mean the relative distance between stack and mmap will be - * the same. - * - * To avoid this we can shift the randomness by 1 bit. 
- */ static unsigned long mmap_rnd(void) { unsigned long rnd = 0; if (current->flags & PF_RANDOMIZE) - rnd = (long)get_random_int() & (STACK_RND_MASK >> 1); + rnd = (long)get_random_int() & STACK_RND_MASK; - return rnd << (PAGE_SHIFT + 1); + return rnd << PAGE_SHIFT; } static unsigned long mmap_base(void) diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 55d4ba47a907..deed6fa96bb0 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -662,7 +662,7 @@ void machine_restart (char *restart_cmd) { (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); - (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); + efi_reboot(REBOOT_WARM, NULL); } void diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index c89787d43dd1..06685498b681 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -407,6 +407,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) if (rc) return -EINVAL; + rc = dlpar_acquire_drc(drc_index); + if (rc) + return -EINVAL; + parent = of_find_node_by_path("/cpus"); if (!parent) return -ENODEV; @@ -417,12 +421,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) of_node_put(parent); - rc = dlpar_acquire_drc(drc_index); - if (rc) { - dlpar_free_cc_nodes(dn); - return -EINVAL; - } - rc = dlpar_attach_node(dn); if (rc) { dlpar_release_drc(drc_index); diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 0331d765c2bb..bfb3908493f8 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1091,6 +1091,8 @@ struct boot_params *make_boot_params(struct efi_config *c) if (!cmdline_ptr) goto fail; hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; + /* Fill in upper bits of command line address, NOP on 32 bit */ + boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32; hdr->ramdisk_image = 0; hdr->ramdisk_size = 0; diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 1eb5f6433ad8..81396a9a9277 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -156,6 +156,9 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( return EFI_SUCCESS; } #endif /* CONFIG_EFI_MIXED */ + +extern bool efi_reboot_required(void); + #else /* * IF EFI is not configured, have the EFI calls return -ENOSYS. @@ -168,6 +171,10 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} +static inline bool efi_reboot_required(void) +{ + return false; +} #endif /* CONFIG_EFI */ #endif /* _ASM_X86_EFI_H */ diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index f319bfea154e..587be13be0be 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -28,6 +28,7 @@ #include <linux/mc146818rtc.h> #include <asm/realmode.h> #include <asm/x86_init.h> +#include <asm/efi.h> /* * Power off function, if any @@ -411,12 +412,25 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { static int __init reboot_init(void) { + int rv; + /* * Only do the DMI check if reboot_type hasn't been overridden * on the command line */ - if (reboot_default) - dmi_check_system(reboot_dmi_table); + if (!reboot_default) + return 0; + + /* + * The DMI quirks table takes precedence. 
If no quirks entry + * matches and the ACPI Hardware Reduced bit is set, force EFI + * reboot. + */ + rv = dmi_check_system(reboot_dmi_table); + + if (!rv && efi_reboot_required()) + reboot_type = BOOT_EFI; + return 0; } core_initcall(reboot_init); @@ -538,11 +552,7 @@ static void native_machine_emergency_restart(void) break; case BOOT_EFI: - if (efi_enabled(EFI_RUNTIME_SERVICES)) - efi.reset_system(reboot_mode == REBOOT_WARM ? - EFI_RESET_WARM : - EFI_RESET_COLD, - EFI_SUCCESS, 0, NULL); + efi_reboot(reboot_mode, NULL); reboot_type = BOOT_BIOS; break; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 87fc96bcc13c..53a324606d11 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -44,6 +44,7 @@ #include <linux/io.h> #include <linux/reboot.h> #include <linux/bcd.h> +#include <linux/acpi.h> #include <asm/setup.h> #include <asm/efi.h> @@ -1340,3 +1341,25 @@ void __init efi_apply_memmap_quirks(void) if (is_uv_system()) set_bit(EFI_OLD_MEMMAP, &efi.flags); } + +/* + * For most modern platforms the preferred method of powering off is via + * ACPI. However, there are some that are known to require the use of + * EFI runtime services and for which ACPI does not work at all. + * + * Using EFI is a last resort, to be used only if no other option + * exists. + */ +bool efi_reboot_required(void) +{ + if (!acpi_gbl_reduced_hardware) + return false; + + efi_reboot_quirk_mode = EFI_RESET_WARM; + return true; +} + +bool efi_poweroff_required(void) +{ + return !!acpi_gbl_reduced_hardware; +} diff --git a/block/blk-mq.c b/block/blk-mq.c index 06ac59f5bb5a..50dd29bc4972 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1503,22 +1503,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) return NOTIFY_OK; } -static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu) -{ - struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; - - if (set->tags[hctx->queue_num]) - return NOTIFY_OK; - - set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num); - if (!set->tags[hctx->queue_num]) - return NOTIFY_STOP; - - hctx->tags = set->tags[hctx->queue_num]; - return NOTIFY_OK; -} - static int blk_mq_hctx_notify(void *data, unsigned long action, unsigned int cpu) { @@ -1526,8 +1510,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) return blk_mq_hctx_cpu_offline(hctx, cpu); - else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) - return blk_mq_hctx_cpu_online(hctx, cpu); + + /* + * In case of CPU online, tags may be reallocated + * in blk_mq_map_swqueue() after mapping is updated. 
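[Editor's note] As the new comment says, hardware-queue tag maps are no longer allocated from the CPU hotplug notifier; blk_mq_map_swqueue() (further down in this hunk) frees the map of a hw queue that lost all of its CPUs and lazily allocates one for a queue that just gained some. A standalone model of that allocate-on-first-mapped-CPU, free-when-unmapped policy; the names and the malloc-based "tag map" are illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define NR_HW_QUEUES 2

static int *tags[NR_HW_QUEUES];                  /* stand-in for set->tags[i] */

/* Re-derive per-hw-queue tag maps after the CPU<->queue mapping changed. */
static void map_swqueue(const int nr_mapped_cpus[NR_HW_QUEUES])
{
	for (int i = 0; i < NR_HW_QUEUES; i++) {
		if (!nr_mapped_cpus[i]) {            /* queue unused: drop its map */
			free(tags[i]);
			tags[i] = NULL;
			continue;
		}
		if (!tags[i])                        /* remapped queue: allocate lazily */
			tags[i] = malloc(64 * sizeof(int));
	}
}

int main(void)
{
	int boot[NR_HW_QUEUES]    = { 4, 0 };        /* queue 1 has no CPUs yet */
	int hotplug[NR_HW_QUEUES] = { 2, 2 };        /* CPUs came online, remapped */

	map_swqueue(boot);
	printf("q0=%p q1=%p\n", (void *)tags[0], (void *)tags[1]);

	map_swqueue(hotplug);
	printf("q0=%p q1=%p\n", (void *)tags[0], (void *)tags[1]);

	free(tags[0]);
	free(tags[1]);
	return 0;
}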
+ */ return NOTIFY_OK; } @@ -1664,6 +1651,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) unsigned int i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; + struct blk_mq_tag_set *set = q->tag_set; queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); @@ -1690,16 +1678,20 @@ static void blk_mq_map_swqueue(struct request_queue *q) * disable it and free the request entries */ if (!hctx->nr_ctx) { - struct blk_mq_tag_set *set = q->tag_set; - if (set->tags[i]) { blk_mq_free_rq_map(set, set->tags[i], i); set->tags[i] = NULL; - hctx->tags = NULL; } + hctx->tags = NULL; continue; } + /* unmapped hw queue can be remapped after CPU topo changed */ + if (!set->tags[i]) + set->tags[i] = blk_mq_init_rq_map(set, i); + hctx->tags = set->tags[i]; + WARN_ON(!hctx->tags); + /* * Initialize batch roundrobin counts */ diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 4ddb0dca56f6..fa13db682309 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -309,6 +309,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"PNPb006"}, /* cs423x-pnpbios */ {"CSC0100"}, + {"CSC0103"}, + {"CSC0110"}, {"CSC0000"}, {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */ /* es18xx-pnpbios */ diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 4bceb11c7380..624cbb3b5683 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -63,23 +63,12 @@ #define ACPI_SET64(ptr, val) (*ACPI_CAST64 (ptr) = (u64) (val)) /* - * printf() format helpers. These macros are workarounds for the difficulties + * printf() format helper. This macros is a workaround for the difficulties * with emitting 64-bit integers and 64-bit pointers with the same code * for both 32-bit and 64-bit hosts. */ #define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i) -#if ACPI_MACHINE_WIDTH == 64 -#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i) -#define ACPI_FORMAT_TO_UINT(i) ACPI_FORMAT_UINT64(i) -#define ACPI_PRINTF_UINT "0x%8.8X%8.8X" - -#else -#define ACPI_FORMAT_NATIVE_UINT(i) 0, (u32) (i) -#define ACPI_FORMAT_TO_UINT(i) (u32) (i) -#define ACPI_PRINTF_UINT "0x%8.8X" -#endif - /* * Macros for moving data around to/from buffers that are possibly unaligned. * If the hardware supports the transfer of unaligned data, just do the store. 
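[Editor's note] The remaining ACPICA hunks all follow the pattern set up here: instead of printing addresses with %p (whose zero-padding differs between hosts) or the removed ACPI_FORMAT_NATIVE_UINT, a 64-bit physical address is split into two 32-bit halves and printed with "0x%8.8X%8.8X", giving identical output on 32-bit and 64-bit builds. A self-contained illustration that mirrors the macro shape kept in acmacros.h above; only the sample address is made up:

#include <stdint.h>
#include <stdio.h>

#define ACPI_LODWORD(v) ((uint32_t)((uint64_t)(v) & 0xffffffffu))
#define ACPI_HIDWORD(v) ((uint32_t)(((uint64_t)(v)) >> 32))
#define ACPI_FORMAT_UINT64(v) ACPI_HIDWORD(v), ACPI_LODWORD(v)

int main(void)
{
	uint64_t address = 0x00000000dfff8000ULL;    /* e.g. an ACPI table address */

	printf("RSDP 0x%8.8X%8.8X\n", ACPI_FORMAT_UINT64(address));
	return 0;
}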
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 6c0759c0db47..5e7c7c301c5c 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n", obj_desc, - ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address), + ACPI_FORMAT_UINT64(obj_desc->region.address), obj_desc->region.length)); /* Now the address and length are valid for this opregion */ @@ -539,13 +539,12 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_NOT_EXIST); } - obj_desc->region.address = - (acpi_physical_address) ACPI_TO_INTEGER(table); + obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table); obj_desc->region.length = table->length; ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n", obj_desc, - ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address), + ACPI_FORMAT_UINT64(obj_desc->region.address), obj_desc->region.length)); /* Now the address and length are valid for this opregion */ diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 8eb8575e8c16..9ce53980201c 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -272,7 +272,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Handler %p (@%p) Address %8.8X%8.8X [%s]\n", ®ion_obj->region.handler->address_space, handler, - ACPI_FORMAT_NATIVE_UINT(address), + ACPI_FORMAT_UINT64(address), acpi_ut_get_region_name(region_obj->region. space_id))); diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index 925202acc3e4..869692f12a64 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -767,8 +767,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth) acpi_os_printf("\n"); } else { acpi_os_printf(" base %8.8X%8.8X Length %X\n", - ACPI_FORMAT_NATIVE_UINT(obj_desc->region. - address), + ACPI_FORMAT_UINT64(obj_desc->region. + address), obj_desc->region.length); } break; diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 1d1b27a96c5b..58ea85e42bfc 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -263,17 +263,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, } ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD, - " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n", + " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n", acpi_ut_get_region_name(rgn_desc->region. space_id), rgn_desc->region.space_id, obj_desc->common_field.access_byte_width, obj_desc->common_field.base_byte_offset, - field_datum_byte_offset, ACPI_CAST_PTR(void, - (rgn_desc-> - region. 
- address + - region_offset)))); + field_datum_byte_offset, + ACPI_FORMAT_UINT64(rgn_desc->region.address + + region_offset))); /* Invoke the appropriate address_space/op_region handler */ diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index cd5288a257a9..be51d3ed4d0f 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -181,7 +181,7 @@ acpi_ex_system_memory_space_handler(u32 function, if (!mem_info->mapped_logical_address) { ACPI_ERROR((AE_INFO, "Could not map memory at 0x%8.8X%8.8X, size %u", - ACPI_FORMAT_NATIVE_UINT(address), + ACPI_FORMAT_UINT64(address), (u32) map_length)); mem_info->mapped_length = 0; return_ACPI_STATUS(AE_NO_MEMORY); @@ -202,8 +202,7 @@ acpi_ex_system_memory_space_handler(u32 function, ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n", - bit_width, function, - ACPI_FORMAT_NATIVE_UINT(address))); + bit_width, function, ACPI_FORMAT_UINT64(address))); /* * Perform the memory read or write @@ -318,8 +317,7 @@ acpi_ex_system_io_space_handler(u32 function, ACPI_DEBUG_PRINT((ACPI_DB_INFO, "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n", - bit_width, function, - ACPI_FORMAT_NATIVE_UINT(address))); + bit_width, function, ACPI_FORMAT_UINT64(address))); /* Decode the function parameter */ diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index 6b919127cd9d..dabf754ea4a4 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -142,17 +142,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) byte_width = ACPI_DIV_8(bit_width); last_address = address + byte_width - 1; - ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X", - ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void, - last_address), - byte_width)); + ACPI_DEBUG_PRINT((ACPI_DB_IO, + "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X", + ACPI_FORMAT_UINT64(address), + ACPI_FORMAT_UINT64(last_address), byte_width)); /* Maximum 16-bit address in I/O space */ if (last_address > ACPI_UINT16_MAX) { ACPI_ERROR((AE_INFO, - "Illegal I/O port address/length above 64K: %p/0x%X", - ACPI_CAST_PTR(void, address), byte_width)); + "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X", + ACPI_FORMAT_UINT64(address), byte_width)); return_ACPI_STATUS(AE_LIMIT); } @@ -181,8 +181,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width) if (acpi_gbl_osi_data >= port_info->osi_dependency) { ACPI_DEBUG_PRINT((ACPI_DB_IO, - "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)", - ACPI_CAST_PTR(void, address), + "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)", + ACPI_FORMAT_UINT64(address), byte_width, port_info->name, port_info->start, port_info->end)); diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 3d88ef4a3e0d..809ed52de9c8 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -271,12 +271,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, switch (type) { case ACPI_TYPE_PROCESSOR: - acpi_os_printf("ID %02X Len %02X Addr %p\n", + acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n", obj_desc->processor.proc_id, obj_desc->processor.length, - ACPI_CAST_PTR(void, - obj_desc->processor. - address)); + ACPI_FORMAT_UINT64(obj_desc->processor. 
+ address)); break; case ACPI_TYPE_DEVICE: @@ -347,8 +346,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, space_id)); if (obj_desc->region.flags & AOPOBJ_DATA_VALID) { acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n", - ACPI_FORMAT_NATIVE_UINT - (obj_desc->region.address), + ACPI_FORMAT_UINT64(obj_desc-> + region. + address), obj_desc->region.length); } else { acpi_os_printf diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index f499c10ceb4a..a078053ff473 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -113,9 +113,9 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc, case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: - table = - ACPI_CAST_PTR(struct acpi_table_header, - table_desc->address); + table = ACPI_CAST_PTR(struct acpi_table_header, + ACPI_PHYSADDR_TO_PTR(table_desc-> + address)); break; default: @@ -214,7 +214,8 @@ acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc, case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: - table_header = ACPI_CAST_PTR(struct acpi_table_header, address); + table_header = ACPI_CAST_PTR(struct acpi_table_header, + ACPI_PHYSADDR_TO_PTR(address)); if (!table_header) { return (AE_NO_MEMORY); } @@ -398,14 +399,14 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature) table_desc->length); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, - "%4.4s " ACPI_PRINTF_UINT + "%4.4s 0x%8.8X%8.8X" " Attempted table install failed", acpi_ut_valid_acpi_name(table_desc-> signature. ascii) ? table_desc->signature.ascii : "????", - ACPI_FORMAT_TO_UINT(table_desc-> - address))); + ACPI_FORMAT_UINT64(table_desc-> + address))); goto invalidate_and_exit; } } diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index c0b39f304ea3..8e1a43ae2f9b 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -187,8 +187,9 @@ acpi_tb_install_fixed_table(acpi_physical_address address, status = acpi_tb_acquire_temp_table(&new_table_desc, address, ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL); if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, "Could not acquire table length at %p", - ACPI_CAST_PTR(void, address))); + ACPI_ERROR((AE_INFO, + "Could not acquire table length at %8.8X%8.8X", + ACPI_FORMAT_UINT64(address))); return_ACPI_STATUS(status); } @@ -246,8 +247,9 @@ acpi_tb_install_standard_table(acpi_physical_address address, status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags); if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, "Could not acquire table length at %p", - ACPI_CAST_PTR(void, address))); + ACPI_ERROR((AE_INFO, + "Could not acquire table length at %8.8X%8.8X", + ACPI_FORMAT_UINT64(address))); return_ACPI_STATUS(status); } @@ -258,9 +260,10 @@ acpi_tb_install_standard_table(acpi_physical_address address, if (!reload && acpi_gbl_disable_ssdt_table_install && ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) { - ACPI_INFO((AE_INFO, "Ignoring installation of %4.4s at %p", - new_table_desc.signature.ascii, ACPI_CAST_PTR(void, - address))); + ACPI_INFO((AE_INFO, + "Ignoring installation of %4.4s at %8.8X%8.8X", + new_table_desc.signature.ascii, + ACPI_FORMAT_UINT64(address))); goto release_and_exit; } @@ -428,11 +431,11 @@ finish_override: return; } - ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT - " %s table override, new table: " ACPI_PRINTF_UINT, + ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X" + " %s table override, new table: 
0x%8.8X%8.8X", old_table_desc->signature.ascii, - ACPI_FORMAT_TO_UINT(old_table_desc->address), - override_type, ACPI_FORMAT_TO_UINT(new_table_desc.address))); + ACPI_FORMAT_UINT64(old_table_desc->address), + override_type, ACPI_FORMAT_UINT64(new_table_desc.address))); /* We can now uninstall the original table */ @@ -516,7 +519,7 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc) if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) == ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) { - ACPI_FREE(ACPI_CAST_PTR(void, table_desc->address)); + ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address)); } table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL); diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index df3bb20ea325..a23bdaec6040 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -127,18 +127,12 @@ acpi_tb_print_table_header(acpi_physical_address address, { struct acpi_table_header local_header; - /* - * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to - * support both 32-bit and 64-bit hosts/addresses in a consistent manner. - * The %p specifier does not emit uniform output on all hosts. On some, - * leading zeros are not supported. - */ if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) { /* FACS only has signature and length fields */ - ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X", - header->signature, ACPI_FORMAT_TO_UINT(address), + ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X", + header->signature, ACPI_FORMAT_UINT64(address), header->length)); } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) { @@ -149,9 +143,8 @@ acpi_tb_print_table_header(acpi_physical_address address, header)->oem_id, ACPI_OEM_ID_SIZE); acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE); - ACPI_INFO((AE_INFO, - "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)", - ACPI_FORMAT_TO_UINT(address), + ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)", + ACPI_FORMAT_UINT64(address), (ACPI_CAST_PTR(struct acpi_table_rsdp, header)-> revision > 0) ? 
ACPI_CAST_PTR(struct acpi_table_rsdp, @@ -165,9 +158,9 @@ acpi_tb_print_table_header(acpi_physical_address address, acpi_tb_cleanup_table_header(&local_header, header); ACPI_INFO((AE_INFO, - "%-4.4s " ACPI_PRINTF_UINT + "%-4.4s 0x%8.8X%8.8X" " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)", - local_header.signature, ACPI_FORMAT_TO_UINT(address), + local_header.signature, ACPI_FORMAT_UINT64(address), local_header.length, local_header.revision, local_header.oem_id, local_header.oem_table_id, local_header.oem_revision, diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 65ab8fed3d5e..32c3ccfa22c1 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -111,7 +111,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) * ******************************************************************************/ -acpi_status __init acpi_find_root_pointer(acpi_size *table_address) +acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address) { u8 *table_ptr; u8 *mem_rover; @@ -169,7 +169,8 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address) physical_address += (u32) ACPI_PTR_DIFF(mem_rover, table_ptr); - *table_address = physical_address; + *table_address = + (acpi_physical_address) physical_address; return_ACPI_STATUS(AE_OK); } } @@ -202,7 +203,7 @@ acpi_status __init acpi_find_root_pointer(acpi_size *table_address) (ACPI_HI_RSDP_WINDOW_BASE + ACPI_PTR_DIFF(mem_rover, table_ptr)); - *table_address = physical_address; + *table_address = (acpi_physical_address) physical_address; return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index a1acec9d2ef3..65985036c5c3 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -107,10 +107,10 @@ acpi_ut_add_address_range(acpi_adr_space_type space_id, acpi_gbl_address_range_list[space_id] = range_info; ACPI_DEBUG_PRINT((ACPI_DB_NAMES, - "\nAdded [%4.4s] address range: 0x%p-0x%p\n", + "\nAdded [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n", acpi_ut_get_node_name(range_info->region_node), - ACPI_CAST_PTR(void, address), - ACPI_CAST_PTR(void, range_info->end_address))); + ACPI_FORMAT_UINT64(address), + ACPI_FORMAT_UINT64(range_info->end_address))); (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(AE_OK); @@ -160,15 +160,13 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id, } ACPI_DEBUG_PRINT((ACPI_DB_NAMES, - "\nRemoved [%4.4s] address range: 0x%p-0x%p\n", + "\nRemoved [%4.4s] address range: 0x%8.8X%8.8X-0x%8.8X%8.8X\n", acpi_ut_get_node_name(range_info-> region_node), - ACPI_CAST_PTR(void, - range_info-> - start_address), - ACPI_CAST_PTR(void, - range_info-> - end_address))); + ACPI_FORMAT_UINT64(range_info-> + start_address), + ACPI_FORMAT_UINT64(range_info-> + end_address))); ACPI_FREE(range_info); return_VOID; @@ -245,16 +243,14 @@ acpi_ut_check_address_range(acpi_adr_space_type space_id, region_node); ACPI_WARNING((AE_INFO, - "%s range 0x%p-0x%p conflicts with OpRegion 0x%p-0x%p (%s)", + "%s range 0x%8.8X%8.8X-0x%8.8X%8.8X conflicts with OpRegion 0x%8.8X%8.8X-0x%8.8X%8.8X (%s)", acpi_ut_get_region_name(space_id), - ACPI_CAST_PTR(void, address), - ACPI_CAST_PTR(void, end_address), - ACPI_CAST_PTR(void, - range_info-> - start_address), - ACPI_CAST_PTR(void, - range_info-> - end_address), + ACPI_FORMAT_UINT64(address), + ACPI_FORMAT_UINT64(end_address), + ACPI_FORMAT_UINT64(range_info-> + start_address), + ACPI_FORMAT_UINT64(range_info-> + 
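The common thread in the ACPICA hunks in this series is replacing %p (and ACPI_FORMAT_NATIVE_UINT) with ACPI_FORMAT_UINT64 plus a "%8.8X%8.8X" format, so addresses print as two zero-padded 32-bit halves regardless of the host pointer width. A minimal standalone sketch of that formatting idea; print_addr64 is an illustrative helper, not an ACPICA macro:

#include <stdio.h>
#include <stdint.h>

/*
 * Print a 64-bit address as two zero-padded 32-bit halves, the same
 * output shape ACPICA gets from "%8.8X%8.8X" + ACPI_FORMAT_UINT64.
 * Unlike %p, this looks identical on 32-bit and 64-bit hosts.
 */
static void print_addr64(const char *label, uint64_t addr)
{
	printf("%s 0x%8.8X%8.8X\n", label,
	       (unsigned int)(addr >> 32),
	       (unsigned int)(addr & 0xffffffffu));
}

int main(void)
{
	print_addr64("RSDP", 0x00000000000f6a10ULL);
	print_addr64("OpRegion", 0x00000000fed40000ULL);
	return 0;
}
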
end_address), pathname)); ACPI_FREE(pathname); } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 73d5388bd1f6..f017d1964eb6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2167,6 +2167,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) result, xferred); if (!img_request->result) img_request->result = result; + /* + * Need to end I/O on the entire obj_request worth of + * bytes in case of error. + */ + xferred = obj_request->length; } /* Image object requests don't own their page array */ diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 9553496b0f43..c135154ead89 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -1,7 +1,7 @@ # # Makefile for linux kernel # -obj-$(CONFIG_EFI) += efi.o vars.o +obj-$(CONFIG_EFI) += efi.o vars.o reboot.o obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o obj-$(CONFIG_UEFI_CPER) += cper.o diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c new file mode 100644 index 000000000000..9c59d1c795d1 --- /dev/null +++ b/drivers/firmware/efi/reboot.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2014 Intel Corporation; author Matt Fleming + * Copyright (c) 2014 Red Hat, Inc., Mark Salter <msalter@xxxxxxxxxx> + */ +#include <linux/efi.h> +#include <linux/reboot.h> + +int efi_reboot_quirk_mode = -1; + +void efi_reboot(enum reboot_mode reboot_mode, const char *__unused) +{ + int efi_mode; + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return; + + switch (reboot_mode) { + case REBOOT_WARM: + case REBOOT_SOFT: + efi_mode = EFI_RESET_WARM; + break; + default: + efi_mode = EFI_RESET_COLD; + break; + } + + /* + * If a quirk forced an EFI reset mode, always use that. + */ + if (efi_reboot_quirk_mode != -1) + efi_mode = efi_reboot_quirk_mode; + + efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL); +} + +bool __weak efi_poweroff_required(void) +{ + return false; +} + +static void efi_power_off(void) +{ + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); +} + +static int __init efi_shutdown_init(void) +{ + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return -ENODEV; + + if (efi_poweroff_required()) + pm_power_off = efi_power_off; + + return 0; +} +late_initcall(efi_shutdown_init); diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index c98b101a73ae..019a7e32de4c 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { kset_unregister(map_kset); - return entry; + map_kset = NULL; + return ERR_PTR(-ENOMEM); } memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, @@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) if (ret) { kobject_put(&entry->kobj); kset_unregister(map_kset); + map_kset = NULL; return ERR_PTR(ret); } @@ -174,8 +176,6 @@ out_add_entry: entry = *(map_entries + j); kobject_put(&entry->kobj); } - if (map_kset) - kset_unregister(map_kset); out: return ret; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d9b8844e2715..b5f915609dc5 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -833,6 +833,7 @@ static struct class gpio_class = { */ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) { + struct gpio_chip *chip; unsigned long flags; int status; const char *ioname = NULL; @@ -850,8 +851,16 @@ int gpiod_export(struct 
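The new reboot.c above maps warm/soft reboot requests to EFI_RESET_WARM, everything else to EFI_RESET_COLD, and lets efi_reboot_quirk_mode force a specific type when a platform quirk demands it. A small userspace sketch of just that selection logic, with illustrative enum values standing in for the EFI reset types:

#include <stdio.h>

enum reset_type { RESET_COLD, RESET_WARM, RESET_SHUTDOWN };

/* -1 means "no quirk"; anything else forces the reset type, mirroring
 * efi_reboot_quirk_mode in the reboot.c added above. */
static int reboot_quirk_mode = -1;

static enum reset_type pick_reset_type(int warm_requested)
{
	enum reset_type type = warm_requested ? RESET_WARM : RESET_COLD;

	if (reboot_quirk_mode != -1)
		type = (enum reset_type)reboot_quirk_mode;
	return type;
}

int main(void)
{
	printf("warm request, no quirk -> %d\n", pick_reset_type(1));
	reboot_quirk_mode = RESET_COLD;
	printf("warm request, quirk    -> %d\n", pick_reset_type(1));
	return 0;
}
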
gpio_desc *desc, bool direction_may_change) return -EINVAL; } + chip = desc->chip; + mutex_lock(&sysfs_lock); + /* check if chip is being removed */ + if (!chip || !chip->exported) { + status = -ENODEV; + goto fail_unlock; + } + spin_lock_irqsave(&gpio_lock, flags); if (!test_bit(FLAG_REQUESTED, &desc->flags) || test_bit(FLAG_EXPORT, &desc->flags)) { @@ -1076,12 +1085,15 @@ static void gpiochip_unexport(struct gpio_chip *chip) { int status; struct device *dev; + struct gpio_desc *desc; + unsigned int i; mutex_lock(&sysfs_lock); dev = class_find_device(&gpio_class, NULL, chip, match_export); if (dev) { put_device(dev); device_unregister(dev); + /* prevent further gpiod exports */ chip->exported = false; status = 0; } else @@ -1090,6 +1102,13 @@ static void gpiochip_unexport(struct gpio_chip *chip) if (status) chip_dbg(chip, "%s: status %d\n", __func__, status); + + /* unregister gpiod class devices owned by sysfs */ + for (i = 0; i < chip->ngpio; i++) { + desc = &chip->desc[i]; + if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) + gpiod_free(desc); + } } static int __init gpiolib_sysfs_init(void) @@ -1287,6 +1306,8 @@ int gpiochip_remove(struct gpio_chip *chip) int status = 0; unsigned id; + gpiochip_unexport(chip); + gpiochip_irqchip_remove(chip); acpi_gpiochip_remove(chip); @@ -1309,9 +1330,6 @@ int gpiochip_remove(struct gpio_chip *chip) spin_unlock_irqrestore(&gpio_lock, flags); - if (status == 0) - gpiochip_unexport(chip); - return status; } EXPORT_SYMBOL_GPL(gpiochip_remove); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index a8221d9349ee..d2f1431eb428 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -852,7 +852,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = true; pipe_config->has_dp_encoder = true; - pipe_config->has_audio = intel_dp->has_audio; + pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { intel_fixed_panel_mode(intel_connector->panel.fixed_mode, @@ -1578,8 +1578,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, int dotclock; tmp = I915_READ(intel_dp->output_reg); - if (tmp & DP_AUDIO_OUTPUT_ENABLE) - pipe_config->has_audio = true; + + pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { if (tmp & DP_SYNC_HS_HIGH) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index eef36d0076ee..e61b29cb2d29 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -802,12 +802,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) static const struct dmi_system_id intel_dual_link_lvds[] = { { .callback = intel_dual_link_lvds_callback, - .ident = "Apple MacBook Pro (Core i5/i7 Series)", + .ident = "Apple MacBook Pro 15\" (2010)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"), + }, + }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2011)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), }, }, + { + .callback = intel_dual_link_lvds_callback, + .ident = "Apple MacBook Pro 15\" (2012)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c 
b/drivers/gpu/drm/radeon/radeon_asic.c index 30f9f1885191..21d6f08f5324 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1160,7 +1160,7 @@ static struct radeon_asic rs780_asic = { static struct radeon_asic_ring rv770_uvd_ring = { .ib_execute = &uvd_v1_0_ib_execute, .emit_fence = &uvd_v2_2_fence_emit, - .emit_semaphore = &uvd_v1_0_semaphore_emit, + .emit_semaphore = &uvd_v2_2_semaphore_emit, .cs_parse = &radeon_uvd_cs_parse, .ring_test = &uvd_v1_0_ring_test, .ib_test = &uvd_v1_0_ib_test, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 01e7c0ad8f01..c26b0e2cdd39 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -866,6 +866,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int uvd_v2_2_resume(struct radeon_device *rdev); void uvd_v2_2_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait); /* uvd v3.1 */ bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index a4ad270e8261..a86cc490c35f 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -351,6 +351,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) return 0; } +static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, + unsigned stream_type) +{ + switch (stream_type) { + case 0: /* H264 */ + case 1: /* VC1 */ + /* always supported */ + return 0; + + case 3: /* MPEG2 */ + case 4: /* MPEG4 */ + /* only since UVD 3 */ + if (p->rdev->family >= CHIP_PALM) + return 0; + + /* fall through */ + default: + DRM_ERROR("UVD codec not supported by hardware %d!\n", + stream_type); + return -EINVAL; + } +} + static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, unsigned offset, unsigned buf_sizes[]) { @@ -389,50 +412,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - if (msg_type == 1) { - /* it's a decode msg, calc buffer sizes */ - r = radeon_uvd_cs_msg_decode(msg, buf_sizes); - /* calc image size (width * height) */ - img_size = msg[6] * msg[7]; + switch (msg_type) { + case 0: + /* it's a create msg, calc image size (width * height) */ + img_size = msg[7] * msg[8]; + + r = radeon_uvd_validate_codec(p, msg[4]); + radeon_bo_kunmap(bo); + if (r) + return r; + + /* try to alloc a new handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { + DRM_ERROR("Handle 0x%x already in use!\n", handle); + return -EINVAL; + } + + if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { + p->rdev->uvd.filp[i] = p->filp; + p->rdev->uvd.img_size[i] = img_size; + return 0; + } + } + + DRM_ERROR("No more free UVD handles!\n"); + return -EINVAL; + + case 1: + /* it's a decode msg, validate codec and calc buffer sizes */ + r = radeon_uvd_validate_codec(p, msg[4]); + if (!r) + r = radeon_uvd_cs_msg_decode(msg, buf_sizes); radeon_bo_kunmap(bo); if (r) return r; - } else if (msg_type == 2) { + /* validate the handle */ + for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { + if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { + if (p->rdev->uvd.filp[i] != p->filp) { + DRM_ERROR("UVD handle collision detected!\n"); + return -EINVAL; + } + return 0; + } + } 
+ + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + return -ENOENT; + + case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); radeon_bo_kunmap(bo); return 0; - } else { - /* it's a create msg, calc image size (width * height) */ - img_size = msg[7] * msg[8]; - radeon_bo_kunmap(bo); - if (msg_type != 0) { - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; - } - - /* it's a create msg, no special handling needed */ - } - - /* create or decode, validate the handle */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (atomic_read(&p->rdev->uvd.handles[i]) == handle) - return 0; - } + default: - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { - p->rdev->uvd.filp[i] = p->filp; - p->rdev->uvd.img_size[i] = img_size; - return 0; - } + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + return -EINVAL; } - DRM_ERROR("No more free UVD handles!\n"); + BUG(); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index aa21c31a846c..2944637b8b44 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -492,18 +492,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, * * @p: parser context * @handle: handle to validate + * @allocated: allocated a new handle? * * Validates the handle and return the found session index or -EINVAL * we we don't have another free session index. */ -int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) +static int radeon_vce_validate_handle(struct radeon_cs_parser *p, + uint32_t handle, bool *allocated) { unsigned i; + *allocated = false; + /* validate the handle */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->rdev->vce.handles[i]) == handle) + if (atomic_read(&p->rdev->vce.handles[i]) == handle) { + if (p->rdev->vce.filp[i] != p->filp) { + DRM_ERROR("VCE handle collision detected!\n"); + return -EINVAL; + } return i; + } } /* handle not found try to alloc a new one */ @@ -511,6 +520,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { p->rdev->vce.filp[i] = p->filp; p->rdev->vce.img_size[i] = 0; + *allocated = true; return i; } } @@ -528,10 +538,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) int radeon_vce_cs_parse(struct radeon_cs_parser *p) { int session_idx = -1; - bool destroyed = false; + bool destroyed = false, created = false, allocated = false; uint32_t tmp, handle = 0; uint32_t *size = &tmp; - int i, r; + int i, r = 0; while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { uint32_t len = radeon_get_ib_value(p, p->idx); @@ -539,18 +549,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) if ((len < 8) || (len & 3)) { DRM_ERROR("invalid VCE command length (%d)!\n", len); - return -EINVAL; + r = -EINVAL; + goto out; } if (destroyed) { DRM_ERROR("No other command allowed after destroy!\n"); - return -EINVAL; + r = -EINVAL; + goto out; } switch (cmd) { case 0x00000001: // session handle = radeon_get_ib_value(p, p->idx + 2); - session_idx = radeon_vce_validate_handle(p, handle); + session_idx = radeon_vce_validate_handle(p, handle, + &allocated); if (session_idx < 0) return session_idx; size = &p->rdev->vce.img_size[session_idx]; @@ -560,6 +573,13 @@ int 
radeon_vce_cs_parse(struct radeon_cs_parser *p) break; case 0x01000001: // create + created = true; + if (!allocated) { + DRM_ERROR("Handle already in use!\n"); + r = -EINVAL; + goto out; + } + *size = radeon_get_ib_value(p, p->idx + 8) * radeon_get_ib_value(p, p->idx + 10) * 8 * 3 / 2; @@ -576,12 +596,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, *size); if (r) - return r; + goto out; r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, *size / 3); if (r) - return r; + goto out; break; case 0x02000001: // destroy @@ -592,7 +612,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, *size * 2); if (r) - return r; + goto out; break; case 0x05000004: // video bitstream buffer @@ -600,36 +620,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, tmp); if (r) - return r; + goto out; break; case 0x05000005: // feedback buffer r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, 4096); if (r) - return r; + goto out; break; default: DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); - return -EINVAL; + r = -EINVAL; + goto out; } if (session_idx == -1) { DRM_ERROR("no session command at start of IB\n"); - return -EINVAL; + r = -EINVAL; + goto out; } p->idx += len / 4; } - if (destroyed) { - /* IB contains a destroy msg, free the handle */ + if (allocated && !created) { + DRM_ERROR("New session without create command!\n"); + r = -ENOENT; + } + +out: + if ((!r && destroyed) || (r && allocated)) { + /* + * IB contains a destroy msg or we have allocated an + * handle and got an error, anyway free the handle + */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); } - return 0; + return r; } /** diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 3cf1e2921545..9ef2064b1c9c 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -989,6 +989,9 @@ ((n) & 0x3FFF) << 16) /* UVD */ +#define UVD_SEMA_ADDR_LOW 0xef00 +#define UVD_SEMA_ADDR_HIGH 0xef04 +#define UVD_SEMA_CMD 0xef08 #define UVD_GPCOM_VCPU_CMD 0xef0c #define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA1 0xef14 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index e7b9f714042b..4d47c5819c9c 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2920,6 +2920,7 @@ struct si_dpm_quirk { static struct si_dpm_quirk si_dpm_quirk_list[] = { /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index be42c8125203..80856b0619fb 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -365,18 +365,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, struct radeon_semaphore *semaphore, bool emit_wait) { - uint64_t addr = semaphore->gpu_addr; - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); - radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); - radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); - - radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); - radeon_ring_write(ring, emit_wait ? 
1 : 0); - - return true; + /* disable semaphores for UVD V1 hardware */ + return false; } /** diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 8bfdadd56598..3eaa034a4f46 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, } /** + * uvd_v2_2_semaphore_emit - emit semaphore command + * + * @rdev: radeon_device pointer + * @ring: radeon_ring pointer + * @semaphore: semaphore to emit commands for + * @emit_wait: true if we should emit a wait command + * + * Emit a semaphore command (either wait or signal) to the UVD ring. + */ +bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, + struct radeon_ring *ring, + struct radeon_semaphore *semaphore, + bool emit_wait) +{ + uint64_t addr = semaphore->gpu_addr; + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); + radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); + radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); + + radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); + radeon_ring_write(ring, emit_wait ? 1 : 0); + + return true; +} + +/** * uvd_v2_2_resume - memory controller programming * * @rdev: radeon_device pointer diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 4d835fbeba44..d66af95d3de4 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1252,8 +1252,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap) dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); - pm_runtime_no_callbacks(&adap->dev); - #ifdef CONFIG_I2C_COMPAT res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev, adap->dev.parent); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d570030d899c..06441a43c3aa 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id memcpy(&ib->sib_addr, &path->dgid, 16); } +static __be16 ss_get_port(const struct sockaddr_storage *ss) +{ + if (ss->ss_family == AF_INET) + return ((struct sockaddr_in *)ss)->sin_port; + else if (ss->ss_family == AF_INET6) + return ((struct sockaddr_in6 *)ss)->sin6_port; + BUG(); +} + static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct cma_hdr *hdr) { - struct sockaddr_in *listen4, *ip4; + struct sockaddr_in *ip4; - listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr; ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; - ip4->sin_family = listen4->sin_family; + ip4->sin_family = AF_INET; ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; - ip4->sin_port = listen4->sin_port; + ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr); ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; - ip4->sin_family = listen4->sin_family; + ip4->sin_family = AF_INET; ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; ip4->sin_port = hdr->port; } @@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, struct cma_hdr *hdr) { - struct sockaddr_in6 *listen6, *ip6; + struct sockaddr_in6 *ip6; - listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr; ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr; - ip6->sin6_family = listen6->sin6_family; + ip6->sin6_family = AF_INET6; ip6->sin6_addr = hdr->dst_addr.ip6; - 
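The cma.c change above stops copying the family and port from the cast listen address and instead derives the port with a small family-aware helper. A standalone sketch of the same idea; being userspace, it returns 0 for an unknown family instead of BUG():

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Extract the port from a sockaddr_storage for either address family,
 * the same shape as the ss_get_port() helper added above. */
static unsigned short ss_port(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET)
		return ntohs(((const struct sockaddr_in *)ss)->sin_port);
	if (ss->ss_family == AF_INET6)
		return ntohs(((const struct sockaddr_in6 *)ss)->sin6_port);
	return 0;
}

int main(void)
{
	struct sockaddr_storage ss = { 0 };
	struct sockaddr_in *v4 = (struct sockaddr_in *)&ss;

	v4->sin_family = AF_INET;
	v4->sin_port = htons(4791);
	printf("port %u\n", ss_port(&ss));
	return 0;
}
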
ip6->sin6_port = listen6->sin6_port; + ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr); ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr; - ip6->sin6_family = listen6->sin6_family; + ip6->sin6_family = AF_INET6; ip6->sin6_addr = hdr->src_addr.ip6; ip6->sin6_port = hdr->port; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 683e685ed697..9afd00b45f83 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -531,6 +531,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) ? (sector & (chunk_sects-1)) : sector_div(sector, chunk_sects)); + /* Restore due to sector_div */ + sector = bio->bi_iter.bi_sector; + if (sectors < bio_sectors(bio)) { split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); bio_chain(split, bio); @@ -538,7 +541,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) split = bio; } - sector = bio->bi_iter.bi_sector; zone = find_zone(mddev->private, §or); tmp_dev = map_sector(mddev, zone, sector, §or); split->bi_bdev = tmp_dev->bdev; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index b082b25ea9c4..e32206ecfa04 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1025,6 +1025,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } +int mmc_access_rpmb(struct mmc_queue *mq) +{ + struct mmc_blk_data *md = mq->data; + /* + * If this is a RPMB partition access, return ture + */ + if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) + return true; + + return false; +} + static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 3e049c13429c..6ceede0a0bf7 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) return BLKPREP_KILL; } - if (mq && mmc_card_removed(mq->card)) + if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) return BLKPREP_KILL; req->cmd_flags |= REQ_DONTPREP; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 5752d50049a3..99e6521e6169 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *); extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); extern void mmc_packed_clean(struct mmc_queue *); +extern int mmc_access_rpmb(struct mmc_queue *); + #endif diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7dc0c85fdb60..767b47296e1b 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2620,6 +2620,7 @@ int mmc_pm_notify(struct notifier_block *notify_block, switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 1; spin_unlock_irqrestore(&host->lock, flags); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 656fbba4c422..1df9140c0b16 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1402,7 +1402,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) host = mmc_priv(mmc); host->mmc = mmc; host->addr = reg; - host->timeout = msecs_to_jiffies(1000); + host->timeout = msecs_to_jiffies(10000); host->ccs_enable = !pd || !pd->ccs_unsupported; host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; diff --git a/drivers/mtd/ubi/misc.c 
b/drivers/mtd/ubi/misc.c index f913d701a5b3..c4b1af07a121 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c @@ -74,6 +74,8 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id) for (i = 0; i < vol->used_ebs; i++) { int size; + cond_resched(); + if (i == vol->used_ebs - 1) size = vol->last_eb_bytes; else diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 1b67e902aee6..8be0c179e25d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1497,6 +1497,7 @@ static void mlx4_en_service_task(struct work_struct *work) if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) mlx4_en_ptp_overflow_check(mdev); + mlx4_en_recover_from_oom(priv); queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); } @@ -1746,7 +1747,7 @@ mac_err: cq_err: while (rx_index--) { mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); - mlx4_en_free_affinity_hint(priv, i); + mlx4_en_free_affinity_hint(priv, rx_index); } for (i = 0; i < priv->rx_ring_num; i++) mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 5535862f27cc..fa742b1115f9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -237,6 +237,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); } +static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) +{ + BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); + return ring->prod == ring->cons; +} + static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) { *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); @@ -308,8 +314,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, ring->cons, ring->prod); /* Unmap and free Rx buffers */ - BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); - while (ring->cons != ring->prod) { + while (!mlx4_en_is_ring_empty(ring)) { index = ring->cons & ring->size_mask; en_dbg(DRV, priv, "Processing descriptor:%d\n", index); mlx4_en_free_rx_desc(priv, ring, index); @@ -483,6 +488,23 @@ err_allocator: return err; } +/* We recover from out of memory by scheduling our napi poll + * function (mlx4_en_process_cq), which tries to allocate + * all missing RX buffers (call to mlx4_en_refill_rx_buffers). 
+ */ +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) +{ + int ring; + + if (!priv->port_up) + return; + + for (ring = 0; ring < priv->rx_ring_num; ring++) { + if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) + napi_reschedule(&priv->rx_cq[ring]->napi); + } +} + void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 0610c85d4371..8068b0557c5a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -132,8 +132,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; ring->queue_index = queue_index; - if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index)) - cpumask_set_cpu(queue_index, &ring->affinity_mask); + if (queue_index < priv->num_tx_rings_p_up) + cpumask_set_cpu_local_first(queue_index, + priv->mdev->dev->numa_node, + &ring->affinity_mask); *pring = ring; return 0; @@ -200,7 +202,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, &ring->qp, &ring->qp_state); - if (!user_prio && cpu_online(ring->queue_index)) + if (!cpumask_empty(&ring->affinity_mask)) netif_set_xps_queue(priv->dev, &ring->affinity_mask, ring->queue_index); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d72a5a894fc6..4e47239d031d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -758,6 +758,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride, int node); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 81ca0ce4c909..84f946a7a405 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1124,7 +1124,7 @@ void devm_pinctrl_put(struct pinctrl *p) EXPORT_SYMBOL_GPL(devm_pinctrl_put); int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, - bool dup, bool locked) + bool dup) { int i, ret; struct pinctrl_maps *maps_node; @@ -1192,11 +1192,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, maps_node->maps = maps; } - if (!locked) - mutex_lock(&pinctrl_maps_mutex); + mutex_lock(&pinctrl_maps_mutex); list_add_tail(&maps_node->node, &pinctrl_maps); - if (!locked) - mutex_unlock(&pinctrl_maps_mutex); + mutex_unlock(&pinctrl_maps_mutex); return 0; } @@ -1211,7 +1209,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, int pinctrl_register_mappings(struct pinctrl_map const *maps, unsigned num_maps) { - return pinctrl_register_map(maps, num_maps, true, false); + return pinctrl_register_map(maps, num_maps, true); } void pinctrl_unregister_map(struct pinctrl_map const *map) diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 75476b3d87da..b24ea846c867 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h @@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, } int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, - bool dup, bool locked); 
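The mlx4 RX changes above centralize the "is this ring empty" test on the free-running producer/consumer counters and use it both when draining buffers and, from the new mlx4_en_recover_from_oom(), to re-arm NAPI on rings that ran dry. A minimal sketch of why unsigned prod/cons counters make that test wraparound-safe:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Free-running 32-bit producer/consumer counters as used by the mlx4
 * RX ring: the fill level is the unsigned difference, so the test
 * keeps working when prod wraps past zero before cons does.
 */
struct rx_ring {
	uint32_t prod;
	uint32_t cons;
	uint32_t actual_size;
};

static bool ring_empty(const struct rx_ring *r)
{
	assert((uint32_t)(r->prod - r->cons) <= r->actual_size);
	return r->prod == r->cons;
}

int main(void)
{
	struct rx_ring r = {
		.prod = 0xfffffffeu,
		.cons = 0xfffffffeu,
		.actual_size = 1024,
	};

	printf("empty: %d\n", ring_empty(&r));		/* 1 */
	r.prod += 3;					/* wraps past zero */
	printf("fill:  %u\n", r.prod - r.cons);		/* 3 */
	printf("empty: %d\n", ring_empty(&r));		/* 0 */
	return 0;
}
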
+ bool dup); void pinctrl_unregister_map(struct pinctrl_map const *map); extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index eda13de2e7c0..0bbf7d71b281 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, dt_map->num_maps = num_maps; list_add_tail(&dt_map->node, &p->dt_maps); - return pinctrl_register_map(map, num_maps, false, true); + return pinctrl_register_map(map, num_maps, false); } struct pinctrl_dev *of_pinctrl_get(struct device_node *np) diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 0a7325361d29..5f57e3d35e26 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id); /* Functions */ @@ -1352,11 +1351,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) } /* Now complete the io */ + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - twa_unmap_scsi_data(tw_dev, request_id); } /* Check for valid status after each drain */ @@ -1414,26 +1413,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm } } /* End twa_load_sgl() */ -/* This function will perform a pci-dma mapping for a scatter gather list */ -static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) -{ - int use_sg; - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - use_sg = scsi_dma_map(cmd); - if (!use_sg) - return 0; - else if (use_sg < 0) { - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End twa_map_scsi_sg_data() */ - /* This function will poll for a response interrupt of a request */ static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) { @@ -1612,9 +1591,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { if (tw_dev->srb[i]) { - tw_dev->srb[i]->result = (DID_RESET << 16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - twa_unmap_scsi_data(tw_dev, i); + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + cmd->result = (DID_RESET << 16); + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); } } } @@ -1793,21 +1774,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: + scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); - twa_unmap_scsi_data(tw_dev, request_id); break; case 1: - tw_dev->state[request_id] = TW_S_COMPLETED; - 
twa_free_request_id(tw_dev, request_id); - twa_unmap_scsi_data(tw_dev, request_id); SCpnt->result = (DID_ERROR << 16); + scsi_dma_unmap(SCpnt); done(SCpnt); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); retval = 0; } out: @@ -1875,8 +1853,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); } else { - sg_count = twa_map_scsi_sg_data(tw_dev, request_id); - if (sg_count == 0) + sg_count = scsi_dma_map(srb); + if (sg_count < 0) goto out; scsi_for_each_sg(srb, sg, sg_count, i) { @@ -1991,15 +1969,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code) return(table[index].text); } /* End twa_string_lookup() */ -/* This function will perform a pci-dma unmap */ -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) -{ - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End twa_unmap_scsi_data() */ - /* This function gets called when a disk is coming on-line */ static int twa_slave_configure(struct scsi_device *sdev) { diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h index 040f7214e5b7..0fdc83cfa0e1 100644 --- a/drivers/scsi/3w-9xxx.h +++ b/drivers/scsi/3w-9xxx.h @@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = { #define TW_CURRENT_DRIVER_BUILD 0 #define TW_CURRENT_DRIVER_BRANCH 0 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SINGLE 1 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_9550SX_DRAIN_COMPLETED 0xFFFF #define TW_SECTOR_SIZE 512 diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 4de346017e9f..61702ac00d42 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -303,26 +303,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) return 0; } /* End twl_post_command_packet() */ -/* This function will perform a pci-dma mapping for a scatter gather list */ -static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) -{ - int use_sg; - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - use_sg = scsi_dma_map(cmd); - if (!use_sg) - return 0; - else if (use_sg < 0) { - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End twl_map_scsi_sg_data() */ - /* This function hands scsi cdb's to the firmware */ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) { @@ -370,8 +350,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, if (!sglistarg) { /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { - sg_count = twl_map_scsi_sg_data(tw_dev, request_id); - if (sg_count == 0) + sg_count = scsi_dma_map(srb); + if (sg_count <= 0) goto out; scsi_for_each_sg(srb, sg, sg_count, i) { @@ -1116,15 +1096,6 @@ out: return retval; } /* End twl_initialize_device_extension() */ -/* This function will perform a pci-dma unmap */ -static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) -{ - struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End twl_unmap_scsi_data() */ - /* This function 
will handle attention interrupts */ static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) { @@ -1265,11 +1236,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance) } /* Now complete the io */ + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twl_free_request_id(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - twl_unmap_scsi_data(tw_dev, request_id); } /* Check for another response interrupt */ @@ -1414,10 +1385,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res if ((tw_dev->state[i] != TW_S_FINISHED) && (tw_dev->state[i] != TW_S_INITIAL) && (tw_dev->state[i] != TW_S_COMPLETED)) { - if (tw_dev->srb[i]) { - tw_dev->srb[i]->result = (DID_RESET << 16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - twl_unmap_scsi_data(tw_dev, i); + struct scsi_cmnd *cmd = tw_dev->srb[i]; + + if (cmd) { + cmd->result = (DID_RESET << 16); + scsi_dma_unmap(cmd); + cmd->scsi_done(cmd); } } } @@ -1521,9 +1494,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); if (retval) { tw_dev->state[request_id] = TW_S_COMPLETED; diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h index d474892701d4..fec6449c7595 100644 --- a/drivers/scsi/3w-sas.h +++ b/drivers/scsi/3w-sas.h @@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] = #define TW_CURRENT_DRIVER_BUILD 0 #define TW_CURRENT_DRIVER_BRANCH 0 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_SECTOR_SIZE 512 #define TW_MAX_UNITS 32 diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 752624e6bc00..b327742b95ef 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1284,32 +1284,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev) return 0; } /* End tw_initialize_device_extension() */ -static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) -{ - int use_sg; - - dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n"); - - use_sg = scsi_dma_map(cmd); - if (use_sg < 0) { - printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n"); - return 0; - } - - cmd->SCp.phase = TW_PHASE_SGLIST; - cmd->SCp.have_data_in = use_sg; - - return use_sg; -} /* End tw_map_scsi_sg_data() */ - -static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) -{ - dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); - - if (cmd->SCp.phase == TW_PHASE_SGLIST) - scsi_dma_unmap(cmd); -} /* End tw_unmap_scsi_data() */ - /* This function will reset a device extension */ static int tw_reset_device_extension(TW_Device_Extension *tw_dev) { @@ -1332,8 +1306,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev) srb = tw_dev->srb[i]; if (srb != NULL) { srb->result = (DID_RESET << 16); - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]); + scsi_dma_unmap(srb); + srb->scsi_done(srb); } } } @@ -1780,8 +1754,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id) command_packet->byte8.io.lba = lba; command_packet->byte6.block_count = num_sectors; - use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); - 
if (!use_sg) + use_sg = scsi_dma_map(srb); + if (use_sg <= 0) return 1; scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { @@ -1968,9 +1942,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c /* Save the scsi command for use by the ISR */ tw_dev->srb[request_id] = SCpnt; - /* Initialize phase to zero */ - SCpnt->SCp.phase = TW_PHASE_INITIAL; - switch (*command) { case READ_10: case READ_6: @@ -2198,12 +2169,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance) /* Now complete the io */ if ((error != TW_ISR_DONT_COMPLETE)) { + scsi_dma_unmap(tw_dev->srb[request_id]); + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); tw_dev->state[request_id] = TW_S_COMPLETED; tw_state_request_finish(tw_dev, request_id); tw_dev->posted_request_count--; - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); - - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); } } diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h index 49dcf03c631a..1d31858766ce 100644 --- a/drivers/scsi/3w-xxxx.h +++ b/drivers/scsi/3w-xxxx.h @@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] = #define TW_AEN_SMART_FAIL 0x000F #define TW_AEN_SBUF_FAIL 0x0024 -/* Phase defines */ -#define TW_PHASE_INITIAL 0 -#define TW_PHASE_SINGLE 1 -#define TW_PHASE_SGLIST 2 - /* Misc defines */ #define TW_ALIGNMENT_6000 64 /* 64 bytes */ #define TW_ALIGNMENT_7000 4 /* 4 bytes */ diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 262ab837a704..9f77d23239a2 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -226,6 +226,7 @@ static struct { {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, {"Promise", "", NULL, BLIST_SPARSELUN}, + {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index a299b82e6b09..44458696c7b6 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -900,6 +900,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, */ if (*bflags & BLIST_MAX_512) blk_queue_max_hw_sectors(sdev->request_queue, 512); + /* + * Max 1024 sector transfer length for targets that report incorrect + * max/optimal lengths and relied on the old block layer safe default + */ + else if (*bflags & BLIST_MAX_1024) + blk_queue_max_hw_sectors(sdev->request_queue, 1024); /* * Some devices may not want to have a start command automatically diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 2dc2831840ca..8eb65f26fcae 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void) return 0; } +static void xen_console_update_evtchn(struct xencons_info *info) +{ + if (xen_hvm_domain()) { + uint64_t v; + int err; + + err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); + if (!err && v) + info->evtchn = v; + } else + info->evtchn = xen_start_info->console.domU.evtchn; +} + void xen_console_resume(void) { struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); - if (info != NULL && info->irq) + if (info != NULL && info->irq) { + if (!xen_initial_domain()) + xen_console_update_evtchn(info); rebind_evtchn_irq(info->evtchn, info->irq); + } } static void 
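The 3w-9xxx, 3w-sas and 3w-xxxx patches above drop the drivers' private TW_PHASE_* bookkeeping and let scsi_dma_map()/scsi_dma_unmap() own the mapping state; what the callers now rely on is only the return-value convention (negative on mapping failure, zero when there is no data phase, otherwise the segment count). A userspace sketch of that convention, with map_segments() as an illustrative stand-in for scsi_dma_map():

#include <stdio.h>

/*
 * Illustrative stand-in for scsi_dma_map(): returns the number of
 * mapped segments, 0 when the command carries no data, or a negative
 * value when the mapping itself failed.
 */
static int map_segments(int nsegs, int force_failure)
{
	if (force_failure)
		return -1;
	return nsegs;
}

static int build_sglist(int nsegs, int force_failure)
{
	int sg_count = map_segments(nsegs, force_failure);
	int i;

	if (sg_count < 0)
		return -1;	/* mapping error: fail the command */
	if (sg_count == 0)
		return 0;	/* no data phase: nothing to fill in */

	for (i = 0; i < sg_count; i++)
		printf("  queue segment %d\n", i);
	return sg_count;
}

int main(void)
{
	printf("3 segs  -> %d\n", build_sglist(3, 0));
	printf("no data -> %d\n", build_sglist(0, 0));
	printf("failure -> %d\n", build_sglist(3, 1));
	return 0;
}
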
xencons_disconnect_backend(struct xencons_info *info) diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 60d05fc2c1c4..0d39ae4ff533 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -860,6 +860,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port) config.direction = DMA_MEM_TO_DEV; config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; config.dst_addr = port->mapbase + ATMEL_US_THR; + config.dst_maxburst = 1; ret = dmaengine_device_control(atmel_port->chan_tx, DMA_SLAVE_CONFIG, @@ -1024,6 +1025,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port) config.direction = DMA_DEV_TO_MEM; config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; config.src_addr = port->mapbase + ATMEL_US_RHR; + config.src_maxburst = 1; ret = dmaengine_device_control(atmel_port->chan_rx, DMA_SLAVE_CONFIG, diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 68d4455f3cf9..3197872f307b 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -266,7 +266,6 @@ static struct of_device_id of_platform_serial_table[] = { { .compatible = "ibm,qpace-nwp-serial", .data = (void *)PORT_NWPSERIAL, }, #endif - { .type = "serial", .data = (void *)PORT_UNKNOWN, }, { /* end of list */ }, }; diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index dce27f34937e..aade351c3192 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -638,7 +638,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match); static int ulite_probe(struct platform_device *pdev) { - struct resource *res, *res2; + struct resource *res; + int irq; int id = pdev->id; #ifdef CONFIG_OF const __be32 *prop; @@ -652,11 +653,11 @@ static int ulite_probe(struct platform_device *pdev) if (!res) return -ENODEV; - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res2) - return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENXIO; - return ulite_assign(&pdev->dev, id, res->start, res2->start); + return ulite_assign(&pdev->dev, id, res->start, irq); } static int ulite_remove(struct platform_device *pdev) diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 8809775e2ba3..efbfddf93471 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1290,9 +1290,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend, */ static int cdns_uart_probe(struct platform_device *pdev) { - int rc, id; + int rc, id, irq; struct uart_port *port; - struct resource *res, *res2; + struct resource *res; struct cdns_uart *cdns_uart_data; cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), @@ -1339,9 +1339,9 @@ static int cdns_uart_probe(struct platform_device *pdev) goto err_out_clk_disable; } - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res2) { - rc = -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + rc = -ENXIO; goto err_out_clk_disable; } @@ -1370,7 +1370,7 @@ static int cdns_uart_probe(struct platform_device *pdev) * and triggers invocation of the config_port() entry point. 
*/ port->mapbase = res->start; - port->irq = res2->start; + port->irq = irq; port->dev = &pdev->dev; port->uartclk = clk_get_rate(cdns_uart_data->uartclk); port->private_data = cdns_uart_data; diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index caaabc58021e..34a52cd7bfb7 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -537,7 +537,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) { struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); - mutex_unlock(&fsm->lock); if (on) { ci_role_stop(ci); ci_role_start(ci, CI_ROLE_HOST); @@ -546,7 +545,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) hw_device_reset(ci, USBMODE_CM_DC); ci_role_start(ci, CI_ROLE_GADGET); } - mutex_lock(&fsm->lock); return 0; } @@ -554,12 +552,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on) { struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); - mutex_unlock(&fsm->lock); if (on) usb_gadget_vbus_connect(&ci->gadget); else usb_gadget_vbus_disconnect(&ci->gadget); - mutex_lock(&fsm->lock); return 0; } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index bc2cacd3cb68..a8946e88978e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1128,11 +1128,16 @@ static int acm_probe(struct usb_interface *intf, } while (buflen > 0) { + elength = buffer[0]; + if (!elength) { + dev_err(&intf->dev, "skipping garbage byte\n"); + elength = 1; + goto next_desc; + } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } - elength = buffer[0]; switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 9893d696fc97..f58caa9e6a27 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt, } static int uas_use_uas_driver(struct usb_interface *intf, - const struct usb_device_id *id) + const struct usb_device_id *id, + unsigned long *flags_ret) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = interface_to_usbdev(intf); @@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf, * this writing the following versions exist: * ASM1051 - no uas support version * ASM1051 - with broken (*) uas support - * ASM1053 - with working uas support + * ASM1053 - with working uas support, but problems with large xfers * ASM1153 - with working uas support * * Devices with these chips re-use a number of device-ids over the @@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf, } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; + } else { + /* ASM1053, these have issues with large transfers */ + flags |= US_FL_MAX_SECTORS_240; } } @@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf, return 0; } + if (flags_ret) + *flags_ret = flags; + return 1; } diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 27136935fec3..22b3b92ceda2 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -936,7 +936,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd) static int uas_slave_alloc(struct scsi_device *sdev) { - sdev->hostdata = (void *)sdev->host->hostdata; + struct uas_dev_info *devinfo = + (struct uas_dev_info *)sdev->host->hostdata; + + sdev->hostdata = 
devinfo; /* USB has unusual DMA-alignment requirements: Although the * starting address of each scatter-gather element doesn't matter, @@ -955,6 +958,11 @@ static int uas_slave_alloc(struct scsi_device *sdev) */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); + if (devinfo->flags & US_FL_MAX_SECTORS_64) + blk_queue_max_hw_sectors(sdev->request_queue, 64); + else if (devinfo->flags & US_FL_MAX_SECTORS_240) + blk_queue_max_hw_sectors(sdev->request_queue, 240); + return 0; } @@ -1071,8 +1079,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) struct Scsi_Host *shost = NULL; struct uas_dev_info *devinfo; struct usb_device *udev = interface_to_usbdev(intf); + unsigned long dev_flags; - if (!uas_use_uas_driver(intf, id)) + if (!uas_use_uas_driver(intf, id, &dev_flags)) return -ENODEV; if (uas_switch_interface(udev, intf)) @@ -1095,8 +1104,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) devinfo->resetting = 0; devinfo->running_task = 0; devinfo->shutdown = 0; - devinfo->flags = id->driver_info; - usb_stor_adjust_quirks(udev, &devinfo->flags); + devinfo->flags = dev_flags; init_usb_anchor(&devinfo->cmd_urbs); init_usb_anchor(&devinfo->sense_urbs); init_usb_anchor(&devinfo->data_urbs); diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 20c5bcc6d3df..79323d008f2d 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -477,7 +477,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT | US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 | US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE | - US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES); + US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES | + US_FL_MAX_SECTORS_240); p = quirks; while (*p) { @@ -518,6 +519,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) case 'f': f |= US_FL_NO_REPORT_OPCODES; break; + case 'g': + f |= US_FL_MAX_SECTORS_240; + break; case 'h': f |= US_FL_CAPACITY_HEURISTICS; break; @@ -1052,7 +1056,7 @@ static int storage_probe(struct usb_interface *intf, /* If uas is enabled and this device can do uas then ignore it. 
*/ #if IS_ENABLED(CONFIG_USB_UAS) - if (uas_use_uas_driver(intf, id)) + if (uas_use_uas_driver(intf, id, NULL)) return -ENXIO; #endif diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index 5db43fc100a4..7dd46312c180 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c @@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static void evtchn_2l_resume(void) +{ + int i; + + for_each_online_cpu(i) + memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); +} + static const struct evtchn_ops evtchn_ops_2l = { .max_channels = evtchn_2l_max_channels, .nr_channels = evtchn_2l_max_channels, @@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = { .mask = evtchn_2l_mask, .unmask = evtchn_2l_unmask, .handle_events = evtchn_2l_handle_events, + .resume = evtchn_2l_resume, }; void __init xen_evtchn_2l_init(void) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 82fbb0cd8f65..31ac2eec06e4 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq) if (rc) goto err; - bind_evtchn_to_cpu(evtchn, 0); info->evtchn = evtchn; + bind_evtchn_to_cpu(evtchn, 0); rc = xen_evtchn_port_setup(info); if (rc) @@ -1278,8 +1278,9 @@ void rebind_evtchn_irq(int evtchn, int irq) mutex_unlock(&irq_mapping_update_lock); - /* new event channels are always bound to cpu 0 */ - irq_set_affinity(irq, cpumask_of(0)); + bind_evtchn_to_cpu(evtchn, info->cpu); + /* This will be deferred until interrupt is processed */ + irq_set_affinity(irq, cpumask_of(info->cpu)); /* Unmask the event channel. */ enable_irq(irq); diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 75fe3d466515..9c234209d8b5 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -16,8 +16,8 @@ #include "conf_space.h" #include "conf_space_quirks.h" -bool permissive; -module_param(permissive, bool, 0644); +bool xen_pcibk_permissive; +module_param_named(permissive, xen_pcibk_permissive, bool, 0644); /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */ @@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) * This means that some fields may still be read-only because * they have entries in the config_field list that intercept * the write and do nothing. 
*/ - if (dev_data->permissive || permissive) { + if (dev_data->permissive || xen_pcibk_permissive) { switch (size) { case 1: err = pci_write_config_byte(dev, offset, diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index 2e1d73d1d5d0..62461a8ba1d6 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h @@ -64,7 +64,7 @@ struct config_field_entry { void *data; }; -extern bool permissive; +extern bool xen_pcibk_permissive; #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index 2d7369391472..f8baf463dd35 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -105,7 +105,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) cmd->val = value; - if (!permissive && (!dev_data || !dev_data->permissive)) + if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive)) return 0; /* Only allow the guest to control certain bits. */ diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 3c0a74b3e9b1..7d6de403e450 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -57,6 +57,7 @@ #include <xen/xen.h> #include <xen/xenbus.h> #include <xen/events.h> +#include <xen/xen-ops.h> #include <xen/page.h> #include <xen/hvm.h> @@ -731,6 +732,30 @@ static int __init xenstored_local_init(void) return err; } +static int xenbus_resume_cb(struct notifier_block *nb, + unsigned long action, void *data) +{ + int err = 0; + + if (xen_hvm_domain()) { + uint64_t v; + + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); + if (!err && v) + xen_store_evtchn = v; + else + pr_warn("Cannot update xenstore event channel: %d\n", + err); + } else + xen_store_evtchn = xen_start_info->store_evtchn; + + return err; +} + +static struct notifier_block xenbus_resume_nb = { + .notifier_call = xenbus_resume_cb, +}; + static int __init xenbus_init(void) { int err = 0; @@ -789,6 +814,10 @@ static int __init xenbus_init(void) goto out_error; } + if ((xen_store_domain_type != XS_LOCAL) && + (xen_store_domain_type != XS_UNKNOWN)) + xen_resume_notifier_register(&xenbus_resume_nb); + #ifdef CONFIG_XEN_COMPAT_XENFS /* * Create xenfs mountpoint in /proc for compatibility with diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 5576abb92a23..7c5f053ee42c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2451,7 +2451,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, "Attempt to delete subvolume %llu during send", dest->root_key.objectid); err = -EPERM; - goto out_dput; + goto out_unlock_inode; } err = d_invalidate(dentry); @@ -2549,6 +2549,7 @@ out_unlock: root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); spin_unlock(&dest->root_item_lock); } +out_unlock_inode: mutex_unlock(&inode->i_mutex); if (!err) { shrink_dcache_sb(root->fs_info->sb); diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 0a48886e069c..c2f421c30ccd 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, int len, i; int err = -ENOMEM; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return err; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index cdfe574ba3d9..99d74306e770 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4933,13 
+4933,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (ret) return ret; - /* - * currently supporting (pre)allocate mode for extent-based - * files _only_ - */ - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - return -EOPNOTSUPP; - if (mode & FALLOC_FL_COLLAPSE_RANGE) return ext4_collapse_range(inode, offset, len); @@ -4961,6 +4954,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) mutex_lock(&inode->i_mutex); + /* + * We only support preallocation for extent-based files only + */ + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { + ret = -EOPNOTSUPP; + goto out; + } + if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 0b7e28e7eaa4..4880ae8a9dce 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -662,6 +662,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, BUG_ON(end < lblk); + if ((status & EXTENT_STATUS_DELAYED) && + (status & EXTENT_STATUS_WRITTEN)) { + ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as " + " delayed and written which can potentially " + " cause data loss.\n", lblk, len); + WARN_ON(1); + } + newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, pblk, status); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 02408b9c2874..6a37a1f51e56 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -577,6 +577,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && + !(status & EXTENT_STATUS_WRITTEN) && ext4_find_delalloc_range(inode, map->m_lblk, map->m_lblk + map->m_len - 1)) status |= EXTENT_STATUS_DELAYED; @@ -691,6 +692,7 @@ found: status = map->m_flags & EXT4_MAP_UNWRITTEN ? EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && + !(status & EXTENT_STATUS_WRITTEN) && ext4_find_delalloc_range(inode, map->m_lblk, map->m_lblk + map->m_len - 1)) status |= EXTENT_STATUS_DELAYED; diff --git a/fs/namei.c b/fs/namei.c index 19ac9c89dbab..15fc3f5a5007 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3194,7 +3194,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, if (unlikely(file->f_flags & __O_TMPFILE)) { error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); - goto out; + goto out2; } error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base); @@ -3232,6 +3232,7 @@ out: path_put(&nd->root); if (base) fput(base); +out2: if (!(opened & FILE_OPENED)) { BUG_ON(!error); put_filp(file); diff --git a/fs/namespace.c b/fs/namespace.c index d74c67678bb1..6dfb6fc34286 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1579,8 +1579,11 @@ struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; namespace_lock(); - tree = copy_tree(real_mount(path->mnt), path->dentry, - CL_COPY_ALL | CL_PRIVATE); + if (!check_mnt(real_mount(path->mnt))) + tree = ERR_PTR(-EINVAL); + else + tree = copy_tree(real_mount(path->mnt), path->dentry, + CL_COPY_ALL | CL_PRIVATE); namespace_unlock(); if (IS_ERR(tree)) return ERR_CAST(tree); @@ -3035,6 +3038,12 @@ bool fs_fully_visible(struct file_system_type *type) if (mnt->mnt.mnt_sb->s_type != type) continue; + /* This mount is not fully visible if it's root directory + * is not the root directory of the filesystem. 
+ */ + if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) + continue; + /* This mount is not fully visible if there are any child mounts * that cover anything except for empty directories. */ diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index ecdbae19a766..090d8ce25bd1 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, nchildren = nilfs_btree_node_get_nchildren(node); if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || - level > NILFS_BTREE_LEVEL_MAX || + level >= NILFS_BTREE_LEVEL_MAX || nchildren < 0 || nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 9d405d6d2504..7b9f96899812 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -768,6 +768,19 @@ lookup: if (tmpres) { spin_unlock(&dlm->spinlock); spin_lock(&tmpres->spinlock); + + /* + * Right after dlm spinlock was released, dlm_thread could have + * purged the lockres. Check if lockres got unhashed. If so + * start over. + */ + if (hlist_unhashed(&tmpres->hash_node)) { + spin_unlock(&tmpres->spinlock); + dlm_lockres_put(tmpres); + tmpres = NULL; + goto lookup; + } + /* Wait on the thread that is mastering the resource */ if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { __dlm_wait_on_lockres(tmpres); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 055459999660..c768860e22ab 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -361,7 +361,8 @@ STATIC int /* error (positive) */ xfs_zero_last_block( struct xfs_inode *ip, xfs_fsize_t offset, - xfs_fsize_t isize) + xfs_fsize_t isize, + bool *did_zeroing) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); @@ -389,6 +390,7 @@ xfs_zero_last_block( zero_len = mp->m_sb.sb_blocksize - zero_offset; if (isize + zero_len > offset) zero_len = offset - isize; + *did_zeroing = true; return xfs_iozero(ip, isize, zero_len); } @@ -407,7 +409,8 @@ int /* error (positive) */ xfs_zero_eof( struct xfs_inode *ip, xfs_off_t offset, /* starting I/O offset */ - xfs_fsize_t isize) /* current inode size */ + xfs_fsize_t isize, /* current inode size */ + bool *did_zeroing) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t start_zero_fsb; @@ -429,7 +432,7 @@ xfs_zero_eof( * We only zero a part of that block so it is handled specially. */ if (XFS_B_FSB_OFFSET(mp, isize) != 0) { - error = xfs_zero_last_block(ip, offset, isize); + error = xfs_zero_last_block(ip, offset, isize, did_zeroing); if (error) return error; } @@ -489,6 +492,7 @@ xfs_zero_eof( if (error) return error; + *did_zeroing = true; start_zero_fsb = imap.br_startoff + imap.br_blockcount; ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); } @@ -527,13 +531,15 @@ restart: * having to redo all checks before. 
*/ if (*pos > i_size_read(inode)) { + bool zero = false; + if (*iolock == XFS_IOLOCK_SHARED) { xfs_rw_iunlock(ip, *iolock); *iolock = XFS_IOLOCK_EXCL; xfs_rw_ilock(ip, *iolock); goto restart; } - error = -xfs_zero_eof(ip, *pos, i_size_read(inode)); + error = -xfs_zero_eof(ip, *pos, i_size_read(inode), &zero); if (error) return error; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index f72bffa67266..f8397df396d7 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -379,8 +379,9 @@ int xfs_droplink(struct xfs_trans *, struct xfs_inode *); int xfs_bumplink(struct xfs_trans *, struct xfs_inode *); /* from xfs_file.c */ -int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); -int xfs_iozero(struct xfs_inode *, loff_t, size_t); +int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset, + xfs_fsize_t isize, bool *did_zeroing); +int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count); #define IHOLD(ip) \ diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 205613a06068..537d8daeaa9e 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -744,6 +744,7 @@ xfs_setattr_size( int error; uint lock_flags = 0; uint commit_flags = 0; + bool did_zeroing = false; trace_xfs_setattr(ip); @@ -787,20 +788,16 @@ xfs_setattr_size( return error; /* - * Now we can make the changes. Before we join the inode to the - * transaction, take care of the part of the truncation that must be - * done without the inode lock. This needs to be done before joining - * the inode to the transaction, because the inode cannot be unlocked - * once it is a part of the transaction. + * File data changes must be complete before we start the transaction to + * modify the inode. This needs to be done before joining the inode to + * the transaction because the inode cannot be unlocked once it is a + * part of the transaction. + * + * Start with zeroing any data block beyond EOF that we may expose on + * file extension. */ if (newsize > oldsize) { - /* - * Do the first part of growing a file: zero any data in the - * last block that is beyond the old EOF. We need to do this - * before the inode is joined to the transaction to modify - * i_size. - */ - error = xfs_zero_eof(ip, newsize, oldsize); + error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing); if (error) return error; } @@ -810,23 +807,18 @@ xfs_setattr_size( * any previous writes that are beyond the on disk EOF and the new * EOF that have not been written out need to be written here. If we * do not write the data out, we expose ourselves to the null files - * problem. - * - * Only flush from the on disk size to the smaller of the in memory - * file size or the new size as that's the range we really care about - * here and prevents waiting for other data not within the range we - * care about here. + * problem. Note that this includes any block zeroing we did above; + * otherwise those blocks may not be zeroed after a crash. */ - if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { + if (newsize > ip->i_d.di_size && + (oldsize != ip->i_d.di_size || did_zeroing)) { error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ip->i_d.di_size, newsize); if (error) return error; } - /* - * Wait for all direct I/O to complete. - */ + /* Now wait for all direct I/O to complete. 
*/ inode_dio_wait(inode); /* diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 6c77a132b53b..664a1a4b6f6a 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -416,13 +416,13 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_load_tables(void)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init acpi_reallocate_root_table(void)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status __init - acpi_find_root_pointer(acpi_size * rsdp_address)) - + acpi_find_root_pointer(acpi_physical_address * + rsdp_address)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_get_table_header(acpi_string signature, - u32 instance, - struct acpi_table_header - *out_table_header)) + acpi_get_table_header(acpi_string signature, + u32 instance, + struct acpi_table_header + *out_table_header)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_get_table(acpi_string signature, u32 instance, struct acpi_table_header diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 19b26bb69a70..f0a23d7fd302 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -198,9 +198,29 @@ typedef int s32; typedef s32 acpi_native_int; typedef u32 acpi_size; + +#ifdef ACPI_32BIT_PHYSICAL_ADDRESS + +/* + * OSPMs can define this to shrink the size of the structures for 32-bit + * none PAE environment. ASL compiler may always define this to generate + * 32-bit OSPM compliant tables. + */ typedef u32 acpi_io_address; typedef u32 acpi_physical_address; +#else /* ACPI_32BIT_PHYSICAL_ADDRESS */ + +/* + * It is reported that, after some calculations, the physical addresses can + * wrap over the 32-bit boundary on 32-bit PAE environment. + * https://bugzilla.kernel.org/show_bug.cgi?id=87971 + */ +typedef u64 acpi_io_address; +typedef u64 acpi_physical_address; + +#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */ + #define ACPI_MAX_PTR ACPI_UINT32_MAX #define ACPI_SIZE_MAX ACPI_UINT32_MAX diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h index e863dd5c4e04..7049af0108b3 100644 --- a/include/acpi/platform/acenv.h +++ b/include/acpi/platform/acenv.h @@ -76,6 +76,7 @@ #define ACPI_LARGE_NAMESPACE_NODE #define ACPI_DATA_TABLE_DISASSEMBLY #define ACPI_SINGLE_THREADED +#define ACPI_32BIT_PHYSICAL_ADDRESS #endif /* acpi_exec configuration. 
Multithreaded with full AML debugger */ diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 6d9aeddc09bf..327b155e7cc9 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -60,7 +60,8 @@ struct arch_timer_cpu { #ifdef CONFIG_KVM_ARM_TIMER int kvm_timer_hyp_init(void); -int kvm_timer_init(struct kvm *kvm); +void kvm_timer_enable(struct kvm *kvm); +void kvm_timer_init(struct kvm *kvm); void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, const struct kvm_irq_level *irq); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); @@ -73,11 +74,8 @@ static inline int kvm_timer_hyp_init(void) return 0; }; -static inline int kvm_timer_init(struct kvm *kvm) -{ - return 0; -} - +static inline void kvm_timer_enable(struct kvm *kvm) {} +static inline void kvm_timer_init(struct kvm *kvm) {} static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, const struct kvm_irq_level *irq) {} static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {} diff --git a/include/linux/efi.h b/include/linux/efi.h index 41bbf8ba4ba8..b3fac7c1656c 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -20,6 +20,7 @@ #include <linux/ioport.h> #include <linux/pfn.h> #include <linux/pstore.h> +#include <linux/reboot.h> #include <asm/page.h> @@ -875,6 +876,9 @@ extern void efi_reserve_boot_services(void); extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); extern struct efi_memory_map memmap; +extern int efi_reboot_quirk_mode; +extern bool efi_poweroff_required(void); + /* Iterate through an efi_memory_map */ #define for_each_efi_memory_desc(m, md) \ for ((md) = (m)->map; \ @@ -926,11 +930,14 @@ static inline bool efi_enabled(int feature) { return test_bit(feature, &efi.flags) != 0; } +extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); #else static inline bool efi_enabled(int feature) { return false; } +static inline void +efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} #endif /* diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index ff3fea3194c6..9abb763e4b86 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -460,7 +460,7 @@ struct nilfs_btree_node { /* level */ #define NILFS_BTREE_LEVEL_DATA 0 #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) -#define NILFS_BTREE_LEVEL_MAX 14 +#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */ /** * struct nilfs_palloc_group_desc - block group descriptor diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index a7f2604c5f25..7f5f78bd15ad 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -77,6 +77,8 @@ /* Cannot handle ATA_12 or ATA_16 CDBs */ \ US_FLAG(NO_REPORT_OPCODES, 0x04000000) \ /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \ + US_FLAG(MAX_SECTORS_240, 0x08000000) \ + /* Sets max_sectors to 240 */ \ #define US_FLAG(name, value) US_FL_##name = value , enum { US_DO_ALL_FLAGS }; diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 183eaab7c380..96e3f56519e7 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h @@ -36,5 +36,6 @@ for sequential scan */ #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ +#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ #endif diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index c46908c1bb3f..8af2fff0eb08 100644 --- a/include/sound/emu10k1.h +++ 
b/include/sound/emu10k1.h @@ -41,7 +41,8 @@ #define EMUPAGESIZE 4096 #define MAXREQVOICES 8 -#define MAXPAGES 8192 +#define MAXPAGES0 4096 /* 32 bit mode */ +#define MAXPAGES1 8192 /* 31 bit mode */ #define RESERVED 0 #define NUM_MIDI 16 #define NUM_G 64 /* use all channels */ @@ -50,8 +51,7 @@ /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */ #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */ -#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */ - /* See ALSA bug #1276 - rlrevell */ +#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */ #define TMEMSIZE 256*1024 #define TMEMSIZEREG 4 @@ -466,8 +466,11 @@ #define MAPB 0x0d /* Cache map B */ -#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */ -#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */ +#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */ +#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */ + +#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */ +#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */ /* 0x0e, 0x0f: Not used */ @@ -1704,6 +1707,7 @@ struct snd_emu10k1 { unsigned short model; /* subsystem id */ unsigned int card_type; /* EMU10K1_CARD_* */ unsigned int ecard_ctrl; /* ecard control bits */ + unsigned int address_mode; /* address mode */ unsigned long dma_mask; /* PCI DMA mask */ unsigned int delay_pcm_irq; /* in samples */ int max_cache_pages; /* max memory size / PAGE_SIZE */ diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 6b59471cdf44..f7c0a020518b 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -287,7 +287,7 @@ struct device; .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\ .tlv.p = (tlv_array), \ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ - .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) } + .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) } #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \ SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array) #define SOC_DAPM_ENUM(xname, xenum) \ diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index e73efba98301..798e912842d5 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void) * so we don't have to move tasks around upon policy change, * or flail around trying to allocate bandwidth on the fly. * A bandwidth exception in __sched_setscheduler() allows - * the policy change to proceed. Thereafter, task_group() - * returns &root_task_group, so zero bandwidth is required. + * the policy change to proceed. */ free_rt_sched_group(tg); tg->rt_se = root_task_group.rt_se; @@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) if (tg != &root_task_group) return false; - if (p->sched_class != &fair_sched_class) - return false; - /* * We can only assume the task group can't go away on us if * autogroup_move_group() can see us on ->thread_group list. diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 847922e4ba67..6356f644a47b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7348,6 +7348,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg) { struct task_struct *g, *p; + /* + * Autogroups do not have RT tasks; see autogroup_create(). 
+ */ + if (task_group_is_autogroup(tg)) + return 0; + do_each_thread(g, p) { if (rt_task(p) && task_rq(p)->rt.tg == tg) return 1; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1e080a06e6ba..5f217e906ff9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3723,7 +3723,8 @@ retry: if (!pmd_huge(*pmd)) goto out; if (pmd_present(*pmd)) { - page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); + page = pte_page(*(pte_t *)pmd) + + ((address & ~PMD_MASK) >> PAGE_SHIFT); if (flags & FOLL_GET) get_page(page); } else { diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 607a6d62bcab..fad245565616 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1153,10 +1153,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) * The check (unnecessarily) ignores LRU pages being isolated and * walked by the page reclaim code, however that's not a big loss. */ - if (!PageHuge(p) && !PageTransTail(p)) { - if (!PageLRU(p)) - shake_page(p, 0); - if (!PageLRU(p)) { + if (!PageHuge(p)) { + if (!PageLRU(hpage)) + shake_page(hpage, 0); + if (!PageLRU(hpage)) { /* * shake_page could have turned it free. */ @@ -1723,12 +1723,12 @@ int soft_offline_page(struct page *page, int flags) } else if (ret == 0) { /* for free pages */ if (PageHuge(page)) { set_page_hwpoison_huge_page(hpage); - dequeue_hwpoisoned_huge_page(hpage); - atomic_long_add(1 << compound_order(hpage), + if (!dequeue_hwpoisoned_huge_page(hpage)) + atomic_long_add(1 << compound_order(hpage), &num_poisoned_pages); } else { - SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + if (!TestSetPageHWPoison(page)) + atomic_long_inc(&num_poisoned_pages); } } unset_migratetype_isolate(page, MIGRATE_MOVABLE); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6a3348761648..a6fd1a3615f9 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -583,7 +583,7 @@ static long long pos_ratio_polynom(unsigned long setpoint, long x; x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, - limit - setpoint + 1); + (limit - setpoint) | 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; @@ -810,7 +810,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, * scale global setpoint to bdi's: * bdi_setpoint = setpoint * bdi_thresh / thresh */ - x = div_u64((u64)bdi_thresh << 16, thresh + 1); + x = div_u64((u64)bdi_thresh << 16, thresh | 1); bdi_setpoint = setpoint * (u64)x >> 16; /* * Use span=(8*write_bw) in single bdi case as indicated by @@ -825,7 +825,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, if (bdi_dirty < x_intercept - span / 4) { pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), - x_intercept - bdi_setpoint + 1); + (x_intercept - bdi_setpoint) | 1); } else pos_ratio /= 4; diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 5df05269d17a..d8b1833a363e 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, struct br_port_msg *bpm; struct nlattr *nest, *nest2; - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0); if (!nlh) return -EMSGSIZE; diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index a1ef53c04415..b1f2d1f44d37 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -290,6 +290,7 @@ static int is_out(const struct crush_map *map, * @type: the type of item to choose * @out: pointer 
to output vector * @outpos: our position in that vector + * @out_size: size of the out vector * @tries: number of attempts to make * @recurse_tries: number of attempts to have recursive chooseleaf make * @local_retries: localized retries @@ -304,6 +305,7 @@ static int crush_choose_firstn(const struct crush_map *map, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, + int out_size, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, @@ -322,6 +324,7 @@ static int crush_choose_firstn(const struct crush_map *map, int item = 0; int itemtype; int collide, reject; + int count = out_size; dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n", recurse_to_leaf ? "_LEAF" : "", @@ -329,7 +332,7 @@ static int crush_choose_firstn(const struct crush_map *map, tries, recurse_tries, local_retries, local_fallback_retries, parent_r); - for (rep = outpos; rep < numrep; rep++) { + for (rep = outpos; rep < numrep && count > 0 ; rep++) { /* keep trying until we get a non-out, non-colliding item */ ftotal = 0; skip_rep = 0; @@ -403,7 +406,7 @@ static int crush_choose_firstn(const struct crush_map *map, map->buckets[-1-item], weight, weight_max, x, outpos+1, 0, - out2, outpos, + out2, outpos, count, recurse_tries, 0, local_retries, local_fallback_retries, @@ -463,6 +466,7 @@ reject: dprintk("CHOOSE got %d\n", item); out[outpos] = item; outpos++; + count--; } dprintk("CHOOSE returns %d\n", outpos); @@ -654,6 +658,7 @@ int crush_do_rule(const struct crush_map *map, __u32 step; int i, j; int numrep; + int out_size; /* * the original choose_total_tries value was off by one (it * counted "retries" and not "tries"). add one. @@ -761,6 +766,7 @@ int crush_do_rule(const struct crush_map *map, x, numrep, curstep->arg2, o+osize, j, + result_max-osize, choose_tries, recurse_tries, choose_local_retries, @@ -770,11 +776,13 @@ int crush_do_rule(const struct crush_map *map, c+osize, 0); } else { + out_size = ((numrep < (result_max-osize)) ? 
+ numrep : (result_max-osize)); crush_choose_indep( map, map->buckets[-1-w[i]], weight, weight_max, - x, numrep, numrep, + x, out_size, numrep, curstep->arg2, o+osize, j, choose_tries, @@ -783,7 +791,7 @@ int crush_do_rule(const struct crush_map *map, recurse_to_leaf, c+osize, 0); - osize += numrep; + osize += out_size; } } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 0f3e713c5e75..fead914b09f6 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb) */ features = netif_skb_features(skb); segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); - if (IS_ERR(segs)) { + if (IS_ERR_OR_NULL(segs)) { kfree_skb(skb); return -ENOMEM; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index e1cf0f89748d..7b049cb56825 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -993,7 +993,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, if (nla[NFTA_CHAIN_POLICY]) { if ((chain != NULL && - !(chain->flags & NFT_BASE_CHAIN)) || + !(chain->flags & NFT_BASE_CHAIN))) + return -EOPNOTSUPP; + + if (chain == NULL && nla[NFTA_CHAIN_HOOK] == NULL) return -EOPNOTSUPP; @@ -1794,6 +1797,10 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, n++; } } + /* Check for overflow of dlen field */ + err = -EFBIG; + if (size >= 1 << 12) + goto err1; if (nla[NFTA_RULE_USERDATA]) ulen = nla_len(nla[NFTA_RULE_USERDATA]); @@ -1857,12 +1864,6 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, err3: list_del_rcu(&rule->list); - if (trans) { - list_del_rcu(&nft_trans_rule(trans)->list); - nft_rule_clear(net, nft_trans_rule(trans)); - nft_trans_destroy(trans); - chain->use++; - } err2: nf_tables_rule_destroy(&ctx, rule); err1: diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 9e287cb56a04..7f035f0772ee 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -77,6 +77,9 @@ nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple, if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM]) return -EINVAL; + /* Not all fields are initialized so first zero the tuple */ + memset(tuple, 0, sizeof(struct nf_conntrack_tuple)); + tuple->src.l3num = ntohs(nla_get_be16(tb[NFCTH_TUPLE_L3PROTONUM])); tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]); diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 108120f216b1..5b169db2049a 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) * returned by nf_queue. For instance, callers rely on -ECANCELED to * mean 'ignore this hook'. */ - if (IS_ERR(segs)) + if (IS_ERR_OR_NULL(segs)) goto out_err; queued = 0; err = 0; diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 318e1f1b0e4c..4b8de88bb49d 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -82,6 +82,9 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0; break; case AF_INET6: + if (proto) + entry->e6.ipv6.flags |= IP6T_F_PROTO; + entry->e6.ipv6.proto = proto; entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; break; @@ -322,6 +325,9 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, entry->e4.ip.invflags = inv ? 
IPT_INV_PROTO : 0; break; case AF_INET6: + if (proto) + entry->e6.ipv6.flags |= IP6T_F_PROTO; + entry->e6.ipv6.proto = proto; entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; break; diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index e1836ff88199..0636387c7c80 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c @@ -37,10 +37,11 @@ static bool nft_rbtree_lookup(const struct nft_set *set, { const struct nft_rbtree *priv = nft_set_priv(set); const struct nft_rbtree_elem *rbe, *interval = NULL; - const struct rb_node *parent = priv->root.rb_node; + const struct rb_node *parent; int d; spin_lock_bh(&nft_rbtree_lock); + parent = priv->root.rb_node; while (parent != NULL) { rbe = rb_entry(parent, struct nft_rbtree_elem, node); @@ -158,7 +159,6 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem) struct nft_rbtree_elem *rbe; int d; - spin_lock_bh(&nft_rbtree_lock); while (parent != NULL) { rbe = rb_entry(parent, struct nft_rbtree_elem, node); @@ -173,11 +173,9 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem) !(rbe->flags & NFT_SET_ELEM_INTERVAL_END)) nft_data_copy(&elem->data, rbe->data); elem->flags = rbe->flags; - spin_unlock_bh(&nft_rbtree_lock); return 0; } } - spin_unlock_bh(&nft_rbtree_lock); return -ENOENT; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 1e71adeca318..4e6176d9a6cd 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -330,6 +330,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, segs = __skb_gso_segment(skb, NETIF_F_SG, false); if (IS_ERR(segs)) return PTR_ERR(segs); + if (segs == NULL) + return -EINVAL; /* Queue all of the segments. */ skb = segs; diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 9ec12f834c43..8467373e2ace 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -531,7 +531,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) if (rc) { dprintk("RPC: %s: ib_query_device failed %d\n", __func__, rc); - goto out2; + goto out3; } if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { @@ -599,14 +599,14 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) "phys register failed with %lX\n", __func__, PTR_ERR(ia->ri_bind_mem)); rc = -ENOMEM; - goto out2; + goto out3; } break; default: printk(KERN_ERR "RPC: Unsupported memory " "registration mode: %d\n", memreg); rc = -ENOMEM; - goto out2; + goto out3; } dprintk("RPC: %s: memory registration strategy is %d\n", __func__, memreg); @@ -615,6 +615,10 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) ia->ri_memreg_strategy = memreg; return 0; + +out3: + ib_dealloc_pd(ia->ri_pd); + ia->ri_pd = NULL; out2: rdma_destroy_id(ia->ri_id); ia->ri_id = NULL; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index c51e8f7b8653..e44f36057a2a 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb) kfree_skb(skb); if (IS_ERR(segs)) return PTR_ERR(segs); + if (segs == NULL) + return -EINVAL; do { struct sk_buff *nskb = segs->next; diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index ad9d9f8b48ed..2537d474057c 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c @@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci, } #endif - strcpy(card->driver, 
emu->card_capabilities->driver); - strcpy(card->shortname, emu->card_capabilities->name); + strlcpy(card->driver, emu->card_capabilities->driver, + sizeof(card->driver)); + strlcpy(card->shortname, emu->card_capabilities->name, + sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i", card->shortname, emu->revision, emu->serial, emu->port, emu->irq); diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c index 874cd76c7b7f..d2c7ea3a7610 100644 --- a/sound/pci/emu10k1/emu10k1_callback.c +++ b/sound/pci/emu10k1/emu10k1_callback.c @@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp) snd_emu10k1_ptr_write(hw, Z2, ch, 0); /* invalidate maps */ - temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK; + temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); snd_emu10k1_ptr_write(hw, MAPA, ch, temp); snd_emu10k1_ptr_write(hw, MAPB, ch, temp); #if 0 @@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp) snd_emu10k1_ptr_write(hw, CDF, ch, sample); /* invalidate maps */ - temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK; + temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); snd_emu10k1_ptr_write(hw, MAPA, ch, temp); snd_emu10k1_ptr_write(hw, MAPB, ch, temp); diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index 229269788023..92f2371791a3 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume) snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */ snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */ - silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK; + silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); for (ch = 0; ch < NUM_G; ch++) { snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page); snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page); @@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume) outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG); } + if (emu->address_mode == 0) { + /* use 16M in 4G */ + outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG); + } + return 0; } @@ -1424,7 +1429,7 @@ static struct snd_emu_chip_details emu_chip_details[] = { * */ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102, - .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]", + .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]", .id = "Audigy2", .emu10k2_chip = 1, .ca0108_chip = 1, @@ -1574,7 +1579,7 @@ static struct snd_emu_chip_details emu_chip_details[] = { .adc_1361t = 1, /* 24 bit capture instead of 16bit */ .ac97_chip = 1} , {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102, - .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]", + .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]", .id = "Audigy2", .emu10k2_chip = 1, .ca0102_chip = 1, @@ -1880,8 +1885,10 @@ int snd_emu10k1_create(struct snd_card *card, is_audigy = emu->audigy = c->emu10k2_chip; + /* set addressing mode */ + emu->address_mode = is_audigy ? 0 : 1; /* set the DMA transfer mask */ - emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK; + emu->dma_mask = emu->address_mode ? 
EMU10K1_DMA_MASK : AUDIGY_DMA_MASK; if (pci_set_dma_mask(pci, emu->dma_mask) < 0 || pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) { dev_err(card->dev, @@ -1906,7 +1913,7 @@ int snd_emu10k1_create(struct snd_card *card, emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT; if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), - 32 * 1024, &emu->ptb_pages) < 0) { + (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) { err = -ENOMEM; goto error; } @@ -2005,8 +2012,8 @@ int snd_emu10k1_create(struct snd_card *card, /* Clear silent pages and set up pointers */ memset(emu->silent_page.area, 0, PAGE_SIZE); - silent_page = emu->silent_page.addr << 1; - for (idx = 0; idx < MAXPAGES; idx++) + silent_page = emu->silent_page.addr << emu->address_mode; + for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++) ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx); /* set up voice indices */ diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index f82481bd2542..36f0b8646417 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c @@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, snd_emu10k1_ptr_write(emu, Z1, voice, 0); snd_emu10k1_ptr_write(emu, Z2, voice, 0); /* invalidate maps */ - silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK; + silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page); snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page); /* modulation envelope */ diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index c68e6dd2fa67..4f1f69be1865 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c @@ -34,10 +34,11 @@ * aligned pages in others */ #define __set_ptb_entry(emu,page,addr) \ - (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page))) + (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page))) #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE) -#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES) +#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES) +#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES) /* get aligned page from offset address */ #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) /* get offset address from aligned page */ @@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis } page = blk->mapped_page + blk->pages; } - size = MAX_ALIGN_PAGES - page; + size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page; if (size >= max_size) { *nextp = pos; return page; @@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) q = get_emu10k1_memblk(p, mapped_link); end_page = q->mapped_page; } else - end_page = MAX_ALIGN_PAGES; + end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0); /* remove links */ list_del(&blk->mapped_link); @@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst if (snd_BUG_ON(!emu)) return NULL; if (snd_BUG_ON(runtime->dma_bytes <= 0 || - runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE)) + runtime->dma_bytes >= (emu->address_mode ? 
MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE)) return NULL; hdr = emu->memhdr; if (snd_BUG_ON(!hdr)) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index fb9497a0062a..68bea67c3ca3 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2942,6 +2942,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = { .put = vmaster_mute_mode_put, }; +/* meta hook to call each driver's vmaster hook */ +static void vmaster_hook(void *private_data, int enabled) +{ + struct hda_vmaster_mute_hook *hook = private_data; + + if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER) + enabled = hook->mute_mode; + hook->hook(hook->codec, enabled); +} + /* * Add a mute-LED hook with the given vmaster switch kctl * "Mute-LED Mode" control is automatically created and associated with @@ -2955,9 +2965,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec, if (!hook->hook || !hook->sw_kctl) return 0; - snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec); hook->codec = codec; hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER; + snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook); if (!expose_enum_ctl) return 0; kctl = snd_ctl_new1(&vmaster_mute_mode, hook); @@ -2980,14 +2990,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook) */ if (hook->codec->bus->shutdown) return; - switch (hook->mute_mode) { - case HDA_VMUTE_FOLLOW_MASTER: - snd_ctl_sync_vmaster_hook(hook->sw_kctl); - break; - default: - hook->hook(hook->codec, hook->mute_mode); - break; - } + snd_ctl_sync_vmaster_hook(hook->sw_kctl); } EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook); diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 6ba0b5517c40..2341fc334163 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c @@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { old_vmaster_hook = spec->vmaster_mute.hook; spec->vmaster_mute.hook = update_tpacpi_mute_led; + spec->vmaster_mute_enum = 1; removefunc = false; } if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index 833231e27340..d48b484ce47e 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -56,6 +56,9 @@ static const struct reg_default init_list[] = { {RT5677_PR_BASE + 0x1e, 0x0000}, {RT5677_PR_BASE + 0x12, 0x0eaa}, {RT5677_PR_BASE + 0x14, 0x018a}, + {RT5677_PR_BASE + 0x15, 0x0490}, + {RT5677_PR_BASE + 0x38, 0x0f71}, + {RT5677_PR_BASE + 0x39, 0x0f71}, }; #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list) diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c index 319754cf6208..daf61abc3670 100644 --- a/sound/synth/emux/emux_oss.c +++ b/sound/synth/emux/emux_oss.c @@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) if (snd_BUG_ON(!arg || !emu)) return -ENXIO; - mutex_lock(&emu->register_mutex); - - if (!snd_emux_inc_count(emu)) { - mutex_unlock(&emu->register_mutex); + if (!snd_emux_inc_count(emu)) return -EFAULT; - } memset(&callback, 0, sizeof(callback)); callback.owner = THIS_MODULE; @@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) if (p == NULL) { snd_printk(KERN_ERR "can't create port\n"); snd_emux_dec_count(emu); - mutex_unlock(&emu->register_mutex); return -ENOMEM; } @@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) reset_port_mode(p, arg->seq_mode); snd_emux_reset_port(p); - - 
mutex_unlock(&emu->register_mutex); return 0; } @@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg) if (snd_BUG_ON(!emu)) return -ENXIO; - mutex_lock(&emu->register_mutex); snd_emux_sounds_off_all(p); snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port)); snd_seq_event_port_detach(p->chset.client, p->chset.port); snd_emux_dec_count(emu); - mutex_unlock(&emu->register_mutex); return 0; } diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c index 7778b8e19782..a0209204ae48 100644 --- a/sound/synth/emux/emux_seq.c +++ b/sound/synth/emux/emux_seq.c @@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu) if (emu->voices) snd_emux_terminate_all(emu); - mutex_lock(&emu->register_mutex); if (emu->client >= 0) { snd_seq_delete_kernel_client(emu->client); emu->client = -1; } - mutex_unlock(&emu->register_mutex); } @@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data, /* * increment usage count */ -int -snd_emux_inc_count(struct snd_emux *emu) +static int +__snd_emux_inc_count(struct snd_emux *emu) { emu->used++; if (!try_module_get(emu->ops.owner)) @@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu) return 1; } +int snd_emux_inc_count(struct snd_emux *emu) +{ + int ret; + + mutex_lock(&emu->register_mutex); + ret = __snd_emux_inc_count(emu); + mutex_unlock(&emu->register_mutex); + return ret; +} /* * decrease usage count */ -void -snd_emux_dec_count(struct snd_emux *emu) +static void +__snd_emux_dec_count(struct snd_emux *emu) { module_put(emu->card->module); emu->used--; @@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu) module_put(emu->ops.owner); } +void snd_emux_dec_count(struct snd_emux *emu) +{ + mutex_lock(&emu->register_mutex); + __snd_emux_dec_count(emu); + mutex_unlock(&emu->register_mutex); +} /* * Routine that is called upon a first use of a particular port @@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info) mutex_lock(&emu->register_mutex); snd_emux_init_port(p); - snd_emux_inc_count(emu); + __snd_emux_inc_count(emu); mutex_unlock(&emu->register_mutex); return 0; } @@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info) mutex_lock(&emu->register_mutex); snd_emux_sounds_off_all(p); - snd_emux_dec_count(emu); + __snd_emux_dec_count(emu); mutex_unlock(&emu->register_mutex); return 0; } diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 22fa819a9b6a..1c0772b340d8 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer) static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu) { + int ret; struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK; - kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, - timer->irq->irq, - timer->irq->level); + ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, + timer->irq->irq, + timer->irq->level); + WARN_ON(ret); } static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) @@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) timer_disarm(timer); } -int kvm_timer_init(struct kvm *kvm) +void kvm_timer_enable(struct kvm *kvm) { - if (timecounter && wqueue) { - kvm->arch.timer.cntvoff = kvm_phys_timer_read(); + if (kvm->arch.timer.enabled) + return; + + /* + * There is a potential race here between VCPUs starting for the first + * time, which may be enabling the timer multiple times. 
That doesn't + * hurt though, because we're just setting a variable to the same + * variable that it already was. The important thing is that all + * VCPUs have the enabled variable set, before entering the guest, if + * the arch timers are enabled. + */ + if (timecounter && wqueue) kvm->arch.timer.enabled = 1; - } +} - return 0; +void kvm_timer_init(struct kvm *kvm) +{ + kvm->arch.timer.cntvoff = kvm_phys_timer_read(); } diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 476d3bf540a8..24ac123f4ad8 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -674,7 +674,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, { struct vgic_dist *dist = &vcpu->kvm->arch.vgic; int sgi; - int min_sgi = (offset & ~0x3) * 4; + int min_sgi = (offset & ~0x3); int max_sgi = min_sgi + 3; int vcpu_id = vcpu->vcpu_id; u32 reg = 0; @@ -695,7 +695,7 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, { struct vgic_dist *dist = &vcpu->kvm->arch.vgic; int sgi; - int min_sgi = (offset & ~0x3) * 4; + int min_sgi = (offset & ~0x3); int max_sgi = min_sgi + 3; int vcpu_id = vcpu->vcpu_id; u32 reg; @@ -1042,6 +1042,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) lr, irq, vgic_cpu->vgic_lr[lr]); BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; + __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); return true; } @@ -1055,6 +1056,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq); vgic_cpu->vgic_irq_lr_map[irq] = lr; set_bit(lr, vgic_cpu->lr_used); + __clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); if (!vgic_irq_is_edge(vcpu, irq)) vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; @@ -1209,6 +1211,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) if (vgic_cpu->vgic_misr & GICH_MISR_U) vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE; + /* + * In the next iterations of the vcpu loop, if we sync the vgic state + * after flushing it, but before entering the guest (this happens for + * pending signals and vmid rollovers), then make sure we don't pick + * up any old maintenance interrupts here. + */ + memset(vgic_cpu->vgic_eisr, 0, sizeof(vgic_cpu->vgic_eisr[0]) * 2); + return level_pending; } @@ -1387,7 +1397,8 @@ out: int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num, bool level) { - if (vgic_update_irq_state(kvm, cpuid, irq_num, level)) + if (likely(vgic_initialized(kvm)) && + vgic_update_irq_state(kvm, cpuid, irq_num, level)) vgic_kick_vcpus(kvm); return 0; @@ -1610,7 +1621,7 @@ out: int kvm_vgic_create(struct kvm *kvm) { - int i, vcpu_lock_idx = -1, ret = 0; + int i, vcpu_lock_idx = -1, ret; struct kvm_vcpu *vcpu; mutex_lock(&kvm->lock); @@ -1625,6 +1636,7 @@ int kvm_vgic_create(struct kvm *kvm) * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure * that no other VCPUs are run while we create the vgic. 
*/ + ret = -EBUSY; kvm_for_each_vcpu(i, vcpu, kvm) { if (!mutex_trylock(&vcpu->mutex)) goto out_unlock; @@ -1632,11 +1644,10 @@ int kvm_vgic_create(struct kvm *kvm) } kvm_for_each_vcpu(i, vcpu, kvm) { - if (vcpu->arch.has_run_once) { - ret = -EBUSY; + if (vcpu->arch.has_run_once) goto out_unlock; - } } + ret = 0; spin_lock_init(&kvm->arch.vgic.lock); kvm->arch.vgic.vctrl_base = vgic_vctrl_base; @@ -1654,7 +1665,7 @@ out: return ret; } -static bool vgic_ioaddr_overlap(struct kvm *kvm) +static int vgic_ioaddr_overlap(struct kvm *kvm) { phys_addr_t dist = kvm->arch.vgic.vgic_dist_base; phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
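
A few illustrative notes on the changes above follow. For the uas changes: the per-device flag word that uas_use_uas_driver() now returns through its flags_ret argument is what uas_slave_alloc() later turns into a per-command transfer cap. Below is a minimal stand-alone sketch of that mapping; only the US_FL_MAX_SECTORS_240 value (0x08000000) comes from the series itself, the MAX_SECTORS_64 value and the _demo names are assumptions made for the example.

#include <stdio.h>

/* MAX_SECTORS_240 is taken from the usb_usual.h hunk above;
 * MAX_SECTORS_64 is assumed here purely for illustration. */
#define US_FL_MAX_SECTORS_64	0x00000400UL	/* assumed value */
#define US_FL_MAX_SECTORS_240	0x08000000UL	/* from the series */

/* Hypothetical helper mirroring the logic added to uas_slave_alloc():
 * pick a max_hw_sectors limit from the device flag word. */
static unsigned int uas_max_hw_sectors_demo(unsigned long flags)
{
	if (flags & US_FL_MAX_SECTORS_64)
		return 64;	/* 64 * 512 bytes = 32 KiB per command */
	if (flags & US_FL_MAX_SECTORS_240)
		return 240;	/* 240 * 512 bytes = 120 KiB per command */
	return 0;		/* 0: leave the queue default untouched */
}

int main(void)
{
	printf("ASM1053-style device: %u sectors per command\n",
	       uas_max_hw_sectors_demo(US_FL_MAX_SECTORS_240));
	return 0;
}

The same cap can also be requested explicitly per device through the new 'g' case added to usb_stor_adjust_quirks() in drivers/usb/storage/usb.c.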
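
For the mm/page-writeback.c hunks, which replace divisors of the form "limit - setpoint + 1" with "(limit - setpoint) | 1": with unsigned arithmetic, adding 1 can still leave the divisor at zero when the difference has wrapped to the all-ones value, while OR-ing with 1 always yields something non-zero. A small user-space sketch (helper names are made up):

#include <stdio.h>

/* Two ways of trying to keep a divisor non-zero. */
static unsigned long guard_plus_one(unsigned long diff) { return diff + 1; }
static unsigned long guard_or_one(unsigned long diff)   { return diff | 1; }

int main(void)
{
	/* e.g. setpoint == limit + 1, so the unsigned difference wraps */
	unsigned long diff = (unsigned long)-1;

	printf("diff + 1 = %lu\n", guard_plus_one(diff)); /* 0: would divide by zero */
	printf("diff | 1 = %lu\n", guard_or_one(diff));   /* still non-zero, safe */
	return 0;
}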
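
Several networking hunks (ip_output.c, nfnetlink_queue_core.c, and the explicit NULL checks added in openvswitch and xfrm_output) address the same pattern: a segmentation helper may return a valid list, an ERR_PTR-encoded errno, or NULL, and checking only IS_ERR() misses the last case. A user-space re-creation of the pointer encoding, with the macro bodies assumed to follow the kernel's err.h conventions, shows the difference:

#include <stdio.h>

/* Simplified ERR_PTR encoding, assumed to match include/linux/err.h */
#define MAX_ERRNO		4095
#define ERR_PTR(err)		((void *)(long)(err))
#define IS_ERR(ptr)		((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr)	(!(ptr) || IS_ERR(ptr))

int main(void)
{
	int dummy;
	void *segs_ok   = &dummy;	/* a real segment list */
	void *segs_err  = ERR_PTR(-12);	/* -ENOMEM encoded in the pointer */
	void *segs_none = NULL;		/* "nothing was produced" */

	printf("ok:   IS_ERR=%d IS_ERR_OR_NULL=%d\n", IS_ERR(segs_ok),   IS_ERR_OR_NULL(segs_ok));
	printf("err:  IS_ERR=%d IS_ERR_OR_NULL=%d\n", IS_ERR(segs_err),  IS_ERR_OR_NULL(segs_err));
	printf("null: IS_ERR=%d IS_ERR_OR_NULL=%d\n", IS_ERR(segs_none), IS_ERR_OR_NULL(segs_none));
	return 0;
}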
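
Finally, the emu10k1 changes make the page-table-entry layout depend on the new address_mode field instead of a hard-coded shift of 1. A stand-alone sketch of the two layouts, using the MAP_PTI_MASK0/MAP_PTI_MASK1 values from the emu10k1.h hunk (the sample DMA address is made up):

#include <stdio.h>

#define MAP_PTI_MASK0	0x00000fffUL	/* 4096 PTE dwords, 32-bit mode */
#define MAP_PTI_MASK1	0x00001fffUL	/* 8192 PTE dwords, 31-bit mode */

/* Mirrors the "(addr << address_mode) | (address_mode ? MASK1 : MASK0)"
 * expression the series uses for the silent-page map entries. */
static unsigned long silent_map_entry(unsigned long addr, int address_mode)
{
	return (addr << address_mode) |
	       (address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
}

int main(void)
{
	unsigned long silent_page = 0x12345000UL;	/* hypothetical DMA address */

	printf("Audigy,  address_mode 0: %#lx\n", silent_map_entry(silent_page, 0));
	printf("EMU10K1, address_mode 1: %#lx\n", silent_map_entry(silent_page, 1));
	return 0;
}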