diff --git a/Makefile b/Makefile index c9db2e2..45ac2bc 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 8 SUBLEVEL = 13 -EXTRAVERSION = .5 +EXTRAVERSION = .6 NAME = Remoralised Urchins Update # *DOCUMENTATION* diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 0782eaf..af4cd30 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -151,25 +151,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) #define ESR_CM (1 << 8) #define ESR_LNX_EXEC (1 << 24) -/* - * Check that the permissions on the VMA allow for the fault which occurred. - * If we encountered a write fault, we must have write permission, otherwise - * we allow any permission. - */ -static inline bool access_error(unsigned int esr, struct vm_area_struct *vma) -{ - unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; - - if (esr & ESR_WRITE) - mask = VM_WRITE; - if (esr & ESR_LNX_EXEC) - mask = VM_EXEC; - - return vma->vm_flags & mask ? false : true; -} - static int __do_page_fault(struct mm_struct *mm, unsigned long addr, - unsigned int esr, unsigned int flags, + unsigned int mm_flags, unsigned long vm_flags, struct task_struct *tsk) { struct vm_area_struct *vma; @@ -187,12 +170,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr, * it. */ good_area: - if (access_error(esr, vma)) { + /* + * Check that the permissions on the VMA allow for the fault which + * occurred. If we encountered a write or exec fault, we must have + * appropriate permissions, otherwise we allow any permission. + */ + if (!(vma->vm_flags & vm_flags)) { fault = VM_FAULT_BADACCESS; goto out; } - return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); + return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags); check_stack: if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) @@ -207,9 +195,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, struct task_struct *tsk; struct mm_struct *mm; int fault, sig, code; - bool write = (esr & ESR_WRITE) && !(esr & ESR_CM); - unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | - (write ? FAULT_FLAG_WRITE : 0); + unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; + unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + + if (esr & ESR_LNX_EXEC) { + vm_flags = VM_EXEC; + } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) { + vm_flags = VM_WRITE; + mm_flags |= FAULT_FLAG_WRITE; + } tsk = current; mm = tsk->mm; @@ -247,7 +241,7 @@ retry: #endif } - fault = __do_page_fault(mm, addr, esr, flags, tsk); + fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk); /* * If we need to retry but a fatal signal is pending, handle the @@ -264,7 +258,7 @@ retry: */ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); - if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (mm_flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, @@ -279,7 +273,7 @@ retry: * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of * starvation. 
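The arm64 hunks above fold the old access_error() helper into a single mask test: the fault syndrome (ESR) selects which VM_* permission the access requires, and the fault is rejected when the VMA lacks it. A minimal userspace sketch of that decision table follows; the flag values are stand-ins for illustration only, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values for illustration; the kernel defines the real ones. */
#define VM_READ      0x1
#define VM_WRITE     0x2
#define VM_EXEC      0x4
#define ESR_WRITE    (1 << 6)
#define ESR_CM       (1 << 8)
#define ESR_LNX_EXEC (1 << 24)

/* Derive the permission the fault requires, as the patched
 * do_page_fault() now does before calling __do_page_fault(). */
static unsigned long required_vm_flags(unsigned int esr)
{
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;

	if (esr & ESR_LNX_EXEC)
		vm_flags = VM_EXEC;	/* instruction fetch */
	else if ((esr & ESR_WRITE) && !(esr & ESR_CM))
		vm_flags = VM_WRITE;	/* write, not cache maintenance */
	return vm_flags;
}

int main(void)
{
	unsigned long vma_flags = VM_READ | VM_EXEC;	/* an r-x mapping */
	unsigned int esr = ESR_WRITE;			/* a write fault */

	/* The same test __do_page_fault() now performs at good_area: */
	bool bad = !(vma_flags & required_vm_flags(esr));
	printf("write to r-x mapping -> %s\n",
	       bad ? "VM_FAULT_BADACCESS" : "handle_mm_fault");
	return 0;
}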
*/ - flags &= ~FAULT_FLAG_ALLOW_RETRY; + mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; goto retry; } } diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index c1df590..49fa55b 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -82,10 +82,9 @@ struct exception_table_entry; void sort_ex_table(struct exception_table_entry *start, struct exception_table_entry *finish); -#ifdef CONFIG_MODVERSIONS +#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64) #define ARCH_RELOCATES_KCRCTAB - -extern const unsigned long reloc_start[]; +#define reloc_start PHYSICAL_START #endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MODULE_H */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 65d1c08..7703569 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4; #endif SECTIONS { - . = 0; - reloc_start = .; - . = KERNELBASE; /* diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 68f7e11..ce48203 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -34,6 +34,8 @@ int foo(void) DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread)); BLANK(); DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */ return 0; diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S index 44aad32..969f964 100644 --- a/arch/sparc/mm/hypersparc.S +++ b/arch/sparc/mm/hypersparc.S @@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out: /* The things we do for performance... */ hypersparc_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 @@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out: */ /* Verified, my ass... */ hypersparc_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %g2 #ifndef CONFIG_SMP cmp %g2, -1 @@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 82bbf04..c57da7f 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1099,7 +1099,14 @@ static int __init grab_mblocks(struct mdesc_handle *md) m->size = *val; val = mdesc_get_property(md, node, "address-congruence-offset", NULL); - m->offset = *val; + + /* The address-congruence-offset property is optional. + * Explicitly zero it to identify this case.
+ */ + if (val) + m->offset = *val; + else + m->offset = 0UL; numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", count - 1, m->base, m->size, m->offset); diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S index c801c39..5d2b88d 100644 --- a/arch/sparc/mm/swift.S +++ b/arch/sparc/mm/swift.S @@ -105,7 +105,7 @@ swift_flush_cache_mm_out: .globl swift_flush_cache_range swift_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 sub %o2, %o1, %o2 sethi %hi(4096), %o3 cmp %o2, %o3 @@ -116,7 +116,7 @@ swift_flush_cache_range: .globl swift_flush_cache_page swift_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 70: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -219,7 +219,7 @@ swift_flush_sig_insns: .globl swift_flush_tlb_range .globl swift_flush_tlb_all swift_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 swift_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -233,7 +233,7 @@ swift_flush_tlb_all_out: .globl swift_flush_tlb_page swift_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 83d89bc..37e7bc4 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, } if (!tb->active) { - global_flush_tlb_page(mm, vaddr); flush_tsb_user_page(mm, vaddr); + global_flush_tlb_page(mm, vaddr); goto out; } diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S index 4e55e8f..bf10a34 100644 --- a/arch/sparc/mm/tsunami.S +++ b/arch/sparc/mm/tsunami.S @@ -24,7 +24,7 @@ /* Sliiick... */ tsunami_flush_cache_page: tsunami_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -46,7 +46,7 @@ tsunami_flush_sig_insns: /* More slick stuff... */ tsunami_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -65,7 +65,7 @@ tsunami_flush_tlb_out: /* This one can be done in a fine grained manner... 
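The sparc hunks in this series replace the hard-coded `ld [%o0 + 0x0]` offsets (and their "GROSS" warnings) with the VMA_VM_MM constant added to asm-offsets.c above. The build turns each DEFINE(sym, offsetof(...)) into an assembler-visible #define; a rough, self-contained sketch of that mechanism is below. The DEFINE() body is a simplification of the one in include/linux/kbuild.h, the trimmed vm_area_struct is purely illustrative, and the file is only ever compiled to assembly (as with `gcc -S`), never linked.

#include <stddef.h>

/* Each DEFINE() plants a marker like "->SYM value" in the generated
 * assembly; a build script then rewrites those markers into
 * "#define SYM value" lines for .S files to include. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0" : : "i" (val))

struct mm_struct;

struct vm_area_struct {		/* trimmed stand-in */
	unsigned long vm_start;
	unsigned long vm_end;
	struct mm_struct *vm_mm;
};

int foo(void)
{
	/* After processing, assembly can write: ld [%o0 + VMA_VM_MM], %o0 */
	DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
	return 0;
}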
*/ tsunami_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S index bf8ee06..852257f 100644 --- a/arch/sparc/mm/viking.S +++ b/arch/sparc/mm/viking.S @@ -108,7 +108,7 @@ viking_mxcc_flush_page: viking_flush_cache_page: viking_flush_cache_range: #ifndef CONFIG_SMP - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #endif viking_flush_cache_mm: #ifndef CONFIG_SMP @@ -148,7 +148,7 @@ viking_flush_tlb_mm: #endif viking_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -173,7 +173,7 @@ viking_flush_tlb_range: #endif viking_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range: tst %g5 bne 3f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 @@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page: tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 and %o1, PAGE_MASK, %o1 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index d5e0d71..753dfaa 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -48,9 +48,20 @@ int acpi_suspend_lowlevel(void) #ifndef CONFIG_64BIT store_gdt((struct desc_ptr *)&header->pmode_gdt); + /* + * We have to check that we can write back the value, and not + * just read it. At least on 90 nm Pentium M (Family 6, Model + * 13), reading an invalid MSR is not guaranteed to trap, see + * Erratum X4 in "Intel Pentium M Processor on 90 nm Process + * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90 + * nm process with 512-KB L2 Cache Specification Update". 
+ */ if (!rdmsr_safe(MSR_EFER, &header->pmode_efer_low, - &header->pmode_efer_high)) + &header->pmode_efer_high) && + !wrmsr_safe(MSR_EFER, + header->pmode_efer_low, + header->pmode_efer_high)) header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER); #endif /* !CONFIG_64BIT */ @@ -61,7 +72,10 @@ int acpi_suspend_lowlevel(void) } if (!rdmsr_safe(MSR_IA32_MISC_ENABLE, &header->pmode_misc_en_low, - &header->pmode_misc_en_high)) + &header->pmode_misc_en_high) && + !wrmsr_safe(MSR_IA32_MISC_ENABLE, + header->pmode_misc_en_low, + header->pmode_misc_en_high)) header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE); header->realmode_flags = acpi_realmode_flags; diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index b679bf8..6d11ec6 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -406,6 +406,7 @@ static int acpi_memory_device_add(struct acpi_device *device) /* Get the range from the _CRS */ result = acpi_memory_get_device_resources(mem_device); if (result) { + device->driver_data = NULL; kfree(mem_device); return result; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 545246f..5006c47 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -449,6 +449,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, { .callback = video_ignore_initial_backlight, + .ident = "Fujitsu E753", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E753"), + }, + }, + { + .callback = video_ignore_initial_backlight, .ident = "HP Pavilion dm4", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index ef6d9be..bdee5f8 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -656,7 +656,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif, int status = BLKIF_RSP_OKAY; struct block_device *bdev = blkif->vbd.bdev; unsigned long secure; + struct phys_req preq; + + preq.sector_number = req->u.discard.sector_number; + preq.nr_sects = req->u.discard.nr_sectors; + err = xen_vbd_translate(&preq, blkif, WRITE); + if (err) { + pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n", + preq.sector_number, + preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); + goto fail_response; + } blkif->st_ds_req++; xen_blkif_get(blkif); @@ -667,7 +678,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif, err = blkdev_issue_discard(bdev, req->u.discard.sector_number, req->u.discard.nr_sectors, GFP_KERNEL, secure); - +fail_response: if (err == -EOPNOTSUPP) { pr_debug(DRV_PFX "discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 99daa89..38254c0 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1465,6 +1465,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->dev = dev; dev_priv->info = info; + spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->error_lock); + spin_lock_init(&dev_priv->rps.lock); + spin_lock_init(&dev_priv->dpio_lock); + mutex_init(&dev_priv->rps.hw_lock); + i915_dump_device_info(dev_priv); if (i915_get_bridge_dev(dev)) { @@ -1554,6 +1560,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_detect_pch(dev); intel_irq_init(dev); + intel_gt_sanitize(dev); intel_gt_init(dev); /* Try to make sure MCHBAR is 
enabled before poking at it */ @@ -1579,13 +1586,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!IS_I945G(dev) && !IS_I945GM(dev)) pci_enable_msi(dev->pdev); - spin_lock_init(&dev_priv->irq_lock); - spin_lock_init(&dev_priv->error_lock); - spin_lock_init(&dev_priv->rps.lock); - spin_lock_init(&dev_priv->dpio_lock); - - mutex_init(&dev_priv->rps.hw_lock); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) dev_priv->num_pipe = 3; else if (IS_MOBILE(dev) || !IS_GEN2(dev)) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 41e02dc..be9ddec 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -617,7 +617,7 @@ static int i915_drm_thaw(struct drm_device *dev) { int error = 0; - intel_gt_reset(dev); + intel_gt_sanitize(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); @@ -643,7 +643,7 @@ int i915_resume(struct drm_device *dev) pci_set_master(dev->pdev); - intel_gt_reset(dev); + intel_gt_sanitize(dev); /* * Platforms with opregion should have sane BIOS, older ones (gen3 and @@ -1247,23 +1247,23 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ + unsigned long irqflags; \ u##x val = 0; \ + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ if (IS_GEN5(dev_priv->dev)) \ ilk_dummy_write(dev_priv); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - unsigned long irqflags; \ - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ if (dev_priv->forcewake_count == 0) \ dev_priv->gt.force_wake_get(dev_priv); \ val = read##y(dev_priv->regs + reg); \ if (dev_priv->forcewake_count == 0) \ dev_priv->gt.force_wake_put(dev_priv); \ - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ val = read##y(dev_priv->regs + reg + 0x180000); \ } else { \ val = read##y(dev_priv->regs + reg); \ } \ + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } @@ -1276,8 +1276,10 @@ __i915_read(64, q) #define __i915_write(x, y) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ + unsigned long irqflags; \ u32 __fifo_ret = 0; \ trace_i915_reg_rw(true, reg, val, sizeof(val)); \ + spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ @@ -1299,6 +1301,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ DRM_ERROR("Unclaimed write to %x\n", reg); \ writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \ } \ + spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ } __i915_write(8, b) __i915_write(16, w) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5f1fad3..8c661512 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -393,6 +393,7 @@ enum intel_sbi_destination { #define QUIRK_PIPEA_FORCE (1<<0) #define QUIRK_LVDS_SSC_DISABLE (1<<1) #define QUIRK_INVERT_BRIGHTNESS (1<<2) +#define QUIRK_NO_PCH_PWM_ENABLE (1<<3) struct intel_fbdev; struct intel_fbc_work; @@ -1328,7 +1329,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); extern void intel_gt_init(struct drm_device *dev); -extern void intel_gt_reset(struct drm_device *dev); +extern void intel_gt_sanitize(struct drm_device *dev); void 
i915_error_state_free(struct kref *error_ref); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 80bc298..22b5784 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1882,6 +1882,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, u32 seqno = intel_ring_get_seqno(ring); BUG_ON(ring == NULL); + if (obj->ring != ring && obj->last_write_seqno) { + /* Keep the seqno relative to the current ring */ + obj->last_write_seqno = seqno; + } obj->ring = ring; /* Add a reference if we're newly entering the active list. */ @@ -2519,52 +2523,56 @@ int i915_gpu_idle(struct drm_device *dev) return 0; } -static void sandybridge_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +static void i965_write_fence_reg(struct drm_device *dev, int reg, + struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = dev->dev_private; - uint64_t val; - - if (obj) { - u32 size = obj->gtt_space->size; - - val = (uint64_t)((obj->gtt_offset + size - 4096) & - 0xfffff000) << 32; - val |= obj->gtt_offset & 0xfffff000; - val |= (uint64_t)((obj->stride / 128) - 1) << - SANDYBRIDGE_FENCE_PITCH_SHIFT; + int fence_reg; + int fence_pitch_shift; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I965_FENCE_TILING_Y_SHIFT; - val |= I965_FENCE_REG_VALID; - } else - val = 0; + if (INTEL_INFO(dev)->gen >= 6) { + fence_reg = FENCE_REG_SANDYBRIDGE_0; + fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; + } else { + fence_reg = FENCE_REG_965_0; + fence_pitch_shift = I965_FENCE_PITCH_SHIFT; + } - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val); - POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8); -} + fence_reg += reg * 8; -static void i965_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - uint64_t val; + /* To w/a incoherency with non-atomic 64-bit register updates, + * we split the 64-bit update into two 32-bit writes. In order + * for a partial fence not to be evaluated between writes, we + * precede the update with write to turn off the fence register, + * and only enable the fence as the last step. + * + * For extra levels of paranoia, we make sure each step lands + * before applying the next step. 
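The replacement i965_write_fence_reg() below encodes the fence as a 64-bit value: the high dword holds the last 4 KiB page of the range, the low dword the first page plus pitch, tiling and valid bits. A standalone sketch of that encoding with made-up numbers follows; the field layout is copied from the code below, but the numeric shift values here are assumptions.

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration; the real ones live in i915_reg.h. */
#define I965_FENCE_PITCH_SHIFT    2
#define I965_FENCE_TILING_Y_SHIFT 1
#define I965_FENCE_REG_VALID      (1u << 0)

static uint64_t i965_fence_value(uint32_t gtt_offset, uint32_t size,
				 uint32_t stride, int tiling_y)
{
	/* High dword: last page of the range; low dword: first page. */
	uint64_t val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;

	val |= gtt_offset & 0xfffff000;
	val |= (uint64_t)((stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (tiling_y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	return val | I965_FENCE_REG_VALID;
}

int main(void)
{
	/* 1 MiB object at GTT offset 16 MiB, 512-byte stride, X-tiled. */
	printf("fence = 0x%016llx\n", (unsigned long long)
	       i965_fence_value(16u << 20, 1u << 20, 512, 0));
	return 0;
}

Splitting such a value into two 32-bit writes, with the register disabled first, is exactly the hazard the comment above describes.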
+ */ + I915_WRITE(fence_reg, 0); + POSTING_READ(fence_reg); if (obj) { u32 size = obj->gtt_space->size; + uint64_t val; val = (uint64_t)((obj->gtt_offset + size - 4096) & 0xfffff000) << 32; val |= obj->gtt_offset & 0xfffff000; - val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; + val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - } else - val = 0; - I915_WRITE64(FENCE_REG_965_0 + reg * 8, val); - POSTING_READ(FENCE_REG_965_0 + reg * 8); + I915_WRITE(fence_reg + 4, val >> 32); + POSTING_READ(fence_reg + 4); + + I915_WRITE(fence_reg + 0, val); + POSTING_READ(fence_reg); + } else { + I915_WRITE(fence_reg + 4, 0); + POSTING_READ(fence_reg + 4); + } } static void i915_write_fence_reg(struct drm_device *dev, int reg, @@ -2648,7 +2656,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, { switch (INTEL_INFO(dev)->gen) { case 7: - case 6: sandybridge_write_fence_reg(dev, reg, obj); break; + case 6: case 5: case 4: i965_write_fence_reg(dev, reg, obj); break; case 3: i915_write_fence_reg(dev, reg, obj); break; @@ -2663,35 +2671,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv, return fence - dev_priv->fence_regs; } -static void i915_gem_write_fence__ipi(void *data) -{ - wbinvd(); -} - static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int fence_reg = fence_number(dev_priv, fence); - - /* In order to fully serialize access to the fenced region and - * the update to the fence register we need to take extreme - * measures on SNB+. In theory, the write to the fence register - * flushes all memory transactions before, and coupled with the - * mb() placed around the register write we serialise all memory - * operations with respect to the changes in the tiler. Yet, on - * SNB+ we need to take a step further and emit an explicit wbinvd() - * on each processor in order to manually flush all memory - * transactions before updating the fence register. - */ - if (HAS_LLC(obj->base.dev)) - on_each_cpu(i915_gem_write_fence__ipi, NULL, 1); - i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL); + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + int reg = fence_number(dev_priv, fence); + + i915_gem_write_fence(obj->base.dev, reg, enable ? 
obj : NULL); if (enable) { - obj->fence_reg = fence_reg; + obj->fence_reg = reg; fence->obj = obj; list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); } else { @@ -4429,7 +4419,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) if (obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index debf25f..b8e53d9 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -681,7 +681,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - intel_dp->DP = intel_dig_port->port_reversal | + intel_dp->DP = intel_dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; switch (intel_dp->lane_count) { case 1: @@ -1303,7 +1303,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) * enabling the port. */ I915_WRITE(DDI_BUF_CTL(port), - intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); + intel_dig_port->saved_port_bits | + DDI_BUF_CTL_ENABLE); } else if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1482,16 +1483,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port) return; } - if (port != PORT_A) { - hdmi_connector = kzalloc(sizeof(struct intel_connector), - GFP_KERNEL); - if (!hdmi_connector) { - kfree(dp_connector); - kfree(intel_dig_port); - return; - } - } - intel_encoder = &intel_dig_port->base; encoder = &intel_encoder->base; @@ -1506,12 +1497,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_dig_port->port = port; - intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & - DDI_BUF_PORT_REVERSAL; - if (hdmi_connector) - intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); - else - intel_dig_port->hdmi.sdvox_reg = 0; + intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & + (DDI_BUF_PORT_REVERSAL | + DDI_A_4_LANES); intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); intel_encoder->type = INTEL_OUTPUT_UNKNOWN; @@ -1519,7 +1507,16 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->cloneable = false; intel_encoder->hot_plug = intel_ddi_hot_plug; - if (hdmi_connector) - intel_hdmi_init_connector(intel_dig_port, hdmi_connector); intel_dp_init_connector(intel_dig_port, dp_connector); + + if (intel_encoder->type != INTEL_OUTPUT_EDP) { + hdmi_connector = kzalloc(sizeof(struct intel_connector), + GFP_KERNEL); + if (!hdmi_connector) { + return; + } + + intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); + intel_hdmi_init_connector(intel_dig_port, hdmi_connector); + } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b17d8a8..e094040 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8843,6 +8843,17 @@ static void quirk_invert_brightness(struct drm_device *dev) DRM_INFO("applying inverted panel brightness quirk\n"); } +/* + * Some machines (Dell XPS13) suffer broken backlight controls if + * BLM_PCH_PWM_ENABLE is set. 
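The QUIRK_NO_PCH_PWM_ENABLE flag introduced below is applied through i915's quirk table, which the driver walks at init comparing PCI device and subsystem IDs; the walking loop itself is not part of this diff. A simplified sketch of the pattern, reusing the struct intel_quirk layout shown below (the loop and the real driver's wildcard handling are assumptions here):

#include <stdio.h>

struct drm_device;			/* opaque in this sketch */

/* Mirrors struct intel_quirk from the diff below. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

static void quirk_example(struct drm_device *dev)
{
	(void)dev;
	printf("quirk applied\n");
}

static const struct intel_quirk quirks[] = {
	/* Entry in the style of the Dell XPS13 additions below. */
	{ 0x0166, 0x1028, 0x058b, quirk_example },
};

static void apply_quirks(struct drm_device *dev, int device,
			 int sub_vendor, int sub_device)
{
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct intel_quirk *q = &quirks[i];

		if (q->device == device &&
		    q->subsystem_vendor == sub_vendor &&
		    q->subsystem_device == sub_device)
			q->hook(dev);
	}
}

int main(void)
{
	apply_quirks(NULL, 0x0166, 0x1028, 0x058b);
	return 0;
}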
+ */ +static void quirk_no_pcm_pwm_enable(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE; + DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -8912,6 +8923,14 @@ static struct intel_quirk intel_quirks[] = { /* Acer/Packard Bell NCL20 */ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, + + /* Acer Aspire 4736Z */ + { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, + + /* Dell XPS13 HD Sandy Bridge */ + { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, + /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ + { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable }, }; static void intel_init_quirks(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1c1840f..2d8fff3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -334,7 +334,7 @@ struct dip_infoframe { } __attribute__((packed)); struct intel_hdmi { - u32 sdvox_reg; + u32 hdmi_reg; int ddc_bus; uint32_t color_range; bool has_hdmi_sink; @@ -377,7 +377,7 @@ struct intel_dp { struct intel_digital_port { struct intel_encoder base; enum port port; - u32 port_reversal; + u32 saved_port_bits; struct intel_dp dp; struct intel_hdmi hdmi; }; @@ -427,7 +427,7 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector) extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, - int sdvox_reg, enum port port); + int hdmi_reg, enum port port); extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index dccd859..8d7a334 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -50,7 +50,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) enabled_bits = HAS_DDI(dev) ? 
DDI_BUF_CTL_ENABLE : SDVO_ENABLE; - WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, + WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits, "HDMI port enabled, expecting disabled\n"); } @@ -391,7 +391,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, return; } - switch (intel_hdmi->sdvox_reg) { + switch (intel_hdmi->hdmi_reg) { case SDVOB: port = VIDEO_DIP_PORT_B; break; @@ -447,7 +447,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, return; } - switch (intel_hdmi->sdvox_reg) { + switch (intel_hdmi->hdmi_reg) { case HDMIB: port = VIDEO_DIP_PORT_B; break; @@ -587,40 +587,40 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - u32 sdvox; + u32 hdmi_val; - sdvox = SDVO_ENCODING_HDMI; + hdmi_val = SDVO_ENCODING_HDMI; if (!HAS_PCH_SPLIT(dev)) - sdvox |= intel_hdmi->color_range; + hdmi_val |= intel_hdmi->color_range; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - sdvox |= SDVO_VSYNC_ACTIVE_HIGH; + hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - sdvox |= SDVO_HSYNC_ACTIVE_HIGH; + hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; if (intel_crtc->bpp > 24) - sdvox |= COLOR_FORMAT_12bpc; + hdmi_val |= COLOR_FORMAT_12bpc; else - sdvox |= COLOR_FORMAT_8bpc; + hdmi_val |= COLOR_FORMAT_8bpc; /* Required on CPT */ if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) - sdvox |= HDMI_MODE_SELECT; + hdmi_val |= HDMI_MODE_SELECT; if (intel_hdmi->has_audio) { DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", pipe_name(intel_crtc->pipe)); - sdvox |= SDVO_AUDIO_ENABLE; - sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; + hdmi_val |= SDVO_AUDIO_ENABLE; + hdmi_val |= SDVO_NULL_PACKETS_DURING_VSYNC; intel_write_eld(encoder, adjusted_mode); } if (HAS_PCH_CPT(dev)) - sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); + hdmi_val |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); else if (intel_crtc->pipe == PIPE_B) - sdvox |= SDVO_PIPE_B_SELECT; + hdmi_val |= SDVO_PIPE_B_SELECT; - I915_WRITE(intel_hdmi->sdvox_reg, sdvox); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); + POSTING_READ(intel_hdmi->hdmi_reg); intel_hdmi->set_infoframes(encoder, adjusted_mode); } @@ -633,7 +633,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); u32 tmp; - tmp = I915_READ(intel_hdmi->sdvox_reg); + tmp = I915_READ(intel_hdmi->hdmi_reg); if (!(tmp & SDVO_ENABLE)) return false; @@ -657,7 +657,7 @@ static void intel_enable_hdmi(struct intel_encoder *encoder) if (intel_hdmi->has_audio) enable_bits |= SDVO_AUDIO_ENABLE; - temp = I915_READ(intel_hdmi->sdvox_reg); + temp = I915_READ(intel_hdmi->hdmi_reg); /* HW workaround for IBX, we need to move the port to transcoder A * before disabling it. */ @@ -674,21 +674,21 @@ static void intel_enable_hdmi(struct intel_encoder *encoder) * we do this anyway which shows more stable in testing. 
*/ if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); + POSTING_READ(intel_hdmi->hdmi_reg); } temp |= enable_bits; - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); /* HW workaround, need to write this twice for issue that may result * in first write getting masked. */ if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); } } @@ -700,7 +700,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) u32 temp; u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; - temp = I915_READ(intel_hdmi->sdvox_reg); + temp = I915_READ(intel_hdmi->hdmi_reg); /* HW workaround for IBX, we need to move the port to transcoder A * before disabling it. */ @@ -710,12 +710,12 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) if (temp & SDVO_PIPE_B_SELECT) { temp &= ~SDVO_PIPE_B_SELECT; - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); /* Again we need to write this twice. */ - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); /* Transcoder selection bits only update * effectively on vblank. */ @@ -730,21 +730,21 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) * we do this anyway which shows more stable in testing. */ if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); + POSTING_READ(intel_hdmi->hdmi_reg); } temp &= ~enable_bits; - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); /* HW workaround, need to write this twice for issue that may result * in first write getting masked. 
*/ if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(intel_hdmi->sdvox_reg, temp); - POSTING_READ(intel_hdmi->sdvox_reg); + I915_WRITE(intel_hdmi->hdmi_reg, temp); + POSTING_READ(intel_hdmi->hdmi_reg); } } @@ -1009,7 +1009,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, } } -void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) +void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) { struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; @@ -1042,7 +1042,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) intel_encoder->cloneable = false; intel_dig_port->port = port; - intel_dig_port->hdmi.sdvox_reg = sdvox_reg; + intel_dig_port->hdmi.hdmi_reg = hdmi_reg; intel_dig_port->dp.output_reg = 0; intel_hdmi_init_connector(intel_dig_port, intel_connector); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 94d895b..cc6346d 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -347,7 +347,8 @@ void intel_panel_enable_backlight(struct drm_device *dev, POSTING_READ(reg); I915_WRITE(reg, tmp | BLM_PWM_ENABLE); - if (HAS_PCH_SPLIT(dev)) { + if (HAS_PCH_SPLIT(dev) && + !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { tmp = I915_READ(BLC_PWM_PCH_CTL1); tmp |= BLM_PCH_PWM_ENABLE; tmp &= ~BLM_PCH_OVERRIDE_ENABLE; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 253bcf3..7af828c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4387,7 +4387,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv) gen6_gt_check_fifodbg(dev_priv); } -void intel_gt_reset(struct drm_device *dev) +void intel_gt_sanitize(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4398,6 +4398,10 @@ void intel_gt_reset(struct drm_device *dev) if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) __gen6_gt_force_wake_mt_reset(dev_priv); } + + /* BIOS often leaves RC6 enabled, but disable it for hw init */ + if (INTEL_INFO(dev)->gen >= 6) + intel_disable_gt_powersave(dev); } void intel_gt_init(struct drm_device *dev) @@ -4406,14 +4410,41 @@ void intel_gt_init(struct drm_device *dev) spin_lock_init(&dev_priv->gt_lock); - intel_gt_reset(dev); - if (IS_VALLEYVIEW(dev)) { dev_priv->gt.force_wake_get = vlv_force_wake_get; dev_priv->gt.force_wake_put = vlv_force_wake_put; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev)) { dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; + } else if (IS_IVYBRIDGE(dev)) { + u32 ecobus; + + /* IVB configs may use multi-threaded forcewake */ + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. 
+ */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = I915_READ_NOTRACE(ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + dev_priv->gt.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->gt.force_wake_put = + __gen6_gt_force_wake_mt_put; + } else { + DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); + DRM_INFO("when using vblank-synced partial screen updates.\n"); + dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; + dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; + } } else if (IS_GEN6(dev)) { dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 42ff97d..27f810f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -486,9 +486,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) struct pipe_control *pc = ring->private; struct drm_i915_gem_object *obj; - if (!ring->private) - return; - obj = pc->obj; kunmap(sg_page(obj->pages->sgl)); @@ -496,7 +493,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) drm_gem_object_unreference(&obj->base); kfree(pc); - ring->private = NULL; } static int init_render_ring(struct intel_ring_buffer *ring) @@ -567,7 +563,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) if (HAS_BROKEN_CS_TLB(dev)) drm_gem_object_unreference(to_gem_object(ring->private)); - cleanup_pipe_control(ring); + if (INTEL_INFO(dev)->gen >= 5) + cleanup_pipe_control(ring); + + ring->private = NULL; } static void diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 327c08b..3057181 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -156,6 +156,9 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t offset; + if (!dig || !dig->afmt) + return; + /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index d89a1f8..79e3cda 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -322,6 +322,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t offset; + if (!dig || !dig->afmt) + return; + /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; @@ -480,6 +483,9 @@ void r600_hdmi_enable(struct drm_encoder *encoder) uint32_t offset; u32 hdmi; + if (!dig || !dig->afmt) + return; + if (ASIC_IS_DCE6(rdev)) return; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a08f657..aa2d2e2 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -387,6 +387,7 @@ struct radeon_sa_manager { uint64_t gpu_addr; void *cpu_ptr; uint32_t domain; + uint32_t align; }; struct radeon_sa_bo; diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index daf9710..dfa0ca1 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -467,6 +467,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev) size *= 2; r 
= radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, RADEON_GPU_PAGE_ALIGN(size), + RADEON_GPU_PAGE_SIZE, RADEON_GEM_DOMAIN_VRAM); if (r) { dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 48f80cd..4f3b4d5 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -241,9 +241,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev) { int r = 0; - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); - INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); - spin_lock_init(&rdev->irq.lock); r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { @@ -265,6 +262,10 @@ int radeon_irq_kms_init(struct radeon_device *rdev) rdev->irq.installed = false; return r; } + + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); + DRM_INFO("radeon: irq initialized.\n"); return 0; } @@ -284,8 +285,8 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) rdev->irq.installed = false; if (rdev->msi_enabled) pci_disable_msi(rdev->pdev); + flush_work(&rdev->hotplug_work); } - flush_work(&rdev->hotplug_work); } /** diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 5fc86b0..ec18ad5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -158,7 +158,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo) extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, - unsigned size, u32 domain); + unsigned size, u32 align, u32 domain); extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager); extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 3760eff..cac9f20 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -205,6 +205,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) } r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo, RADEON_IB_POOL_SIZE*64*1024, + RADEON_GPU_PAGE_SIZE, RADEON_GEM_DOMAIN_GTT); if (r) { return r; diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index cb80099..f0bac68 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager); int radeon_sa_bo_manager_init(struct radeon_device *rdev, struct radeon_sa_manager *sa_manager, - unsigned size, u32 domain) + unsigned size, u32 align, u32 domain) { int i, r; @@ -57,14 +57,15 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev, sa_manager->bo = NULL; sa_manager->size = size; sa_manager->domain = domain; + sa_manager->align = align; sa_manager->hole = &sa_manager->olist; INIT_LIST_HEAD(&sa_manager->olist); for (i = 0; i < RADEON_NUM_RINGS; ++i) { INIT_LIST_HEAD(&sa_manager->flist[i]); } - r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo); + r = radeon_bo_create(rdev, size, align, true, + domain, NULL, &sa_manager->bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); return r; @@ -317,7 +318,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev, unsigned tries[RADEON_NUM_RINGS]; int i, r; - BUG_ON(align > RADEON_GPU_PAGE_SIZE); 
+ BUG_ON(align > sa_manager->align); BUG_ON(size > sa_manager->size); *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL); diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index 10460fd..dbcdfbf 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -172,7 +172,9 @@ static void saa7134_irq_alsa_done(struct saa7134_dev *dev, dprintk("irq: overrun [full=%d/%d] - Blocks in %d\n",dev->dmasound.read_count, dev->dmasound.bufsize, dev->dmasound.blocks); spin_unlock(&dev->slock); + snd_pcm_stream_lock(dev->dmasound.substream); snd_pcm_stop(dev->dmasound.substream,SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock(dev->dmasound.substream); return; } diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index c260af5..1f3ff91 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -175,6 +175,8 @@ static int __init dummy_init_module(void) rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); + if (err < 0) + goto out; for (i = 0; i < numdummies && !err; i++) { err = dummy_init_one(); @@ -182,6 +184,8 @@ static int __init dummy_init_module(void) } if (err < 0) __rtnl_link_unregister(&dummy_link_ops); + +out: rtnl_unlock(); return err; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index ca33b28..62cb33c 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1669,8 +1669,8 @@ check_sum: return 0; } -static void atl1e_tx_map(struct atl1e_adapter *adapter, - struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +static int atl1e_tx_map(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) { struct atl1e_tpd_desc *use_tpd = NULL; struct atl1e_tx_buffer *tx_buffer = NULL; @@ -1681,6 +1681,8 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, u16 nr_frags; u16 f; int segment; + int ring_start = adapter->tx_ring.next_to_use; + int ring_end; nr_frags = skb_shinfo(skb)->nr_frags; segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; @@ -1693,6 +1695,9 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, tx_buffer->length = map_len; tx_buffer->dma = pci_map_single(adapter->pdev, skb->data, hdr_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) + return -ENOSPC; + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); @@ -1719,6 +1724,22 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, tx_buffer->dma = pci_map_single(adapter->pdev, skb->data + mapped_len, map_len, PCI_DMA_TODEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + } + /* Reset the tx rings next pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); mapped_len += map_len; use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); @@ -1754,6 +1775,23 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, (i * MAX_TX_BUF_LEN), tx_buffer->length, DMA_TO_DEVICE); + + if 
(dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma, + tx_buffer->length, DMA_TO_DEVICE); + } + + /* Reset the ring next to use pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | @@ -1771,6 +1809,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter, /* The last buffer info contain the skb address, so it will be free after unmap */ tx_buffer->skb = skb; + return 0; } static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, @@ -1838,10 +1877,15 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } - atl1e_tx_map(adapter, skb, tpd); + if (atl1e_tx_map(adapter, skb, tpd)) { + dev_kfree_skb_any(skb); + goto out; + } + atl1e_tx_queue(adapter, tpd_req, tpd); netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ +out: spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 520161c..4237224 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -172,8 +172,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rmcr_value = 0x00000001, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, - .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE | - EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | + EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE, .apr = 1, @@ -286,9 +287,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, .fdr_value = 0x0000072f, @@ -368,9 +369,9 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tx_check = EESR_TC1 | EESR_FTC, - .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ - EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ - EESR_ECI, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | + EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ EESR_TFE, diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index bae84fd..3ea34bf 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -472,7 +472,7 @@ enum EESR_BIT { #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ EESR_RTO) -#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | \ +#define DEFAULT_EESR_ERR_CHECK (EESR_TWB | 
EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE | EESR_ECI) #define DEFAULT_TX_ERROR_CHECK (EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \ diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index e1b8955..fb6f3e3 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1242,6 +1242,8 @@ static int vnet_port_remove(struct vio_dev *vdev) dev_set_drvdata(&vdev->dev, NULL); kfree(port); + + unregister_netdev(vp->dev); } return 0; } diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 344dceb..635d01c 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -290,11 +290,17 @@ static int __init ifb_init_module(void) rtnl_lock(); err = __rtnl_link_register(&ifb_link_ops); + if (err < 0) + goto out; - for (i = 0; i < numifbs && !err; i++) + for (i = 0; i < numifbs && !err; i++) { err = ifb_init_one(i); + cond_resched(); + } if (err) __rtnl_link_unregister(&ifb_link_ops); + +out: rtnl_unlock(); return err; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0f0f9ce..9872202 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -535,8 +535,10 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return -EMSGSIZE; num_pages = get_user_pages_fast(base, size, 0, &page[i]); if (num_pages != size) { - for (i = 0; i < num_pages; i++) - put_page(page[i]); + int j; + + for (j = 0; j < num_pages; j++) + put_page(page[i + j]); return -EFAULT; } truesize = size * PAGE_SIZE; @@ -642,6 +644,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, return 0; } +static unsigned long iov_pages(const struct iovec *iv, int offset, + unsigned long nr_segs) +{ + unsigned long seg, base; + int pages = 0, len, size; + + while (nr_segs && (offset >= iv->iov_len)) { + offset -= iv->iov_len; + ++iv; + --nr_segs; + } + + for (seg = 0; seg < nr_segs; seg++) { + base = (unsigned long)iv[seg].iov_base + offset; + len = iv[seg].iov_len - offset; + size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; + pages += size; + offset = 0; + } + + return pages; +} /* Get packet from user space buffer */ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, @@ -656,6 +680,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, int vnet_hdr_len = 0; int copylen = 0; bool zerocopy = false; + size_t linear; if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = q->vnet_hdr_sz; @@ -687,42 +712,35 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (unlikely(count > UIO_MAXIOV)) goto err; - if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) - zerocopy = true; + if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { + copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN; + linear = copylen; + if (iov_pages(iv, vnet_hdr_len + copylen, count) + <= MAX_SKB_FRAGS) + zerocopy = true; + } - if (zerocopy) { - /* Userspace may produce vectors with count greater than - * MAX_SKB_FRAGS, so we need to linearize parts of the skb - * to let the rest of data to be fit in the frags. - */ - if (count > MAX_SKB_FRAGS) { - copylen = iov_length(iv, count - MAX_SKB_FRAGS); - if (copylen < vnet_hdr_len) - copylen = 0; - else - copylen -= vnet_hdr_len; - } - /* There are 256 bytes to be copied in skb, so there is enough - * room for skb expand head in case it is used. - * The rest buffer is mapped from userspace. 
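Both macvtap above and tun below gain an identical iov_pages() helper. Its core expression counts how many pages the span [base, base + len) touches: ~PAGE_MASK equals PAGE_SIZE - 1, so adding it rounds the in-page offset plus length up to whole pages. A standalone sketch with worked cases:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Pages touched by [base, base + len): offset within the first page,
 * plus the length, rounded up to a whole number of pages. */
static int span_pages(unsigned long base, int len)
{
	return ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes starting 50 bytes before a page boundary: 2 pages. */
	printf("%d\n", span_pages(PAGE_SIZE - 50, 100));
	/* One page-aligned page: exactly 1. */
	printf("%d\n", span_pages(3 * PAGE_SIZE, PAGE_SIZE));
	return 0;
}

The callers use this total to enable zerocopy only when everything past the copied header fits within MAX_SKB_FRAGS page fragments.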
- */ - if (copylen < vnet_hdr.hdr_len) - copylen = vnet_hdr.hdr_len; - if (!copylen) - copylen = GOODCOPY_LEN; - } else + if (!zerocopy) { copylen = len; + linear = vnet_hdr.hdr_len; + } skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, - vnet_hdr.hdr_len, noblock, &err); + linear, noblock, &err); if (!skb) goto err; if (zerocopy) err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); - else + else { err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len); + if (!err && m && m->msg_control) { + struct ubuf_info *uarg = m->msg_control; + uarg->callback(uarg, false); + } + } + if (err) goto err_kfree; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2a56317..340cee2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1038,6 +1038,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, return 0; } +static unsigned long iov_pages(const struct iovec *iv, int offset, + unsigned long nr_segs) +{ + unsigned long seg, base; + int pages = 0, len, size; + + while (nr_segs && (offset >= iv->iov_len)) { + offset -= iv->iov_len; + ++iv; + --nr_segs; + } + + for (seg = 0; seg < nr_segs; seg++) { + base = (unsigned long)iv[seg].iov_base + offset; + len = iv[seg].iov_len - offset; + size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; + pages += size; + offset = 0; + } + + return pages; +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, const struct iovec *iv, @@ -1045,7 +1068,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; - size_t len = total_len, align = NET_SKB_PAD; + size_t len = total_len, align = NET_SKB_PAD, linear; struct virtio_net_hdr gso = { 0 }; int offset = 0; int copylen; @@ -1085,34 +1108,23 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EINVAL; } - if (msg_control) - zerocopy = true; - - if (zerocopy) { - /* Userspace may produce vectors with count greater than - * MAX_SKB_FRAGS, so we need to linearize parts of the skb - * to let the rest of data to be fit in the frags. - */ - if (count > MAX_SKB_FRAGS) { - copylen = iov_length(iv, count - MAX_SKB_FRAGS); - if (copylen < offset) - copylen = 0; - else - copylen -= offset; - } else - copylen = 0; - /* There are 256 bytes to be copied in skb, so there is enough - * room for skb expand head in case it is used. + if (msg_control) { + /* There are 256 bytes to be copied in skb, so there is + * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. */ - if (copylen < gso.hdr_len) - copylen = gso.hdr_len; - if (!copylen) - copylen = GOODCOPY_LEN; - } else + copylen = gso.hdr_len ? 
gso.hdr_len : GOODCOPY_LEN; + linear = copylen; + if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS) + zerocopy = true; + } + + if (!zerocopy) { copylen = len; + linear = gso.hdr_len; + } - skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock); + skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EAGAIN) tun->dev->stats.rx_dropped++; @@ -1121,8 +1133,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) err = zerocopy_sg_from_iovec(skb, iv, offset, count); - else + else { err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len); + if (!err && msg_control) { + struct ubuf_info *uarg = msg_control; + uarg->callback(uarg, false); + } + } if (err) { tun->dev->stats.rx_dropped++; diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c index 6c1e313..fe6f536e 100644 --- a/drivers/staging/line6/pcm.c +++ b/drivers/staging/line6/pcm.c @@ -384,8 +384,11 @@ static int snd_line6_pcm_free(struct snd_device *device) */ static void pcm_disconnect_substream(struct snd_pcm_substream *substream) { - if (substream->runtime && snd_pcm_running(substream)) + if (substream->runtime && snd_pcm_running(substream)) { + snd_pcm_stream_lock_irq(substream); snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED); + snd_pcm_stream_unlock_irq(substream); + } } /* diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d170412..aa8acfa 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6966,6 +6966,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int err = 0; int ret; int level; + bool root_dropped = false; path = btrfs_alloc_path(); if (!path) { @@ -7023,6 +7024,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, while (1) { btrfs_tree_lock(path->nodes[level]); btrfs_set_lock_blocking(path->nodes[level]); + path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; ret = btrfs_lookup_extent_info(trans, root, path->nodes[level]->start, @@ -7039,6 +7041,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, break; btrfs_tree_unlock(path->nodes[level]); + path->locks[level] = 0; WARN_ON(wc->refs[level] != 1); level--; } @@ -7134,12 +7137,22 @@ int btrfs_drop_snapshot(struct btrfs_root *root, free_extent_buffer(root->commit_root); kfree(root); } + root_dropped = true; out_end_trans: btrfs_end_transaction_throttle(trans, tree_root); out_free: kfree(wc); btrfs_free_path(path); out: + /* + * So if we need to stop dropping the snapshot for whatever reason we + * need to make sure to add it back to the dead root list so that we + * keep trying to do the work later. This also cleans up roots if we + * don't have it in the radix (like when we recover after a power fail + * or unmount) so we don't leak memory. 
+ */ + if (root_dropped == false) + btrfs_add_dead_root(root); if (err) btrfs_std_error(root->fs_info, err); return err; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index a5ce6ce..728c8f6 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4326,9 +4326,20 @@ void ext4_ext_truncate(struct inode *inode) last_block = (inode->i_size + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); +retry: err = ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); + if (err == ENOMEM) { + cond_resched(); + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry; + } + if (err) { + ext4_std_error(inode->i_sb, err); + return; + } err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); + ext4_std_error(inode->i_sb, err); /* In a multi-transaction truncate, we only make the final * transaction synchronous. diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 594abec..697d911 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -82,6 +82,9 @@ extern int ipv6_dev_get_saddr(struct net *net, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); +extern int __ipv6_get_lladdr(struct inet6_dev *idev, + struct in6_addr *addr, + unsigned char banned_flags); extern int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags); diff --git a/include/net/udp.h b/include/net/udp.h index 065f379..ad99eed 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -181,6 +181,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum, extern void udp_err(struct sk_buff *, u32); extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); +extern int udp_push_pending_frames(struct sock *sk); extern void udp_flush_pending_frames(struct sock *sk); extern int udp_rcv(struct sk_buff *skb); extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h index 0b46fd5..e36a4ae 100644 --- a/include/uapi/linux/if_pppox.h +++ b/include/uapi/linux/if_pppox.h @@ -135,11 +135,11 @@ struct pppoe_tag { struct pppoe_hdr { #if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 ver : 4; __u8 type : 4; + __u8 ver : 4; #elif defined(__BIG_ENDIAN_BITFIELD) - __u8 type : 4; __u8 ver : 4; + __u8 type : 4; #else #error "Please fix <asm/byteorder.h>" #endif diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c index ca304046..ab79ecb 100644 --- a/kernel/power/autosleep.c +++ b/kernel/power/autosleep.c @@ -32,7 +32,8 @@ static void try_to_suspend(struct work_struct *work) mutex_lock(&autosleep_lock); - if (!pm_save_wakeup_count(initial_count)) { + if (!pm_save_wakeup_count(initial_count) || + system_state != SYSTEM_RUNNING) { mutex_unlock(&autosleep_lock); goto out; } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9cbcfcd..4c14e8a 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -73,6 +73,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) { struct vlan_priority_tci_mapping *mp; + smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ + mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)]; while (mp) { if (mp->priority == skb->priority) { @@ -248,6 +250,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, np->next = mp; np->priority = skb_prio; np->vlan_qos = vlan_qos; + /* Before inserting this element in hash table, make sure all its fields + * are committed to memory. 
+ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask() + */ + smp_wmb(); vlan->egress_priority_map[skb_prio & 0xF] = np; if (vlan_qos) vlan->nr_egress_mappings++; diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index de8df95..2ee3879 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c @@ -24,11 +24,11 @@ */ void p9_release_pages(struct page **pages, int nr_pages) { - int i = 0; - while (pages[i] && nr_pages--) { - put_page(pages[i]); - i++; - } + int i; + + for (i = 0; i < nr_pages; i++) + if (pages[i]) + put_page(pages[i]); } EXPORT_SYMBOL(p9_release_pages); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 6d6f265..155bc05 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -458,8 +458,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, skb_set_transport_header(skb, skb->len); mldq = (struct mld_msg *) icmp6_hdr(skb); - interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : - br->multicast_query_response_interval; + interval = ipv6_addr_any(group) ? + br->multicast_query_response_interval : + br->multicast_last_member_interval; mldq->mld_type = ICMPV6_MGM_QUERY; mldq->mld_code = 0; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index c815f28..8f9a6c6 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -239,7 +239,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) we must kill timers etc. and move it to safe state. */ - skb_queue_purge(&n->arp_queue); + __skb_queue_purge(&n->arp_queue); n->arp_queue_len_bytes = 0; n->output = neigh_blackhole; if (n->nud_state & NUD_VALID) @@ -302,7 +302,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device if (!n) goto out_entries; - skb_queue_head_init(&n->arp_queue); + __skb_queue_head_init(&n->arp_queue); rwlock_init(&n->lock); seqlock_init(&n->ha_lock); n->updated = n->used = now; @@ -724,7 +724,9 @@ void neigh_destroy(struct neighbour *neigh) if (neigh_del_timer(neigh)) pr_warn("Impossible event\n"); - skb_queue_purge(&neigh->arp_queue); + write_lock_bh(&neigh->lock); + __skb_queue_purge(&neigh->arp_queue); + write_unlock_bh(&neigh->lock); neigh->arp_queue_len_bytes = 0; if (dev->netdev_ops->ndo_neigh_destroy) @@ -870,7 +872,7 @@ static void neigh_invalidate(struct neighbour *neigh) neigh->ops->error_report(neigh, skb); write_lock(&neigh->lock); } - skb_queue_purge(&neigh->arp_queue); + __skb_queue_purge(&neigh->arp_queue); neigh->arp_queue_len_bytes = 0; } @@ -1222,7 +1224,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, write_lock_bh(&neigh->lock); } - skb_queue_purge(&neigh->arp_queue); + __skb_queue_purge(&neigh->arp_queue); neigh->arp_queue_len_bytes = 0; } out: diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index f1395a6..e1e5a6a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -190,10 +190,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); - __skb_pull(skb, ip_hdrlen(skb)); - - /* Point into the IP datagram, just past the header. 
*/ - skb_reset_transport_header(skb); + __skb_pull(skb, skb_network_header_len(skb)); rcu_read_lock(); { @@ -442,6 +439,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, goto drop; } + skb->transport_header = skb->network_header + iph->ihl*4; + /* Remove any debris in the socket control block */ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index c3a4233..5ccf343 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -645,17 +645,10 @@ static int __net_init vti_fb_tunnel_init(struct net_device *dev) struct iphdr *iph = &tunnel->parms.iph; struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id); - tunnel->dev = dev; - strcpy(tunnel->parms.name, dev->name); - iph->version = 4; iph->protocol = IPPROTO_IPIP; iph->ihl = 5; - dev->tstats = alloc_percpu(struct pcpu_tstats); - if (!dev->tstats) - return -ENOMEM; - dev_hold(dev); rcu_assign_pointer(ipn->tunnels_wc[0], tunnel); return 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d9130a9..47b459a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1003,7 +1003,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *md5sig; - key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); + key = tcp_md5_do_lookup(sk, addr, family); if (key) { /* Pre-existing entry - just update that one. */ memcpy(key->key, newkey, newkeylen); @@ -1048,7 +1048,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) struct tcp_md5sig_key *key; struct tcp_md5sig_info *md5sig; - key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); + key = tcp_md5_do_lookup(sk, addr, family); if (!key) return -ENOENT; hlist_del_rcu(&key->node); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1f4d405..21e1eec 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -774,7 +774,7 @@ send: /* * Push out all pending data as one UDP datagram. Socket is locked. 
*/ -static int udp_push_pending_frames(struct sock *sk) +int udp_push_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); @@ -793,6 +793,7 @@ out: up->pending = 0; return err; } +EXPORT_SYMBOL(udp_push_pending_frames); int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d1d6915..7b54fff 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1370,6 +1370,23 @@ try_nextdev: } EXPORT_SYMBOL(ipv6_dev_get_saddr); +int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, + unsigned char banned_flags) +{ + struct inet6_ifaddr *ifp; + int err = -EADDRNOTAVAIL; + + list_for_each_entry(ifp, &idev->addr_list, if_list) { + if (ifp->scope == IFA_LINK && + !(ifp->flags & banned_flags)) { + *addr = ifp->addr; + err = 0; + break; + } + } + return err; +} + int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags) { @@ -1379,17 +1396,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { - struct inet6_ifaddr *ifp; - read_lock_bh(&idev->lock); - list_for_each_entry(ifp, &idev->addr_list, if_list) { - if (ifp->scope == IFA_LINK && - !(ifp->flags & banned_flags)) { - *addr = ifp->addr; - err = 0; - break; - } - } + err = __ipv6_get_lladdr(idev, addr, banned_flags); read_unlock_bh(&idev->lock); } rcu_read_unlock(); @@ -2555,6 +2563,9 @@ static void init_loopback(struct net_device *dev) if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) continue; + if (sp_ifa->rt) + continue; + sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); /* Failure cases are ignored */ diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 710cafd..e67039f 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -634,6 +634,12 @@ insert_above: return ln; } +static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt) +{ + return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == + RTF_GATEWAY; +} + /* * Insert routing information in a node. */ @@ -648,6 +654,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, int add = (!info->nlh || (info->nlh->nlmsg_flags & NLM_F_CREATE)); int found = 0; + bool rt_can_ecmp = rt6_qualify_for_ecmp(rt); ins = &fn->leaf; @@ -693,9 +700,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, * To avoid long list, we only had siblings if the * route have a gateway. 
*/ - if (rt->rt6i_flags & RTF_GATEWAY && - !(rt->rt6i_flags & RTF_EXPIRES) && - !(iter->rt6i_flags & RTF_EXPIRES)) + if (rt_can_ecmp && + rt6_qualify_for_ecmp(iter)) rt->rt6i_nsiblings++; } @@ -717,7 +723,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, /* Find the first route that have the same metric */ sibling = fn->leaf; while (sibling) { - if (sibling->rt6i_metric == rt->rt6i_metric) { + if (sibling->rt6i_metric == rt->rt6i_metric && + rt6_qualify_for_ecmp(sibling)) { list_add_tail(&rt->rt6i_siblings, &sibling->rt6i_siblings); break; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2adff32..e4c8920 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -843,11 +843,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, const struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); - struct rt6_info *rt = (struct rt6_info *)dst; + struct rt6_info *rt; if (!dst) goto out; + if (dst->ops->family != AF_INET6) { + dst_release(dst); + return NULL; + } + + rt = (struct rt6_info *)dst; /* Yes, checking route validity in not connected * case is not very simple. Take into account, * that we do not support routing by source, TOS, @@ -1110,11 +1116,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src, return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; } -static void ip6_append_data_mtu(int *mtu, +static void ip6_append_data_mtu(unsigned int *mtu, int *maxfraglen, unsigned int fragheaderlen, struct sk_buff *skb, - struct rt6_info *rt) + struct rt6_info *rt, + bool pmtuprobe) { if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { if (skb == NULL) { @@ -1126,7 +1133,9 @@ static void ip6_append_data_mtu(int *mtu, * this fragment is not first, the headers * space is regarded as data space. */ - *mtu = dst_mtu(rt->dst.path); + *mtu = min(*mtu, pmtuprobe ? + rt->dst.dev->mtu : + dst_mtu(rt->dst.path)); } *maxfraglen = ((*mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); @@ -1143,11 +1152,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, struct ipv6_pinfo *np = inet6_sk(sk); struct inet_cork *cork; struct sk_buff *skb, *skb_prev = NULL; - unsigned int maxfraglen, fragheaderlen; + unsigned int maxfraglen, fragheaderlen, mtu; int exthdrlen; int dst_exthdrlen; int hh_len; - int mtu; int copy; int err; int offset = 0; @@ -1307,7 +1315,9 @@ alloc_new_skb: /* update mtu and maxfraglen if necessary */ if (skb == NULL || skb_prev == NULL) ip6_append_data_mtu(&mtu, &maxfraglen, - fragheaderlen, skb, rt); + fragheaderlen, skb, rt, + np->pmtudisc == + IPV6_PMTUDISC_PROBE); skb_prev = skb; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 28dfa5f..79fa628 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1340,8 +1340,9 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) return scount; } -static struct sk_buff *mld_newpack(struct net_device *dev, int size) +static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) { + struct net_device *dev = idev->dev; struct net *net = dev_net(dev); struct sock *sk = net->ipv6.igmp_sk; struct sk_buff *skb; @@ -1366,7 +1367,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) skb_reserve(skb, hlen); - if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { + if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { /* <draft-ietf-magma-mld-source-05.txt>: * use unspecified address as the source address * when a valid link-local address is not available. 
@@ -1462,7 +1463,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, struct mld2_grec *pgr; if (!skb) - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(pmc->idev, dev->mtu); if (!skb) return NULL; pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec)); @@ -1482,7 +1483,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) { - struct net_device *dev = pmc->idev->dev; + struct inet6_dev *idev = pmc->idev; + struct net_device *dev = idev->dev; struct mld2_report *pmr; struct mld2_grec *pgr = NULL; struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; @@ -1511,7 +1513,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb) mld_sendpack(skb); - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(idev, dev->mtu); } } first = 1; @@ -1538,7 +1540,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, pgr->grec_nsrcs = htons(scount); if (skb) mld_sendpack(skb); - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(idev, dev->mtu); first = 1; scount = 0; } @@ -1593,8 +1595,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) struct sk_buff *skb = NULL; int type; + read_lock_bh(&idev->lock); if (!pmc) { - read_lock_bh(&idev->lock); for (pmc=idev->mc_list; pmc; pmc=pmc->next) { if (pmc->mca_flags & MAF_NOREPORT) continue; @@ -1606,7 +1608,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } - read_unlock_bh(&idev->lock); } else { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) @@ -1616,6 +1617,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } + read_unlock_bh(&idev->lock); if (skb) mld_sendpack(skb); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5845613..2f01ab3 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1118,10 +1118,13 @@ static void ip6_link_failure(struct sk_buff *skb) rt = (struct rt6_info *) skb_dst(skb); if (rt) { - if (rt->rt6i_flags & RTF_CACHE) - rt6_update_expires(rt, 0); - else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) + if (rt->rt6i_flags & RTF_CACHE) { + dst_hold(&rt->dst); + if (ip6_del_rt(rt)) + dst_free(&rt->dst); + } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { rt->rt6i_node->fn_sernum = -1; + } } } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index cdffb60..4ca80fc 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -955,11 +955,16 @@ static int udp_v6_push_pending_frames(struct sock *sk) struct udphdr *uh; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); - struct flowi6 *fl6 = &inet->cork.fl.u.ip6; + struct flowi6 *fl6; int err = 0; int is_udplite = IS_UDPLITE(sk); __wsum csum = 0; + if (up->pending == AF_INET) + return udp_push_pending_frames(sk); + + fl6 = &inet->cork.fl.u.ip6; + /* Grab the skbuff where UDP header space exists. 
*/ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; diff --git a/net/key/af_key.c b/net/key/af_key.c index 5b426a6..891db6c 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1705,6 +1705,7 @@ static int key_notify_sa_flush(const struct km_event *c) hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); @@ -2689,6 +2690,7 @@ static int key_notify_policy_flush(const struct km_event *c) hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 8b875c29..c3c1152 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1831,7 +1831,8 @@ static const struct proto_ops pppol2tp_ops = { static const struct pppox_proto pppol2tp_proto = { .create = pppol2tp_create, - .ioctl = pppol2tp_ioctl + .ioctl = pppol2tp_ioctl, + .owner = THIS_MODULE, }; #ifdef CONFIG_L2TP_V3 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index a306bc6..b943e3e 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1586,11 +1586,11 @@ out_cud_release: case SIOCX25CALLACCPTAPPRV: { rc = -EINVAL; lock_sock(sk); - if (sk->sk_state != TCP_CLOSE) - break; - clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); + if (sk->sk_state == TCP_CLOSE) { + clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); + rc = 0; + } release_sock(sk); - rc = 0; break; } @@ -1598,14 +1598,15 @@ out_cud_release: rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) - break; + goto out_sendcallaccpt_release; /* must call accptapprv above */ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) - break; + goto out_sendcallaccpt_release; x25_write_internal(sk, X25_CALL_ACCEPTED); x25->state = X25_STATE_3; - release_sock(sk); rc = 0; +out_sendcallaccpt_release: + release_sock(sk); break; } diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c index 76e0d56..823359e 100644 --- a/sound/arm/pxa2xx-pcm-lib.c +++ b/sound/arm/pxa2xx-pcm-lib.c @@ -166,7 +166,9 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id) } else { printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n", rtd->params->name, dma_ch, dcsr); + snd_pcm_stream_lock(substream); snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock(substream); } } EXPORT_SYMBOL(pxa2xx_pcm_dma_irq); diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c index 3536b07..005a1f1 100644 --- a/sound/pci/asihpi/asihpi.c +++ b/sound/pci/asihpi/asihpi.c @@ -769,7 +769,10 @@ static void snd_card_asihpi_timer_function(unsigned long data) s->number); ds->drained_count++; if (ds->drained_count > 20) { + unsigned long flags; + snd_pcm_stream_lock_irqsave(s, flags); snd_pcm_stop(s, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock_irqrestore(s, flags); continue; } } else { diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c index a677431..3a80ffc 100644 --- a/sound/pci/atiixp.c +++ b/sound/pci/atiixp.c @@ -688,7 +688,9 @@ static void snd_atiixp_xrun_dma(struct atiixp *chip, struct atiixp_dma *dma) if (! dma->substream || ! 
dma->running) return; snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type); + snd_pcm_stream_lock(dma->substream); snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock(dma->substream); } /* diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c index d0bec7b..57f4182 100644 --- a/sound/pci/atiixp_modem.c +++ b/sound/pci/atiixp_modem.c @@ -638,7 +638,9 @@ static void snd_atiixp_xrun_dma(struct atiixp_modem *chip, if (! dma->substream || ! dma->running) return; snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type); + snd_pcm_stream_lock(dma->substream); snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock(dma->substream); } /* diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index f186897..ad23390 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2339,6 +2339,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi }, { .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi }, { .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi }, +{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_generic_hdmi }, { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, @@ -2391,6 +2392,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0042"); MODULE_ALIAS("snd-hda-codec-id:10de0043"); MODULE_ALIAS("snd-hda-codec-id:10de0044"); MODULE_ALIAS("snd-hda-codec-id:10de0051"); +MODULE_ALIAS("snd-hda-codec-id:10de0060"); MODULE_ALIAS("snd-hda-codec-id:10de0067"); MODULE_ALIAS("snd-hda-codec-id:10de8001"); MODULE_ALIAS("snd-hda-codec-id:11069f80"); diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c index 30184a4..ffc9ddf 100644 --- a/sound/soc/atmel/atmel-pcm-dma.c +++ b/sound/soc/atmel/atmel-pcm-dma.c @@ -80,7 +80,9 @@ static void atmel_pcm_dma_irq(u32 ssc_sr, /* stop RX and capture: will be enabled again at restart */ ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_disable); + snd_pcm_stream_lock(substream); snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock(substream); /* now drain RHR and read status to remove xrun condition */ ssc_readx(prtd->ssc->regs, SSC_RHR); diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 92bbfec..ea47938 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -37,7 +37,7 @@ static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = { [SGTL5000_CHIP_CLK_CTRL] = 0x0008, [SGTL5000_CHIP_I2S_CTRL] = 0x0010, - [SGTL5000_CHIP_SSS_CTRL] = 0x0008, + [SGTL5000_CHIP_SSS_CTRL] = 0x0010, [SGTL5000_CHIP_DAC_VOL] = 0x3c3c, [SGTL5000_CHIP_PAD_STRENGTH] = 0x015f, [SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818, diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index 8a9f435..d3a68bb 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -347,7 +347,7 @@ #define SGTL5000_PLL_INT_DIV_MASK 0xf800 #define SGTL5000_PLL_INT_DIV_SHIFT 11 #define SGTL5000_PLL_INT_DIV_WIDTH 5 -#define SGTL5000_PLL_FRAC_DIV_MASK 0x0700 +#define SGTL5000_PLL_FRAC_DIV_MASK 0x07ff #define SGTL5000_PLL_FRAC_DIV_SHIFT 0 #define SGTL5000_PLL_FRAC_DIV_WIDTH 11 diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c index 1358c7d..d0740a7 100644 --- a/sound/soc/s6000/s6000-pcm.c +++ 
b/sound/soc/s6000/s6000-pcm.c @@ -128,7 +128,9 @@ static irqreturn_t s6000_pcm_irq(int irq, void *data) substream->runtime && snd_pcm_running(substream)) { dev_dbg(pcm->dev, "xrun\n"); + snd_pcm_stream_lock(substream); snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock(substream); ret = IRQ_HANDLED; } diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c index 40dd50a..8221ff2 100644 --- a/sound/usb/6fire/pcm.c +++ b/sound/usb/6fire/pcm.c @@ -641,17 +641,25 @@ int usb6fire_pcm_init(struct sfire_chip *chip) void usb6fire_pcm_abort(struct sfire_chip *chip) { struct pcm_runtime *rt = chip->pcm; + unsigned long flags; int i; if (rt) { rt->panic = true; - if (rt->playback.instance) + if (rt->playback.instance) { + snd_pcm_stream_lock_irqsave(rt->playback.instance, flags); snd_pcm_stop(rt->playback.instance, SNDRV_PCM_STATE_XRUN); - if (rt->capture.instance) + snd_pcm_stream_unlock_irqrestore(rt->playback.instance, flags); + } + + if (rt->capture.instance) { + snd_pcm_stream_lock_irqsave(rt->capture.instance, flags); snd_pcm_stop(rt->capture.instance, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock_irqrestore(rt->capture.instance, flags); + } for (i = 0; i < PCM_N_URBS; i++) { usb_poison_urb(&rt->in_urbs[i].instance); diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c index 6ad617b..76d8329 100644 --- a/sound/usb/misc/ua101.c +++ b/sound/usb/misc/ua101.c @@ -613,14 +613,24 @@ static int start_usb_playback(struct ua101 *ua) static void abort_alsa_capture(struct ua101 *ua) { - if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) + unsigned long flags; + + if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states)) { + snd_pcm_stream_lock_irqsave(ua->capture.substream, flags); snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock_irqrestore(ua->capture.substream, flags); + } } static void abort_alsa_playback(struct ua101 *ua) { - if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) + unsigned long flags; + + if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states)) { + snd_pcm_stream_lock_irqsave(ua->playback.substream, flags); snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock_irqrestore(ua->playback.substream, flags); + } } static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream, diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index b376532..0ce90337 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -273,7 +273,11 @@ static void usX2Y_clients_stop(struct usX2Ydev *usX2Y) struct snd_usX2Y_substream *subs = usX2Y->subs[s]; if (subs) { if (atomic_read(&subs->state) >= state_PRERUNNING) { + unsigned long flags; + + snd_pcm_stream_lock_irqsave(subs->pcm_substream, flags); snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN); + snd_pcm_stream_unlock_irqrestore(subs->pcm_substream, flags); } for (u = 0; u < NRURBS; u++) { struct urb *urb = subs->urb[u]; -- To unsubscribe from this list: send the line "unsubscribe stable" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html
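
A note on the page-count bound computed by the two new iov_pages() helpers
above (tun.c and macvtap.c): ~PAGE_MASK equals PAGE_SIZE - 1, so the
expression rounds the byte span [base, base + len) up to the number of pages
it touches, including partial first and last pages. A minimal standalone
sketch of the same arithmetic, written as userspace C purely for
illustration (the 4 KiB page constants below are an assumption, not taken
from the patch):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)		/* assume 4096 */
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	/* Pages touched by [base, base + len): same formula as iov_pages(). */
	static unsigned long pages_spanned(unsigned long base, unsigned long len)
	{
		return ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
	}

	int main(void)
	{
		/* 100 bytes starting 16 bytes before a page boundary
		 * cross into the next page, so two pages are touched:
		 * (0xff0 + 100 + 4095) >> 12 == 2.
		 */
		printf("%lu\n", pages_spanned(0x1ff0, 100));
		return 0;
	}

Summing this per iovec segment, as iov_pages() does, gives an upper bound on
the skb frags a zero-copy transmit would need, which is why both drivers only
set zerocopy when the total stays within MAX_SKB_FRAGS.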
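
The vlan_dev.c hunks pair smp_wmb() with smp_rmb() around a single publishing
store, the usual lockless publish discipline: fully initialise the element,
issue the write barrier, then make it reachable with one pointer store, while
readers issue the read barrier before walking the structure. A hedged,
illustrative sketch of that pattern in kernel-style C — the node/head names
are invented here and do not appear in the patch, and a kernel build
environment providing smp_wmb()/smp_rmb() is assumed:

	struct node {
		int key;
		int value;
		struct node *next;
	};

	static struct node *head;

	static void publish(struct node *n, int key, int value)
	{
		n->key = key;
		n->value = value;
		n->next = head;
		smp_wmb();	/* commit all fields before 'n' becomes visible */
		head = n;	/* single store publishes the element */
	}

	static int lookup(int key)
	{
		struct node *n;

		smp_rmb();	/* pairs with smp_wmb() in publish() */
		for (n = head; n; n = n->next)
			if (n->key == key)
				return n->value;
		return -1;
	}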
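
Finally, every sound/* hunk in this series applies the same rule: ALSA
requires the PCM stream lock to be held around snd_pcm_stop(). Hard-IRQ
handlers (atiixp, s6000) use the plain snd_pcm_stream_lock()/unlock() pair,
while code that may run with interrupts enabled uses the _irq or _irqsave
variants. A minimal standalone sketch of the convention — the function name
and calling context are hypothetical, not taken from any driver above:

	#include <sound/core.h>
	#include <sound/pcm.h>

	/* Stop a running stream on XRUN from process or softirq context. */
	static void example_stop_on_xrun(struct snd_pcm_substream *substream)
	{
		unsigned long flags;

		if (!substream || !snd_pcm_running(substream))
			return;

		/* snd_pcm_stop() must be called with the stream lock held. */
		snd_pcm_stream_lock_irqsave(substream, flags);
		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		snd_pcm_stream_unlock_irqrestore(substream, flags);
	}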