This is the combined diff for 3.16.39-rc1 relative to 3.16.38.

Ben.

--
Ben Hutchings
If more than one person is responsible for a bug, no one is at fault.
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 0f3a1390bf00..4f65ae209a65 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -287,8 +287,8 @@ implementing on-disk size changes. Start with a copy of the old inode_setattr and vmtruncate, and the reorder the vmtruncate + foofs_vmtruncate sequence to be in order of zeroing blocks using block_truncate_page or similar helpers, size update and on finally on-disk truncation which should not fail. -inode_change_ok now includes the size checks for ATTR_SIZE and must be called -in the beginning of ->setattr unconditionally. +setattr_prepare (which used to be inode_change_ok) now includes the size checks +for ATTR_SIZE and must be called in the beginning of ->setattr unconditionally. [mandatory] diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt index 09c2382ad055..e3af8c27eef2 100644 --- a/Documentation/module-signing.txt +++ b/Documentation/module-signing.txt @@ -239,3 +239,9 @@ Since the private key is used to sign modules, viruses and malware could use the private key to sign modules and compromise the operating system. The private key must be either destroyed or moved to a secure location and not kept in the root node of the kernel source tree. + +If you use the same private key to sign modules for multiple kernel +configurations, you must ensure that the module version information is +sufficient to prevent loading a module into a different kernel. Either +set CONFIG_MODVERSIONS=y or ensure that each configuration has a different +kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION. diff --git a/Makefile b/Makefile index 37083fbc8784..438a0ceccaac 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 16 -SUBLEVEL = 38 -EXTRAVERSION = +SUBLEVEL = 39 +EXTRAVERSION = -rc1 NAME = Museum of Fishiegoodies # *DOCUMENTATION* diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 766fdfde2b7a..6e9d27ad5103 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len) return __cu_len; } -extern inline long -__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) -{ - if (__access_ok((unsigned long)validate, len, get_fs())) - len = __copy_tofrom_user_nocheck(to, from, len); - return len; -} - #define __copy_to_user(to,from,n) \ ({ \ __chk_user_ptr(to); \ @@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user - extern inline long copy_to_user(void __user *to, const void *from, long n) { - return __copy_tofrom_user((__force void *)to, from, n, to); + if (likely(__access_ok((unsigned long)to, n, get_fs()))) + n = __copy_tofrom_user_nocheck((__force void *)to, from, n); + return n; } extern inline long copy_from_user(void *to, const void __user *from, long n) { - return __copy_tofrom_user(to, (__force void *)from, n, from); + if (likely(__access_ok((unsigned long)from, n, get_fs()))) + n = __copy_tofrom_user_nocheck(to, (__force void *)from, n); + else + memset(to, 0, n); + return n; } extern void __do_clear_user(void); diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index cb7efc29f16f..d98d3bd5ecf6 100644 --- a/arch/arc/include/asm/irqflags.h +++ 
b/arch/arc/include/asm/irqflags.h @@ -179,10 +179,10 @@ static inline void arch_unmask_irq(unsigned int irq) .endm .macro IRQ_ENABLE scratch + TRACE_ASM_IRQ_ENABLE lr \scratch, [status32] or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK) flag \scratch - TRACE_ASM_IRQ_ENABLE .endm #endif /* __ASSEMBLY__ */ diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 30c9baffa96f..08770c750696 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -83,7 +83,10 @@ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ - "3: mov %0, %3\n" \ + "3: # return -EFAULT\n" \ + " mov %0, %3\n" \ + " # zero out dst ptr\n" \ + " mov %1, 0\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ @@ -101,7 +104,11 @@ "2: ;nop\n" \ " .section .fixup, \"ax\"\n" \ " .align 4\n" \ - "3: mov %0, %3\n" \ + "3: # return -EFAULT\n" \ + " mov %0, %3\n" \ + " # zero out dst ptr\n" \ + " mov %1, 0\n" \ + " mov %R1, 0\n" \ " j 2b\n" \ " .previous\n" \ " .section __ex_table, \"a\"\n" \ diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S index 79bfc81358c9..41043f204fa4 100644 --- a/arch/arc/mm/tlbex.S +++ b/arch/arc/mm/tlbex.S @@ -89,7 +89,7 @@ ex_saved_reg1: #ifdef CONFIG_SMP sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with GET_CPU_ID r0 ; get to per cpu scratch mem, - lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu + asl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu add r0, @ex_saved_reg1, r0 #else st r0, [@ex_saved_reg1] @@ -108,7 +108,7 @@ ex_saved_reg1: .macro TLBMISS_RESTORE_REGS #ifdef CONFIG_SMP GET_CPU_ID r0 ; get to per cpu scratch mem - lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide + asl r0, r0, L1_CACHE_SHIFT ; each is cache line wide add r0, @ex_saved_reg1, r0 ld_s r3, [r0,12] ld_s r2, [r0, 8] @@ -220,7 +220,7 @@ ex_saved_reg1: .macro CONV_PTE_TO_TLB and r3, r0, PTE_BITS_RWX ; r w x - lsl r2, r3, 3 ; r w x 0 0 0 + asl r2, r3, 3 ; r w x 0 0 0 and.f 0, r0, _PAGE_GLOBAL or.z r2, r2, r3 ; r w x r w x diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index d35692b63cfd..624bd3ea0adf 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -726,7 +726,7 @@ __armv7_mmu_cache_on: orrne r0, r0, #1 @ MMU enabled movne r1, #0xfffffffd @ domain 0 = client bic r6, r6, #1 << 31 @ 32-bit translation system - bic r6, r6, #3 << 0 @ use only ttbr0 + bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer mcrne p15, 0, r1, c3, c0, 0 @ load domain access control mcrne p15, 0, r6, c2, c0, 2 @ load ttb control diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts index bfa5edde179c..2c1e7f09205f 100644 --- a/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts @@ -113,7 +113,7 @@ partition@e0000 { label = "u-boot environment"; - reg = <0xe0000 0x100000>; + reg = <0xe0000 0x20000>; }; partition@100000 { diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index e57d7e5bf96a..932125a20877 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -872,9 +872,9 @@ struct sa1111_save_data { #ifdef CONFIG_PM -static int sa1111_suspend(struct platform_device *dev, pm_message_t state) +static int sa1111_suspend_noirq(struct device *dev) { - struct sa1111 *sachip = platform_get_drvdata(dev); + struct sa1111 *sachip = dev_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags; unsigned int val; @@ -937,9 +937,9 @@ static int 
sa1111_suspend(struct platform_device *dev, pm_message_t state) * restored by their respective drivers, and must be called * via LDM after this function. */ -static int sa1111_resume(struct platform_device *dev) +static int sa1111_resume_noirq(struct device *dev) { - struct sa1111 *sachip = platform_get_drvdata(dev); + struct sa1111 *sachip = dev_get_drvdata(dev); struct sa1111_save_data *save; unsigned long flags, id; void __iomem *base; @@ -955,7 +955,7 @@ static int sa1111_resume(struct platform_device *dev) id = sa1111_readl(sachip->base + SA1111_SKID); if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { __sa1111_remove(sachip); - platform_set_drvdata(dev, NULL); + dev_set_drvdata(dev, NULL); kfree(save); return 0; } @@ -1006,8 +1006,8 @@ static int sa1111_resume(struct platform_device *dev) } #else -#define sa1111_suspend NULL -#define sa1111_resume NULL +#define sa1111_suspend_noirq NULL +#define sa1111_resume_noirq NULL #endif static int sa1111_probe(struct platform_device *pdev) @@ -1041,6 +1041,11 @@ static int sa1111_remove(struct platform_device *pdev) return 0; } +static struct dev_pm_ops sa1111_pm_ops = { + .suspend_noirq = sa1111_suspend_noirq, + .resume_noirq = sa1111_resume_noirq, +}; + /* * Not sure if this should be on the system bus or not yet. * We really want some way to register a system device at @@ -1053,11 +1058,10 @@ static int sa1111_remove(struct platform_device *pdev) static struct platform_driver sa1111_device_driver = { .probe = sa1111_probe, .remove = sa1111_remove, - .suspend = sa1111_suspend, - .resume = sa1111_resume, .driver = { .name = "sa1111", .owner = THIS_MODULE, + .pm = &sa1111_pm_ops, }, }; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index c45b61a4b4a5..911dcd456123 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -117,7 +117,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) /* The ARM override for dma_max_pfn() */ static inline unsigned long dma_max_pfn(struct device *dev) { - return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask); + return dma_to_pfn(dev, *dev->dma_mask); } #define dma_max_pfn(dev) dma_max_pfn(dev) diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index eb821e7b80f9..8a1c2c8a1a64 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -279,8 +279,12 @@ asmlinkage long sys_oabi_epoll_wait(int epfd, mm_segment_t fs; long ret, err, i; - if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event))) + if (maxevents <= 0 || + maxevents > (INT_MAX/sizeof(*kbuf)) || + maxevents > (INT_MAX/sizeof(*events))) return -EINVAL; + if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents)) + return -EFAULT; kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL); if (!kbuf) return -ENOMEM; @@ -317,6 +321,8 @@ asmlinkage long sys_oabi_semtimedop(int semid, if (nsops < 1 || nsops > SEMOPM) return -EINVAL; + if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops)) + return -EFAULT; sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL); if (!sops) return -ENOMEM; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 499b236873d2..d75c6e3ed78c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -164,8 +164,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { int i; - kvm_free_stage2_pgd(kvm); - for (i = 0; i < KVM_MAX_VCPUS; ++i) { if (kvm->vcpus[i]) { kvm_arch_vcpu_free(kvm->vcpus[i]); diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 
c61da3e6a629..1c89e49a2467 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1257,6 +1257,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm) void kvm_arch_flush_shadow_all(struct kvm *kvm) { + kvm_free_stage2_pgd(kvm); } void kvm_arch_flush_shadow_memslot(struct kvm *kvm, diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 01efe130912e..9e5c29df91f5 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -315,22 +315,16 @@ static void __init armada_370_coherency_init(struct device_node *np) } /* - * This ioremap hook is used on Armada 375/38x to ensure that PCIe - * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This - * is needed as a workaround for a deadlock issue between the PCIe - * interface and the cache controller. + * This ioremap hook is used on Armada 375/38x to ensure that all MMIO + * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is + * needed for the HW I/O coherency mechanism to work properly without + * deadlock. */ static void __iomem * -armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, - unsigned int mtype, void *caller) +armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, + unsigned int mtype, void *caller) { - struct resource pcie_mem; - - mvebu_mbus_get_pcie_mem_aperture(&pcie_mem); - - if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end) - mtype = MT_UNCACHED; - + mtype = MT_UNCACHED; return __arm_ioremap_caller(phys_addr, size, mtype, caller); } @@ -339,7 +333,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np) struct device_node *cache_dn; coherency_cpu_base = of_iomap(np, 0); - arch_ioremap_caller = armada_pcie_wa_ioremap_caller; + arch_ioremap_caller = armada_wa_ioremap_caller; /* * We should switch the PL310 to I/O coherency mode only if diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c index a579b89ce9b7..b08605cc93ff 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c @@ -1460,6 +1460,7 @@ static void omap_hwmod_am43xx_rst(void) { RSTCTRL(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTCTRL_OFFSET); RSTCTRL(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTCTRL_OFFSET); + RSTST(am33xx_pruss_hwmod, AM43XX_RM_PER_RSTST_OFFSET); RSTST(am33xx_gfx_hwmod, AM43XX_RM_GFX_RSTST_OFFSET); } diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 1cd0cfdc03e0..3ec3ed945221 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -724,8 +724,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { * display serial interface controller */ +static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = { + .rev_offs = 0x0000, + .sysc_offs = 0x0010, + .syss_offs = 0x0014, + .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | + SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | + SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type1, +}; + static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = { .name = "dsi", + .sysc = &omap3xxx_dsi_sysc, }; static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = { diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h index 7785be984edd..4c60d3464a87 100644 --- a/arch/arm/mach-omap2/prcm43xx.h +++ b/arch/arm/mach-omap2/prcm43xx.h @@ 
-32,6 +32,7 @@ /* RM RSTST offsets */ #define AM43XX_RM_GFX_RSTST_OFFSET 0x0014 +#define AM43XX_RM_PER_RSTST_OFFSET 0x0014 #define AM43XX_RM_WKUP_RSTST_OFFSET 0x0014 /* CM instances */ diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index d4ea142c4edd..186e3f8729ef 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -31,6 +31,7 @@ #include <mach/hardware.h> #include <mach/irqs.h> +#include <mach/reset.h> #include "generic.h" @@ -91,6 +92,8 @@ static void sa1100_power_off(void) void sa11x0_restart(enum reboot_mode mode, const char *cmd) { + clear_reset_status(RESET_STATUS_ALL); + if (mode == REBOOT_SOFT) { /* Jump into ROM at address 0 */ soft_restart(0); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 832b295eb4a5..f0fbd97fdfba 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -39,6 +39,9 @@ #include "mm.h" +#define NORMAL 0 +#define COHERENT 1 + /* * The DMA API is built upon the notion of "buffer ownership". A buffer * is either exclusively owned by the CPU (and therefore may be accessed @@ -219,7 +222,7 @@ static u64 get_coherent_dma_mask(struct device *dev) return mask; } -static void __dma_clear_buffer(struct page *page, size_t size) +static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) { /* * Ensure that the allocated pages are zeroed, and that any data @@ -231,17 +234,21 @@ static void __dma_clear_buffer(struct page *page, size_t size) while (size > 0) { void *ptr = kmap_atomic(page); memset(ptr, 0, PAGE_SIZE); - dmac_flush_range(ptr, ptr + PAGE_SIZE); + if (coherent_flag != COHERENT) + dmac_flush_range(ptr, ptr + PAGE_SIZE); kunmap_atomic(ptr); page++; size -= PAGE_SIZE; } - outer_flush_range(base, end); + if (coherent_flag != COHERENT) + outer_flush_range(base, end); } else { void *ptr = page_address(page); memset(ptr, 0, size); - dmac_flush_range(ptr, ptr + size); - outer_flush_range(__pa(ptr), __pa(ptr) + size); + if (coherent_flag != COHERENT) { + dmac_flush_range(ptr, ptr + size); + outer_flush_range(__pa(ptr), __pa(ptr) + size); + } } } @@ -249,7 +256,8 @@ static void __dma_clear_buffer(struct page *page, size_t size) * Allocate a DMA buffer for 'dev' of size 'size' using the * specified gfp mask. Note that 'size' must be page aligned. */ -static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) +static struct page *__dma_alloc_buffer(struct device *dev, size_t size, + gfp_t gfp, int coherent_flag) { unsigned long order = get_order(size); struct page *page, *p, *e; @@ -265,7 +273,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) __free_page(p); - __dma_clear_buffer(page, size); + __dma_clear_buffer(page, size, coherent_flag); return page; } @@ -287,7 +295,7 @@ static void __dma_free_buffer(struct page *page, size_t size) static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, - const void *caller); + const void *caller, int coherent_flag); static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, pgprot_t prot, struct page **ret_page, @@ -389,10 +397,13 @@ static int __init atomic_pool_init(void) pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); if (!pages) goto no_pages; - + /* + * The atomic pool is only used for non-coherent allocations + * so we must pass NORMAL for coherent_flag. 
+ */ if (dev_get_cma_area(NULL)) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, - atomic_pool_init); + atomic_pool_init, NORMAL); else ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page, atomic_pool_init); @@ -505,7 +516,11 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, { struct page *page; void *ptr; - page = __dma_alloc_buffer(dev, size, gfp); + /* + * __alloc_remap_buffer is only called when the device is + * non-coherent + */ + page = __dma_alloc_buffer(dev, size, gfp, NORMAL); if (!page) return NULL; @@ -597,7 +612,7 @@ static int __free_from_pool(void *start, size_t size) static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, - const void *caller) + const void *caller, int coherent_flag) { unsigned long order = get_order(size); size_t count = size >> PAGE_SHIFT; @@ -608,7 +623,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size, if (!page) return NULL; - __dma_clear_buffer(page, size); + __dma_clear_buffer(page, size, coherent_flag); if (PageHighMem(page)) { ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); @@ -651,7 +666,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) #define __get_dma_pgprot(attrs, prot) __pgprot(0) #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL #define __alloc_from_pool(size, ret_page) NULL -#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL +#define __alloc_from_contiguous(dev, size, prot, ret, c, coherent_flag) NULL #define __free_from_pool(cpu_addr, size) 0 #define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0) #define __dma_free_remap(cpu_addr, size) do { } while (0) @@ -662,7 +677,8 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, struct page **ret_page) { struct page *page; - page = __dma_alloc_buffer(dev, size, gfp); + /* __alloc_simple_buffer is only called when the device is coherent */ + page = __dma_alloc_buffer(dev, size, gfp, COHERENT); if (!page) return NULL; @@ -713,7 +729,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, else if (!dev_get_cma_area(dev)) addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); else - addr = __alloc_from_contiguous(dev, size, prot, &page, caller); + addr = __alloc_from_contiguous(dev, size, prot, &page, caller, + NORMAL); if (addr) *handle = pfn_to_dma(dev, page_to_pfn(page)); @@ -1172,7 +1189,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping, } static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, - gfp_t gfp, struct dma_attrs *attrs) + gfp_t gfp, struct dma_attrs *attrs, + int coherent_flag) { struct page **pages; int count = size >> PAGE_SHIFT; @@ -1195,7 +1213,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, if (!page) goto error; - __dma_clear_buffer(page, size); + __dma_clear_buffer(page, size, coherent_flag); for (i = 0; i < count; i++) pages[i] = page + i; @@ -1224,7 +1242,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, pages[i + j] = pages[i] + j; } - __dma_clear_buffer(pages[i], PAGE_SIZE << order); + __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag); i += 1 << order; count -= 1 << order; } @@ -1427,7 +1445,8 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, */ gfp &= ~(__GFP_COMP); - pages = __iommu_alloc_buffer(dev, size, gfp, attrs); + /* For now always consider we 
are in a non-coherent case */ + pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL); if (!pages) return NULL; diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index 5f63a791b2fb..31d014f01574 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -205,7 +205,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } - if (nbytes) { + if (walk.nbytes % AES_BLOCK_SIZE) { u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; u8 __aligned(8) tail[AES_BLOCK_SIZE]; diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 2009daf52bf1..922ff638669e 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -137,6 +137,7 @@ extern unsigned long randomize_et_dyn(unsigned long base); #define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT); +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #define ARCH_DLINFO \ do { \ NEW_AUX_ENT(AT_SYSINFO_EHDR, \ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index c45b7b1b7197..3c0bb9b303e8 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -231,4 +231,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() +/* + * Accesses appearing in program order before a spin_lock() operation + * can be reordered with accesses inside the critical section, by virtue + * of arch_spin_lock being constructed using acquire semantics. + * + * In cases where this is problematic (e.g. try_to_wake_up), an + * smp_mb__before_spinlock() can restore the required ordering. + */ +#define smp_mb__before_spinlock() smp_mb() + #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm64/include/uapi/asm/auxvec.h b/arch/arm64/include/uapi/asm/auxvec.h index 22d6d8885854..4cf0c17787a8 100644 --- a/arch/arm64/include/uapi/asm/auxvec.h +++ b/arch/arm64/include/uapi/asm/auxvec.h @@ -19,4 +19,6 @@ /* vDSO location */ #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ + #endif diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index fe5b94078d82..a2db6f219bbe 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -159,7 +159,6 @@ static int debug_monitors_init(void) /* Clear the OS lock. */ on_each_cpu(clear_os_lock, NULL, 1); isb(); - local_dbg_enable(); /* Register hotplug handler. */ __register_cpu_notifier(&os_lock_nb); diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 6166745ecb93..be0ff66e4894 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -316,22 +316,31 @@ out: } static int -validate_event(struct pmu_hw_events *hw_events, - struct perf_event *event) +validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, + struct perf_event *event) { - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct arm_pmu *armpmu; struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; if (is_software_event(event)) return 1; + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). 
+ */ + if (event->pmu != pmu) + return 0; + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; + armpmu = to_arm_pmu(event->pmu); return armpmu->get_event_idx(hw_events, &fake_event) >= 0; } @@ -349,15 +358,15 @@ validate_group(struct perf_event *event) memset(fake_used_mask, 0, sizeof(fake_used_mask)); fake_pmu.used_mask = fake_used_mask; - if (!validate_event(&fake_pmu, leader)) + if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(&fake_pmu, sibling)) + if (!validate_event(event->pmu, &fake_pmu, sibling)) return -EINVAL; } - if (!validate_event(&fake_pmu, event)) + if (!validate_event(event->pmu, &fake_pmu, event)) return -EINVAL; return 0; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 9df272c0d240..c684be1e9942 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -174,7 +174,6 @@ asmlinkage void secondary_start_kernel(void) set_cpu_online(cpu, true); complete(&cpu_running); - local_dbg_enable(); local_irq_enable(); local_async_enable(); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index dc34442ff728..10e94cbc3192 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -189,6 +189,8 @@ ENTRY(__cpu_setup) msr cpacr_el1, x0 // Enable FP/ASIMD mov x0, #1 << 12 // Reset mdscr_el1 and disable msr mdscr_el1, x0 // access to the DCC from EL0 + isb // Unmask debug exceptions now, + enable_dbg // since this is per-cpu reset_pmuserenr_el0 x0 // Disable PMU access from EL0 /* * Memory region attributes for LPAE: diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 245b2ee213c9..a0a9b8c31041 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from, extern __kernel_size_t copy_to_user(void __user *to, const void *from, __kernel_size_t n); -extern __kernel_size_t copy_from_user(void *to, const void __user *from, +extern __kernel_size_t ___copy_from_user(void *to, const void __user *from, __kernel_size_t n); static inline __kernel_size_t __copy_to_user(void __user *to, const void *from, @@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to, { return __copy_user(to, (const void __force *)from, n); } +static inline __kernel_size_t copy_from_user(void *to, + const void __user *from, + __kernel_size_t n) +{ + size_t res = ___copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; +} #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c index d93ead02daed..7c6cf14f0985 100644 --- a/arch/avr32/kernel/avr32_ksyms.c +++ b/arch/avr32/kernel/avr32_ksyms.c @@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page); /* * Userspace access stuff. 
*/ -EXPORT_SYMBOL(copy_from_user); +EXPORT_SYMBOL(___copy_from_user); EXPORT_SYMBOL(copy_to_user); EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(strncpy_from_user); diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S index ea59c04b07de..075373471da1 100644 --- a/arch/avr32/lib/copy_user.S +++ b/arch/avr32/lib/copy_user.S @@ -23,13 +23,13 @@ */ .text .align 1 - .global copy_from_user - .type copy_from_user, @function -copy_from_user: + .global ___copy_from_user + .type ___copy_from_user, @function +___copy_from_user: branch_if_kernel r8, __copy_user ret_if_privileged r8, r11, r10, r10 rjmp __copy_user - .size copy_from_user, . - copy_from_user + .size ___copy_from_user, . - ___copy_from_user .global copy_to_user .type copy_to_user, @function diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index 903c7d81d0d5..a8e208eaf2a4 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c @@ -435,7 +435,7 @@ void __init at32_init_pio(struct platform_device *pdev) struct resource *regs; struct pio_device *pio; - if (pdev->id > MAX_NR_PIO_DEVICES) { + if (pdev->id >= MAX_NR_PIO_DEVICES) { dev_err(&pdev->dev, "only %d PIO devices supported\n", MAX_NR_PIO_DEVICES); return; diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 57701c3b8a59..a992a788409c 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -177,11 +177,12 @@ static inline int bad_user_access_length(void) static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - if (access_ok(VERIFY_READ, from, n)) + if (likely(access_ok(VERIFY_READ, from, n))) { memcpy(to, (const void __force *)from, n); - else - return n; - return 0; + return 0; + } + memset(to, 0, n); + return n; } static inline unsigned long __must_check diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index 914540801c5e..93bfa8acc38b 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -176,30 +176,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); extern unsigned long __do_clear_user(void __user *to, unsigned long n); -static inline unsigned long -__generic_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_user(to,from,n); - return n; -} - -static inline unsigned long -__generic_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (access_ok(VERIFY_READ, from, n)) - return __copy_user_zeroing(to,from,n); - return n; -} - -static inline unsigned long -__generic_clear_user(void __user *to, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - return __do_clear_user(to,n); - return n; -} - static inline long __strncpy_from_user(char *dst, const char __user *src, long count) { @@ -262,7 +238,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) else if (n == 24) __asm_copy_from_user_24(to, from, ret); else - ret = __generic_copy_from_user(to, from, n); + ret = __copy_user_zeroing(to, from, n); return ret; } @@ -312,7 +288,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) else if (n == 24) __asm_copy_to_user_24(to, from, ret); else - ret = __generic_copy_to_user(to, from, n); + ret = __copy_user(to, from, n); return ret; } @@ -344,26 +320,43 @@ 
__constant_clear_user(void __user *to, unsigned long n) else if (n == 24) __asm_clear_24(to, ret); else - ret = __generic_clear_user(to, n); + ret = __do_clear_user(to, n); return ret; } -#define clear_user(to, n) \ -(__builtin_constant_p(n) ? \ - __constant_clear_user(to, n) : \ - __generic_clear_user(to, n)) +static inline size_t clear_user(void __user *to, size_t n) +{ + if (unlikely(!access_ok(VERIFY_WRITE, to, n))) + return n; + if (__builtin_constant_p(n)) + return __constant_clear_user(to, n); + else + return __do_clear_user(to, n); +} -#define copy_from_user(to, from, n) \ -(__builtin_constant_p(n) ? \ - __constant_copy_from_user(to, from, n) : \ - __generic_copy_from_user(to, from, n)) +static inline size_t copy_from_user(void *to, const void __user *from, size_t n) +{ + if (unlikely(!access_ok(VERIFY_READ, from, n))) { + memset(to, 0, n); + return n; + } + if (__builtin_constant_p(n)) + return __constant_copy_from_user(to, from, n); + else + return __copy_user_zeroing(to, from, n); +} -#define copy_to_user(to, from, n) \ -(__builtin_constant_p(n) ? \ - __constant_copy_to_user(to, from, n) : \ - __generic_copy_to_user(to, from, n)) +static inline size_t copy_to_user(void __user *to, const void *from, size_t n) +{ + if (unlikely(!access_ok(VERIFY_WRITE, to, n))) + return n; + if (__builtin_constant_p(n)) + return __constant_copy_to_user(to, from, n); + else + return __copy_user(to, from, n); +} /* We let the __ versions of copy_from/to_user inline, because they're often * used in fast paths and have only a small space overhead. diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 3ac9a59d65d4..87d9e34c5df8 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -263,19 +263,25 @@ do { \ extern long __memset_user(void *dst, unsigned long count); extern long __memcpy_user(void *dst, const void *src, unsigned long count); -#define clear_user(dst,count) __memset_user(____force(dst), (count)) +#define __clear_user(dst,count) __memset_user(____force(dst), (count)) #define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n)) #define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n)) #else -#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) +#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) #define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0) #define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0) #endif -#define __clear_user clear_user +static inline unsigned long __must_check +clear_user(void __user *to, unsigned long n) +{ + if (likely(__access_ok(to, n))) + n = __clear_user(to, n); + return n; +} static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index e4127e4d6a5b..25fc9049db8a 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -102,7 +102,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, { long res = __strnlen_user(src, n); - /* return from strnlen can't be zero -- that would be rubbish. 
*/ + if (unlikely(!res)) + return -EFAULT; if (res > n) { copy_from_user(dst, src, n); diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 449c8c0fa2bd..810926c56e31 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -262,17 +262,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) __cu_len; \ }) -#define copy_from_user(to, from, n) \ -({ \ - void *__cu_to = (to); \ - const void __user *__cu_from = (from); \ - long __cu_len = (n); \ - \ - __chk_user_ptr(__cu_from); \ - if (__access_ok(__cu_from, __cu_len, get_fs())) \ - __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ - __cu_len; \ -}) +static inline unsigned long +copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (likely(__access_ok(from, n, get_fs()))) + n = __copy_user((__force void __user *) to, from, n); + else + memset(to, 0, n); + return n; +} #define __copy_in_user(to, from, size) __copy_user((to), (from), (size)) diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index 84fe7ba53035..c393e8f57cf7 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -215,7 +215,7 @@ extern int fixup_exception(struct pt_regs *regs); #define __get_user_nocheck(x,ptr,size) \ ({ \ long __gu_err = 0; \ - unsigned long __gu_val; \ + unsigned long __gu_val = 0; \ might_fault(); \ __get_user_size(__gu_val,(ptr),(size),__gu_err); \ (x) = (__typeof__(*(ptr)))__gu_val; \ diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h index 0154e2807ebb..2369ad394876 100644 --- a/arch/metag/include/asm/cmpxchg_lnkget.h +++ b/arch/metag/include/asm/cmpxchg_lnkget.h @@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, " DCACHE [%2], %0\n" #endif "2:\n" - : "=&d" (temp), "=&da" (retval) + : "=&d" (temp), "=&d" (retval) : "da" (m), "bd" (old), "da" (new) : "cc" ); diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 0748b0a97986..7841f2290385 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -199,8 +199,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to, static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - if (access_ok(VERIFY_READ, from, n)) + if (likely(access_ok(VERIFY_READ, from, n))) return __copy_user_zeroing(to, from, n); + memset(to, 0, n); return n; } diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 0aa005703a0b..1858887105ba 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -226,7 +226,7 @@ extern long __user_bad(void); #define __get_user(x, ptr) \ ({ \ - unsigned long __gu_val; \ + unsigned long __gu_val = 0; \ /*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \ long __gu_err; \ switch (sizeof(*(ptr))) { \ @@ -371,10 +371,13 @@ extern long __user_bad(void); static inline long copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long res = n; might_fault(); - if (access_ok(VERIFY_READ, from, n)) - return __copy_from_user(to, from, n); - return n; + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } #define __copy_to_user(to, from, n) \ diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h 
b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h index 2f82bfa3a773..c9f5769dfc8f 100644 --- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h @@ -11,11 +11,13 @@ #define CP0_EBASE $15, 1 .macro kernel_entry_setup +#ifdef CONFIG_SMP mfc0 t0, CP0_EBASE andi t0, t0, 0x3ff # CPUNum beqz t0, 1f # CPUs other than zero goto smp_bootstrap j smp_bootstrap +#endif /* CONFIG_SMP */ 1: .endm diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 148a4766bf4d..f94d39f9e21d 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -572,7 +572,11 @@ static inline struct page *pmd_page(pmd_t pmd) static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) { - pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); + pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK +#ifdef _PAGE_HUGE + | _PAGE_HUGE +#endif + )) | pgprot_val(newprot); return pmd; } diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index ed038d7c3410..7b74668d3e20 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -263,7 +263,11 @@ static inline void protected_writeback_dcache_line(unsigned long addr) static inline void protected_writeback_scache_line(unsigned long addr) { +#ifdef CONFIG_EVA + protected_cachee_op(Hit_Writeback_Inv_SD, addr); +#else protected_cache_op(Hit_Writeback_Inv_SD, addr); +#endif } /* diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 16e0ea6b99d8..cb4a3ee31983 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/thread_info.h> +#include <linux/string.h> #include <asm/asm-eva.h> /* @@ -1139,6 +1140,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_len = __invoke_copy_from_user(__cu_to, \ __cu_from, \ __cu_len); \ + } else { \ + memset(__cu_to, 0, __cu_len); \ } \ } \ __cu_len; \ diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 47dd5f9016c1..28a2cb2c6c9b 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -353,7 +353,7 @@ EXPORT(sysn32_call_table) PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key PTR sys_request_key - PTR sys_keyctl /* 6245 */ + PTR compat_sys_keyctl /* 6245 */ PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 7f5feb25ae04..3b726b9229b4 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -491,7 +491,7 @@ EXPORT(sys32_call_table) PTR sys_ni_syscall /* available, was setaltroot */ PTR sys_add_key /* 4280 */ PTR sys_request_key - PTR sys_keyctl + PTR compat_sys_keyctl PTR sys_set_thread_area PTR sys_inotify_init PTR sys_inotify_add_watch /* 4285 */ diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index 2c81c2c9e8dc..72aad2a9a6b4 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c @@ -1211,7 +1211,7 @@ int __init kvm_mips_init(void) */ kvm_mips_gfn_to_pfn = gfn_to_pfn; kvm_mips_release_pfn_clean = kvm_release_pfn_clean; - kvm_mips_is_error_pfn = is_error_pfn; + kvm_mips_is_error_pfn = is_error_noslot_pfn; pr_info("KVM/MIPS Initialized\n"); return 0; diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c index 1983678883c9..bac2bba41f38 100644 
--- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/kvm_mips_emul.c @@ -817,6 +817,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) return er; } +/** + * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. + * @vcpu: VCPU with changed mappings. + * @tlb: TLB entry being removed. + * + * This is called to indicate a single change in guest MMU mappings, so that we + * can arrange TLB flushes on this and other CPUs. + */ +static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb) +{ + int cpu, i; + bool user; + + /* No need to flush for entries which are already invalid */ + if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V)) + return; + /* User address space doesn't need flushing for KSeg2/3 changes */ + user = tlb->tlb_hi < KVM_GUEST_KSEG0; + + preempt_disable(); + + /* + * Probe the shadow host TLB for the entry being overwritten, if one + * matches, invalidate it + */ + kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); + + /* Invalidate the whole ASID on other CPUs */ + cpu = smp_processor_id(); + for_each_possible_cpu(i) { + if (i == cpu) + continue; + if (user) + vcpu->arch.guest_user_asid[i] = 0; + vcpu->arch.guest_kernel_asid[i] = 0; + } + + preempt_enable(); +} + /* Write Guest TLB Entry @ Index */ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) { @@ -838,10 +879,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) } tlb = &vcpu->arch.guest_tlb[index]; -#if 1 - /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); -#endif + + kvm_mips_invalidate_guest_tlb(vcpu, tlb); tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); @@ -880,10 +919,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) tlb = &vcpu->arch.guest_tlb[index]; -#if 1 - /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ - kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); -#endif + kvm_mips_invalidate_guest_tlb(vcpu, tlb); tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); @@ -926,6 +962,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, int32_t rt, rd, copz, sel, co_bit, op; uint32_t pc = vcpu->arch.pc; unsigned long curr_pc; + int cpu, i; /* * Update PC and hold onto current PC in case there is @@ -1037,8 +1074,16 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, ASID_MASK, vcpu->arch.gprs[rt] & ASID_MASK); + preempt_disable(); /* Blow away the shadow host TLBs */ kvm_mips_flush_host_tlb(1); + cpu = smp_processor_id(); + for_each_possible_cpu(i) + if (i != cpu) { + vcpu->arch.guest_user_asid[i] = 0; + vcpu->arch.guest_kernel_asid[i] = 0; + } + preempt_enable(); } kvm_write_c0_guest_entryhi(cop0, vcpu->arch.gprs[rt]); @@ -1481,9 +1526,13 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, preempt_disable(); if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { - - if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { - kvm_mips_handle_kseg0_tlb_fault(va, vcpu); + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 && + kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) { + kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n", + __func__, va, vcpu, read_c0_entryhi()); + er = EMULATE_FAIL; + preempt_enable(); + goto done; } } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { @@ -1516,11 +1565,19 @@ 
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, run, vcpu); preempt_enable(); goto dont_update_pc; - } else { - /* We fault an entry from the guest tlb to the shadow host TLB */ - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, - NULL, - NULL); + } + /* + * We fault an entry from the guest tlb to the + * shadow host TLB + */ + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, + NULL, NULL)) { + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, va, index, vcpu, + read_c0_entryhi()); + er = EMULATE_FAIL; + preempt_enable(); + goto done; } } } else { @@ -2335,8 +2392,13 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, - NULL); + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, + NULL, NULL)) { + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, va, index, vcpu, + read_c0_entryhi()); + er = EMULATE_FAIL; + } } } diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c index 8a5a700ad8de..1fde7236382d 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/kvm_tlb.c @@ -155,7 +155,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) srcu_idx = srcu_read_lock(&kvm->srcu); pfn = kvm_mips_gfn_to_pfn(kvm, gfn); - if (kvm_mips_is_error_pfn(pfn)) { + if (is_error_noslot_pfn(pfn)) { kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); err = -EFAULT; goto out; @@ -278,7 +278,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, } gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); - if (gfn >= kvm->arch.guest_pmap_npages) { + if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, gfn, badvaddr); kvm_mips_dump_host_tlbs(); @@ -361,21 +361,39 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; struct kvm *kvm = vcpu->kvm; pfn_t pfn0, pfn1; + gfn_t gfn0, gfn1; + long tlb_lo[2]; + + + tlb_lo[0] = tlb->tlb_lo0; + tlb_lo[1] = tlb->tlb_lo1; + + /* + * The commpage address must not be mapped to anything else if the guest + * TLB contains entries nearby, or commpage accesses will break. 
+ */ + if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) & + VPN2_MASK & (PAGE_MASK << 1))) + tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; + + gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT; + gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT; + if (gfn0 >= kvm->arch.guest_pmap_npages || + gfn1 >= kvm->arch.guest_pmap_npages) { + kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n", + __func__, gfn0, gfn1, tlb->tlb_hi); + kvm_mips_dump_guest_tlbs(vcpu); + return -1; + } + if (kvm_mips_map_page(kvm, gfn0) < 0) + return -1; - if ((tlb->tlb_hi & VPN2_MASK) == 0) { - pfn0 = 0; - pfn1 = 0; - } else { - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) - return -1; - - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) - return -1; + if (kvm_mips_map_page(kvm, gfn1) < 0) + return -1; - pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; - pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; - } + pfn0 = kvm->arch.guest_pmap[gfn0]; + pfn1 = kvm->arch.guest_pmap[gfn1]; if (hpa0) *hpa0 = pfn0 << PAGE_SHIFT; @@ -387,9 +405,9 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | - (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); + (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V); entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | - (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); + (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V); kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, tlb->tlb_lo0, tlb->tlb_lo1); @@ -783,10 +801,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) local_irq_restore(flags); return KVM_INVALID_INST; } - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, - &vcpu->arch. 
- guest_tlb[index], - NULL, NULL); + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, + &vcpu->arch.guest_tlb[index], + NULL, NULL)) { + kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n", + __func__, opc, index, vcpu, + read_c0_entryhi()); + kvm_mips_dump_guest_tlbs(vcpu); + local_irq_restore(flags); + return KVM_INVALID_INST; + } inst = *(opc); } local_irq_restore(flags); diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c index 9ac1efcfbcc7..78f900c59276 100644 --- a/arch/mips/mm/sc-rm7k.c +++ b/arch/mips/mm/sc-rm7k.c @@ -161,7 +161,7 @@ static void rm7k_tc_disable(void) local_irq_save(flags); blast_rm7k_tcache(); clear_c0_config(RM7K_CONF_TE); - local_irq_save(flags); + local_irq_restore(flags); } static void rm7k_sc_disable(void) diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c index db7c9e5826a6..89251e595d8a 100644 --- a/arch/mips/mti-malta/malta-setup.c +++ b/arch/mips/mti-malta/malta-setup.c @@ -36,6 +36,9 @@ #include <linux/console.h> #endif +#define ROCIT_CONFIG_GEN0 0x1f403000 +#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7) + extern void malta_be_init(void); extern int malta_be_handler(struct pt_regs *regs, int is_fixup); @@ -104,6 +107,8 @@ static void __init fd_activate(void) static int __init plat_enable_iocoherency(void) { int supported = 0; + u32 cfg; + if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; @@ -126,7 +131,8 @@ static int __init plat_enable_iocoherency(void) } else if (mips_cm_numiocu() != 0) { /* Nothing special needs to be done to enable coherency */ pr_info("CMP IOCU detected\n"); - if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { + cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); + if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) { pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); return 0; } diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 32751a0bba58..0de0bf0a638c 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -1365,7 +1365,7 @@ void bpf_jit_compile(struct sk_filter *fp) memset(&ctx, 0, sizeof(ctx)); - ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL); + ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); if (ctx.offsets == NULL) return; diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 537278746a15..4af43d9ba495 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -181,6 +181,7 @@ struct __large_struct { unsigned long buf[100]; }; "2:\n" \ " .section .fixup,\"ax\"\n" \ "3:\n\t" \ + " mov 0,%1\n" \ " mov %3,%0\n" \ " jmp 2b\n" \ " .previous\n" \ diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c index 7826e6c364e7..ce8899e5e171 100644 --- a/arch/mn10300/lib/usercopy.c +++ b/arch/mn10300/lib/usercopy.c @@ -9,7 +9,7 @@ * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. 
*/ -#include <asm/uaccess.h> +#include <linux/uaccess.h> unsigned long __generic_copy_to_user(void *to, const void *from, unsigned long n) @@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) __copy_user_zeroing(to, from, n); + else + memset(to, 0, n); return n; } diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index ab2e7a198a4c..d441480a4af4 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size); static inline unsigned long copy_from_user(void *to, const void *from, unsigned long n) { - unsigned long over; - - if (access_ok(VERIFY_READ, from, n)) - return __copy_tofrom_user(to, from, n); - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + n - TASK_SIZE; - return __copy_tofrom_user(to, from, n - over) + over; - } - return n; + unsigned long res = n; + + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_tofrom_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } static inline unsigned long copy_to_user(void *to, const void *from, unsigned long n) { - unsigned long over; - - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_tofrom_user(to, from, n); - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + n - TASK_SIZE; - return __copy_tofrom_user(to, from, n - over) + over; - } + if (likely(access_ok(VERIFY_WRITE, to, n))) + n = __copy_tofrom_user(to, from, n); return n; } @@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size); static inline __must_check unsigned long clear_user(void *addr, unsigned long size) { - - if (access_ok(VERIFY_WRITE, addr, size)) - return __clear_user(addr, size); - if ((unsigned long)addr < TASK_SIZE) { - unsigned long over = (unsigned long)addr + size - TASK_SIZE; - return __clear_user(addr, size - over) + over; - } + if (likely(access_ok(VERIFY_WRITE, addr, size))) + size = __clear_user(addr, size); return size; } diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index de2db1910755..9392458b6e86 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -9,6 +9,8 @@ #include <asm/errno.h> #include <asm-generic/uaccess-unaligned.h> +#include <linux/string.h> + #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -248,13 +250,14 @@ static inline unsigned long __must_check copy_from_user(void *to, unsigned long n) { int sz = __compiletime_object_size(to); - int ret = -EFAULT; + unsigned long ret = n; if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) ret = __copy_from_user(to, from, n); else copy_from_user_overflow(); - + if (unlikely(ret)) + memset(to + (n - ret), 0, ret); return ret; } diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h index c0ae62520d15..274d5bc6ecce 100644 --- a/arch/parisc/include/uapi/asm/errno.h +++ b/arch/parisc/include/uapi/asm/errno.h @@ -97,10 +97,10 @@ #define ENOTCONN 235 /* Transport endpoint is not connected */ #define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */ #define ETOOMANYREFS 237 /* Too many references: cannot splice */ -#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ #define ETIMEDOUT 238 /* Connection timed out */ #define ECONNREFUSED 239 /* Connection refused */ -#define EREMOTERELEASE 240 /* Remote peer released connection */ +#define 
EREFUSED ECONNREFUSED /* for HP's NFS apparently */ +#define EREMOTERELEASE 240 /* Remote peer released connection */ #define EHOSTDOWN 241 /* Host is down */ #define EHOSTUNREACH 242 /* No route to host */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 4ca90a39d6d0..1c92606ca792 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -246,6 +246,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, /* Allocate & free a PCI host bridge structure */ extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); extern void pcibios_free_controller(struct pci_controller *phb); +extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge); #ifdef CONFIG_PCI extern int pcibios_vaddr_is_ioport(void __iomem *address); diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 9485b43a7c00..46c486599645 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -323,30 +323,17 @@ extern unsigned long __copy_tofrom_user(void __user *to, static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - unsigned long over; - - if (access_ok(VERIFY_READ, from, n)) + if (likely(access_ok(VERIFY_READ, from, n))) return __copy_tofrom_user((__force void __user *)to, from, n); - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + n - TASK_SIZE; - return __copy_tofrom_user((__force void __user *)to, from, - n - over) + over; - } + memset(to, 0, n); return n; } static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { - unsigned long over; - if (access_ok(VERIFY_WRITE, to, n)) return __copy_tofrom_user(to, (__force void __user *)from, n); - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + n - TASK_SIZE; - return __copy_tofrom_user(to, (__force void __user *)from, - n - over) + over; - } return n; } @@ -437,10 +424,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size) might_fault(); if (likely(access_ok(VERIFY_WRITE, addr, size))) return __clear_user(addr, size); - if ((unsigned long)addr < TASK_SIZE) { - unsigned long over = (unsigned long)addr + size - TASK_SIZE; - return __clear_user(addr, size - over) + over; - } return size; } diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 9ff25dcbfb87..7d8a755f5558 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -153,29 +153,14 @@ machine_check_pSeries_1: */ HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) /* save r13 */ -#ifdef CONFIG_PPC_P7_NAP -BEGIN_FTR_SECTION - /* Running native on arch 2.06 or later, check if we are - * waking up from nap. We only handle no state loss and - * supervisor state loss. We do -not- handle hypervisor - * state loss at this time. + /* + * Running native on arch 2.06 or later, we may wakeup from winkle + * inside machine check. If yes, then last bit of HSPGR0 would be set + * to 1. Hence clear it unconditionally. */ - mfspr r13,SPRN_SRR1 - rlwinm. r13,r13,47-31,30,31 - OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) - beq 9f - - mfspr r13,SPRN_SRR1 - rlwinm. r13,r13,47-31,30,31 - /* waking up from powersave (nap) state */ - cmpwi cr1,r13,2 - /* Total loss of HV state is fatal. let's just stay stuck here */ - OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) - bgt cr1,. 
-9: - OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) -#endif /* CONFIG_PPC_P7_NAP */ + GET_PACA(r13) + clrrdi r13,r13,1 + SET_PACA(r13) EXCEPTION_PROLOG_0(PACA_EXMC) BEGIN_FTR_SECTION b machine_check_pSeries_early @@ -1418,17 +1403,17 @@ machine_check_handle_early: * Check if thread was in power saving mode. We come here when any * of the following is true: * a. thread wasn't in power saving mode - * b. thread was in power saving mode with no state loss or - * supervisor state loss + * b. thread was in power saving mode with no state loss, + * supervisor state loss or hypervisor state loss. * - * Go back to nap again if (b) is true. + * Go back to nap/sleep/winkle mode again if (b) is true. */ rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ beq 4f /* No, it wasn;t */ /* Thread was in power saving mode. Go back to nap again. */ cmpwi r11,2 - bne 3f - /* Supervisor state loss */ + blt 3f + /* Supervisor/Hypervisor state loss */ li r0,1 stb r0,PACA_NAPSTATELOST(r13) 3: bl machine_check_queue_event diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index a7fd4cb78b78..1c75f9777ce3 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -92,7 +92,8 @@ void save_mce_event(struct pt_regs *regs, long handled, mce->in_use = 1; mce->initiator = MCE_INITIATOR_CPU; - if (handled) + /* Mark it recovered if we have handled it and MSR(RI=1). */ + if (handled && (regs->msr & MSR_RI)) mce->disposition = MCE_DISPOSITION_RECOVERED; else mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index b49c72fd7f16..d4f779654f1d 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -102,6 +102,42 @@ void pcibios_free_controller(struct pci_controller *phb) } /* + * This function is used to call pcibios_free_controller() + * in a deferred manner: a callback from the PCI subsystem. + * + * _*DO NOT*_ call pcibios_free_controller() explicitly if + * this is used (or it may access an invalid *phb pointer). + * + * The callback occurs when all references to the root bus + * are dropped (e.g., child buses/devices and their users). + * + * It's called as .release_fn() of 'struct pci_host_bridge' + * which is associated with the 'struct pci_controller.bus' + * (root bus) - it expects .release_data to hold a pointer + * to 'struct pci_controller'. + * + * In order to use it, register .release_fn()/release_data + * like this: + * + * pci_set_host_bridge_release(bridge, + * pcibios_free_controller_deferred + * (void *) phb); + * + * e.g. in the pcibios_root_bridge_prepare() callback from + * pci_create_root_bus(). + */ +void pcibios_free_controller_deferred(struct pci_host_bridge *bridge) +{ + struct pci_controller *phb = (struct pci_controller *) + bridge->release_data; + + pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic); + + pcibios_free_controller(phb); +} +EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred); + +/* * The function is used to return the minimal alignment * for memory or I/O windows of the associated P2P bridge. 
* By default, 4KiB alignment for I/O windows and 1MiB for diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 1a85d8f96739..69be680f7184 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -685,7 +685,7 @@ unsigned char ibm_architecture_vec[] = { OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ /* option vector 5: PAPR/OF options */ - 19 - 2, /* length */ + 22 - 2, /* length */ 0, /* don't ignore, don't halt */ OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) | OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) | @@ -716,8 +716,11 @@ unsigned char ibm_architecture_vec[] = { 0, 0, OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | - OV5_FEAT(OV5_PFO_HW_842), - OV5_FEAT(OV5_SUB_PROCESSORS), + OV5_FEAT(OV5_PFO_HW_842), /* Byte 17 */ + 0, /* Byte 18 */ + 0, /* Byte 19 */ + 0, /* Byte 20 */ + OV5_FEAT(OV5_SUB_PROCESSORS), /* Byte 21 */ /* option vector 6: IBM PAPR hints */ 4 - 2, /* length */ 0, diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index f81d900a562a..08225a352477 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -338,8 +338,6 @@ _GLOBAL(__tm_recheckpoint) */ subi r7, r7, STACK_FRAME_OVERHEAD - SET_SCRATCH0(r1) - mfmsr r6 /* R4 = original MSR to indicate whether thread used FP/Vector etc. */ @@ -468,6 +466,7 @@ restore_gprs: * until we turn MSR RI back on. */ + SET_SCRATCH0(r1) ld r5, -8(r1) ld r1, -16(r1) diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c index 788a1977b9a5..2f420f9e4c54 100644 --- a/arch/powerpc/platforms/powernv/opal-dump.c +++ b/arch/powerpc/platforms/powernv/opal-dump.c @@ -365,6 +365,7 @@ static int process_dump(void) uint32_t dump_id, dump_size, dump_type; struct dump_obj *dump; char name[22]; + struct kobject *kobj; rc = dump_read_info(&dump_id, &dump_size, &dump_type); if (rc != OPAL_SUCCESS) @@ -376,8 +377,12 @@ static int process_dump(void) * that gracefully and not create two conflicting * entries. */ - if (kset_find_obj(dump_kset, name)) + kobj = kset_find_obj(dump_kset, name); + if (kobj) { + /* Drop reference added by kset_find_obj() */ + kobject_put(kobj); return 0; + } dump = create_dump_obj(dump_id, dump_size, dump_type); if (!dump) diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 0ad533b617f7..c4ae2958de9c 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c @@ -246,6 +246,7 @@ static void elog_work_fn(struct work_struct *work) uint64_t elog_type; int rc; char name[2+16+1]; + struct kobject *kobj; rc = opal_get_elog_size(&id, &size, &type); if (rc != OPAL_SUCCESS) { @@ -268,8 +269,12 @@ static void elog_work_fn(struct work_struct *work) * that gracefully and not create two conflicting * entries. 
*/ - if (kset_find_obj(elog_kset, name)) + kobj = kset_find_obj(elog_kset, name); + if (kobj) { + /* Drop reference added by kset_find_obj() */ + kobject_put(kobj); return; + } create_elog_obj(log_id, elog_size, elog_type); } diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 0fa7178d36dc..12833e486d9b 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -459,6 +459,7 @@ static int opal_recover_mce(struct pt_regs *regs, if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ + pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); recovered = 0; } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { /* Platform corrected itself */ diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index c413ec158ff5..1017734bdc78 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -118,6 +118,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) bus = bridge->bus; + /* Rely on the pcibios_free_controller_deferred() callback. */ + pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred, + (void *) pci_bus_to_host(bus)); + dn = pcibios_get_phb_of_node(bus); if (!dn) return 0; diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 89e23811199c..e161a25bc64b 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -135,8 +135,11 @@ int remove_phb_dynamic(struct pci_controller *phb) release_resource(res); } - /* Free pci_controller data structure */ - pcibios_free_controller(phb); + /* + * The pci_controller data structure is freed by + * the pcibios_free_controller_deferred() callback; + * see pseries_root_bridge_prepare(). + */ return 0; } diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 78f4f8711d58..977afc2674e2 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -210,6 +210,7 @@ do { \ #define STACK_RND_MASK 0x7ffUL +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #define ARCH_DLINFO \ do { \ if (vdso_enabled) \ diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 16c9c88658c8..1f2fa85091ce 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -88,7 +88,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm) } /* - * Flush TLB entries for a specific ASCE on all CPUs. + * Flush TLB entries for a specific ASCE on all CPUs. Should never be used + * when more than one asce (e.g. gmap) ran on this mm. 
*/ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) { diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index cd4c68e0398d..528f08c1d584 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -213,28 +213,28 @@ int __put_user_bad(void) __attribute__((noreturn)); __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: { \ - unsigned char __x; \ + unsigned char __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ - unsigned short __x; \ + unsigned short __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ - unsigned int __x; \ + unsigned int __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ - unsigned long long __x; \ + unsigned long long __x = 0; \ __gu_err = __get_user_fn(&__x, ptr, \ sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ diff --git a/arch/s390/include/uapi/asm/auxvec.h b/arch/s390/include/uapi/asm/auxvec.h index a1f153e89133..c53e08442255 100644 --- a/arch/s390/include/uapi/asm/auxvec.h +++ b/arch/s390/include/uapi/asm/auxvec.h @@ -3,4 +3,6 @@ #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ + #endif diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 98eeb823342c..80fcdfb8a810 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -202,7 +202,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) static void gmap_flush_tlb(struct gmap *gmap) { if (MACHINE_HAS_IDTE) - __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table | + __tlb_flush_idte((unsigned long) gmap->table | _ASCE_TYPE_REGION1); else __tlb_flush_global(); @@ -221,7 +221,7 @@ void gmap_free(struct gmap *gmap) /* Flush tlb. 
*/ if (MACHINE_HAS_IDTE) - __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table | + __tlb_flush_idte((unsigned long) gmap->table | _ASCE_TYPE_REGION1); else __tlb_flush_global(); diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index ab66ddde777b..69326dfb894d 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -158,7 +158,7 @@ do { \ __get_user_asm(val, "lw", ptr); \ break; \ case 8: \ - if ((copy_from_user((void *)&val, ptr, 8)) == 0) \ + if (__copy_from_user((void *)&val, ptr, 8) == 0) \ __gu_err = 0; \ else \ __gu_err = -EFAULT; \ @@ -183,6 +183,8 @@ do { \ \ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ __get_user_common((x), size, __gu_ptr); \ + else \ + (x) = 0; \ \ __gu_err; \ }) @@ -196,6 +198,7 @@ do { \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:li %0, %4\n" \ + "li %1, 0\n" \ "j 2b\n" \ ".previous\n" \ ".section __ex_table,\"a\"\n" \ @@ -293,35 +296,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); static inline unsigned long copy_from_user(void *to, const void *from, unsigned long len) { - unsigned long over; + unsigned long res = len; - if (access_ok(VERIFY_READ, from, len)) - return __copy_tofrom_user(to, from, len); + if (likely(access_ok(VERIFY_READ, from, len))) + res = __copy_tofrom_user(to, from, len); - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + len - TASK_SIZE; - return __copy_tofrom_user(to, from, len - over) + over; - } - return len; + if (unlikely(res)) + memset(to + (len - res), 0, res); + + return res; } static inline unsigned long copy_to_user(void *to, const void *from, unsigned long len) { - unsigned long over; - - if (access_ok(VERIFY_WRITE, to, len)) - return __copy_tofrom_user(to, from, len); + if (likely(access_ok(VERIFY_WRITE, to, len))) + len = __copy_tofrom_user(to, from, len); - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + len - TASK_SIZE; - return __copy_tofrom_user(to, from, len - over) + over; - } return len; } -#define __copy_from_user(to, from, len) \ - __copy_tofrom_user((to), (from), (len)) +static inline unsigned long +__copy_from_user(void *to, const void *from, unsigned long len) +{ + unsigned long left = __copy_tofrom_user(to, from, len); + if (unlikely(left)) + memset(to + (len - left), 0, left); + return left; +} #define __copy_to_user(to, from, len) \ __copy_tofrom_user((to), (from), (len)) @@ -335,17 +337,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len) static inline unsigned long __copy_from_user_inatomic(void *to, const void *from, unsigned long len) { - return __copy_from_user(to, from, len); + return __copy_tofrom_user(to, from, len); } -#define __copy_in_user(to, from, len) __copy_from_user(to, from, len) +#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len) static inline unsigned long copy_in_user(void *to, const void *from, unsigned long len) { if (access_ok(VERIFY_READ, from, len) && access_ok(VERFITY_WRITE, to, len)) - return copy_from_user(to, from, len); + return __copy_tofrom_user(to, from, len); } /* diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 9486376605f4..c04cc18ae9cd 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) __kernel_size_t __copy_size = (__kernel_size_t) n; if (__copy_size && __access_ok(__copy_from, __copy_size)) - return __copy_user(to, from, __copy_size); + 
__copy_size = __copy_user(to, from, __copy_size); + + if (unlikely(__copy_size)) + memset(to + (n - __copy_size), 0, __copy_size); return __copy_size; } diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 2e07e0f40c6a..a2f9d0531328 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h @@ -24,6 +24,7 @@ #define __get_user_size(x,ptr,size,retval) \ do { \ retval = 0; \ + x = 0; \ switch (size) { \ case 1: \ retval = __get_user_asm_b((void *)&x, \ diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 9634d086fc56..79b03872e165 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -265,8 +265,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un { if (n && __access_ok((unsigned long) from, n)) return __copy_user((__force void __user *) to, from, n); - else + else { + memset(to, 0, n); return n; + } } static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h index 41d9878a9686..581f1414b6ca 100644 --- a/arch/tile/include/asm/elf.h +++ b/arch/tile/include/asm/elf.h @@ -131,6 +131,7 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *); struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack); +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ #define ARCH_DLINFO \ do { \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ diff --git a/arch/tile/include/uapi/asm/auxvec.h b/arch/tile/include/uapi/asm/auxvec.h index c93e92709f14..f497123ed980 100644 --- a/arch/tile/include/uapi/asm/auxvec.h +++ b/arch/tile/include/uapi/asm/auxvec.h @@ -18,4 +18,6 @@ /* The vDSO location. 
*/ #define AT_SYSINFO_EHDR 33 +#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */ + #endif /* _ASM_TILE_AUXVEC_H */ diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index b77310833b03..cec4d620843e 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1266,7 +1266,7 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle, bool is64) { struct efi_info *efi = &boot_params->efi_info; - unsigned long map_sz, key, desc_size; + unsigned long map_sz, key, desc_size, buff_size; efi_memory_desc_t *mem_map; struct setup_data *e820ext; const char *signature; @@ -1277,14 +1277,20 @@ static efi_status_t exit_boot(struct boot_params *boot_params, bool called_exit = false; u8 nr_entries; int i; - - nr_desc = 0; - e820ext = NULL; - e820ext_size = 0; + struct efi_boot_memmap map; + + nr_desc = 0; + e820ext = NULL; + e820ext_size = 0; + map.map = &mem_map; + map.map_size = &map_sz; + map.desc_size = &desc_size; + map.desc_ver = &desc_version; + map.key_ptr = &key; + map.buff_size = &buff_size; get_map: - status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size, - &desc_version, &key); + status = efi_get_memory_map(sys_table, &map); if (status != EFI_SUCCESS) return status; diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 04905bfc508b..5e4b0cc54e43 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -17,7 +17,14 @@ static inline void __native_flush_tlb(void) { + /* + * If current->mm == NULL then we borrow a mm which may change during a + * task switch and therefore we must not be preempted while we write CR3 + * back: + */ + preempt_disable(); native_write_cr3(native_read_cr3()); + preempt_enable(); } static inline void __native_flush_tlb_global_irq_disabled(void) diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index c385d14dadc3..42937ccb8573 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1615,6 +1615,9 @@ void __init enable_IR_x2apic(void) int ret, x2apic_enabled = 0; int hardware_init_ret; + if (skip_ioapic_setup) + return; + /* Make sure irq_remap_ops are initialized */ setup_irq_remapping_ops(); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index edefacacc2cc..80e35e8522e4 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -522,6 +522,17 @@ static const int amd_erratum_383[]; static const int amd_erratum_400[]; static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); +#define MSR_AMD64_DE_CFG 0xC0011029 + +static void init_amd_ln(struct cpuinfo_x86 *c) +{ + /* + * Apply erratum 665 fix unconditionally so machines without a BIOS + * fix work. 
+ */ + msr_set_bit(MSR_AMD64_DE_CFG, 31); +} + static void init_amd(struct cpuinfo_x86 *c) { u32 dummy; @@ -614,6 +625,9 @@ static void init_amd(struct cpuinfo_x86 *c) } } + if (c->x86 == 0x12) + init_amd_ln(c); + /* re-enable TopologyExtensions if switched off by BIOS */ if ((c->x86 == 0x15) && (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index beeb7cc07044..dc4c3b89c3f5 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, - [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d, + [PERF_COUNT_HW_CACHE_MISSES] = 0x077e, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */ diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 2e1a6853e00c..2fa494f59828 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -11,7 +11,11 @@ #include <linux/pci.h> #include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/dmi.h> #include <linux/pci_ids.h> +#include <linux/bcma/bcma.h> +#include <linux/bcma/bcma_regs.h> #include <drm/i915_drm.h> #include <asm/pci-direct.h> #include <asm/dma.h> @@ -21,6 +25,9 @@ #include <asm/iommu.h> #include <asm/gart.h> #include <asm/irq_remapping.h> +#include <asm/early_ioremap.h> + +#define dev_err(msg) pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg) static void __init fix_hypertransport_config(int num, int slot, int func) { @@ -76,6 +83,13 @@ static void __init nvidia_bugs(int num, int slot, int func) #ifdef CONFIG_ACPI #ifdef CONFIG_X86_IO_APIC /* + * Only applies to Nvidia root ports (bus 0) and not to + * Nvidia graphics cards with PCI ports on secondary buses. + */ + if (num) + return; + + /* * All timer overrides on Nvidia are * wrong unless HPET is enabled. * Unfortunately that's not true on many Asus boards. 
@@ -565,6 +579,61 @@ static void __init force_disable_hpet(int num, int slot, int func) #endif } +#define BCM4331_MMIO_SIZE 16384 +#define BCM4331_PM_CAP 0x40 +#define bcma_aread32(reg) ioread32(mmio + 1 * BCMA_CORE_SIZE + reg) +#define bcma_awrite32(reg, val) iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg) + +static void __init apple_airport_reset(int bus, int slot, int func) +{ + void __iomem *mmio; + u16 pmcsr; + u64 addr; + int i; + + if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) + return; + + /* Card may have been put into PCI_D3hot by grub quirk */ + pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL); + + if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) { + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr); + mdelay(10); + + pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL); + if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) { + dev_err("Cannot power up Apple AirPort card\n"); + return; + } + } + + addr = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0); + addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32; + addr &= PCI_BASE_ADDRESS_MEM_MASK; + + mmio = early_ioremap(addr, BCM4331_MMIO_SIZE); + if (!mmio) { + dev_err("Cannot iomap Apple AirPort card\n"); + return; + } + + pr_info("Resetting Apple AirPort card (left enabled by EFI)\n"); + + for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++) + udelay(10); + + bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET); + bcma_aread32(BCMA_RESET_CTL); + udelay(1); + + bcma_awrite32(BCMA_RESET_CTL, 0); + bcma_aread32(BCMA_RESET_CTL); + udelay(10); + + early_iounmap(mmio, BCM4331_MMIO_SIZE); +} #define QFLAG_APPLY_ONCE 0x1 #define QFLAG_APPLIED 0x2 @@ -578,12 +647,6 @@ struct chipset { void (*f)(int num, int slot, int func); }; -/* - * Only works for devices on the root bus. If you add any devices - * not on bus 0 readd another loop level in early_quirks(). But - * be careful because at least the Nvidia quirk here relies on - * only matching on bus 0. - */ static struct chipset early_qrk[] __initdata = { { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs }, @@ -609,9 +672,13 @@ static struct chipset early_qrk[] __initdata = { */ { PCI_VENDOR_ID_INTEL, 0x0f00, PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, + { PCI_VENDOR_ID_BROADCOM, 0x4331, + PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, {} }; +static void __init early_pci_scan_bus(int bus); + /** * check_dev_quirk - apply early quirks to a given PCI device * @num: bus number @@ -620,7 +687,7 @@ static struct chipset early_qrk[] __initdata = { * * Check the vendor & device ID against the early quirks table. * - * If the device is single function, let early_quirks() know so we don't + * If the device is single function, let early_pci_scan_bus() know so we don't * poke at this device again. 
*/ static int __init check_dev_quirk(int num, int slot, int func) @@ -629,6 +696,7 @@ static int __init check_dev_quirk(int num, int slot, int func) u16 vendor; u16 device; u8 type; + u8 sec; int i; class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE); @@ -656,25 +724,36 @@ static int __init check_dev_quirk(int num, int slot, int func) type = read_pci_config_byte(num, slot, func, PCI_HEADER_TYPE); + + if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) { + sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS); + if (sec > num) + early_pci_scan_bus(sec); + } + if (!(type & 0x80)) return -1; return 0; } -void __init early_quirks(void) +static void __init early_pci_scan_bus(int bus) { int slot, func; - if (!early_pci_allowed()) - return; - /* Poor man's PCI discovery */ - /* Only scan the root bus */ for (slot = 0; slot < 32; slot++) for (func = 0; func < 8; func++) { /* Only probe function 0 on single fn devices */ - if (check_dev_quirk(0, slot, func)) + if (check_dev_quirk(bus, slot, func)) break; } } + +void __init early_quirks(void) +{ + if (!early_pci_allowed()) + return; + + early_pci_scan_bus(0); +} diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 8d12f0546dfc..8819ec730be4 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -55,12 +55,12 @@ asm (".pushsection .entry.text, \"ax\"\n" ".popsection"); /* identity function, which can be inlined */ -u32 _paravirt_ident_32(u32 x) +u32 notrace _paravirt_ident_32(u32 x) { return x; } -u64 _paravirt_ident_64(u64 x) +u64 notrace _paravirt_ident_64(u64 x) { return x; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e890c0398d62..413fbb087718 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -372,6 +372,7 @@ struct nested_vmx { struct list_head vmcs02_pool; int vmcs02_num; u64 vmcs01_tsc_offset; + bool change_vmcs01_virtual_x2apic_mode; /* L2 must run next, and mustn't decide to exit to L1. */ bool nested_run_pending; /* @@ -5777,22 +5778,27 @@ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) /* * Free all VMCSs saved for this vcpu, except the one pointed by - * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one - * currently used, if running L2), and vmcs01 when running L2. + * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs + * must be &vmx->vmcs01. */ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) { struct vmcs02_list *item, *n; + + WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { - if (vmx->loaded_vmcs != &item->vmcs02) - free_loaded_vmcs(&item->vmcs02); + /* + * Something will leak if the above WARN triggers. Better than + * a use-after-free. + */ + if (vmx->loaded_vmcs == &item->vmcs02) + continue; + + free_loaded_vmcs(&item->vmcs02); list_del(&item->list); kfree(item); + vmx->nested.vmcs02_num--; } - vmx->nested.vmcs02_num = 0; - - if (vmx->loaded_vmcs != &vmx->vmcs01) - free_loaded_vmcs(&vmx->vmcs01); } /* @@ -7079,6 +7085,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) { u32 sec_exec_control; + /* Postpone execution until vmcs01 is the current VMCS. 
*/ + if (is_guest_mode(vcpu)) { + to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true; + return; + } + /* * There is not point to enable virtualize x2apic without enable * apicv @@ -7557,13 +7569,46 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_complete_interrupts(vmx); } +static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int cpu; + + if (vmx->loaded_vmcs == &vmx->vmcs01) + return; + + cpu = get_cpu(); + vmx->loaded_vmcs = &vmx->vmcs01; + vmx_vcpu_put(vcpu); + vmx_vcpu_load(vcpu, cpu); + vcpu->cpu = cpu; + put_cpu(); +} + +/* + * Ensure that the current vmcs of the logical processor is the + * vmcs01 of the vcpu before calling free_nested(). + */ +static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + int r; + + r = vcpu_load(vcpu); + BUG_ON(r); + vmx_load_vmcs01(vcpu); + free_nested(vmx); + vcpu_put(vcpu); +} + static void vmx_free_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); free_vpid(vmx); + leave_guest_mode(vcpu); + vmx_free_vcpu_nested(vcpu); free_loaded_vmcs(vmx->loaded_vmcs); - free_nested(vmx); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); @@ -8707,7 +8752,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, unsigned long exit_qualification) { struct vcpu_vmx *vmx = to_vmx(vcpu); - int cpu; struct vmcs12 *vmcs12 = get_vmcs12(vcpu); /* trying to cancel vmlaunch/vmresume is a bug */ @@ -8732,12 +8776,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmcs12->vm_exit_intr_error_code, KVM_ISA_VMX); - cpu = get_cpu(); - vmx->loaded_vmcs = &vmx->vmcs01; - vmx_vcpu_put(vcpu); - vmx_vcpu_load(vcpu, cpu); - vcpu->cpu = cpu; - put_cpu(); + vmx_load_vmcs01(vcpu); vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); @@ -8752,6 +8791,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, /* Update TSC_OFFSET if TSC was changed while L2 ran */ vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); + if (vmx->nested.change_vmcs01_virtual_x2apic_mode) { + vmx->nested.change_vmcs01_virtual_x2apic_mode = false; + vmx_set_virtual_x2apic_mode(vcpu, + vcpu->arch.apic_base & X2APIC_ENABLE); + } + /* This is needed for same reason as it was needed in prepare_vmcs02 */ vmx->host_rsp = 0; diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index d6b867921612..50aa58be7933 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl @@ -294,7 +294,7 @@ # 285 sys_setaltroot 286 i386 add_key sys_add_key 287 i386 request_key sys_request_key -288 i386 keyctl sys_keyctl +288 i386 keyctl sys_keyctl compat_sys_keyctl 289 i386 ioprio_set sys_ioprio_set 290 i386 ioprio_get sys_ioprio_get 291 i386 inotify_init sys_inotify_init diff --git a/block/genhd.c b/block/genhd.c index 9316f5fd416f..548e9e698610 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -611,7 +611,7 @@ void add_disk(struct gendisk *disk) /* Register BDI before referencing it from bdev */ bdi = &disk->queue->backing_dev_info; - bdi_register_dev(bdi, disk_devt(disk)); + bdi_register_owner(bdi, disk_to_dev(disk)); blk_register_region(disk_devt(disk), disk->minors, NULL, exact_match, exact_lock, disk); @@ -829,6 +829,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v) if (iter) { class_dev_iter_exit(iter); kfree(iter); + seqf->private = NULL; } } diff --git a/crypto/blkcipher.c 
b/crypto/blkcipher.c index 3c551d46aa3b..37130b81d0d4 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c @@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, return blkcipher_walk_done(desc, walk, -EINVAL); } + bsize = min(walk->walk_blocksize, n); + walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | BLKCIPHER_WALK_DIFF); if (!scatterwalk_aligned(&walk->in, walk->alignmask) || @@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc, } } - bsize = min(walk->walk_blocksize, n); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); diff --git a/crypto/cryptd.c b/crypto/cryptd.c index 75c415d37086..d85fab975514 100644 --- a/crypto/cryptd.c +++ b/crypto/cryptd.c @@ -565,9 +565,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out) static int cryptd_hash_import(struct ahash_request *req, const void *in) { - struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); + struct shash_desc *desc = cryptd_shash_desc(req); + + desc->tfm = ctx->child; + desc->flags = req->base.flags; - return crypto_shash_import(&rctx->desc, in); + return crypto_shash_import(desc, in); } static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, diff --git a/crypto/gcm.c b/crypto/gcm.c index f0bd00b15f26..d2a0f7371cf0 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -716,7 +716,9 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, - CRYPTO_ALG_TYPE_AHASH_MASK); + CRYPTO_ALG_TYPE_AHASH_MASK | + crypto_requires_sync(algt->type, + algt->mask)); if (IS_ERR(ghash_alg)) return ERR_CAST(ghash_alg); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 79ca2278c2a3..0ec7a6fa3d4d 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -68,7 +68,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out, void scatterwalk_done(struct scatter_walk *walk, int out, int more) { - if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more) + if (!more || walk->offset >= walk->sg->offset + walk->sg->length || + !(walk->offset & (PAGE_SIZE - 1))) scatterwalk_pagedone(walk, out, more); } EXPORT_SYMBOL_GPL(scatterwalk_done); diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 09b632ad0fe2..7ded994c6dd0 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -8,8 +8,6 @@ #include <linux/bcma/bcma.h> #include <linux/delay.h> -#define BCMA_CORE_SIZE 0x1000 - #define bcma_err(bus, fmt, ...) \ pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_warn(bus, fmt, ...) 
\ diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 8e51b6c8ccac..1af9fd3202c1 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -122,6 +122,8 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3432) }, { USB_DEVICE(0x13d3, 0x3472) }, { USB_DEVICE(0x13d3, 0x3474) }, + { USB_DEVICE(0x13d3, 0x3487) }, + { USB_DEVICE(0x13d3, 0x3490) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, @@ -188,6 +190,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a9ccee35973e..876654772092 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -216,6 +216,8 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index f66ea258382f..710aec7820a4 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -384,7 +384,12 @@ static int omap_rng_probe(struct platform_device *pdev) } pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "Failed to runtime_get device: %d\n", ret); + pm_runtime_put_noidle(&pdev->dev); + goto err_ioremap; + } ret = (dev->of_node) ? 
of_get_omap_rng_device_details(priv, pdev) : get_omap_rng_device_details(priv); @@ -437,8 +442,15 @@ static int omap_rng_suspend(struct device *dev) static int omap_rng_resume(struct device *dev) { struct omap_rng_dev *priv = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_get_sync(dev); + if (ret) { + dev_err(dev, "Failed to runtime_get device: %d\n", ret); + pm_runtime_put_noidle(dev); + return ret; + } - pm_runtime_get_sync(dev); priv->pdata->init(priv); return 0; diff --git a/drivers/char/random.c b/drivers/char/random.c index dde8da31dcae..692482b7c2fb 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -956,6 +956,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes) mix_pool_bytes(r, tmp, bytes, NULL); credit_entropy_bits(r, bytes*8); } +EXPORT_SYMBOL_GPL(add_interrupt_randomness); /* * Used as a workqueue function so that when the input pool is getting @@ -1379,12 +1380,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { + static int maxwarn = 10; int ret; - if (unlikely(nonblocking_pool.initialized == 0)) - printk_once(KERN_NOTICE "random: %s urandom read " - "with %d bits of entropy available\n", - current->comm, nonblocking_pool.entropy_total); + if (unlikely(nonblocking_pool.initialized == 0) && + maxwarn > 0) { + maxwarn--; + printk(KERN_NOTICE "random: %s: uninitialized urandom read " + "(%zd bytes read, %d bits of entropy available)\n", + current->comm, nbytes, nonblocking_pool.entropy_total); + } nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 51350cd0847e..415e47d79a46 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -197,16 +197,15 @@ static int get_burstcount(struct tpm_chip *chip) { unsigned long stop; int burstcnt; + u32 value; /* wait for burstcount */ /* which timeout value, spec has 2 answers (c & d) */ stop = jiffies + chip->vendor.timeout_d; do { - burstcnt = ioread8(chip->vendor.iobase + - TPM_STS(chip->vendor.locality) + 1); - burstcnt += ioread8(chip->vendor.iobase + - TPM_STS(chip->vendor.locality) + - 2) << 8; + value = ioread32(chip->vendor.iobase + + TPM_STS(chip->vendor.locality)); + burstcnt = (value >> 8) & 0xFFFF; if (burstcnt) return burstcnt; msleep(TPM_TIMEOUT); diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index f4a9c0058b4d..501c95941882 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -120,12 +120,16 @@ static struct clock_event_device sun4i_clockevent = { .set_next_event = sun4i_clkevt_next_event, }; +static void sun4i_timer_clear_interrupt(void) +{ + writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG); +} static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = (struct clock_event_device *)dev_id; - writel(0x1, timer_base + TIMER_IRQ_ST_REG); + sun4i_timer_clear_interrupt(); evt->event_handler(evt); return IRQ_HANDLED; @@ -182,6 +186,9 @@ static void __init sun4i_timer_init(struct device_node *node) /* Make sure timer is stopped before playing with interrupts */ sun4i_clkevt_time_stop(0); + /* clear timer0 interrupt */ + sun4i_timer_clear_interrupt(); + sun4i_clockevent.cpumask = cpu_possible_mask; sun4i_clockevent.irq = irq; diff --git a/drivers/crypto/caam/caamhash.c 
b/drivers/crypto/caam/caamhash.c index a22f9b473c22..4f03b1a14d62 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1778,6 +1778,7 @@ caam_hash_alloc(struct caam_hash_template *template, template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); + t_alg->ahash_alg.setkey = NULL; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 5533fe31c90d..433a7696bf0f 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -330,7 +330,7 @@ static void nx_of_update_msc(struct device *dev, ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) && i < msc->triplets; i++) { - if (msc->fc > NX_MAX_FC || msc->mode > NX_MAX_MODE) { + if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) { dev_err(dev, "unknown function code/mode " "combo: %d/%d (ignored)\n", msc->fc, msc->mode); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 9f90369dd6bd..ba850eb2d275 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -483,7 +483,7 @@ struct devfreq *devfreq_add_device(struct device *dev, devfreq->profile->max_state * devfreq->profile->max_state, GFP_KERNEL); - devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * + devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * devfreq->profile->max_state, GFP_KERNEL); devfreq->last_stat_updated = jiffies; diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index c3986452194d..3e8fcbf511e2 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -591,6 +591,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, int retval; u16 ether_type; + if (len <= RFC2374_UNFRAG_HDR_SIZE) + return 0; + hdr.w0 = be32_to_cpu(buf[0]); lf = fwnet_get_hdr_lf(&hdr); if (lf == RFC2374_HDR_UNFRAG) { @@ -615,7 +618,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, return fwnet_finish_incoming_packet(net, skb, source_node_id, is_broadcast, ether_type); } + /* A datagram fragment has been received, now the fun begins. */ + + if (len <= RFC2374_FRAG_HDR_SIZE) + return 0; + hdr.w1 = ntohl(buf[1]); buf += 2; len -= RFC2374_FRAG_HDR_SIZE; @@ -629,6 +637,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, datagram_label = fwnet_get_hdr_dgl(&hdr); dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? 
+ 1 */ + if (fg_off + len > dg_size) + return 0; + spin_lock_irqsave(&dev->lock, flags); peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); @@ -735,6 +746,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, fw_send_response(card, r, rcode); } +static int gasp_source_id(__be32 *p) +{ + return be32_to_cpu(p[0]) >> 16; +} + +static u32 gasp_specifier_id(__be32 *p) +{ + return (be32_to_cpu(p[0]) & 0xffff) << 8 | + (be32_to_cpu(p[1]) & 0xff000000) >> 24; +} + +static u32 gasp_version(__be32 *p) +{ + return be32_to_cpu(p[1]) & 0xffffff; +} + static void fwnet_receive_broadcast(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { @@ -744,9 +771,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, __be32 *buf_ptr; int retval; u32 length; - u16 source_node_id; - u32 specifier_id; - u32 ver; unsigned long offset; unsigned long flags; @@ -763,22 +787,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context, spin_unlock_irqrestore(&dev->lock, flags); - specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 - | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; - ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; - source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; - - if (specifier_id == IANA_SPECIFIER_ID && - (ver == RFC2734_SW_VERSION + if (length > IEEE1394_GASP_HDR_SIZE && + gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID && + (gasp_version(buf_ptr) == RFC2734_SW_VERSION #if IS_ENABLED(CONFIG_IPV6) - || ver == RFC3146_SW_VERSION + || gasp_version(buf_ptr) == RFC3146_SW_VERSION #endif - )) { - buf_ptr += 2; - length -= IEEE1394_GASP_HDR_SIZE; - fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, + )) + fwnet_incoming_packet(dev, buf_ptr + 2, + length - IEEE1394_GASP_HDR_SIZE, + gasp_source_id(buf_ptr), context->card->generation, true); - } packet.payload_length = dev->rcv_buffer_size; packet.interrupt = 1; diff --git a/drivers/firmware/efi/efi-stub-helper.c b/drivers/firmware/efi/efi-stub-helper.c index 2fb3a0aab56c..8eae9c9d7863 100644 --- a/drivers/firmware/efi/efi-stub-helper.c +++ b/drivers/firmware/efi/efi-stub-helper.c @@ -15,6 +15,8 @@ #define EFI_ERROR (~0UL) +#define EFI_MMAP_NR_SLACK_SLOTS 8 + struct file_info { efi_file_handle_t *handle; u64 size; @@ -41,49 +43,62 @@ static void efi_printk(efi_system_table_t *sys_table_arg, char *str) #define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) +static inline bool mmap_has_headroom(unsigned long buff_size, + unsigned long map_size, + unsigned long desc_size) +{ + unsigned long slack = buff_size - map_size; + + return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS; +} + static efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, - efi_memory_desc_t **map, - unsigned long *map_size, - unsigned long *desc_size, - u32 *desc_ver, - unsigned long *key_ptr) + struct efi_boot_memmap *map) { efi_memory_desc_t *m = NULL; efi_status_t status; unsigned long key; u32 desc_version; - *map_size = sizeof(*m) * 32; + *map->desc_size = sizeof(*m); + *map->map_size = *map->desc_size * 32; + *map->buff_size = *map->map_size; again: - /* - * Add an additional efi_memory_desc_t because we're doing an - * allocation which may be in a new descriptor region. 
- */ - *map_size += sizeof(*m); status = efi_call_early(allocate_pool, EFI_LOADER_DATA, - *map_size, (void **)&m); + *map->map_size, (void **)&m); if (status != EFI_SUCCESS) goto fail; - *desc_size = 0; + *map->desc_size = 0; key = 0; - status = efi_call_early(get_memory_map, map_size, m, - &key, desc_size, &desc_version); + status = efi_call_early(get_memory_map, map->map_size, m, + &key, map->desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL) { + if (status == EFI_BUFFER_TOO_SMALL || + !mmap_has_headroom(*map->buff_size, *map->map_size, + *map->desc_size)) { efi_call_early(free_pool, m); + /* + * Make sure there is some entries of headroom so that the + * buffer can be reused for a new map after allocations are + * no longer permitted. Its unlikely that the map will grow to + * exceed this headroom once we are ready to trigger + * ExitBootServices() + */ + *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS; + *map->buff_size = *map->map_size; goto again; } if (status != EFI_SUCCESS) efi_call_early(free_pool, m); - if (key_ptr && status == EFI_SUCCESS) - *key_ptr = key; - if (desc_ver && status == EFI_SUCCESS) - *desc_ver = desc_version; + if (map->key_ptr && status == EFI_SUCCESS) + *map->key_ptr = key; + if (map->desc_ver && status == EFI_SUCCESS) + *map->desc_ver = desc_version; fail: - *map = m; + *map->map = m; return status; } @@ -91,13 +106,20 @@ fail: static unsigned long __init get_dram_base(efi_system_table_t *sys_table_arg) { efi_status_t status; - unsigned long map_size; + unsigned long map_size, buff_size; unsigned long membase = EFI_ERROR; struct efi_memory_map map; efi_memory_desc_t *md; + struct efi_boot_memmap boot_map; - status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map, - &map_size, &map.desc_size, NULL, NULL); + boot_map.map = (efi_memory_desc_t **)&map.map; + boot_map.map_size = &map_size; + boot_map.desc_size = &map.desc_size; + boot_map.desc_ver = NULL; + boot_map.key_ptr = NULL; + boot_map.buff_size = &buff_size; + + status = efi_get_memory_map(sys_table_arg, &boot_map); if (status != EFI_SUCCESS) return membase; @@ -120,15 +142,22 @@ static efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, unsigned long *addr, unsigned long max) { - unsigned long map_size, desc_size; + unsigned long map_size, desc_size, buff_size; efi_memory_desc_t *map; efi_status_t status; unsigned long nr_pages; u64 max_addr = 0; int i; + struct efi_boot_memmap boot_map; + + boot_map.map = &map; + boot_map.map_size = &map_size; + boot_map.desc_size = &desc_size; + boot_map.desc_ver = NULL; + boot_map.key_ptr = NULL; + boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, - NULL, NULL); + status = efi_get_memory_map(sys_table_arg, &boot_map); if (status != EFI_SUCCESS) goto fail; @@ -206,14 +235,21 @@ static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, unsigned long size, unsigned long align, unsigned long *addr) { - unsigned long map_size, desc_size; + unsigned long map_size, desc_size, buff_size; efi_memory_desc_t *map; efi_status_t status; unsigned long nr_pages; int i; + struct efi_boot_memmap boot_map; + + boot_map.map = &map; + boot_map.map_size = &map_size; + boot_map.desc_size = &desc_size; + boot_map.desc_ver = NULL; + boot_map.key_ptr = NULL; + boot_map.buff_size = &buff_size; - status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size, - NULL, NULL); + status = efi_get_memory_map(sys_table_arg, &boot_map); if 
(status != EFI_SUCCESS) goto fail; diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c index 507a3df46a5d..519448aeaffd 100644 --- a/drivers/firmware/efi/fdt.c +++ b/drivers/firmware/efi/fdt.c @@ -178,12 +178,20 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, unsigned long fdt_addr, unsigned long fdt_size) { - unsigned long map_size, desc_size; + unsigned long map_size, desc_size, buff_size; u32 desc_ver; unsigned long mmap_key; efi_memory_desc_t *memory_map; unsigned long new_fdt_size; efi_status_t status; + struct efi_boot_memmap map; + + map.map = &memory_map; + map.map_size = &map_size; + map.desc_size = &desc_size; + map.desc_ver = &desc_ver; + map.key_ptr = &mmap_key; + map.buff_size = &buff_size; /* * Estimate size of new FDT, and allocate memory for it. We @@ -204,8 +212,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, * we can get the memory map key needed for * exit_boot_services(). */ - status = efi_get_memory_map(sys_table, &memory_map, &map_size, - &desc_size, &desc_ver, &mmap_key); + status = efi_get_memory_map(sys_table, &map); if (status != EFI_SUCCESS) goto fail_free_new_fdt; diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 4a1b5113e527..53a2f65fb166 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -50,6 +50,7 @@ config GPIO_DEVRES config OF_GPIO def_bool y depends on OF + depends on HAS_IOMEM config GPIO_ACPI def_bool y diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c index 118a6bf455d9..39d0bc81657e 100644 --- a/drivers/gpio/gpio-intel-mid.c +++ b/drivers/gpio/gpio-intel-mid.c @@ -17,7 +17,6 @@ * Moorestown platform Langwell chip. * Medfield platform Penwell chip. * Clovertrail platform Cloverview chip. - * Merrifield platform Tangier chip. 
*/ #include <linux/module.h> @@ -66,10 +65,6 @@ enum GPIO_REG { /* intel_mid gpio driver data */ struct intel_mid_gpio_ddata { u16 ngpio; /* number of gpio pins */ - u32 gplr_offset; /* offset of first GPLR register from base */ - u32 flis_base; /* base address of FLIS registers */ - u32 flis_len; /* length of FLIS registers */ - u32 (*get_flis_offset)(int gpio); u32 chip_irq_type; /* chip interrupt type */ }; @@ -284,15 +279,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = { .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, }; -static const struct intel_mid_gpio_ddata gpio_tangier = { - .ngpio = 192, - .gplr_offset = 4, - .flis_base = 0xff0c0000, - .flis_len = 0x8000, - .get_flis_offset = NULL, - .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, -}; - static const struct pci_device_id intel_gpio_ids[] = { { /* Lincroft */ @@ -319,11 +305,6 @@ static const struct pci_device_id intel_gpio_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), .driver_data = (kernel_ulong_t)&gpio_cloverview_core, }, - { - /* Tangier */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199), - .driver_data = (kernel_ulong_t)&gpio_tangier, - }, { 0 } }; MODULE_DEVICE_TABLE(pci, intel_gpio_ids); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 103de0f01934..7a92eb0dad6b 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id); #define MAX_BANK 5 #define BANK_SZ 8 -#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ) +#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ) struct pca953x_chip { unsigned gpio_start; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 0dd2c26c55ab..8cbc8ea28e0e 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -4236,6 +4236,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, unsigned long flags; int ret = -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || page_flip->reserved != 0) return -EINVAL; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dfa9769b26b5..0149b1e6dd5b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -72,6 +72,8 @@ #define EDID_QUIRK_FORCE_8BPC (1 << 8) /* Force 12bpc */ #define EDID_QUIRK_FORCE_12BPC (1 << 9) +/* Force 6bpc */ +#define EDID_QUIRK_FORCE_6BPC (1 << 10) struct detailed_mode_closure { struct drm_connector *connector; @@ -98,6 +100,9 @@ static struct edid_quirk { /* Unknown Acer */ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, + /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ + { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, @@ -3667,6 +3672,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) drm_add_display_info(edid, &connector->display_info, connector); + if (quirks & EDID_QUIRK_FORCE_6BPC) + connector->display_info.bpc = 6; + if (quirks & EDID_QUIRK_FORCE_8BPC) connector->display_info.bpc = 8; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a62393650307..2041c5bbb929 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9475,21 +9475,11 @@ connected_sink_compute_bpp(struct intel_connector *connector, pipe_config->pipe_bpp = connector->base.display_info.bpc*3; } - /* Clamp bpp to default limit on screens without EDID 1.4 */ - if 
(connector->base.display_info.bpc == 0) { - int type = connector->base.connector_type; - int clamp_bpp = 24; - - /* Fall back to 18 bpp when DP sink capability is unknown. */ - if (type == DRM_MODE_CONNECTOR_DisplayPort || - type == DRM_MODE_CONNECTOR_eDP) - clamp_bpp = 18; - - if (bpp > clamp_bpp) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", - bpp, clamp_bpp); - pipe_config->pipe_bpp = clamp_bpp; - } + /* Clamp bpp to 8 on screens without EDID 1.4 */ + if (connector->base.display_info.bpc == 0 && bpp > 24) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", + bpp); + pipe_config->pipe_bpp = 24; } } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 8a2c5fd0893e..cb4a95b75dd7 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -124,6 +124,12 @@ struct msm_drm_private { */ struct drm_mm mm; } vram; + + /* task holding struct_mutex.. currently only used in submit path + * to detect and reject faults from copy_from_user() for submit + * ioctl. + */ + struct task_struct *struct_mutex_task; }; struct msm_format { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 690d7e7b6d1e..f611b079a360 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -188,11 +188,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_gem_object *obj = vma->vm_private_data; struct drm_device *dev = obj->dev; + struct msm_drm_private *priv = dev->dev_private; struct page **pages; unsigned long pfn; pgoff_t pgoff; int ret; + /* This should only happen if userspace tries to pass a mmap'd + * but unfaulted gem bo vaddr into submit ioctl, triggering + * a page fault while struct_mutex is already held. This is + * not a valid use-case so just bail. 
+ */ + if (priv->struct_mutex_task == current) + return VM_FAULT_SIGBUS; + /* Make sure we don't parallel update on a fault, nor move or remove * something from beneath our feet */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index cd0554f68316..057951163126 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, return submit; } +static inline unsigned long __must_check +copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + return __copy_from_user_inatomic(to, from, n); + return -EFAULT; +} + static int submit_lookup_objects(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { @@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, int ret = 0; spin_lock(&file->table_lock); + pagefault_disable(); for (i = 0; i < args->nr_bos; i++) { struct drm_msm_gem_submit_bo submit_bo; @@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, void __user *userptr = to_user_ptr(args->bos + (i * sizeof(submit_bo))); - ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); - if (ret) { - ret = -EFAULT; - goto out_unlock; + ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); + if (unlikely(ret)) { + pagefault_enable(); + spin_unlock(&file->table_lock); + ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); + if (ret) + goto out; + spin_lock(&file->table_lock); + pagefault_disable(); } if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { @@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, } out_unlock: - submit->nr_bos = i; + pagefault_enable(); spin_unlock(&file->table_lock); +out: + submit->nr_bos = i; + return ret; } @@ -339,12 +356,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (args->nr_cmds > MAX_CMDS) return -EINVAL; - mutex_lock(&dev->struct_mutex); + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + priv->struct_mutex_task = current; submit = submit_create(dev, gpu, args->nr_bos); if (!submit) { ret = -ENOMEM; - goto out; + goto out_unlock; } ret = submit_lookup_objects(submit, args, file); @@ -422,6 +443,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out: if (submit) submit_cleanup(submit, !!ret); +out_unlock: + priv->struct_mutex_task = NULL; mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 279206997e5c..8f057ab38e27 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -45,6 +45,7 @@ static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; + bool optimus_flags_detected; acpi_handle dhandle; acpi_handle other_handle; acpi_handle rom_handle; @@ -58,9 +59,6 @@ bool nouveau_is_v1_dsm(void) { return nouveau_dsm_priv.dsm_detected; } -#define NOUVEAU_DSM_HAS_MUX 0x1 -#define NOUVEAU_DSM_HAS_OPT 0x2 - #ifdef CONFIG_VGA_SWITCHEROO static const char nouveau_dsm_muid[] = { 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, @@ -111,7 +109,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t * * requirements on the fourth parameter, so a private implementation * instead of using acpi_check_dsm(). 
*/ -static int nouveau_check_optimus_dsm(acpi_handle handle) +static int nouveau_dsm_get_optimus_functions(acpi_handle handle) { int result; @@ -126,7 +124,9 @@ static int nouveau_check_optimus_dsm(acpi_handle handle) * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. * If the n-th bit is enabled, function n is supported */ - return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); + if (result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS)) + return result; + return 0; } static int nouveau_dsm(acpi_handle handle, int func, int arg) @@ -213,27 +213,36 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = { .get_client_id = nouveau_dsm_get_client_id, }; -static int nouveau_dsm_pci_probe(struct pci_dev *pdev) +static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, + bool *has_mux, bool *has_opt, + bool *has_opt_flags) { acpi_handle dhandle; - int retval = 0; + bool supports_mux; + int optimus_funcs; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) - return false; + return; if (!acpi_has_method(dhandle, "_DSM")) { nouveau_dsm_priv.other_handle = dhandle; - return false; + return; } - if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, - 1 << NOUVEAU_DSM_POWER)) - retval |= NOUVEAU_DSM_HAS_MUX; + supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, + 1 << NOUVEAU_DSM_POWER); + optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle); - if (nouveau_check_optimus_dsm(dhandle)) - retval |= NOUVEAU_DSM_HAS_OPT; + /* Does not look like a Nvidia device. */ + if (!supports_mux && !optimus_funcs) + return; + + *dhandle_out = dhandle; + *has_mux = supports_mux; + *has_opt = !!optimus_funcs; + *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS); - if (retval & NOUVEAU_DSM_HAS_OPT) { + if (optimus_funcs) { uint32_t result; nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0, &result); @@ -242,10 +251,6 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "", (result & OPTIMUS_HDA_CODEC_MASK) ? 
"hda bios codec supported" : ""); } - if (retval) - nouveau_dsm_priv.dhandle = dhandle; - - return retval; } static bool nouveau_dsm_detect(void) @@ -253,11 +258,12 @@ static bool nouveau_dsm_detect(void) char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; - int has_dsm = 0; - int has_optimus = 0; + acpi_handle dhandle = NULL; + bool has_mux = false; + bool has_optimus = false; + bool has_optimus_flags = false; int vga_count = 0; bool guid_valid; - int retval; bool ret = false; /* lookup the MXM GUID */ @@ -270,32 +276,29 @@ static bool nouveau_dsm_detect(void) while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; - retval = nouveau_dsm_pci_probe(pdev); - if (retval & NOUVEAU_DSM_HAS_MUX) - has_dsm |= 1; - if (retval & NOUVEAU_DSM_HAS_OPT) - has_optimus = 1; + nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus, + &has_optimus_flags); } while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) { vga_count++; - retval = nouveau_dsm_pci_probe(pdev); - if (retval & NOUVEAU_DSM_HAS_MUX) - has_dsm |= 1; - if (retval & NOUVEAU_DSM_HAS_OPT) - has_optimus = 1; + nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus, + &has_optimus_flags); } /* find the optimus DSM or the old v1 DSM */ - if (has_optimus == 1) { + if (has_optimus) { + nouveau_dsm_priv.dhandle = dhandle; acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); nouveau_dsm_priv.optimus_detected = true; + nouveau_dsm_priv.optimus_flags_detected = has_optimus_flags; ret = true; - } else if (vga_count == 2 && has_dsm && guid_valid) { + } else if (vga_count == 2 && has_mux && guid_valid) { + nouveau_dsm_priv.dhandle = dhandle; acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", @@ -336,8 +339,9 @@ void nouveau_switcheroo_optimus_dsm(void) if (!nouveau_dsm_priv.optimus_detected) return; - nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, - 0x3, &result); + if (nouveau_dsm_priv.optimus_flags_detected) + nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, + 0x3, &result); nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 84a8b9fb20c8..9604d14e2094 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -472,7 +472,10 @@ nouveau_drm_unload(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - pm_runtime_get_sync(dev->dev); + if (nouveau_runtime_pm != 0) { + pm_runtime_get_sync(dev->dev); + } + nouveau_fbcon_fini(dev); nouveau_accel_fini(drm); nouveau_hwmon_fini(dev); diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index cf4ad5142f21..fba084558969 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -109,11 +109,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8)); OUT_RING(chan, (image->height << 16) | image->width); 
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); - dsize = ALIGN(image->width * image->height, 32) >> 5; + dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dsize) { int iter_len = dsize > 128 ? 128 : dsize; diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 3860b7443e90..e9f65ac0fb78 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING(chan, 0); OUT_RING(chan, image->dy); - dwords = ALIGN(image->width * image->height, 32) >> 5; + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index fff0a1a33df0..4327d8e85dfc 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING (chan, 0); OUT_RING (chan, image->dy); - dwords = ALIGN(image->width * image->height, 32) >> 5; + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 56e1d633875e..6e6c76080d6a 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -136,6 +136,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, * correctly globaly, since that would require * tracking all of our palettes. */ ret = qxl_bo_kmap(palette_bo, (void **)&pal); + if (ret) + return ret; pal->num_ents = 2; pal->unique = unique++; if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 6af1728c5c2b..d38799899ca9 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -119,6 +119,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: if (dig->backlight_level == 0) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); else { diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 0bf38157dfc3..dc86c87e01a0 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1128,7 +1128,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) le16_to_cpu(firmware_info->info.usReferenceClock); p1pll->reference_div = 0; - if (crev < 2) + if ((frev < 2) && (crev < 2)) p1pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); else @@ -1137,7 +1137,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) p1pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { + if (((frev < 2) && (crev >= 4)) || (frev >= 2)) { p1pll->lcd_pll_out_min = le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_min == 0) diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index a9fb0d016d38..ba95c4934c8d 100644 --- 
a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/delay.h> #include "radeon_acpi.h" @@ -256,6 +257,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) if (!info) return -EIO; kfree(info); + + /* 200ms delay is required after off */ + if (state == 0) + msleep(200); } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 508eed9f8071..e77ddbcc9d56 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1835,7 +1835,6 @@ radeon_add_atom_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2060,8 +2059,10 @@ radeon_add_atom_connector(struct drm_device *dev, } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; @@ -2137,7 +2138,6 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2222,10 +2222,13 @@ radeon_add_legacy_connector(struct drm_device *dev, } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->display_info.subpixel_order = subpixel_order; drm_sysfs_connector_add(connector); } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 821c86bc70c2..7a7a4af75630 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> +#include <linux/pm_runtime.h> #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <linux/efi.h> @@ -1465,6 +1466,9 @@ int radeon_device_init(struct radeon_device *rdev, return 0; failed: + /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */ + if (radeon_is_px(ddev)) + pm_runtime_put_noidle(ddev->dev); if (runtime) vga_switcheroo_fini_domain_pm_ops(rdev->dev); return r; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index b63094746b50..907ad15a8485 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -61,7 +61,9 @@ int radeon_driver_unload_kms(struct drm_device *dev) if (rdev->rmmio == NULL) goto done_free; - pm_runtime_get_sync(dev->dev); + if (radeon_is_px(dev)) { + pm_runtime_get_sync(dev->dev); + } radeon_acpi_fini(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c4675b9729c9..56ff46e68c54 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -232,8 +232,8 @@ static int radeon_move_blit(struct 
ttm_buffer_object *bo, rdev = radeon_get_rdev(bo->bdev); ridx = radeon_copy_ring_index(rdev); - old_start = old_mem->start << PAGE_SHIFT; - new_start = new_mem->start << PAGE_SHIFT; + old_start = (u64)old_mem->start << PAGE_SHIFT; + new_start = (u64)new_mem->start << PAGE_SHIFT; switch (old_mem->mem_type) { case TTM_PL_VRAM: diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 92d849e68f14..12527d69877c 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -3022,6 +3022,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if (rdev->pdev->device == 0x6811 && rdev->pdev->revision == 0x81) max_mclk = 120000; + /* limit sclk/mclk on Jet parts for stability */ + if (rdev->pdev->device == 0x6665 && + rdev->pdev->revision == 0xc3) { + max_sclk = 75000; + max_mclk = 80000; + } /* XXX validate the min clocks required for display */ diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 0cb92e347258..e0453b6a21a8 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -49,10 +49,26 @@ struct uhid_device { atomic_t report_done; atomic_t report_id; struct uhid_event report_buf; + struct work_struct worker; }; static struct miscdevice uhid_misc; +static void uhid_device_add_worker(struct work_struct *work) +{ + struct uhid_device *uhid = container_of(work, struct uhid_device, worker); + int ret; + + ret = hid_add_device(uhid->hid); + if (ret) { + hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); + + hid_destroy_device(uhid->hid); + uhid->hid = NULL; + uhid->running = false; + } +} + static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) { __u8 newhead; @@ -471,18 +487,14 @@ static int uhid_dev_create2(struct uhid_device *uhid, uhid->hid = hid; uhid->running = true; - ret = hid_add_device(hid); - if (ret) { - hid_err(hid, "Cannot register HID device\n"); - goto err_hid; - } + /* Adding of a HID device is done through a worker, to allow HID drivers + * which use feature requests during .probe to work, without they would + * be blocked on devlock, which is held by uhid_char_write. 
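
/*
 * Illustrative sketch (editor's addition): the deferral pattern the uhid
 * change here relies on -- run a registration that needs the creating
 * context to make progress from a workqueue, and make sure it has finished
 * (or never runs) before tear-down.  my_dev, my_register() etc. are
 * placeholder names.
 */
#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_dev {
	struct work_struct worker;
	/* ... */
};

static void my_register(struct my_dev *dev)
{
	/*
	 * Stands in for hid_add_device(): its probe path may issue requests
	 * that the creator has to answer, so it must not run synchronously
	 * from the creator while the creator's lock is held.
	 */
}

static void my_register_worker(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, worker);

	my_register(dev);	/* process context, outside the creator's locks */
}

static struct my_dev *my_dev_create(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	INIT_WORK(&dev->worker, my_register_worker);
	schedule_work(&dev->worker);		/* registration happens later */
	return dev;
}

static void my_dev_destroy(struct my_dev *dev)
{
	cancel_work_sync(&dev->worker);		/* no registration racing the free */
	kfree(dev);
}
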
+ */ + schedule_work(&uhid->worker); return 0; -err_hid: - hid_destroy_device(hid); - uhid->hid = NULL; - uhid->running = false; err_free: kfree(uhid->rd_data); return ret; @@ -499,6 +511,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid) atomic_set(&uhid->report_done, 1); wake_up_interruptible(&uhid->report_wait); + cancel_work_sync(&uhid->worker); + hid_destroy_device(uhid->hid); kfree(uhid->rd_data); @@ -567,6 +581,7 @@ static int uhid_char_open(struct inode *inode, struct file *file) init_waitqueue_head(&uhid->report_wait); uhid->running = false; atomic_set(&uhid->report_done, 1); + INIT_WORK(&uhid->worker, uhid_device_add_worker); file->private_data = uhid; nonseekable_open(inode, file); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 233da0b9f4b9..1f2fff71dbe3 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -33,6 +33,7 @@ #include <linux/hyperv.h> #include <linux/kernel_stat.h> #include <linux/cpu.h> +#include <linux/random.h> #include <asm/hyperv.h> #include <asm/hypervisor.h> #include <asm/mshyperv.h> @@ -795,6 +796,8 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c EXPORT_SYMBOL_GPL(__vmbus_driver_register); /** + + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); * vmbus_driver_unregister() - Unregister a vmbus's driver * @drv: Pointer to driver structure you want to un-register * diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index d9299dee37d1..dddaa161aadb 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c @@ -30,6 +30,7 @@ #define ADT7411_REG_CFG1 0x18 #define ADT7411_CFG1_START_MONITOR (1 << 0) +#define ADT7411_CFG1_RESERVED_BIT3 (1 << 3) #define ADT7411_REG_CFG2 0x19 #define ADT7411_CFG2_DISABLE_AVG (1 << 5) @@ -292,8 +293,10 @@ static int adt7411_probe(struct i2c_client *client, mutex_init(&data->device_lock); mutex_init(&data->update_lock); + /* According to the datasheet, we must only write 1 to bit 3 */ ret = adt7411_modify_bit(client, ADT7411_REG_CFG1, - ADT7411_CFG1_START_MONITOR, 1); + ADT7411_CFG1_RESERVED_BIT3 + | ADT7411_CFG1_START_MONITOR, 1); if (ret < 0) return ret; diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c index f7eccd682de9..599e620fd8da 100644 --- a/drivers/i2c/busses/i2c-efm32.c +++ b/drivers/i2c/busses/i2c-efm32.c @@ -427,7 +427,7 @@ static int efm32_i2c_probe(struct platform_device *pdev) ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata); if (ret < 0) { dev_err(&pdev->dev, "failed to request irq (%d)\n", ret); - return ret; + goto err_disable_clk; } ret = i2c_add_adapter(&ddata->adapter); diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index a44ea13d1434..85f33003c38b 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -777,13 +777,6 @@ static int pch_i2c_probe(struct pci_dev *pdev, /* Set the number of I2C channel instance */ adap_info->ch_num = id->driver_data; - ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, - KBUILD_MODNAME, adap_info); - if (ret) { - pch_pci_err(pdev, "request_irq FAILED\n"); - goto err_request_irq; - } - for (i = 0; i < adap_info->ch_num; i++) { pch_adap = &adap_info->pch_data[i].pch_adapter; adap_info->pch_i2c_suspended = false; @@ -800,6 +793,17 @@ static int pch_i2c_probe(struct pci_dev *pdev, adap_info->pch_data[i].pch_base_address = base_addr + 0x100 * i; pch_adap->dev.parent = &pdev->dev; + } + + ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, + KBUILD_MODNAME, adap_info); 
+ if (ret) { + pch_pci_err(pdev, "request_irq FAILED\n"); + goto err_request_irq; + } + + for (i = 0; i < adap_info->ch_num; i++) { + pch_adap = &adap_info->pch_data[i].pch_adapter; pch_i2c_init(&adap_info->pch_data[i]); diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index eb47c98131ec..d9e6738db72c 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -724,7 +724,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device) #ifdef CONFIG_PM_SLEEP static int qup_i2c_suspend(struct device *device) { - qup_i2c_pm_suspend_runtime(device); + if (!pm_runtime_suspended(device)) + return qup_i2c_pm_suspend_runtime(device); return 0; } diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 9bd4212782ab..f1b945509477 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -161,7 +161,7 @@ static int pca954x_select_chan(struct i2c_adapter *adap, /* Only select the channel if its different from the last channel */ if (data->last_chan != regval) { ret = pca954x_reg_write(adap, client, regval); - data->last_chan = regval; + data->last_chan = ret ? 0 : regval; } return ret; diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index d8738d4f8df3..6bf81d95a3f4 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev, if (ret < 0) goto error_ret; *val = ret; + ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C)); if (ret < 0) goto error_ret; + *val = 0; *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK]; ret = IIO_VAL_INT_PLUS_MICRO; break; diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index fa5d299d9222..e98515fc469f 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private) st->ts_bufferedmeasure = false; input_report_key(st->ts_input, BTN_TOUCH, 0); input_sync(st->ts_input); - } else if (status & AT91_ADC_EOC(3)) { - /* Conversion finished */ + } else if (status & AT91_ADC_EOC(3) && st->ts_input) { + /* Conversion finished and we've a touchscreen */ if (st->ts_bufferedmeasure) { /* * Last measurement is always discarded, since it can diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 9d2957696b23..222cdb1caad9 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -32,6 +32,7 @@ struct tiadc_device { struct ti_tscadc_dev *mfd_tscadc; + struct mutex fifo1_lock; /* to protect fifo access */ int channels; u8 channel_line[8]; u8 channel_step[8]; @@ -341,6 +342,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct tiadc_device *adc_dev = iio_priv(indio_dev); + int ret = IIO_VAL_INT; int i, map_val; unsigned int fifo1count, read, stepid; bool found = false; @@ -354,6 +356,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, if (!step_en) return -EINVAL; + mutex_lock(&adc_dev->fifo1_lock); fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); while (fifo1count--) tiadc_readl(adc_dev, REG_FIFO1); @@ -370,7 +373,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, if (time_after(jiffies, timeout)) { am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); - return -EAGAIN; + ret = -EAGAIN; + goto err_unlock; } } map_val = adc_dev->channel_step[chan->scan_index]; @@ -396,8 +400,11 @@ static int tiadc_read_raw(struct 
iio_dev *indio_dev, am335x_tsc_se_adc_done(adc_dev->mfd_tscadc); if (found == false) - return -EBUSY; - return IIO_VAL_INT; + ret = -EBUSY; + +err_unlock: + mutex_unlock(&adc_dev->fifo1_lock); + return ret; } static const struct iio_info tiadc_info = { @@ -444,6 +451,7 @@ static int tiadc_probe(struct platform_device *pdev) tiadc_step_config(indio_dev); tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD); + mutex_init(&adc_dev->fifo1_lock); err = tiadc_channel_init(indio_dev, adc_dev->channels); if (err < 0) diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index 403dd3d8986e..b321a253eba8 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -56,8 +56,8 @@ struct { {HID_USAGE_SENSOR_ALS, 0, 1, 0}, {HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0}, - {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0}, - {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0}, + {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0}, + {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000}, }; static int pow_10(unsigned power) diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 7995c68c2a34..230cbdda6ce1 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -404,9 +404,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); case IIO_VAL_FRACTIONAL: tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]); - vals[1] = do_div(tmp, 1000000000LL); - vals[0] = tmp; - return sprintf(buf, "%d.%09u\n", vals[0], vals[1]); + vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]); + return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1])); case IIO_VAL_FRACTIONAL_LOG2: tmp = (s64)vals[0] * 1000000000LL >> vals[1]; vals[1] = do_div(tmp, 1000000000LL); diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index 37c1bb2afd08..758c14703ad8 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c @@ -389,7 +389,7 @@ static int as3935_probe(struct spi_device *spi) return ret; } - ret = iio_triggered_buffer_setup(indio_dev, NULL, + ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time, &as3935_trigger_handler, NULL); if (ret) { diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index d2360a8ef0b2..180d7f436ed5 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -106,7 +106,6 @@ struct mcast_group { atomic_t refcount; enum mcast_group_state state; struct ib_sa_query *query; - int query_id; u16 pkey_index; u8 leave_state; int retries; @@ -339,11 +338,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member) member->multicast.comp_mask, 3000, GFP_KERNEL, join_handler, group, &group->query); - if (ret >= 0) { - group->query_id = ret; - ret = 0; - } - return ret; + return (ret > 0) ? 0 : ret; } static int send_leave(struct mcast_group *group, u8 leave_state) @@ -363,11 +358,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state) IB_SA_MCMEMBER_REC_JOIN_STATE, 3000, GFP_KERNEL, leave_handler, group, &group->query); - if (ret >= 0) { - group->query_id = ret; - ret = 0; - } - return ret; + return (ret > 0) ? 
0 : ret; } static void join_group(struct mcast_group *group, struct mcast_member *member, diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 1cc83abb9b44..841cdc1554f5 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1062,6 +1062,27 @@ void handle_port_mgmt_change_event(struct work_struct *work) /* Generate GUID changed event */ if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) { + if (mlx4_is_master(dev->dev)) { + union ib_gid gid; + int err = 0; + + if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix) + err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1); + else + gid.global.subnet_prefix = + eqe->event.port_mgmt_change.params.port_info.gid_prefix; + if (err) { + pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n", + port, err); + } else { + pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n", + port, + (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix), + be64_to_cpu(gid.global.subnet_prefix)); + atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix, + be64_to_cpu(gid.global.subnet_prefix)); + } + } mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE); /*if master, notify all slaves*/ if (mlx4_is_master(dev->dev)) @@ -2134,6 +2155,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) if (err) goto demux_err; dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id; + atomic64_set(&dev->sriov.demux[i].subnet_prefix, + be64_to_cpu(gid.global.subnet_prefix)); err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, &dev->sriov.sqps[i]); if (err) diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index a0559a8af4f4..0948963a7493 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -485,7 +485,7 @@ static u8 get_leave_state(struct mcast_group *group) if (!group->members[i]) leave_state |= (1 << i); - return leave_state & (group->rec.scope_join_state & 7); + return leave_state & (group->rec.scope_join_state & 0xf); } static int join_group(struct mcast_group *group, int slave, u8 join_mask) @@ -560,8 +560,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) } else mcg_warn_group(group, "DRIVER BUG\n"); } else if (group->state == MCAST_LEAVE_SENT) { - if (group->rec.scope_join_state & 7) - group->rec.scope_join_state &= 0xf8; + if (group->rec.scope_join_state & 0xf) + group->rec.scope_join_state &= 0xf0; group->state = MCAST_IDLE; mutex_unlock(&group->lock); if (release_group(group, 1)) @@ -601,7 +601,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask, static int handle_join_req(struct mcast_group *group, u8 join_mask, struct mcast_req *req) { - u8 group_join_state = group->rec.scope_join_state & 7; + u8 group_join_state = group->rec.scope_join_state & 0xf; int ref = 0; u16 status; struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; @@ -686,8 +686,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work) u8 cur_join_state; resp_join_state = ((struct ib_sa_mcmember_data *) - group->response_sa_mad.data)->scope_join_state & 7; - cur_join_state = group->rec.scope_join_state & 7; + group->response_sa_mad.data)->scope_join_state & 0xf; + cur_join_state = group->rec.scope_join_state & 0xf; if (method == IB_MGMT_METHOD_GET_RESP) { /* successfull join */ @@ -706,7 +706,7 @@ process_requests: req = list_first_entry(&group->pending_list, struct mcast_req, group_list); sa_data = 
(struct ib_sa_mcmember_data *)req->sa_mad.data; - req_join_state = sa_data->scope_join_state & 0x7; + req_join_state = sa_data->scope_join_state & 0xf; /* For a leave request, we will immediately answer the VF, and * update our internal counters. The actual leave will be sent diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 369da3ca5d64..5b6b3a3cf9a9 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -417,7 +417,7 @@ struct mlx4_ib_demux_ctx { struct workqueue_struct *wq; struct workqueue_struct *ud_wq; spinlock_t ud_lock; - __be64 subnet_prefix; + atomic64_t subnet_prefix; __be64 guid_cache[128]; struct mlx4_ib_dev *dev; /* the following lock protects both mcg_table and mcg_mgid0_list */ diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 5fe3646b248f..6f50a21331ed 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -2149,24 +2149,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, sqp->ud_header.grh.flow_label = ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; - if (is_eth) + if (is_eth) { memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); - else { - if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { - /* When multi-function is enabled, the ib_core gid - * indexes don't necessarily match the hw ones, so - * we must use our own cache */ - sqp->ud_header.grh.source_gid.global.subnet_prefix = - to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. - subnet_prefix; - sqp->ud_header.grh.source_gid.global.interface_id = - to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. - guid_cache[ah->av.ib.gid_index]; - } else - ib_get_cached_gid(ib_dev, - be32_to_cpu(ah->av.ib.port_pd) >> 24, - ah->av.ib.gid_index, - &sqp->ud_header.grh.source_gid); + } else { + if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { + /* When multi-function is enabled, the ib_core gid + * indexes don't necessarily match the hw ones, so + * we must use our own cache + */ + sqp->ud_header.grh.source_gid.global.subnet_prefix = + cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov. + demux[sqp->qp.port - 1]. + subnet_prefix))); + sqp->ud_header.grh.source_gid.global.interface_id = + to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. 
+ guid_cache[ah->av.ib.gid_index]; + } else { + ib_get_cached_gid(ib_dev, + be32_to_cpu(ah->av.ib.port_pd) >> 24, + ah->av.ib.gid_index, + &sqp->ud_header.grh.source_gid); + } } memcpy(sqp->ud_header.grh.destination_gid.raw, ah->av.ib.dgid, 16); diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 2a7c7dc28726..51f5edc9eaf6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -463,6 +463,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_ah *address, u32 qpn); void ipoib_reap_ah(struct work_struct *work); +struct ipoib_path *__path_find(struct net_device *dev, void *gid); void ipoib_mark_paths_invalid(struct net_device *dev); void ipoib_flush_paths(struct net_device *dev); struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 105eebd77ad4..9bf0c8f59d54 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1303,6 +1303,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) } } +#define QPN_AND_OPTIONS_OFFSET 4 + static void ipoib_cm_tx_start(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, @@ -1311,6 +1313,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) struct ipoib_neigh *neigh; struct ipoib_cm_tx *p; unsigned long flags; + struct ipoib_path *path; int ret; struct ib_sa_path_rec pathrec; @@ -1323,7 +1326,19 @@ static void ipoib_cm_tx_start(struct work_struct *work) p = list_entry(priv->cm.start_list.next, typeof(*p), list); list_del_init(&p->list); neigh = p->neigh; + qpn = IPOIB_QPN(neigh->daddr); + /* + * As long as the search is with these 2 locks, + * path existence indicates its validity. + */ + path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET); + if (!path) { + pr_info("%s ignore not valid path %pI6\n", + __func__, + neigh->daddr + QPN_AND_OPTIONS_OFFSET); + goto free_neigh; + } memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); spin_unlock_irqrestore(&priv->lock, flags); @@ -1335,6 +1350,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) spin_lock_irqsave(&priv->lock, flags); if (ret) { +free_neigh: neigh = p->neigh; if (neigh) { neigh->cm = NULL; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 6a7003ddb0be..fcbfdda0e585 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1030,8 +1030,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, } if (level == IPOIB_FLUSH_LIGHT) { + int oper_up; ipoib_mark_paths_invalid(dev); + /* Set IPoIB operation as down to prevent races between: + * the flush flow which leaves MCG and on the fly joins + * which can happen during that time. mcast restart task + * should deal with join requests we missed. 
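
/*
 * Illustrative sketch (editor's addition): the flag handling this comment
 * describes (the hunk continues below) -- drop an "operational" bit so
 * concurrent join attempts back off during the flush, then restore it only
 * if it was set beforehand.  MY_FLAG_OPER_UP and my_mcast_flush() are
 * placeholder names.
 */
#include <linux/bitops.h>

#define MY_FLAG_OPER_UP	0

static void my_mcast_flush(void)
{
	/* stands in for ipoib_mcast_dev_flush() */
}

static void flush_without_racing_joins(unsigned long *flags)
{
	int oper_up = test_and_clear_bit(MY_FLAG_OPER_UP, flags);

	my_mcast_flush();		/* joins started now see the bit cleared */

	if (oper_up)			/* leave it cleared if it was already down */
		set_bit(MY_FLAG_OPER_UP, flags);
}
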
+ */ + oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); ipoib_mcast_dev_flush(dev); + if (oper_up) + set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); } if (level >= IPOIB_FLUSH_NORMAL) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 7e4a6efbf2ce..dbfe8f1e6d7e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -253,7 +253,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf) return -EINVAL; } -static struct ipoib_path *__path_find(struct net_device *dev, void *gid) +struct ipoib_path *__path_find(struct net_device *dev, void *gid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->path_tree.rb_node; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index e2c655964711..a137a3738128 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -883,6 +883,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_endpoint_descriptor *ep_irq_in; int i, error; + if (intf->cur_altsetting->desc.bNumEndpoints != 2) + return -ENODEV; + for (i = 0; xpad_device[i].idVendor; i++) { if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) && (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct)) diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index 9757a58bc897..5d39b966dd28 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc) /* Reset the KBC controller to clear all previous status.*/ reset_control_assert(kbc->rst); udelay(100); - reset_control_assert(kbc->rst); + reset_control_deassert(kbc->rst); udelay(100); tegra_kbc_config_pins(kbc); diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index eb796fff9e62..9a0cc5b5561a 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1230,6 +1230,7 @@ static int __init i8042_create_kbd_port(void) serio->start = i8042_start; serio->stop = i8042_stop; serio->close = i8042_port_close; + serio->ps2_cmd_mutex = &i8042_mutex; serio->port_data = port; serio->dev.parent = &i8042_platform_device->dev; strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name)); @@ -1257,6 +1258,7 @@ static int __init i8042_create_aux_port(int idx) serio->write = i8042_aux_write; serio->start = i8042_start; serio->stop = i8042_stop; + serio->ps2_cmd_mutex = &i8042_mutex; serio->port_data = port; serio->dev.parent = &i8042_platform_device->dev; if (idx < 0) { @@ -1321,21 +1323,6 @@ static void i8042_unregister_ports(void) } } -/* - * Checks whether port belongs to i8042 controller. 
- */ -bool i8042_check_port_owner(const struct serio *port) -{ - int i; - - for (i = 0; i < I8042_NUM_PORTS; i++) - if (i8042_ports[i].serio == port) - return true; - - return false; -} -EXPORT_SYMBOL(i8042_check_port_owner); - static void i8042_free_irqs(void) { if (i8042_aux_irq_registered) diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..ded0c6f65c9f 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -56,19 +56,17 @@ EXPORT_SYMBOL(ps2_sendbyte); void ps2_begin_command(struct ps2dev *ps2dev) { - mutex_lock(&ps2dev->cmd_mutex); + struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; - if (i8042_check_port_owner(ps2dev->serio)) - i8042_lock_chip(); + mutex_lock(m); } EXPORT_SYMBOL(ps2_begin_command); void ps2_end_command(struct ps2dev *ps2dev) { - if (i8042_check_port_owner(ps2dev->serio)) - i8042_unlock_chip(); + struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; - mutex_unlock(&ps2dev->cmd_mutex); + mutex_unlock(m); } EXPORT_SYMBOL(ps2_end_command); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 3b260c67db75..af7bae7b4daf 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1829,7 +1829,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || - !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || + !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || @@ -1854,7 +1854,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cache *ca) { char name[BDEVNAME_SIZE]; - const char *err = NULL; + const char *err = NULL; /* must be set for any error case */ int ret = 0; memcpy(&ca->sb, sb, sizeof(struct cache_sb)); @@ -1871,8 +1871,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, ca->discard = CACHE_DISCARD(&ca->sb); ret = cache_alloc(sb, ca); - if (ret != 0) + if (ret != 0) { + if (ret == -ENOMEM) + err = "cache_alloc(): -ENOMEM"; + else + err = "cache_alloc(): unknown error"; goto err; + } if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { err = "error calling kobject_add"; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index f87223056696..34e7db1e7304 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1400,7 +1400,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) unsigned i; int err; - cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), + cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), GFP_KERNEL); if (!cc->tfms) return -ENOMEM; diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index b257e46876d3..0f5e1820c92d 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -287,10 +287,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) pb->bio_submitted = true; /* - * Map reads as normal. + * Map reads as normal only if corrupt_bio_byte set. */ - if (bio_data_dir(bio) == READ) - goto map_bio; + if (bio_data_dir(bio) == READ) { + /* If flags were specified, only corrupt those that match. 
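
/*
 * Illustrative sketch (editor's addition): the read-path decision the
 * flakey_map() hunk here implements.  The struct below is a toy stand-in for
 * dm-flakey's private context, not its real definition.
 */
#include <linux/types.h>
#include <linux/fs.h>		/* READ */

struct toy_flakey {
	unsigned int corrupt_bio_byte;	/* 0 = no read corruption configured */
	unsigned int corrupt_bio_rw;	/* READ or WRITE */
};

/*
 * During the "down" interval a READ is only passed through if it will be
 * corrupted on completion; otherwise the target fails it with -EIO so a
 * read can never return clean data while the device is meant to be flaky.
 */
static bool map_read_during_down(const struct toy_flakey *fc, bool flags_match)
{
	return fc->corrupt_bio_byte && fc->corrupt_bio_rw == READ && flags_match;
}
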
*/ + if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && + all_corrupt_bio_flags_match(bio, fc)) + goto map_bio; + else + return -EIO; + } /* * Drop writes? @@ -328,12 +334,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) /* * Corrupt successful READs while in down state. - * If flags were specified, only corrupt those that match. */ - if (fc->corrupt_bio_byte && !error && pb->bio_submitted && - (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && - all_corrupt_bio_flags_match(bio, fc)) - corrupt_bio_data(bio, fc); + if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { + if (fc->corrupt_bio_byte) + corrupt_bio_data(bio, fc); + else + return -EIO; + } return error; } diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c index a5712cd7c65f..7df7fb3738a0 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.c +++ b/drivers/media/dvb-core/dvb_ringbuffer.c @@ -55,7 +55,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len) int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf) { - return (rbuf->pread==rbuf->pwrite); + /* smp_load_acquire() to load write pointer on reader side + * this pairs with smp_store_release() in dvb_ringbuffer_write(), + * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() + * + * for memory barriers also see Documentation/circular-buffers.txt + */ + return (rbuf->pread == smp_load_acquire(&rbuf->pwrite)); } @@ -64,7 +70,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf) { ssize_t free; - free = rbuf->pread - rbuf->pwrite; + /* ACCESS_ONCE() to load read pointer on writer side + * this pairs with smp_store_release() in dvb_ringbuffer_read(), + * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), + * or dvb_ringbuffer_reset() + */ + free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite; if (free <= 0) free += rbuf->size; return free-1; @@ -76,7 +87,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf) { ssize_t avail; - avail = rbuf->pwrite - rbuf->pread; + /* smp_load_acquire() to load write pointer on reader side + * this pairs with smp_store_release() in dvb_ringbuffer_write(), + * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() + */ + avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread; if (avail < 0) avail += rbuf->size; return avail; @@ -86,14 +101,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf) void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf) { - rbuf->pread = rbuf->pwrite; + /* dvb_ringbuffer_flush() counts as read operation + * smp_load_acquire() to load write pointer + * smp_store_release() to update read pointer, this ensures that the + * correct pointer is visible for subsequent dvb_ringbuffer_free() + * calls on other cpu cores + */ + smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite)); rbuf->error = 0; } EXPORT_SYMBOL(dvb_ringbuffer_flush); void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf) { - rbuf->pread = rbuf->pwrite = 0; + /* dvb_ringbuffer_reset() counts as read and write operation + * smp_store_release() to update read pointer + */ + smp_store_release(&rbuf->pread, 0); + /* smp_store_release() to update write pointer */ + smp_store_release(&rbuf->pwrite, 0); rbuf->error = 0; } @@ -119,12 +145,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si return -EFAULT; buf += split; todo -= split; - rbuf->pread = 0; + /* smp_store_release() for read pointer update to ensure + * that buf is not overwritten until read is complete, + * 
this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() + */ + smp_store_release(&rbuf->pread, 0); } if (copy_to_user(buf, rbuf->data+rbuf->pread, todo)) return -EFAULT; - rbuf->pread = (rbuf->pread + todo) % rbuf->size; + /* smp_store_release() to update read pointer, see above */ + smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); return len; } @@ -139,11 +170,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len) memcpy(buf, rbuf->data+rbuf->pread, split); buf += split; todo -= split; - rbuf->pread = 0; + /* smp_store_release() for read pointer update to ensure + * that buf is not overwritten until read is complete, + * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() + */ + smp_store_release(&rbuf->pread, 0); } memcpy(buf, rbuf->data+rbuf->pread, todo); - rbuf->pread = (rbuf->pread + todo) % rbuf->size; + /* smp_store_release() to update read pointer, see above */ + smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); } @@ -158,10 +194,47 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t memcpy(rbuf->data+rbuf->pwrite, buf, split); buf += split; todo -= split; - rbuf->pwrite = 0; + /* smp_store_release() for write pointer update to ensure that + * written data is visible on other cpu cores before the pointer + * update, this pairs with smp_load_acquire() in + * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() + */ + smp_store_release(&rbuf->pwrite, 0); } memcpy(rbuf->data+rbuf->pwrite, buf, todo); - rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size; + /* smp_store_release() for write pointer update, see above */ + smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); + + return len; +} + +ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, + const u8 __user *buf, size_t len) +{ + int status; + size_t todo = len; + size_t split; + + split = (rbuf->pwrite + len > rbuf->size) ? 
rbuf->size - rbuf->pwrite : 0; + + if (split > 0) { + status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split); + if (status) + return len - todo; + buf += split; + todo -= split; + /* smp_store_release() for write pointer update to ensure that + * written data is visible on other cpu cores before the pointer + * update, this pairs with smp_load_acquire() in + * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() + */ + smp_store_release(&rbuf->pwrite, 0); + } + status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo); + if (status) + return len - todo; + /* smp_store_release() for write pointer update, see above */ + smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); return len; } @@ -297,3 +370,4 @@ EXPORT_SYMBOL(dvb_ringbuffer_flush_spinlock_wakeup); EXPORT_SYMBOL(dvb_ringbuffer_read_user); EXPORT_SYMBOL(dvb_ringbuffer_read); EXPORT_SYMBOL(dvb_ringbuffer_write); +EXPORT_SYMBOL(dvb_ringbuffer_write_user); diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h index 41f04dae69b6..9e1e11b7c39c 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.h +++ b/drivers/media/dvb-core/dvb_ringbuffer.h @@ -133,6 +133,8 @@ extern void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, */ extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t len); +extern ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, + const u8 __user *buf, size_t len); /** diff --git a/drivers/media/pci/ngene/ngene-dvb.c b/drivers/media/pci/ngene/ngene-dvb.c index fcb16a615aab..3263d71b4808 100644 --- a/drivers/media/pci/ngene/ngene-dvb.c +++ b/drivers/media/pci/ngene/ngene-dvb.c @@ -59,7 +59,7 @@ static ssize_t ts_write(struct file *file, const char *buf, (&dev->tsout_rbuf) >= count) < 0) return 0; - dvb_ringbuffer_write(&dev->tsout_rbuf, buf, count); + dvb_ringbuffer_write_user(&dev->tsout_rbuf, buf, count); return count; } diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 41723180d10c..19c25e0245d1 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1003,6 +1003,11 @@ static int match_child(struct device *dev, void *data) return !strcmp(dev_name(dev), (char *)data); } +static void s5p_mfc_memdev_release(struct device *dev) +{ + dma_release_declared_memory(dev); +} + static void *mfc_get_drv_data(struct platform_device *pdev); static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) @@ -1015,6 +1020,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) mfc_err("Not enough memory\n"); return -ENOMEM; } + + dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l"); + dev->mem_dev_l->release = s5p_mfc_memdev_release; device_initialize(dev->mem_dev_l); of_property_read_u32_array(dev->plat_dev->dev.of_node, "samsung,mfc-l", mem_info, 2); @@ -1032,6 +1040,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) mfc_err("Not enough memory\n"); return -ENOMEM; } + + dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r"); + dev->mem_dev_r->release = s5p_mfc_memdev_release; device_initialize(dev->mem_dev_r); of_property_read_u32_array(dev->plat_dev->dev.of_node, "samsung,mfc-r", mem_info, 2); diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index b58d4ebf6419..6c1737828d5f 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -501,9 +501,8 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap, int addr, rc, i; u8 reg; - rc = 
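
/*
 * Illustrative sketch (editor's addition): the acquire/release pairing the
 * dvb_ringbuffer comments above describe, reduced to a minimal
 * single-producer/single-consumer byte ring.  tiny_ring is a toy type, not
 * the dvb code; see Documentation/circular-buffers.txt for the full rules.
 */
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct tiny_ring {
	unsigned int pread;	/* advanced only by the consumer */
	unsigned int pwrite;	/* advanced only by the producer */
	unsigned int size;
	u8 *data;
};

/* Producer: publish the data before publishing the new write pointer. */
static bool tiny_ring_put(struct tiny_ring *r, u8 byte)
{
	unsigned int pwrite = r->pwrite;

	if ((pwrite + 1) % r->size == ACCESS_ONCE(r->pread))
		return false;				/* full */

	r->data[pwrite] = byte;
	smp_store_release(&r->pwrite, (pwrite + 1) % r->size);
	return true;
}

/* Consumer: acquire the write pointer before reading the data it covers. */
static bool tiny_ring_get(struct tiny_ring *r, u8 *byte)
{
	unsigned int pread = r->pread;

	if (pread == smp_load_acquire(&r->pwrite))
		return false;				/* empty */

	*byte = r->data[pread];
	smp_store_release(&r->pread, (pread + 1) % r->size);
	return true;
}
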
rt_mutex_trylock(&dev->i2c_bus_lock); - if (rc < 0) - return rc; + if (!rt_mutex_trylock(&dev->i2c_bus_lock)) + return -EAGAIN; /* Switch I2C bus if needed */ if (bus != dev->cur_i2c_bus && diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index 0dd73a9caf32..354f5a7ae887 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -1537,13 +1537,6 @@ static int usbvision_probe(struct usb_interface *intf, printk(KERN_INFO "%s: %s found\n", __func__, usbvision_device_data[model].model_string); - /* - * this is a security check. - * an exploit using an incorrect bInterfaceNumber is known - */ - if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum]) - return -ENODEV; - if (usbvision_device_data[model].interface >= 0) interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; else if (ifnum < dev->actconfig->desc.bNumInterfaces) diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c51e164dfae0..619c67b71ecb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1659,8 +1659,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, packed_cmd_hdr = packed->cmd_hdr; memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); - packed_cmd_hdr[0] = (packed->nr_entries << 16) | - (PACKED_CMD_WR << 8) | PACKED_CMD_VER; + packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) | + (PACKED_CMD_WR << 8) | PACKED_CMD_VER); hdr_blocks = mmc_large_sector(card) ? 8 : 1; /* @@ -1674,14 +1674,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* Argument of CMD23 */ - packed_cmd_hdr[(i * 2)] = + packed_cmd_hdr[(i * 2)] = cpu_to_le32( (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | - blk_rq_sectors(prq); + blk_rq_sectors(prq)); /* Argument of CMD18 or CMD25 */ - packed_cmd_hdr[((i * 2)) + 1] = + packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32( mmc_card_blockaddr(card) ? 
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9; + blk_rq_pos(prq) : blk_rq_pos(prq) << 9); packed->blocks += blk_rq_sectors(prq); i++; } diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c index 744ca5cacc9b..f9fa3fad728e 100644 --- a/drivers/mtd/maps/pmcmsp-flash.c +++ b/drivers/mtd/maps/pmcmsp-flash.c @@ -75,15 +75,15 @@ static int __init init_msp_flash(void) printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); - msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); + msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL); if (!msp_flash) return -ENOMEM; - msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); + msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL); if (!msp_parts) goto free_msp_flash; - msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); + msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL); if (!msp_maps) goto free_msp_parts; diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index b922c8efcf40..0ba96f9e1aa5 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -241,6 +241,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode) unsigned long flags; u32 val; + /* Reset ECC hardware */ + davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET); + spin_lock_irqsave(&davinci_nand_lock, flags); /* Start 4-bit ECC calculation for read/write */ diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 4f3e80c68a26..529f42a8d7f8 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2389,7 +2389,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, int cached = writelen > bytes && page != blockmask; uint8_t *wbuf = buf; int use_bufpoi; - int part_pagewr = (column || writelen < (mtd->writesize - 1)); + int part_pagewr = (column || writelen < mtd->writesize); if (part_pagewr) use_bufpoi = 1; diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index 21841fe25ad3..d86742a0fefd 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -807,6 +807,7 @@ out_unlock: * @pnum: the physical eraseblock number * @vid: The volume ID of the found volume will be stored in this pointer * @sqnum: The sqnum of the found volume will be stored in this pointer + * @fast: true if we're scanning for a Fastmap * * This function reads UBI headers of PEB @pnum, checks them, and adds * information about this PEB to the corresponding list or RB-tree in the @@ -814,7 +815,7 @@ out_unlock: * successfully handled and a negative error code in case of failure. */ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, - int pnum, int *vid, unsigned long long *sqnum) + int pnum, int *vid, unsigned long long *sqnum, bool fast) { long long uninitialized_var(ec); int err, bitflips = 0, vol_id = -1, ec_err = 0; @@ -931,6 +932,20 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, */ ai->maybe_bad_peb_count += 1; case UBI_IO_BAD_HDR: + /* + * If we're facing a bad VID header we have to drop *all* + * Fastmap data structures we find. The most recent Fastmap + * could be bad and therefore there is a chance that we attach + * from an old one. On a fine MTD stack a PEB must not render + * bad all of a sudden, but the reality is different. + * So, let's be paranoid and help finding the root cause by + * falling back to scanning mode instead of attaching with a + * bad EBA table and cause data corruption which is hard to + * analyze. 
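
/*
 * Illustrative sketch (editor's addition): the fallback this comment and the
 * hunks just below add.  When the pre-scan for a Fastmap anchor hits a bad
 * VID header, attach gives up on Fastmap and does a full scan instead of
 * trusting a possibly stale map.  Simplified, as if inside
 * drivers/mtd/ubi/attach.c; choose_attach_mode() is a made-up name.
 */
static int choose_attach_mode(struct ubi_device *ubi,
			      struct ubi_attach_info *ai, int fm_anchor)
{
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;	/* no anchor PEB found at all */

	if (ai->force_full_scan)	/* set by scan_peb(..., fast=true) */
		return UBI_NO_FASTMAP;	/* bad VID header seen: full scan */

	return ubi_scan_fastmap(ubi, ai, fm_anchor);
}
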
+ */ + if (fast) + ai->force_full_scan = 1; + if (ec_err) /* * Both headers are corrupted. There is a possibility @@ -1243,7 +1258,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai, cond_resched(); dbg_gen("process PEB %d", pnum); - err = scan_peb(ubi, ai, pnum, NULL, NULL); + err = scan_peb(ubi, ai, pnum, NULL, NULL, false); if (err < 0) goto out_vidh; } @@ -1330,7 +1345,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai) cond_resched(); dbg_gen("process PEB %d", pnum); - err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum); + err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum, true); if (err < 0) goto out_vidh; @@ -1346,7 +1361,11 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai) if (fm_anchor < 0) return UBI_NO_FASTMAP; - return ubi_scan_fastmap(ubi, ai, fm_anchor); + if (ai->force_full_scan) + return UBI_NO_FASTMAP; + else + return ubi_scan_fastmap(ubi, ai, fm_anchor); + out_vidh: ubi_free_vid_hdr(ubi, vidh); diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 6e30a3c280d0..c3db383a9000 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -999,6 +999,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, goto out_detach; } + /* Make device "available" before it becomes accessible via sysfs */ + ubi_devices[ubi_num] = ubi; + err = uif_init(ubi, &ref); if (err) goto out_detach; @@ -1043,7 +1046,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, wake_up_process(ubi->bgt_thread); spin_unlock(&ubi->wl_lock); - ubi_devices[ubi_num] = ubi; ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); return ubi_num; @@ -1054,6 +1056,7 @@ out_uif: ubi_assert(ref); uif_close(ubi); out_detach: + ubi_devices[ubi_num] = NULL; ubi_wl_close(ubi); ubi_free_internal_volumes(ubi); vfree(ubi->vtbl); diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index c1f1087b6939..92774b7c6230 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -670,6 +670,8 @@ struct ubi_ainf_volume { * @vols_found: number of volumes found * @highest_vol_id: highest volume ID * @is_empty: flag indicating whether the MTD device is empty or not + * @force_full_scan: flag indicating whether we need to do a full scan and drop + all existing Fastmap data structures * @min_ec: lowest erase counter value * @max_ec: highest erase counter value * @max_sqnum: highest sequence number value @@ -696,6 +698,7 @@ struct ubi_attach_info { int vols_found; int highest_vol_id; int is_empty; + int force_full_scan; int min_ec; int max_ec; unsigned long long max_sqnum; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 8330703c098f..96131eb34c9f 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -534,13 +534,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) spin_unlock(&ubi->volumes_lock); } - /* Change volume table record */ - vtbl_rec = ubi->vtbl[vol_id]; - vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); - if (err) - goto out_acc; - if (pebs < 0) { for (i = 0; i < -pebs; i++) { err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); @@ -558,6 +551,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) spin_unlock(&ubi->volumes_lock); } + /* + * When we shrink a volume we have to flush all pending (erase) work. + * Otherwise it can happen that upon next attach UBI finds a LEB with + * lnum > highest_lnum and refuses to attach. 
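
/*
 * Illustrative sketch (editor's addition): the ordering described here, with
 * error handling trimmed and as if inside drivers/mtd/ubi/vmt.c.  Unmap the
 * LEBs being dropped, flush the erase work they queued, and only then write
 * the smaller reserved_pebs count to the volume table;
 * update_vtbl_reserved_pebs() is a made-up wrapper around
 * ubi_change_vtbl_record().
 */
static int shrink_volume(struct ubi_device *ubi, struct ubi_volume *vol,
			 int new_reserved_pebs)
{
	int i, err;

	for (i = new_reserved_pebs; i < vol->reserved_pebs; i++) {
		err = ubi_eba_unmap_leb(ubi, vol, i);	/* queues erase work */
		if (err)
			return err;
	}

	err = ubi_wl_flush(ubi, vol->vol_id, UBI_ALL);	/* erases really done */
	if (err)
		return err;

	/* Only now is it safe to publish the smaller size on flash. */
	return update_vtbl_reserved_pebs(ubi, vol, new_reserved_pebs);
}
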
+ */ + if (pebs < 0) { + err = ubi_wl_flush(ubi, vol_id, UBI_ALL); + if (err) + goto out_acc; + } + + /* Change volume table record */ + vtbl_rec = ubi->vtbl[vol_id]; + vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); + if (err) + goto out_acc; + vol->reserved_pebs = reserved_pebs; if (vol->vol_type == UBI_DYNAMIC_VOLUME) { vol->used_ebs = reserved_pebs; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index edb3b86eb118..e47ad8a789ca 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -21,6 +21,7 @@ #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/if_arp.h> +#include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/skb.h> @@ -392,9 +393,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb); /* * CAN device restart for bus-off recovery */ -static void can_restart(unsigned long data) +static void can_restart(struct net_device *dev) { - struct net_device *dev = (struct net_device *)data; struct can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; @@ -434,6 +434,14 @@ restart: netdev_err(dev, "Error %d during restart", err); } +static void can_restart_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); + + can_restart(priv->dev); +} + int can_restart_now(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -447,8 +455,8 @@ int can_restart_now(struct net_device *dev) if (priv->state != CAN_STATE_BUS_OFF) return -EBUSY; - /* Runs as soon as possible in the timer context */ - mod_timer(&priv->restart_timer, jiffies); + cancel_delayed_work_sync(&priv->restart_work); + can_restart(dev); return 0; } @@ -470,8 +478,8 @@ void can_bus_off(struct net_device *dev) priv->can_stats.bus_off++; if (priv->restart_ms) - mod_timer(&priv->restart_timer, - jiffies + (priv->restart_ms * HZ) / 1000); + schedule_delayed_work(&priv->restart_work, + msecs_to_jiffies(priv->restart_ms)); } EXPORT_SYMBOL_GPL(can_bus_off); @@ -578,6 +586,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) return NULL; priv = netdev_priv(dev); + priv->dev = dev; if (echo_skb_max) { priv->echo_skb_max = echo_skb_max; @@ -587,7 +596,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) priv->state = CAN_STATE_STOPPED; - init_timer(&priv->restart_timer); + INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); return dev; } @@ -662,8 +671,6 @@ int open_candev(struct net_device *dev) if (!netif_carrier_ok(dev)) netif_carrier_on(dev); - setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); - return 0; } EXPORT_SYMBOL_GPL(open_candev); @@ -678,7 +685,7 @@ void close_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - del_timer_sync(&priv->restart_timer); + cancel_delayed_work_sync(&priv->restart_work); can_flush_echo_skb(dev); } EXPORT_SYMBOL_GPL(close_candev); diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index bf83639888a9..7c06df557e37 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -1248,11 +1248,10 @@ static int __maybe_unused flexcan_suspend(struct device *device) struct flexcan_priv *priv = netdev_priv(dev); int err; - err = flexcan_chip_disable(priv); - if (err) - return err; - if (netif_running(dev)) { + err = flexcan_chip_disable(priv); + if (err) + return err; netif_stop_queue(dev); 
netif_device_detach(dev); } @@ -1265,13 +1264,17 @@ static int __maybe_unused flexcan_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); + int err; priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); + err = flexcan_chip_enable(priv); + if (err) + return err; } - return flexcan_chip_enable(priv); + return 0; } static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume); diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index f3658bdb64cc..00d0a05b9d7f 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1080,7 +1080,7 @@ static int ethoc_probe(struct platform_device *pdev) if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); ret = -ENXIO; - goto error; + goto free; } if (netdev->mem_end) { @@ -1089,7 +1089,7 @@ static int ethoc_probe(struct platform_device *pdev) if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); ret = -ENXIO; - goto error; + goto free; } } else { /* Allocate buffer memory */ @@ -1100,7 +1100,7 @@ static int ethoc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "cannot allocate %dB buffer\n", buffer_size); ret = -ENOMEM; - goto error; + goto free; } netdev->mem_end = netdev->mem_start + buffer_size; priv->dma_alloc = buffer_size; @@ -1111,7 +1111,7 @@ static int ethoc_probe(struct platform_device *pdev) 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); if (num_bd < 4) { ret = -ENODEV; - goto error; + goto free; } priv->num_bd = num_bd; /* num_tx must be a power of two */ @@ -1124,7 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev) priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL); if (!priv->vma) { ret = -ENOMEM; - goto error; + goto free; } /* Allow the platform setup code to pass in a MAC address. 
*/ diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 99a69490f39b..4d468707a866 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -216,7 +216,7 @@ /* Various constants */ /* Coalescing */ -#define MVNETA_TXDONE_COAL_PKTS 1 +#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */ #define MVNETA_RX_COAL_PKTS 32 #define MVNETA_RX_COAL_USEC 100 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index cea4098b1d80..3ee3dcedf74a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -159,13 +159,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) return cmd->cmd_buf + (idx << cmd->log_stride); } -static u8 xor8_buf(void *buf, int len) +static u8 xor8_buf(void *buf, size_t offset, int len) { u8 *ptr = buf; u8 sum = 0; int i; + int end = len + offset; - for (i = 0; i < len; i++) + for (i = offset; i < end; i++) sum ^= ptr[i]; return sum; @@ -173,41 +174,49 @@ static u8 xor8_buf(void *buf, int len) static int verify_block_sig(struct mlx5_cmd_prot_block *block) { - if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); + int xor_len = sizeof(*block) - sizeof(block->data) - 1; + + if (xor8_buf(block, rsvd0_off, xor_len) != 0xff) return -EINVAL; - if (xor8_buf(block, sizeof(*block)) != 0xff) + if (xor8_buf(block, 0, sizeof(*block)) != 0xff) return -EINVAL; return 0; } -static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, - int csum) +static void calc_block_sig(struct mlx5_cmd_prot_block *block) { - block->token = token; - if (csum) { - block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - - sizeof(block->data) - 2); - block->sig = ~xor8_buf(block, sizeof(*block) - 1); - } + int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2; + size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); + + block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len); + block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1); } -static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) +static void calc_chain_sig(struct mlx5_cmd_msg *msg) { struct mlx5_cmd_mailbox *next = msg->next; - - while (next) { - calc_block_sig(next->buf, token, csum); + int size = msg->len; + int blen = size - min_t(int, sizeof(msg->first.data), size); + int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) + / MLX5_CMD_DATA_BLOCK_SIZE; + int i = 0; + + for (i = 0; i < n && next; i++) { + calc_block_sig(next->buf); next = next->next; } } static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) { - ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); - calc_chain_sig(ent->in, ent->token, csum); - calc_chain_sig(ent->out, ent->token, csum); + ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay)); + if (csum) { + calc_chain_sig(ent->in); + calc_chain_sig(ent->out); + } } static void poll_timeout(struct mlx5_cmd_work_ent *ent) @@ -238,12 +247,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent) struct mlx5_cmd_mailbox *next = ent->out->next; int err; u8 sig; + int size = ent->out->len; + int blen = size - min_t(int, sizeof(ent->out->first.data), size); + int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) + / MLX5_CMD_DATA_BLOCK_SIZE; + int i = 0; - sig = xor8_buf(ent->lay, sizeof(*ent->lay)); + sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); if (sig != 0xff) 
return -EINVAL; - while (next) { + for (i = 0; i < n && next; i++) { err = verify_block_sig(next->buf); if (err) return err; @@ -555,7 +569,6 @@ static void cmd_work_handler(struct work_struct *work) ent->idx = cmd->max_reg_cmds; } - ent->token = alloc_token(cmd); cmd->ent_arr[ent->idx] = ent; lay = get_inst(cmd, ent->idx); ent->lay = lay; @@ -654,7 +667,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, void *uout, int uout_size, mlx5_cmd_cbk_t callback, - void *context, int page_queue, u8 *status) + void *context, int page_queue, u8 *status, + u8 token) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; @@ -672,6 +686,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, if (IS_ERR(ent)) return PTR_ERR(ent); + ent->token = token; + if (!callback) init_completion(&ent->done); @@ -746,7 +762,8 @@ static const struct file_operations fops = { .write = dbg_write, }; -static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) +static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size, + u8 token) { struct mlx5_cmd_prot_block *block; struct mlx5_cmd_mailbox *next; @@ -772,6 +789,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size) memcpy(block->data, from, copy); from += copy; size -= copy; + block->token = token; next = next->next; } @@ -841,7 +859,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev, } static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, - gfp_t flags, int size) + gfp_t flags, int size, + u8 token) { struct mlx5_cmd_mailbox *tmp, *head = NULL; struct mlx5_cmd_prot_block *block; @@ -870,6 +889,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, tmp->next = head; block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); block->block_num = cpu_to_be32(n - i - 1); + block->token = token; head = tmp; } msg->next = head; @@ -1239,7 +1259,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, } if (IS_ERR(msg)) - msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); return msg; } @@ -1258,6 +1278,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, gfp_t gfp; int err; u8 status = 0; + u8 token; pages_queue = is_manage_pages(in); gfp = callback ? 
GFP_ATOMIC : GFP_KERNEL; @@ -1268,20 +1289,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, return err; } - err = mlx5_copy_to_msg(inb, in, in_size); + token = alloc_token(&dev->cmd); + + err = mlx5_copy_to_msg(inb, in, in_size, token); if (err) { mlx5_core_warn(dev, "err %d\n", err); goto out_in; } - outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); + outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token); if (IS_ERR(outb)) { err = PTR_ERR(outb); goto out_in; } err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, - pages_queue, &status); + pages_queue, &status, token); if (err) goto out_out; @@ -1348,7 +1371,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev) INIT_LIST_HEAD(&cmd->cache.med.head); for (i = 0; i < NUM_LONG_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; @@ -1358,7 +1381,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev) } for (i = 0; i < NUM_MED_LISTS; i++) { - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0); if (IS_ERR(msg)) { err = PTR_ERR(msg); goto ex_err; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index bb33b0410a22..8432521408a7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -234,11 +234,14 @@ static void macvlan_process_broadcast(struct work_struct *w) rcu_read_unlock(); + if (src) + dev_put(src->dev); kfree_skb(skb); } } static void macvlan_broadcast_enqueue(struct macvlan_port *port, + const struct macvlan_dev *src, struct sk_buff *skb) { struct sk_buff *nskb; @@ -248,8 +251,12 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, if (!nskb) goto err; + MACVLAN_SKB_CB(nskb)->src = src; + spin_lock(&port->bc_queue.lock); if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { + if (src) + dev_hold(src->dev); __skb_queue_tail(&port->bc_queue, nskb); err = 0; } @@ -296,8 +303,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) goto out; } - MACVLAN_SKB_CB(skb)->src = src; - macvlan_broadcast_enqueue(port, skb); + macvlan_broadcast_enqueue(port, src, skb); return RX_HANDLER_PASS; } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 2b5ac1818797..03054f55073c 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -2338,8 +2338,6 @@ ppp_unregister_channel(struct ppp_channel *chan) spin_lock_bh(&pn->all_channels_lock); list_del(&pch->list); spin_unlock_bh(&pn->all_channels_lock); - put_net(pch->chan_net); - pch->chan_net = NULL; pch->file.dead = 1; wake_up_interruptible(&pch->file.rwait); @@ -2955,6 +2953,9 @@ ppp_disconnect_channel(struct channel *pch) */ static void ppp_destroy_channel(struct channel *pch) { + put_net(pch->chan_net); + pch->chan_net = NULL; + atomic_dec(&channel_count); if (!pch->file.dead) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 4d4527c13f52..343a71ad865b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -794,10 +794,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) goto drop; - if (skb->sk) { - sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags); - sw_tx_timestamp(skb); - } + skb_tx_timestamp(skb); /* Orphan the skb - required as we might hang on to it * for indefinite time. 
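
A note on the macvlan change above: the fix pins the source device with dev_hold() while the cloned skb sits on the deferred broadcast queue, and drops that reference in the workqueue handler once the skb has been processed. Below is a minimal sketch of that hold-across-deferral pattern; the function names are hypothetical and the real driver recovers the device from MACVLAN_SKB_CB(skb)->src rather than passing it around explicitly.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative sketch only -- not part of the patch above. */
static void example_bc_enqueue(struct sk_buff_head *queue,
                               struct sk_buff *skb, struct net_device *dev)
{
        dev_hold(dev);                  /* pin before the skb is published */
        skb_queue_tail(queue, skb);
}

static void example_bc_process_one(struct sk_buff_head *queue,
                                   struct net_device *dev)
{
        struct sk_buff *skb = skb_dequeue(queue);

        if (!skb)
                return;
        /* ... deliver the broadcast copy ... */
        kfree_skb(skb);
        dev_put(dev);                   /* pairs with the dev_hold() above */
}

The point of the ordering is that the reference is taken before the skb becomes reachable from the queue and released only after the consumer is completely done with it, so the device cannot be freed while deferred work is still pending.
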
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 235053ba7737..d429dcbcb055 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -4169,7 +4169,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah)) ar9003_hw_internal_regulator_apply(ah); ar9003_hw_apply_tuning_caps(ah); - ar9003_hw_apply_minccapwr_thresh(ah, chan); + ar9003_hw_apply_minccapwr_thresh(ah, is2ghz); ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz); ar9003_hw_thermometer_apply(ah); ar9003_hw_thermo_cal_apply(ah); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index a16e644e7c08..8367ad77ca43 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -708,8 +708,10 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, return -ENOMEM; err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, glom_skb); - if (err) + if (err) { + brcmu_pkt_buf_free_skb(glom_skb); goto done; + } skb_queue_walk(pktq, skb) { memcpy(skb->data, glom_skb->data, skb->len); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index d8fa276e368b..9e6502a9648c 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -3822,7 +3822,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, (u8 *)&settings->beacon.head[ie_offset], settings->beacon.head_len - ie_offset, WLAN_EID_SSID); - if (!ssid_ie) + if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN) return -EINVAL; memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c index 4fb9635d3919..7660b523dcf1 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -1079,8 +1079,10 @@ bool dma_rxfill(struct dma_pub *pub) pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, DMA_FROM_DEVICE); - if (dma_mapping_error(di->dmadev, pa)) + if (dma_mapping_error(di->dmadev, pa)) { + brcmu_pkt_buf_free_skb(p); return false; + } /* save the free packet pointer */ di->rxp[rxout] = p; diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c index dd9162722495..0ab865de1491 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c @@ -87,7 +87,7 @@ void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel, u16 chanspec) { - struct tx_power power; + struct tx_power power = { }; u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id; /* Clear previous settings */ diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 452bb1b0c791..5d2ff0e149f0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1364,9 +1364,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* start the TFD with the scratchbuf */ scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); - memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); + memcpy(&txq->scratchbufs[idx], &out_cmd->hdr, scratch_size); iwl_pcie_txq_build_tfd(trans, txq, - iwl_pcie_get_scratchbuf_dma(txq, 
q->write_ptr), + iwl_pcie_get_scratchbuf_dma(txq, idx), scratch_size, true); /* map first command fragment, if any remains */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 1f5ea2403d94..89ee38869334 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3074,13 +3074,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev) } /* - * Atheros AR93xx chips do not behave after a bus reset. The device will - * throw a Link Down error on AER-capable systems and regardless of AER, - * config space of the device is never accessible again and typically - * causes the system to hang or reset when access is attempted. + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. + * The device will throw a Link Down error on AER-capable systems and + * regardless of AER, config space of the device is never accessible again + * and typically causes the system to hang or reset when access is attempted. * http://www.spinics.net/lists/linux-pci/msg34797.html */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 301386c4d85b..450d790c8bbf 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -723,6 +723,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device) if (err) return err; + err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless, + sizeof(wireless), 0); + if (err) + return err; + if (wireless & 0x1) { wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, RFKILL_TYPE_WLAN, @@ -910,7 +915,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) gps_rfkill = NULL; rfkill2_count = 0; - if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device)) + if (hp_wmi_rfkill_setup(device)) hp_wmi_rfkill2_setup(device); err = device_create_file(&device->dev, &dev_attr_display); diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 66da691c41cf..2ca452743609 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c @@ -295,13 +295,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip, } static inline void max17042_read_model_data(struct max17042_chip *chip, - u8 addr, u32 *data, int size) + u8 addr, u16 *data, int size) { struct regmap *map = chip->regmap; int i; + u32 tmp; - for (i = 0; i < size; i++) - regmap_read(map, addr + i, &data[i]); + for (i = 0; i < size; i++) { + regmap_read(map, addr + i, &tmp); + data[i] = (u16)tmp; + } } static inline int max17042_model_data_compare(struct max17042_chip *chip, @@ -324,7 +327,7 @@ static int max17042_init_model(struct max17042_chip *chip) { int ret; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 *temp_data; + u16 *temp_data; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); if (!temp_data) @@ -339,7 +342,7 @@ static int max17042_init_model(struct max17042_chip *chip) ret = max17042_model_data_compare( chip, chip->pdata->config_data->cell_char_tbl, - (u16 *)temp_data, + temp_data, table_size); max10742_lock_model(chip); @@ -352,7 +355,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) { int i; int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); - u32 
*temp_data; + u16 *temp_data; int ret = 0; temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c index 38a8bbe74810..83797d89c30f 100644 --- a/drivers/pps/clients/pps_parport.c +++ b/drivers/pps/clients/pps_parport.c @@ -195,7 +195,7 @@ static void parport_detach(struct parport *port) struct pps_client_pp *device; /* FIXME: oooh, this is ugly! */ - if (strcmp(pardev->name, KBUILD_MODNAME)) + if (!pardev || strcmp(pardev->name, KBUILD_MODNAME)) /* not our port */ return; diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index f182dae3a4b7..5236842d28aa 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -611,6 +611,8 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { * Alarm support for mcp7941x devices. */ +#define MCP794XX_REG_WEEKDAY 0x3 +#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7 #define MCP7941X_REG_CONTROL 0x07 # define MCP7941X_BIT_ALM0_EN 0x10 # define MCP7941X_BIT_ALM1_EN 0x20 @@ -840,12 +842,15 @@ static int ds1307_probe(struct i2c_client *client, { struct ds1307 *ds1307; int err = -ENODEV; - int tmp; + int tmp, wday; const struct chip_desc *chip = &chips[id->driver_data]; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); bool want_irq = false; unsigned char *buf; struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev); + struct rtc_time tm; + unsigned long timestamp; + static const int bbsqi_bitpos[] = { [ds_1337] = 0, [ds_1339] = DS1339_BIT_BBSQI, @@ -1115,6 +1120,27 @@ read_rtc: return PTR_ERR(ds1307->rtc); } + /* + * Some IPs have weekday reset value = 0x1 which might not correct + * hence compute the wday using the current date/month/year values + */ + ds1307_get_time(&client->dev, &tm); + wday = tm.tm_wday; + rtc_tm_to_time(&tm, &timestamp); + rtc_time_to_tm(timestamp, &tm); + + /* + * Check if reset wday is different from the computed wday + * If different then set the wday which we computed using + * timestamp + */ + if (wday != tm.tm_wday) { + wday = i2c_smbus_read_byte_data(client, MCP794XX_REG_WEEKDAY); + wday = wday & ~MCP794XX_REG_WEEKDAY_WDAY_MASK; + wday = wday | (tm.tm_wday + 1); + i2c_smbus_write_byte_data(client, MCP794XX_REG_WEEKDAY, wday); + } + if (want_irq) { err = request_irq(client->irq, ds1307_irq, IRQF_SHARED, ds1307->rtc->name, client); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 1eef0f586950..cf0214005fca 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1613,9 +1613,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, unsigned long long now; int expires; + cqr = (struct dasd_ccw_req *) intparm; if (IS_ERR(irb)) { switch (PTR_ERR(irb)) { case -EIO: + if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { + device = (struct dasd_device *) cqr->startdev; + cqr->status = DASD_CQR_CLEARED; + dasd_device_clear_timer(device); + wake_up(&dasd_flush_wq); + dasd_schedule_device_bh(device); + return; + } break; case -ETIMEDOUT: DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " @@ -1631,7 +1640,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, } now = get_tod_clock(); - cqr = (struct dasd_ccw_req *) intparm; /* check for conditions that should be handled immediately */ if (!cqr || !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index fbcd48d0bfc3..16b2db3cd9f1 100644 --- a/drivers/scsi/aacraid/commctrl.c +++
b/drivers/scsi/aacraid/commctrl.c @@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) struct fib *fibptr; struct hw_fib * hw_fib = (struct hw_fib *)0; dma_addr_t hw_fib_pa = (dma_addr_t)0LL; - unsigned size; + unsigned int size, osize; int retval; if (dev->in_reset) { @@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) * will not overrun the buffer when we copy the memory. Return * an error if we would. */ - size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); + osize = size = le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr); if (size < le16_to_cpu(kfib->header.SenderSize)) size = le16_to_cpu(kfib->header.SenderSize); if (size > dev->max_fib_size) { @@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) goto cleanup; } + /* Sanity check the second copy */ + if ((osize != le16_to_cpu(kfib->header.Size) + + sizeof(struct aac_fibhdr)) + || (size < le16_to_cpu(kfib->header.SenderSize))) { + retval = -EINVAL; + goto cleanup; + } + if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { aac_adapter_interrupt(dev); /* diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 652b41b4ddbd..4494529c4a60 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1802,7 +1802,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, case ARCMSR_MESSAGE_WRITE_WQBUFFER: { unsigned char *ver_addr; - int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; + uint32_t user_len; + int32_t my_empty_len, wqbuf_firstindex, wqbuf_lastindex; uint8_t *pQbuffer, *ptmpuserbuffer; ver_addr = kmalloc(1032, GFP_ATOMIC); @@ -1819,6 +1820,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, } ptmpuserbuffer = ver_addr; user_len = pcmdmessagefld->cmdmessage.Length; + if (user_len > 1032) { + retvalue = ARCMSR_MESSAGE_FAIL; + kfree(ver_addr); + goto message_out; + } memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); wqbuf_lastindex = acb->wqbuf_lastindex; wqbuf_firstindex = acb->wqbuf_firstindex; diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c index d35a5d6c8d7c..2cde21be80c1 100644 --- a/drivers/scsi/constants.c +++ b/drivers/scsi/constants.c @@ -1335,9 +1335,10 @@ static const char * const snstext[] = { /* Get sense key string or NULL if not available */ const char * -scsi_sense_key_string(unsigned char key) { +scsi_sense_key_string(unsigned char key) +{ #ifdef CONFIG_SCSI_CONSTANTS - if (key <= 0xE) + if (key < ARRAY_SIZE(snstext)) return snstext[key]; #endif return NULL; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index acecd7de4ea1..0f6f296eaff9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3996,7 +3996,7 @@ static int megasas_init_fw(struct megasas_instance *instance) /* Find first memory bar */ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); - if (pci_request_selected_regions(instance->pdev, instance->bar, + if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, "megasas: LSI")) { printk(KERN_DEBUG "megasas: IO memory region busy!\n"); return -EBUSY; @@ -4261,7 +4261,7 @@ fail_ready_state: iounmap(instance->reg_set); fail_ioremap: - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 
1<<instance->bar); return -EINVAL; } @@ -4282,7 +4282,7 @@ static void megasas_release_mfi(struct megasas_instance *instance) iounmap(instance->reg_set); - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f16764bf9964..560bbbd03788 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2190,7 +2190,7 @@ megasas_release_fusion(struct megasas_instance *instance) iounmap(instance->reg_set); - pci_release_selected_regions(instance->pdev, instance->bar); + pci_release_selected_regions(instance->pdev, 1<<instance->bar); } /** diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c index a8f6036ad82b..746d7360cfed 100644 --- a/drivers/staging/comedi/drivers/daqboard2000.c +++ b/drivers/staging/comedi/drivers/daqboard2000.c @@ -684,7 +684,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev, const struct daq200_boardtype *board; int i; - if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH) + if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH) return NULL; for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 7ffdcc07ef92..83a96bcb21b6 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -2959,7 +2959,15 @@ static int ni_ao_inttrig(struct comedi_device *dev, int i; static const int timeout = 1000; - if (trig_num != cmd->start_arg) + /* + * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT. + * For backwards compatibility, also allow trig_num == 0 when + * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT); + * in that case, the internal trigger is being used as a pre-trigger + * before the external trigger. 
+ */ + if (!(trig_num == cmd->start_arg || + (trig_num == 0 && cmd->start_src != TRIG_INT))) return -EINVAL; /* Null trig at beginning prevent ao start trigger from executing more than @@ -4407,7 +4415,7 @@ static int ni_E_init(struct comedi_device *dev) else s->maxdata = 0xffffff; s->insn_read = ni_tio_insn_read; - s->insn_write = ni_tio_insn_read; + s->insn_write = ni_tio_insn_write; s->insn_config = ni_tio_insn_config; #ifdef PCIDMA s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */; diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index 0e507f53fd9b..17cfa99b4fc0 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -1386,7 +1386,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import) attr->ia_valid |= ATTR_MTIME | ATTR_CTIME; } - /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */ + /* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */ if (attr->ia_valid & TIMES_SET_FLAGS) { if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CFS_CAP_FOWNER)) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 00b801df7322..ef9c33fe2617 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -505,7 +505,8 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); spin_lock_bh(&conn->cmd_lock); - if (!list_empty(&cmd->i_conn_node)) + if (!list_empty(&cmd->i_conn_node) && + !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); @@ -4174,6 +4175,7 @@ transport_err: static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) { + LIST_HEAD(tmp_list); struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL; struct iscsi_session *sess = conn->sess; /* @@ -4182,18 +4184,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) * has been reset -> returned sleeping pre-handler state. 
*/ spin_lock_bh(&conn->cmd_lock); - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { + list_splice_init(&conn->conn_cmd_list, &tmp_list); + list_for_each_entry(cmd, &tmp_list, i_conn_node) { + struct se_cmd *se_cmd = &cmd->se_cmd; + + if (se_cmd->se_tfo != NULL) { + spin_lock(&se_cmd->t_state_lock); + se_cmd->transport_state |= CMD_T_FABRIC_STOP; + spin_unlock(&se_cmd->t_state_lock); + } + } + spin_unlock_bh(&conn->cmd_lock); + + list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { list_del_init(&cmd->i_conn_node); - spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); - iscsit_free_cmd(cmd, true); - spin_lock_bh(&conn->cmd_lock); } - spin_unlock_bh(&conn->cmd_lock); } static void iscsit_stop_timers_for_cmds( diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 09dac9ac305e..d534738ff3c7 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1418,8 +1418,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) } login->zero_tsih = zero_tsih; - conn->sess->se_sess->sup_prot_ops = - conn->conn_transport->iscsit_get_sup_prot_ops(conn); + if (conn->sess) + conn->sess->se_sess->sup_prot_ops = + conn->conn_transport->iscsit_get_sup_prot_ops(conn); tpg = conn->tpg; if (!tpg) { diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 07425b8d606e..5638b60b4192 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1583,13 +1583,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) * in ATA and we need to set TPE=1 */ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, - struct request_queue *q, int block_size) + struct request_queue *q) { + int block_size = queue_logical_block_size(q); + if (!blk_queue_discard(q)) return false; - attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / - block_size; + attrib->max_unmap_lba_count = + q->limits.max_discard_sectors >> (ilog2(block_size) - 9); /* * Currently hardcoded to 1 in Linux/SCSI code.. 
*/ diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 703f885deac1..03bfa49b5590 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -165,8 +165,7 @@ static int fd_configure_device(struct se_device *dev) dev_size, div_u64(dev_size, fd_dev->fd_block_size), fd_dev->fd_block_size); - if (target_configure_unmap_from_queue(&dev->dev_attrib, q, - fd_dev->fd_block_size)) + if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) pr_debug("IFILE: BLOCK Discard support available," " disabled by default\n"); /* diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 91c625ac40d1..7ee927d9c3ac 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -126,8 +126,7 @@ static int iblock_configure_device(struct se_device *dev) dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); dev->dev_attrib.hw_queue_depth = q->nr_requests; - if (target_configure_unmap_from_queue(&dev->dev_attrib, q, - dev->dev_attrib.hw_block_size)) + if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) pr_debug("IBLOCK: BLOCK Discard support available," " disabled by default\n"); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 33fc58d514d3..7769af94aee2 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2455,15 +2455,10 @@ static void target_release_cmd_kref(struct kref *kref) struct se_session *se_sess = se_cmd->se_sess; bool fabric_stop; - if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock(&se_sess->sess_cmd_lock); - target_free_cmd_mem(se_cmd); - se_cmd->se_tfo->release_cmd(se_cmd); - return; - } spin_lock(&se_cmd->t_state_lock); - fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && + (se_cmd->transport_state & CMD_T_ABORTED); spin_unlock(&se_cmd->t_state_lock); if (se_cmd->cmd_wait_set || fabric_stop) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index b4df88da5b40..e90ea3da5cda 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1821,6 +1821,43 @@ pci_wch_ch353_setup(struct serial_private *priv, #define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 +#define PCI_VENDOR_ID_ACCESIO 0x494f +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA +#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191 
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198 +#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9 +#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9 +#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8 + + + /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 @@ -4890,6 +4927,108 @@ static struct pci_device_id serial_pci_tbl[] = { 0, 0, pbn_pericom_PI7C9X7958 }, /* + * ACCES I/O Products quad + */ + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { 
PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7954 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_pericom_PI7C9X7958 }, + /* * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) */ { PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560, diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 0d39ae4ff533..d806657ca53e 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -434,19 +434,21 @@ static void atmel_start_tx(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - if (atmel_use_pdc_tx(port)) { - if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN) - /* The transmitter is already running. Yes, we - really need this.*/ - return; + if (atmel_use_pdc_tx(port) && (UART_GET_PTSR(port) + & ATMEL_PDC_TXTEN)) + /* The transmitter is already running. 
Yes, we + really need this.*/ + return; + if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) if ((atmel_port->rs485.flags & SER_RS485_ENABLED) && !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX)) atmel_stop_rx(port); + if (atmel_use_pdc_tx(port)) /* re-enable PDC transmit */ UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); - } + /* Enable interrupts */ UART_PUT_IER(port, atmel_port->tx_done_mask); } diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 7268409dfc68..2a2a54db86ff 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1163,7 +1163,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, return -ENODEV; if (port->mapbase != 0) - return 0; + return -EINVAL; /* setup info for port */ port->dev = &platdev->dev; @@ -1213,14 +1213,15 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, if (IS_ERR(ourport->clk)) { pr_err("%s: Controller clock not found\n", dev_name(&platdev->dev)); - return PTR_ERR(ourport->clk); + ret = PTR_ERR(ourport->clk); + goto err; } ret = clk_prepare_enable(ourport->clk); if (ret) { pr_err("uart: clock failed to prepare+enable: %d\n", ret); clk_put(ourport->clk); - return ret; + goto err; } /* Keep all interrupts masked and cleared */ @@ -1236,7 +1237,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, /* reset the fifos (and setup the uart) */ s3c24xx_serial_resetport(port, cfg); + return 0; + +err: + port->mapbase = 0; + return ret; } #ifdef CONFIG_SAMSUNG_CLOCK @@ -1301,8 +1307,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) ourport->info->fifosize : ourport->drv_data->fifosize[probe_index]; - probe_index++; - dbg("%s: initialising port %p...\n", __func__, ourport); ret = s3c24xx_serial_init_port(ourport, pdev); @@ -1338,6 +1342,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "failed to add cpufreq notifier\n"); + probe_index++; + return 0; probe_err: diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index dc697cee248a..233103adad2d 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -492,12 +492,6 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig locked = spin_trylock_irqsave(&port->lock, flags); else spin_lock_irqsave(&port->lock, flags); - if (port->sysrq) { - locked = 0; - } else if (oops_in_progress) { - locked = spin_trylock(&port->lock); - } else - spin_lock(&port->lock); for (i = 0; i < n; i++) { if (*s == '\n') diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index d0e3a4497707..adf4d3124cc6 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -365,34 +365,22 @@ static void to_utf8(struct vc_data *vc, uint c) static void do_compute_shiftstate(void) { - unsigned int i, j, k, sym, val; + unsigned int k, sym, val; shift_state = 0; memset(shift_down, 0, sizeof(shift_down)); - for (i = 0; i < ARRAY_SIZE(key_down); i++) { - - if (!key_down[i]) + for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) { + sym = U(key_maps[0][k]); + if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK) continue; - k = i * BITS_PER_LONG; - - for (j = 0; j < BITS_PER_LONG; j++, k++) { - - if (!test_bit(k, key_down)) - continue; + val = KVAL(sym); + if (val == KVAL(K_CAPSSHIFT)) + val = KVAL(K_SHIFT); - sym = U(key_maps[0][k]); - if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK) - continue; - - val = KVAL(sym); - if (val == KVAL(K_CAPSSHIFT)) - val = KVAL(K_SHIFT); - - 
shift_down[val]++; - shift_state |= (1 << val); - } + shift_down[val]++; + shift_state |= BIT(val); } } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 54061a3bc5e9..73b3b054771f 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1336,7 +1336,6 @@ made_compressed_probe: spin_lock_init(&acm->write_lock); spin_lock_init(&acm->read_lock); mutex_init(&acm->mutex); - acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); acm->is_int_ep = usb_endpoint_xfer_int(epread); if (acm->is_int_ep) acm->bInterval = epread->bInterval; @@ -1386,14 +1385,14 @@ made_compressed_probe: urb->transfer_dma = rb->dma; if (acm->is_int_ep) { usb_fill_int_urb(urb, acm->dev, - acm->rx_endpoint, + usb_rcvintpipe(usb_dev, epread->bEndpointAddress), rb->base, acm->readsize, acm_read_bulk_callback, rb, acm->bInterval); } else { usb_fill_bulk_urb(urb, acm->dev, - acm->rx_endpoint, + usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), rb->base, acm->readsize, acm_read_bulk_callback, rb); diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 70d24d7e9dc5..6050d8e92c5e 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -95,7 +95,6 @@ struct acm { struct urb *read_urbs[ACM_NR]; struct acm_rb read_buffers[ACM_NR]; int rx_buflimit; - int rx_endpoint; spinlock_t read_lock; int write_used; /* number of non-empty write buffers */ int transmitting; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 894894f2ff93..8de16f982f55 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -142,6 +142,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, } } +static const unsigned short low_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 8, + [USB_ENDPOINT_XFER_ISOC] = 0, + [USB_ENDPOINT_XFER_BULK] = 0, + [USB_ENDPOINT_XFER_INT] = 8, +}; +static const unsigned short full_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 64, + [USB_ENDPOINT_XFER_ISOC] = 1023, + [USB_ENDPOINT_XFER_BULK] = 64, + [USB_ENDPOINT_XFER_INT] = 64, +}; +static const unsigned short high_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 64, + [USB_ENDPOINT_XFER_ISOC] = 1024, + [USB_ENDPOINT_XFER_BULK] = 512, + [USB_ENDPOINT_XFER_INT] = 1024, +}; +static const unsigned short super_speed_maxpacket_maxes[4] = { + [USB_ENDPOINT_XFER_CONTROL] = 512, + [USB_ENDPOINT_XFER_ISOC] = 1024, + [USB_ENDPOINT_XFER_BULK] = 1024, + [USB_ENDPOINT_XFER_INT] = 1024, +}; + static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, int asnum, struct usb_host_interface *ifp, int num_ep, unsigned char *buffer, int size) @@ -150,6 +175,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, struct usb_endpoint_descriptor *d; struct usb_host_endpoint *endpoint; int n, i, j, retval; + unsigned int maxp; + const unsigned short *maxpacket_maxes; d = (struct usb_endpoint_descriptor *) buffer; buffer += d->bLength; @@ -184,8 +211,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, memcpy(&endpoint->desc, d, n); INIT_LIST_HEAD(&endpoint->urb_list); - /* Fix up bInterval values outside the legal range. Use 32 ms if no - * proper value can be guessed. */ + /* + * Fix up bInterval values outside the legal range. + * Use 10 or 8 ms if no proper value can be guessed. 
+ */ i = 0; /* i = min, j = max, n = default */ j = 255; if (usb_endpoint_xfer_int(d)) { @@ -193,13 +222,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, switch (to_usb_device(ddev)->speed) { case USB_SPEED_SUPER: case USB_SPEED_HIGH: - /* Many device manufacturers are using full-speed + /* + * Many device manufacturers are using full-speed * bInterval values in high-speed interrupt endpoint - * descriptors. Try to fix those and fall back to a - * 32 ms default value otherwise. */ + * descriptors. Try to fix those and fall back to an + * 8-ms default value otherwise. + */ n = fls(d->bInterval*8); if (n == 0) - n = 9; /* 32 ms = 2^(9-1) uframes */ + n = 7; /* 8 ms = 2^(7-1) uframes */ j = 16; /* @@ -214,10 +245,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, } break; default: /* USB_SPEED_FULL or _LOW */ - /* For low-speed, 10 ms is the official minimum. + /* + * For low-speed, 10 ms is the official minimum. * But some "overclocked" devices might want faster - * polling so we'll allow it. */ - n = 32; + * polling so we'll allow it. + */ + n = 10; break; } } else if (usb_endpoint_xfer_isoc(d)) { @@ -225,10 +258,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, j = 16; switch (to_usb_device(ddev)->speed) { case USB_SPEED_HIGH: - n = 9; /* 32 ms = 2^(9-1) uframes */ + n = 7; /* 8 ms = 2^(7-1) uframes */ break; default: /* USB_SPEED_FULL */ - n = 6; /* 32 ms = 2^(6-1) frames */ + n = 4; /* 8 ms = 2^(4-1) frames */ break; } } @@ -256,6 +289,41 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, endpoint->desc.wMaxPacketSize = cpu_to_le16(8); } + /* Validate the wMaxPacketSize field */ + maxp = usb_endpoint_maxp(&endpoint->desc); + + /* Find the highest legal maxpacket size for this endpoint */ + i = 0; /* additional transactions per microframe */ + switch (to_usb_device(ddev)->speed) { + case USB_SPEED_LOW: + maxpacket_maxes = low_speed_maxpacket_maxes; + break; + case USB_SPEED_FULL: + maxpacket_maxes = full_speed_maxpacket_maxes; + break; + case USB_SPEED_HIGH: + /* Bits 12..11 are allowed only for HS periodic endpoints */ + if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { + i = maxp & (BIT(12) | BIT(11)); + maxp &= ~i; + } + /* fallthrough */ + default: + maxpacket_maxes = high_speed_maxpacket_maxes; + break; + case USB_SPEED_SUPER: + maxpacket_maxes = super_speed_maxpacket_maxes; + break; + } + j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; + + if (maxp > j) { + dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", + cfgno, inum, asnum, d->bEndpointAddress, maxp, j); + maxp = j; + endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); + } + /* * Some buggy high speed devices have bulk endpoints using * maxpacket sizes other than 512. 
High speed HCDs may not @@ -263,9 +331,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, */ if (to_usb_device(ddev)->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) { - unsigned maxp; - - maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; if (maxp != 512) dev_warn(ddev, "config %d interface %d altsetting %d " "bulk endpoint 0x%X has invalid maxpacket %d\n", diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index dfcb5f8b8f18..39a8bb8f8820 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1527,11 +1527,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb as->urb->start_frame = uurb->start_frame; as->urb->number_of_packets = number_of_packets; as->urb->stream_id = stream_id; - if (uurb->type == USBDEVFS_URB_TYPE_ISO || - ps->dev->speed == USB_SPEED_HIGH) - as->urb->interval = 1 << min(15, ep->desc.bInterval - 1); - else - as->urb->interval = ep->desc.bInterval; + + if (ep->desc.bInterval) { + if (uurb->type == USBDEVFS_URB_TYPE_ISO || + ps->dev->speed == USB_SPEED_HIGH || + ps->dev->speed >= USB_SPEED_SUPER) + as->urb->interval = 1 << + min(15, ep->desc.bInterval - 1); + else + as->urb->interval = ep->desc.bInterval; + } + as->urb->context = as; as->urb->complete = async_completed; for (totlen = u = 0; u < number_of_packets; u++) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 0673a5abc21d..6c7e47f67dd4 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -125,6 +125,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04f3, 0x016f), .driver_info = USB_QUIRK_DEVICE_QUALIFIER }, + { USB_DEVICE(0x04f3, 0x0381), .driver_info = + USB_QUIRK_NO_LPM }, + + { USB_DEVICE(0x04f3, 0x21b8), .driver_info = + USB_QUIRK_DEVICE_QUALIFIER }, + /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 4a279bb9092a..ede2814702c4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1943,14 +1943,6 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, s_pkt = 1; } - /* - * We assume here we will always receive the entire data block - * which we should receive. Meaning, if we program RX to - * receive 4K but we receive only 2K, we assume that's all we - * should receive and we simply bounce the request back to the - * gadget driver for further processing. - */ - req->request.actual += req->request.length - count; if (s_pkt) return 1; if ((event->status & DEPEVT_STATUS_LST) && @@ -1970,6 +1962,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_trb *trb; unsigned int slot; unsigned int i; + int count = 0; int ret; do { @@ -1986,6 +1979,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, slot++; slot %= DWC3_TRB_NUM; trb = &dep->trb_pool[slot]; + count += trb->size & DWC3_TRB_SIZE_MASK; + ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status); @@ -1993,6 +1988,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, break; }while (++i < req->request.num_mapped_sgs); + /* + * We assume here we will always receive the entire data block + * which we should receive. Meaning, if we program RX to + * receive 4K but we receive only 2K, we assume that's all we + * should receive and we simply bounce the request back to the + * gadget driver for further processing. 
+ */ + req->request.actual += req->request.length - count; dwc3_gadget_giveback(dep, req, status); if (ret) @@ -2016,6 +2019,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, return 1; } + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) + if ((event->status & DEPEVT_STATUS_IOC) && + (trb->ctrl & DWC3_TRB_CTRL_IOC)) + return 0; return 1; } diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index ad5483335167..8214773321ef 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -1881,11 +1881,8 @@ static int qe_get_frame(struct usb_gadget *gadget) tmp = in_be16(&udc->usb_param->frame_n); if (tmp & 0x8000) - tmp = tmp & 0x07ff; - else - tmp = -EINVAL; - - return (int)tmp; + return tmp & 0x07ff; + return -EINVAL; } static int fsl_qe_start(struct usb_gadget *gadget, @@ -2057,7 +2054,7 @@ static void setup_received_handle(struct qe_udc *udc, struct qe_ep *ep; if (wValue != 0 || wLength != 0 - || pipe > USB_MAX_ENDPOINTS) + || pipe >= USB_MAX_ENDPOINTS) break; ep = &udc->eps[pipe]; diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 38913eac6e7c..d7ef85f38847 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c @@ -91,7 +91,7 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget, return; if (req->num_mapped_sgs) { - dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, + dma_unmap_sg(&gadget->dev, req->sg, req->num_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->num_mapped_sgs = 0; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 69a70f880429..8ae80088d348 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -276,6 +276,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) ret = 0; virt_dev = xhci->devs[slot_id]; + if (!virt_dev) + return -ENODEV; + cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); if (!cmd) { xhci_dbg(xhci, "Couldn't allocate command structure.\n"); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4b36d8562eef..015d4c08a3cb 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -293,12 +293,13 @@ static void xhci_pci_remove(struct pci_dev *dev) usb_remove_hcd(xhci->shared_hcd); usb_put_hcd(xhci->shared_hcd); } - usb_hcd_pci_remove(dev); /* Workaround for spurious wakeups at shutdown with HSW */ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) pci_set_power_state(dev, PCI_D3hot); + usb_hcd_pci_remove(dev); + kfree(xhci); } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index f15e2df4448c..58edc5478d6e 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1351,12 +1351,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); - if (cmd->command_trb != xhci->cmd_ring->dequeue) { - xhci_err(xhci, - "Command completion event does not match command\n"); - return; - } - del_timer(&xhci->cmd_timer); trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); @@ -1368,6 +1362,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci_handle_stopped_cmd_ring(xhci, cmd); return; } + + if (cmd->command_trb != xhci->cmd_ring->dequeue) { + xhci_err(xhci, + "Command completion event does not match command\n"); + return; + } + /* * Host aborted the command ring, check if the current command was * supposed to be aborted, otherwise continue normally. 
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 829f446064ea..419a5b3cb924 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -488,7 +488,6 @@ static void sg_timeout(unsigned long _req) { struct usb_sg_request *req = (struct usb_sg_request *) _req; - req->status = -ETIMEDOUT; usb_sg_cancel(req); } @@ -519,8 +518,10 @@ static int perform_sglist( mod_timer(&sg_timer, jiffies + msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); usb_sg_wait(req); - del_timer_sync(&sg_timer); - retval = req->status; + if (!del_timer_sync(&sg_timer)) + retval = -ETIMEDOUT; + else + retval = req->status; /* FIXME check resulting data pattern */ diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 50763ebff015..2a540e4b5c61 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -780,36 +780,46 @@ static void xfer_work(struct work_struct *work) { struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work); struct usbhs_pipe *pipe = pkt->pipe; - struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); + struct usbhs_fifo *fifo; struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); struct dma_async_tx_descriptor *desc; - struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt); + struct dma_chan *chan; struct device *dev = usbhs_priv_to_dev(priv); enum dma_transfer_direction dir; + unsigned long flags; + usbhs_lock(priv, flags); + fifo = usbhs_pipe_to_fifo(pipe); + if (!fifo) + goto xfer_work_end; + + chan = usbhsf_dma_chan_get(fifo, pkt); dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual, pkt->trans, dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) - return; + goto xfer_work_end; desc->callback = usbhsf_dma_complete; desc->callback_param = pipe; if (dmaengine_submit(desc) < 0) { dev_err(dev, "Failed to submit dma descriptor\n"); - return; + goto xfer_work_end; } dev_dbg(dev, " %s %d (%d/ %d)\n", fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); - usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); - usbhs_pipe_enable(pipe); usbhsf_dma_start(pipe, fifo); + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); dma_async_issue_pending(chan); + usbhs_pipe_enable(pipe); + +xfer_work_end: + usbhs_unlock(priv, flags); } /* diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c index 6a030b931a3b..254194d61915 100644 --- a/drivers/usb/renesas_usbhs/mod.c +++ b/drivers/usb/renesas_usbhs/mod.c @@ -272,9 +272,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data) usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC); usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC); - usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); + /* + * The driver should not clear the xxxSTS after the line of + * "call irq callback functions" because each "if" statement is + * possible to call the callback function for avoiding any side effects. 
+ */ + if (irq_state.intsts0 & BRDY) + usbhs_write(priv, BRDYSTS, ~irq_state.brdysts); usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts); - usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); + if (irq_state.intsts0 & BEMP) + usbhs_write(priv, BEMPSTS, ~irq_state.bempsts); /* * call irq callback functions diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index e344d50f4697..6583c04cc35e 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -558,6 +558,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep, struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); struct usbhs_pipe *pipe; int ret = -EIO; + unsigned long flags; + + usbhs_lock(priv, flags); /* * if it already have pipe, @@ -566,7 +569,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep, if (uep->pipe) { usbhs_pipe_clear(uep->pipe); usbhs_pipe_sequence_data0(uep->pipe); - return 0; + ret = 0; + goto usbhsg_ep_enable_end; } pipe = usbhs_pipe_malloc(priv, @@ -594,6 +598,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep, ret = 0; } +usbhsg_ep_enable_end: + usbhs_unlock(priv, flags); + return ret; } diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index fb3a832d86e3..6103727cd060 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -661,6 +661,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, @@ -1021,6 +1023,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, + { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 334bc600282d..48db84f25cc9 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -406,6 +406,12 @@ #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 /* + * Ivium Technologies product IDs + */ +#define FTDI_PALMSENS_PID 0xf440 +#define FTDI_IVIUM_XSTAT_PID 0xf441 + +/* * Linx Technologies product ids */ #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ @@ -673,6 +679,12 @@ #define INTREPID_NEOVI_PID 0x0701 /* + * WICED USB UART + */ +#define WICED_VID 0x0A5C +#define WICED_USB20706V2_PID 0x6422 + +/* * Definitions for ID TECH (www.idt-net.com) devices */ #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index dfd728a263d2..c3b8ae360424 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -1239,7 +1239,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port, if (urb->transfer_buffer == NULL) { urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, - GFP_KERNEL); + GFP_ATOMIC); if (!urb->transfer_buffer) goto exit; } diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 393be562d875..f5f3b49ff9d5 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -1372,8 +1372,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port 
*port, } if (urb->transfer_buffer == NULL) { - urb->transfer_buffer = - kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL); + urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE, + GFP_ATOMIC); if (!urb->transfer_buffer) goto exit; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 9d8fe3d2feca..f243c734eb05 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -276,6 +276,13 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 #define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE910 0x1201 +#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 +#define TELIT_PRODUCT_LE920A4_1207 0x1207 +#define TELIT_PRODUCT_LE920A4_1208 0x1208 +#define TELIT_PRODUCT_LE920A4_1211 0x1211 +#define TELIT_PRODUCT_LE920A4_1212 0x1212 +#define TELIT_PRODUCT_LE920A4_1213 0x1213 +#define TELIT_PRODUCT_LE920A4_1214 0x1214 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 @@ -520,6 +527,12 @@ static void option_instat_callback(struct urb *urb); #define VIATELECOM_VENDOR_ID 0x15eb #define VIATELECOM_PRODUCT_CDS7 0x0001 +/* WeTelecom products */ +#define WETELECOM_VENDOR_ID 0x22de +#define WETELECOM_PRODUCT_WMD200 0x6801 +#define WETELECOM_PRODUCT_6802 0x6802 +#define WETELECOM_PRODUCT_WMD300 0x6803 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -642,6 +655,11 @@ static const struct option_blacklist_info sierra_mc73xx_blacklist = { .reserved = BIT(8) | BIT(10) | BIT(11), }; +static const struct option_blacklist_info telit_le920a4_blacklist_1 = { + .sendsetup = BIT(0), + .reserved = BIT(1), +}; + static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { .sendsetup = BIT(2), .reserved = BIT(0) | BIT(1) | BIT(3), @@ -1217,8 +1235,20 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), + .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), + .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, @@ -1974,9 +2004,13 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 
0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 5c19d3e7b4b8..790452c7a9e4 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -48,7 +48,8 @@ DEVICE(funsoft, FUNSOFT_IDS); /* Infineon Flashloader driver */ #define FLASHLOADER_IDS() \ { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ - { USB_DEVICE(0x8087, 0x0716) } + { USB_DEVICE(0x8087, 0x0716) }, \ + { USB_DEVICE(0x8087, 0x0801) } DEVICE(flashloader, FLASHLOADER_IDS); /* Google Serial USB SubClass */ diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 6fbfc8fc2f5d..d3bf8348e638 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -1416,7 +1416,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] rc = usb_register(udriver); if (rc) - return rc; + goto failed_usb_register; for (sd = serial_drivers; *sd; ++sd) { (*sd)->usb_driver = udriver; @@ -1434,6 +1434,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] while (sd-- > serial_drivers) usb_serial_deregister(*sd); usb_deregister(udriver); +failed_usb_register: + kfree(udriver); return rc; } EXPORT_SYMBOL_GPL(usb_serial_register_drivers); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index a1e77f570b19..431fb1e25830 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -460,8 +460,9 @@ static long vfio_pci_ioctl(void *device_data, } else if (cmd == VFIO_DEVICE_SET_IRQS) { struct vfio_irq_set hdr; + size_t size; u8 *data = NULL; - int ret = 0; + int max, ret = 0; minsz = offsetofend(struct vfio_irq_set, count); @@ -469,23 +470,31 @@ static long vfio_pci_ioctl(void *device_data, return -EFAULT; if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || + hdr.count >= (U32_MAX - hdr.start) || hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | VFIO_IRQ_SET_ACTION_TYPE_MASK)) return -EINVAL; - if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { - size_t size; - int max = vfio_pci_get_irq_count(vdev, hdr.index); + max = vfio_pci_get_irq_count(vdev, hdr.index); + if (hdr.start >= max || hdr.start + hdr.count > max) + return -EINVAL; - if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) - size = sizeof(uint8_t); - else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) - size = sizeof(int32_t); - else - return -EINVAL; + switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { + case VFIO_IRQ_SET_DATA_NONE: + size = 0; + break; + case VFIO_IRQ_SET_DATA_BOOL: + size = sizeof(uint8_t); + break; + case VFIO_IRQ_SET_DATA_EVENTFD: + size = sizeof(int32_t); + break; + default: + return -EINVAL; + } - if (hdr.argsz - minsz < hdr.count * size || - hdr.start >= max || hdr.start + hdr.count > max) + if (size) { + if (hdr.argsz - minsz < hdr.count * size) return -EINVAL; data = memdup_user((void __user *)(arg + minsz), diff --git 
a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 9dd49c9839ac..36cb198b7290 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -465,7 +465,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) if (!is_irq_none(vdev)) return -EINVAL; - vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); if (!vdev->ctx) return -ENOMEM; @@ -752,41 +752,58 @@ static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, unsigned index, unsigned start, unsigned count, uint32_t flags, void *data) { - int32_t fd = *(int32_t *)data; - - if ((index != VFIO_PCI_ERR_IRQ_INDEX) || - !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK)) + if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1) return -EINVAL; /* DATA_NONE/DATA_BOOL enables loopback testing */ if (flags & VFIO_IRQ_SET_DATA_NONE) { - if (vdev->err_trigger) - eventfd_signal(vdev->err_trigger, 1); - return 0; + if (vdev->err_trigger) { + if (count) { + eventfd_signal(vdev->err_trigger, 1); + } else { + eventfd_ctx_put(vdev->err_trigger); + vdev->err_trigger = NULL; + } + return 0; + } } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { - uint8_t trigger = *(uint8_t *)data; + uint8_t trigger; + + if (!count) + return -EINVAL; + + trigger = *(uint8_t *)data; if (trigger && vdev->err_trigger) eventfd_signal(vdev->err_trigger, 1); - return 0; - } - /* Handle SET_DATA_EVENTFD */ - if (fd == -1) { - if (vdev->err_trigger) - eventfd_ctx_put(vdev->err_trigger); - vdev->err_trigger = NULL; return 0; - } else if (fd >= 0) { - struct eventfd_ctx *efdctx; - efdctx = eventfd_ctx_fdget(fd); - if (IS_ERR(efdctx)) - return PTR_ERR(efdctx); - if (vdev->err_trigger) - eventfd_ctx_put(vdev->err_trigger); - vdev->err_trigger = efdctx; + } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { + int32_t fd; + + if (!count) + return -EINVAL; + + fd = *(int32_t *)data; + if (fd == -1) { + if (vdev->err_trigger) + eventfd_ctx_put(vdev->err_trigger); + vdev->err_trigger = NULL; + } else if (fd >= 0) { + struct eventfd_ctx *efdctx; + + efdctx = eventfd_ctx_fdget(fd); + if (IS_ERR(efdctx)) + return PTR_ERR(efdctx); + + if (vdev->err_trigger) + eventfd_ctx_put(vdev->err_trigger); + + vdev->err_trigger = efdctx; + } return 0; - } else - return -EINVAL; + } + + return -EINVAL; } int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, unsigned index, unsigned start, unsigned count, diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 7490e92c03d5..447571d19ecb 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -177,6 +177,8 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num) num = min(num, ARRAY_SIZE(vb->pfns)); mutex_lock(&vb->balloon_lock); + /* We can't release more pages than taken */ + num = min(num, (size_t)vb->num_pages); for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { page = balloon_page_dequeue(vb_dev_info); diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 85534ea63555..6bd06f9d737d 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type, rc = -ENOMEM; goto out; } + } else if (msg_type == XS_TRANSACTION_END) { + list_for_each_entry(trans, &u->transactions, list) + if (trans->handle.id == 
u->u.msg.tx_id) + break; + if (&trans->list == &u->transactions) + return -ESRCH; } reply = xenbus_dev_request_and_reply(&u->u.msg); if (IS_ERR(reply)) { - kfree(trans); + if (msg_type == XS_TRANSACTION_START) + kfree(trans); rc = PTR_ERR(reply); goto out; } @@ -330,12 +337,7 @@ static int xenbus_write_transaction(unsigned msg_type, list_add(&trans->list, &u->transactions); } else if (msg_type == XS_TRANSACTION_END) { - list_for_each_entry(trans, &u->transactions, list) - if (trans->handle.id == u->u.msg.tx_id) - break; - BUG_ON(&trans->list == &u->transactions); list_del(&trans->list); - kfree(trans); } diff --git a/fs/9p/acl.c b/fs/9p/acl.c index 8482f2d11606..d3f5d487ae46 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -320,32 +320,26 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name, case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { - umode_t mode = inode->i_mode; - retval = posix_acl_equiv_mode(acl, &mode); - if (retval < 0) + struct iattr iattr; + + retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); + if (retval) goto err_out; - else { - struct iattr iattr; - if (retval == 0) { - /* - * ACL can be represented - * by the mode bits. So don't - * update ACL. - */ - acl = NULL; - value = NULL; - size = 0; - } - /* Updte the mode bits */ - iattr.ia_mode = ((mode & S_IALLUGO) | - (inode->i_mode & ~S_IALLUGO)); - iattr.ia_valid = ATTR_MODE; - /* FIXME should we update ctime ? - * What is the following setxattr update the - * mode ? + if (!acl) { + /* + * ACL can be represented + * by the mode bits. So don't + * update ACL. */ - v9fs_vfs_setattr_dotl(dentry, &iattr); + value = NULL; + size = 0; } + iattr.ia_valid = ATTR_MODE; + /* FIXME should we update ctime ? + * What is the following setxattr update the + * mode ? 
+ */ + v9fs_vfs_setattr_dotl(dentry, &iattr); } break; case ACL_TYPE_DEFAULT: diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index bdfcea010c9f..d0b7320b17c4 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1094,7 +1094,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) struct p9_wstat wstat; p9_debug(P9_DEBUG_VFS, "\n"); - retval = inode_change_ok(dentry->d_inode, iattr); + retval = setattr_prepare(dentry, iattr); if (retval) return retval; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index d3fb85871921..f03024e5bcbe 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -560,7 +560,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) p9_debug(P9_DEBUG_VFS, "\n"); - retval = inode_change_ok(inode, iattr); + retval = setattr_prepare(dentry, iattr); if (retval) return retval; diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index b9acadafa4a1..247b351f3005 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -303,7 +303,7 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr) unsigned int ia_valid = attr->ia_valid; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); /* * we can't change the UID or GID of any file - diff --git a/fs/affs/inode.c b/fs/affs/inode.c index bec2d1a0c91c..55a931da9917 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -222,7 +222,7 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr) pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid); - error = inode_change_ok(inode,attr); + error = setattr_prepare(dentry, attr); if (error) goto out; diff --git a/fs/attr.c b/fs/attr.c index 6530ced19697..ee697ddc6c2e 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -17,19 +17,22 @@ #include <linux/ima.h> /** - * inode_change_ok - check if attribute changes to an inode are allowed - * @inode: inode to check + * setattr_prepare - check if attribute changes to a dentry are allowed + * @dentry: dentry to check * @attr: attributes to change * * Check if we are allowed to change the attributes contained in @attr - * in the given inode. This includes the normal unix access permission - * checks, as well as checks for rlimits and others. + * in the given dentry. This includes the normal unix access permission + * checks, as well as checks for rlimits and others. The function also clears + * SGID bit from mode if user is not allowed to set it. Also file capabilities + * and IMA extended attributes are cleared if ATTR_KILL_PRIV is set. * * Should be called as the first thing in ->setattr implementations, * possibly after taking additional locks. */ -int inode_change_ok(const struct inode *inode, struct iattr *attr) +int setattr_prepare(struct dentry *dentry, struct iattr *attr) { + struct inode *inode = d_inode(dentry); unsigned int ia_valid = attr->ia_valid; /* @@ -44,7 +47,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr) /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) - return 0; + goto kill_priv; /* Make sure a caller can chown. 
*/ if ((ia_valid & ATTR_UID) && @@ -77,9 +80,19 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr) return -EPERM; } +kill_priv: + /* User has permission for the change */ + if (ia_valid & ATTR_KILL_PRIV) { + int error; + + error = security_inode_killpriv(dentry); + if (error) + return error; + } + return 0; } -EXPORT_SYMBOL(inode_change_ok); +EXPORT_SYMBOL(setattr_prepare); /** * inode_newsize_ok - may this inode be truncated to a given size @@ -217,13 +230,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de if (!(ia_valid & ATTR_MTIME_SET)) attr->ia_mtime = now; if (ia_valid & ATTR_KILL_PRIV) { - attr->ia_valid &= ~ATTR_KILL_PRIV; - ia_valid &= ~ATTR_KILL_PRIV; error = security_inode_need_killpriv(dentry); - if (error > 0) - error = security_inode_killpriv(dentry); - if (error) + if (error < 0) return error; + if (error == 0) + ia_valid = attr->ia_valid &= ~ATTR_KILL_PRIV; } /* diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 9a0124a95851..fb3e64d37cb4 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -83,11 +83,9 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans, case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { - ret = posix_acl_equiv_mode(acl, &inode->i_mode); - if (ret < 0) + ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (ret) return ret; - if (ret == 0) - acl = NULL; } ret = 0; break; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a09ec5e7f3e2..4292500ef241 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4690,7 +4690,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) if (btrfs_root_readonly(root)) return -EROFS; - err = inode_change_ok(inode, attr); + err = setattr_prepare(dentry, attr); if (err) return err; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b69285f0593b..eb4348781a97 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1649,6 +1649,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, int namelen; int ret = 0; + if (!S_ISDIR(file_inode(file)->i_mode)) + return -ENOTDIR; + ret = mnt_want_write_file(file); if (ret) goto out; @@ -1706,6 +1709,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, struct btrfs_ioctl_vol_args *vol_args; int ret; + if (!S_ISDIR(file_inode(file)->i_mode)) + return -ENOTDIR; + vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); @@ -1729,6 +1735,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, bool readonly = false; struct btrfs_qgroup_inherit *inherit = NULL; + if (!S_ISDIR(file_inode(file)->i_mode)) + return -ENOTDIR; + vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); @@ -2355,6 +2364,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, int ret; int err = 0; + if (!S_ISDIR(dir->i_mode)) + return -ENOTDIR; + vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 2c3fcb38068f..830952d045c9 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2600,6 +2600,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, } if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { + blk_finish_plug(&plug); + list_del_init(&root_log_ctx.list); mutex_unlock(&log_root_tree->log_mutex); ret = root_log_ctx.log_ret; goto out; diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 469f2e8657e8..a02b900c1ed6 100644 --- a/fs/ceph/acl.c 
+++ b/fs/ceph/acl.c @@ -108,11 +108,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { - ret = posix_acl_equiv_mode(acl, &new_mode); - if (ret < 0) + ret = posix_acl_update_mode(inode, &new_mode, &acl); + if (ret) goto out; - if (ret == 0) - acl = NULL; } break; case ACL_TYPE_DEFAULT: diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 302085100c28..293d1cdf3645 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -985,16 +985,14 @@ out_unlocked: static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; - int ret; + loff_t ret; mutex_lock(&inode->i_mutex); if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); - if (ret < 0) { - offset = ret; + if (ret < 0) goto out; - } } switch (whence) { @@ -1009,7 +1007,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) * write() or lseek() might have altered it */ if (offset == 0) { - offset = file->f_pos; + ret = file->f_pos; goto out; } offset += file->f_pos; @@ -1029,11 +1027,11 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) break; } - offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); + ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out: mutex_unlock(&inode->i_mutex); - return offset; + return ret; } static inline void ceph_zero_partial_page( diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index c3e103ff18bd..122d60506ab9 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1708,7 +1708,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; - err = inode_change_ok(inode, attr); + err = setattr_prepare(dentry, attr); if (err != 0) return err; diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 0bd335a393f8..f1aa100758df 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -727,24 +727,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) memcpy(ses->auth_key.response + baselen, tiblob, tilen); + mutex_lock(&ses->server->srv_mutex); + rc = crypto_hmacmd5_alloc(ses->server); if (rc) { cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc); - goto setup_ntlmv2_rsp_ret; + goto unlock; } /* calculate ntlmv2_hash */ rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp); if (rc) { cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc); - goto setup_ntlmv2_rsp_ret; + goto unlock; } /* calculate first part of the client response (CR1) */ rc = CalcNTLMv2_response(ses, ntlmv2_hash); if (rc) { cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc); - goto setup_ntlmv2_rsp_ret; + goto unlock; } /* now calculate the session key for NTLMv2 */ @@ -753,13 +755,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) if (rc) { cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n", __func__); - goto setup_ntlmv2_rsp_ret; + goto unlock; } rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash); if (rc) { cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__); - goto setup_ntlmv2_rsp_ret; + goto unlock; } rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, @@ -767,7 +769,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) CIFS_HMAC_MD5_HASH_SIZE); if (rc) { cifs_dbg(VFS, "%s: Could not update with response\n", __func__); - goto setup_ntlmv2_rsp_ret; + goto unlock; } rc = 
crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash, @@ -775,6 +777,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) if (rc) cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); +unlock: + mutex_unlock(&ses->server->srv_mutex); setup_ntlmv2_rsp_ret: kfree(tiblob); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 3db0c5fd9a11..3f2dd87b899a 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -229,6 +229,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, goto cifs_create_get_file_info; } + if (S_ISDIR(newinode->i_mode)) { + CIFSSMBClose(xid, tcon, fid->netfid); + iput(newinode); + rc = -EISDIR; + goto out; + } + if (!S_ISREG(newinode->i_mode)) { /* * The server may allow us to open things like @@ -399,10 +406,14 @@ cifs_create_set_dentry: if (rc != 0) { cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n", rc); - if (server->ops->close) - server->ops->close(xid, tcon, fid); - goto out; + goto out_err; } + + if (S_ISDIR(newinode->i_mode)) { + rc = -EISDIR; + goto out_err; + } + d_drop(direntry); d_add(direntry, newinode); @@ -410,6 +421,13 @@ out: kfree(buf); kfree(full_path); return rc; + +out_err: + if (server->ops->close) + server->ops->close(xid, tcon, fid); + if (newinode) + iput(newinode); + goto out; } int diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 8dc1cfcb534f..6a5ca969c301 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2074,7 +2074,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) attrs->ia_valid |= ATTR_FORCE; - rc = inode_change_ok(inode, attrs); + rc = setattr_prepare(direntry, attrs); if (rc < 0) goto out; @@ -2215,7 +2215,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) attrs->ia_valid |= ATTR_FORCE; - rc = inode_change_ok(inode, attrs); + rc = setattr_prepare(direntry, attrs); if (rc < 0) { free_xid(xid); return rc; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index b241a2a7683b..1059ba829774 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -858,6 +858,9 @@ smb2_new_lease_key(struct cifs_fid *fid) get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE); } +#define SMB2_SYMLINK_STRUCT_SIZE \ + (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp)) + static int smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, const char *full_path, char **target_path, @@ -870,7 +873,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid fid; struct smb2_err_rsp *err_buf = NULL; struct smb2_symlink_err_rsp *symlink; - unsigned int sub_len, sub_offset; + unsigned int sub_len; + unsigned int sub_offset; + unsigned int print_len; + unsigned int print_offset; cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); @@ -891,11 +897,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, kfree(utf16_path); return -ENOENT; } + + if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) || + get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) { + kfree(utf16_path); + return -ENOENT; + } + /* open must fail on symlink - reset rc */ rc = 0; symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData; sub_len = le16_to_cpu(symlink->SubstituteNameLength); sub_offset = le16_to_cpu(symlink->SubstituteNameOffset); + print_len = le16_to_cpu(symlink->PrintNameLength); + print_offset = le16_to_cpu(symlink->PrintNameOffset); + + if 
(get_rfc1002_length(err_buf) + 4 < + SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) { + kfree(utf16_path); + return -ENOENT; + } + + if (get_rfc1002_length(err_buf) + 4 < + SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) { + kfree(utf16_path); + return -ENOENT; + } + *target_path = cifs_strndup_from_utf16( (char *)symlink->PathBuffer + sub_offset, sub_len, true, cifs_sb->local_nls); diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 57ee4c53b4f8..8bc9ec714467 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -952,7 +952,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) } mutex_unlock(&crypt_stat->cs_mutex); - rc = inode_change_ok(inode, ia); + rc = setattr_prepare(dentry, ia); if (rc) goto out; if (ia->ia_valid & ATTR_SIZE) { diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 3f9cafd73931..799e7d4562fe 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -1039,7 +1039,7 @@ int exofs_setattr(struct dentry *dentry, struct iattr *iattr) if (unlikely(error)) return error; - error = inode_change_ok(inode, iattr); + error = setattr_prepare(dentry, iattr); if (unlikely(error)) return error; diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index 27695e6f4e46..d6aeb84e90b6 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c @@ -193,15 +193,11 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) case ACL_TYPE_ACCESS: name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - else { - inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - if (error == 0) - acl = NULL; - } + inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(inode); } break; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 36d35c36311d..b822e4a447d6 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1547,7 +1547,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, iattr); + error = setattr_prepare(dentry, iattr); if (error) return error; diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c index 8bbaf5bcf982..c4509495a104 100644 --- a/fs/ext3/acl.c +++ b/fs/ext3/acl.c @@ -195,15 +195,11 @@ __ext3_set_acl(handle_t *handle, struct inode *inode, int type, case ACL_TYPE_ACCESS: name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - else { - inode->i_ctime = CURRENT_TIME_SEC; - ext3_mark_inode_dirty(handle, inode); - if (error == 0) - acl = NULL; - } + inode->i_ctime = CURRENT_TIME_SEC; + ext3_mark_inode_dirty(handle, inode); } break; diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 2c6ccc49ba27..215972e94e17 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -3244,7 +3244,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) int error, rc = 0; const unsigned int ia_valid = attr->ia_valid; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index d40c8dbbb0d6..87d9bbf6a53f 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -201,15 +201,11 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type, case ACL_TYPE_ACCESS: name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - 
if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - else { - inode->i_ctime = ext4_current_time(inode); - ext4_mark_inode_dirty(handle, inode); - if (error == 0) - acl = NULL; - } + inode->i_ctime = ext4_current_time(inode); + ext4_mark_inode_dirty(handle, inode); } break; diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 06b65be20d48..5fb1195f602f 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -209,6 +209,9 @@ static int ext4_init_block_bitmap(struct super_block *sb, memset(bh->b_data, 0, sb->s_blocksize); bit_max = ext4_num_base_meta_clusters(sb, block_group); + if ((bit_max >> 3) >= bh->b_size) + return -EIO; + for (bit = 0; bit < bit_max; bit++) ext4_set_bit(bit, bh->b_data); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e41ec5034bad..d24d6fc1df4f 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -359,9 +359,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) ext4_fsblk_t block = ext4_ext_pblock(ext); int len = ext4_ext_get_actual_len(ext); ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); - ext4_lblk_t last = lblock + len - 1; - if (len == 0 || lblock > last) + /* + * We allow neither: + * - zero length + * - overflow/wrap-around + */ + if (lblock + len <= lblock) return 0; return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a2c4495d40e9..ae4271d5c6ca 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -207,9 +207,9 @@ void ext4_evict_inode(struct inode *inode) * Note that directories do not have this problem because they * don't use page cache. */ - if (ext4_should_journal_data(inode) && - (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && - inode->i_ino != EXT4_JOURNAL_INO) { + if (inode->i_ino != EXT4_JOURNAL_INO && + ext4_should_journal_data(inode) && + (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; @@ -2610,13 +2610,36 @@ retry: done = true; } } - ext4_journal_stop(handle); + /* + * Caution: If the handle is synchronous, + * ext4_journal_stop() can wait for transaction commit + * to finish which may depend on writeback of pages to + * complete or on page lock to be released. In that + * case, we have to wait until after after we have + * submitted all the IO, released page locks we hold, + * and dropped io_end reference (for extent conversion + * to be able to complete) before stopping the handle. + */ + if (!ext4_handle_valid(handle) || handle->h_sync == 0) { + ext4_journal_stop(handle); + handle = NULL; + } /* Submit prepared bio */ ext4_io_submit(&mpd.io_submit); /* Unlock pages we didn't use */ mpage_release_unused_pages(&mpd, give_up_on_write); - /* Drop our io_end reference we got from init */ - ext4_put_io_end(mpd.io_submit.io_end); + /* + * Drop our io_end reference we got from init. We have + * to be careful and use deferred io_end finishing if + * we are still holding the transaction as we can + * release the last reference to io_end which may end + * up doing unwritten extent conversion. 
+ */ + if (handle) { + ext4_put_io_end_defer(mpd.io_submit.io_end); + ext4_journal_stop(handle); + } else + ext4_put_io_end(mpd.io_submit.io_end); if (ret == -ENOSPC && sbi->s_journal) { /* @@ -4649,7 +4672,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) int orphan = 0; const unsigned int ia_valid = attr->ia_valid; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 47f7af78675a..62d0c5cbcad6 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2911,7 +2911,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, ext4_error(sb, "Allocating blocks %llu-%llu which overlap " "fs metadata", block, block+len); /* File system mounted not to panic on error - * Fix the bitmap and repeat the block allocation + * Fix the bitmap and return EFSCORRUPTED * We leak some of the blocks here. */ ext4_lock_group(sb, ac->ac_b_ex.fe_group); @@ -2920,7 +2920,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, ext4_unlock_group(sb, ac->ac_b_ex.fe_group); err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); if (!err) - err = -EAGAIN; + err = -EIO; goto out_err; } @@ -4489,18 +4489,7 @@ repeat: } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); - if (*errp == -EAGAIN) { - /* - * drop the reference that we took - * in ext4_mb_use_best_found - */ - ext4_mb_release_context(ac); - ac->ac_b_ex.fe_group = 0; - ac->ac_b_ex.fe_start = 0; - ac->ac_b_ex.fe_len = 0; - ac->ac_status = AC_STATUS_CONTINUE; - goto repeat; - } else if (*errp) { + if (*errp) { ext4_discard_allocated_blocks(ac); goto errout; } else { diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 83026f40d9cd..1c239bba4344 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2077,6 +2077,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group, /* Called at mount-time, super-block is locked */ static int ext4_check_descriptors(struct super_block *sb, + ext4_fsblk_t sb_block, ext4_group_t *first_not_zeroed) { struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -2107,6 +2108,11 @@ static int ext4_check_descriptors(struct super_block *sb, grp = i; block_bitmap = ext4_block_bitmap(sb, gdp); + if (block_bitmap == sb_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Block bitmap for group %u overlaps " + "superblock", i); + } if (block_bitmap < first_block || block_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Block bitmap for group %u not in group " @@ -2114,6 +2120,11 @@ static int ext4_check_descriptors(struct super_block *sb, return 0; } inode_bitmap = ext4_inode_bitmap(sb, gdp); + if (inode_bitmap == sb_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode bitmap for group %u overlaps " + "superblock", i); + } if (inode_bitmap < first_block || inode_bitmap > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " "Inode bitmap for group %u not in group " @@ -2121,6 +2132,11 @@ static int ext4_check_descriptors(struct super_block *sb, return 0; } inode_table = ext4_inode_table(sb, gdp); + if (inode_table == sb_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode table for group %u overlaps " + "superblock", i); + } if (inode_table < first_block || inode_table + sbi->s_itb_per_group - 1 > last_block) { ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " @@ -2228,6 +2244,16 @@ static void ext4_orphan_cleanup(struct super_block *sb, while 
(es->s_last_orphan) { struct inode *inode; + /* + * We may have encountered an error during cleanup; if + * so, skip the rest. + */ + if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { + jbd_debug(1, "Skipping orphan recovery on fs with errors.\n"); + es->s_last_orphan = 0; + break; + } + inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)); if (IS_ERR(inode)) { es->s_last_orphan = 0; @@ -3644,6 +3670,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } + if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { + ext4_msg(sb, KERN_ERR, + "Number of reserved GDT blocks insanely large: %d", + le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks)); + goto failed_mount; + } + if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { @@ -3885,7 +3918,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount2; } } - if (!ext4_check_descriptors(sb, &first_not_zeroed)) { + if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) { ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); goto failed_mount2; } diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index dbe2141d10ad..ff4737033823 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -213,12 +213,10 @@ static int __f2fs_set_acl(struct inode *inode, int type, case ACL_TYPE_ACCESS: name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; set_acl_inode(fi, inode->i_mode); - if (error == 0) - acl = NULL; } break; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 7d8b96275092..010c2af33858 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -500,7 +500,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) struct f2fs_inode_info *fi = F2FS_I(inode); int err; - err = inode_change_ok(inode, attr); + err = setattr_prepare(dentry, attr); if (err) return err; diff --git a/fs/fat/file.c b/fs/fat/file.c index 85f79a89e747..17982aac0ef3 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -394,7 +394,7 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr) attr->ia_valid &= ~TIMES_SET_FLAGS; } - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); attr->ia_valid = ia_valid; if (error) { if (sbi->options.quiet) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 0c6048247a34..31ad5fc879b8 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1704,9 +1704,10 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff) * vmtruncate() doesn't allow for this case, so do the rlimit checking * and the actual truncation by hand. 
*/ -int fuse_do_setattr(struct inode *inode, struct iattr *attr, +int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, struct file *file) { + struct inode *inode = dentry->d_inode; struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_req *req; @@ -1721,7 +1722,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr, if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) attr->ia_valid |= ATTR_FORCE; - err = inode_change_ok(inode, attr); + err = setattr_prepare(dentry, attr); if (err) return err; @@ -1826,9 +1827,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) return -EACCES; if (attr->ia_valid & ATTR_FILE) - return fuse_do_setattr(inode, attr, attr->ia_file); + return fuse_do_setattr(entry, attr, attr->ia_file); else - return fuse_do_setattr(inode, attr, NULL); + return fuse_do_setattr(entry, attr, NULL); } static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry, diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a6442041ab64..5f747cf3a412 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -454,6 +454,15 @@ static int fuse_flush(struct file *file, fl_owner_t id) fuse_sync_writes(inode); mutex_unlock(&inode->i_mutex); + if (test_bit(AS_ENOSPC, &file->f_mapping->flags) && + test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags)) + err = -ENOSPC; + if (test_bit(AS_EIO, &file->f_mapping->flags) && + test_and_clear_bit(AS_EIO, &file->f_mapping->flags)) + err = -EIO; + if (err) + return err; + req = fuse_get_req_nofail_nopages(fc, file); memset(&inarg, 0, sizeof(inarg)); inarg.fh = ff->fh; @@ -499,6 +508,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end, goto out; fuse_sync_writes(inode); + + /* + * Due to implementation of fuse writeback + * filemap_write_and_wait_range() does not catch errors. 
+ * We have to do this directly after fuse_sync_writes() + */ + if (test_bit(AS_ENOSPC, &file->f_mapping->flags) && + test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags)) + err = -ENOSPC; + if (test_bit(AS_EIO, &file->f_mapping->flags) && + test_and_clear_bit(AS_EIO, &file->f_mapping->flags)) + err = -EIO; + if (err) + goto out; + err = sync_inode_metadata(inode, 1); if (err) goto out; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index e8e47a6ab518..300619ba8591 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -894,7 +894,7 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos); int fuse_flush_times(struct inode *inode, struct fuse_file *ff); int fuse_write_inode(struct inode *inode, struct writeback_control *wbc); -int fuse_do_setattr(struct inode *inode, struct iattr *attr, +int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, struct file *file); #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 6abafb11f214..fdf7b33306be 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -933,7 +933,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | - FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | + FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT; req->in.h.opcode = FUSE_INIT; diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 7b3143064af1..88e66aa516c4 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -79,17 +79,11 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) if (type == ACL_TYPE_ACCESS) { umode_t mode = inode->i_mode; - error = posix_acl_equiv_mode(acl, &mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - - if (error == 0) - acl = NULL; - - if (mode != inode->i_mode) { - inode->i_mode = mode; + if (mode != inode->i_mode) mark_inode_dirty(inode); - } } if (acl) { diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index ba4b2e5377db..b6657ddd1de5 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -1774,7 +1774,7 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) goto out; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index d0929bc81782..a4a3f8a674b9 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -604,7 +604,7 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr) struct hfs_sb_info *hsb = HFS_SB(inode->i_sb); int error; - error = inode_change_ok(inode, attr); /* basic permission checks */ + error = setattr_prepare(dentry, attr); /* basic permission checks */ if (error) return error; diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 0cf786f2d046..9cd897cc2384 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -247,7 +247,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c index df0c9af68d05..71b3087b7e32 100644 --- a/fs/hfsplus/posix_acl.c +++ b/fs/hfsplus/posix_acl.c @@ -68,8 
+68,8 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, case ACL_TYPE_ACCESS: xattr_name = POSIX_ACL_XATTR_ACCESS; if (acl) { - err = posix_acl_equiv_mode(acl, &inode->i_mode); - if (err < 0) + err = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (err) return err; } err = 0; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 070e249f7d0f..50f0b4f0480e 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -792,7 +792,7 @@ static int hostfs_setattr(struct dentry *dentry, struct iattr *attr) int fd = HOSTFS_I(inode)->fd; - err = inode_change_ok(inode, attr); + err = setattr_prepare(dentry, attr); if (err) return err; @@ -942,10 +942,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) if (S_ISLNK(root_inode->i_mode)) { char *name = follow_link(host_root_path); - if (IS_ERR(name)) + if (IS_ERR(name)) { err = PTR_ERR(name); - else - err = read_name(root_inode, name); + goto out_put; + } + err = read_name(root_inode, name); kfree(name); if (err) goto out_put; diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index 7ce4b74234a1..5ada7b73b5d9 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -272,7 +272,7 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr) if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size) goto out_unlock; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) goto out_unlock; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 1e2872b25343..e3ac491424fc 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -429,7 +429,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) BUG_ON(!inode); - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 009ec0b5993d..4f43deb09a5a 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c @@ -236,9 +236,10 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) case ACL_TYPE_ACCESS: xprefix = JFFS2_XPREFIX_ACL_ACCESS; if (acl) { - umode_t mode = inode->i_mode; - rc = posix_acl_equiv_mode(acl, &mode); - if (rc < 0) + umode_t mode; + + rc = posix_acl_update_mode(inode, &mode, &acl); + if (rc) return rc; if (inode->i_mode != mode) { struct iattr attr; @@ -250,8 +251,6 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) if (rc < 0) return rc; } - if (rc == 0) - acl = NULL; } break; case ACL_TYPE_DEFAULT: diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 601afd1afddf..bb9460ea36e8 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -193,7 +193,7 @@ int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) struct inode *inode = dentry->d_inode; int rc; - rc = inode_change_ok(inode, iattr); + rc = setattr_prepare(dentry, iattr); if (rc) return rc; diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 0c8ca830b113..9fad9f4fe883 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -84,13 +84,11 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type, case ACL_TYPE_ACCESS: ea_name = POSIX_ACL_XATTR_ACCESS; if (acl) { - rc = posix_acl_equiv_mode(acl, &inode->i_mode); - if (rc < 0) + rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (rc) return rc; inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); - if (rc == 0) - acl = NULL; } break; case ACL_TYPE_DEFAULT: diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 33aa0cc1f8b8..b43cc9a6dba1 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -103,7 +103,7 @@ int 
jfs_setattr(struct dentry *dentry, struct iattr *iattr) struct inode *inode = dentry->d_inode; int rc; - rc = inode_change_ok(inode, iattr); + rc = setattr_prepare(dentry, iattr); if (rc) return rc; diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index d895b4b7b661..b120e85aa7b9 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -828,21 +828,35 @@ repeat: mutex_lock(&kernfs_mutex); list_for_each_entry(info, &kernfs_root(kn)->supers, node) { + struct kernfs_node *parent; struct inode *inode; - struct dentry *dentry; + /* + * We want fsnotify_modify() on @kn but as the + * modifications aren't originating from userland don't + * have the matching @file available. Look up the inodes + * and generate the events manually. + */ inode = ilookup(info->sb, kn->ino); if (!inode) continue; - dentry = d_find_any_alias(inode); - if (dentry) { - fsnotify_parent(NULL, dentry, FS_MODIFY); - fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, - NULL, 0); - dput(dentry); + parent = kernfs_get_parent(kn); + if (parent) { + struct inode *p_inode; + + p_inode = ilookup(info->sb, parent->ino); + if (p_inode) { + fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD, + inode, FSNOTIFY_EVENT_INODE, kn->name, 0); + iput(p_inode); + } + + kernfs_put(parent); } + fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, + kn->name, 0); iput(inode); } diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index 5b8ab29494b7..8b6cb8cbf887 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -131,7 +131,7 @@ int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr) return -EINVAL; mutex_lock(&kernfs_mutex); - error = inode_change_ok(inode, iattr); + error = setattr_prepare(dentry, iattr); if (error) goto out; diff --git a/fs/libfs.c b/fs/libfs.c index 5ba7e10d6b23..51994a562aca 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -371,7 +371,7 @@ int simple_setattr(struct dentry *dentry, struct iattr *iattr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, iattr); + error = setattr_prepare(dentry, iattr); if (error) return error; diff --git a/fs/logfs/file.c b/fs/logfs/file.c index 8538752df2f6..34f4d33b6ee9 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c @@ -244,7 +244,7 @@ static int logfs_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; int err = 0; - err = inode_change_ok(inode, attr); + err = setattr_prepare(dentry, attr); if (err) return err; diff --git a/fs/minix/file.c b/fs/minix/file.c index a967de085ac0..6b569161325e 100644 --- a/fs/minix/file.c +++ b/fs/minix/file.c @@ -28,7 +28,7 @@ static int minix_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index e31e589369a4..f8d3f461a01f 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -885,7 +885,7 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr) /* ageing the dentry to force validation */ ncp_age_dentry(server, dentry); - result = inode_change_ok(inode, attr); + result = setattr_prepare(dentry, attr); if (result < 0) goto out; diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 8c849947d34f..c1f955e2d809 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -301,6 +301,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct n err_socks: svc_rpcb_cleanup(serv, net); err_bind: + nn->cb_users[minorversion]--; 
dprintk("NFS: Couldn't create callback socket: err = %d; " "net = %p\n", ret, net); return ret; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 02f8d09e119f..b06c6ba9765d 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -915,7 +915,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r if (hdr_arg.minorversion == 0) { cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident); if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) - return rpc_drop_reply; + goto out_invalidcred; } cps.minorversion = hdr_arg.minorversion; @@ -943,6 +943,10 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r nfs_put_client(cps.clp); dprintk("%s: done, status = %u\n", __func__, ntohl(status)); return rpc_success; + +out_invalidcred: + pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n"); + return rpc_autherr_badcred; } /* diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 7dc0bf73e76a..1522ec81c26f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7063,13 +7063,22 @@ static int _nfs4_proc_create_session(struct nfs_client *clp, status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); trace_nfs4_create_session(clp, status); + switch (status) { + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_DELAY: + case -ETIMEDOUT: + case -EACCES: + case -EAGAIN: + goto out; + }; + + clp->cl_seqid++; if (!status) { /* Verify the session's negotiated channel_attrs values */ status = nfs4_verify_channel_attrs(&args, session); - /* Increment the clientid slot sequence id */ - clp->cl_seqid++; } +out: return status; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ecb0f9fd5632..34f18294817a 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1222,6 +1222,9 @@ int nfs_updatepage(struct file *file, struct page *page, dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n", file, count, (long long)(page_file_offset(page) + offset)); + if (!count) + goto out; + if (nfs_can_extend_write(file, page, inode)) { count = max(count + offset, nfs_page_length(page)); offset = 0; @@ -1232,7 +1235,7 @@ int nfs_updatepage(struct file *file, struct page *page, nfs_set_pageerror(page); else __set_page_dirty_nobuffers(page); - +out: dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", status, (long long)i_size_read(inode)); return status; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 140c496f612c..521f717b4a99 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -300,17 +300,19 @@ commit_metadata(struct svc_fh *fhp) * NFS semantics and what Linux expects. */ static void -nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap) +nfsd_sanitize_attrs(struct dentry *dentry, struct iattr *iap) { + struct inode *inode = dentry->d_inode; + /* * NFSv2 does not differentiate between "set-[ac]time-to-now" * which only requires access, and "set-[ac]time-to-X" which * requires ownership. * So if it looks like it might be "set both to the same time which - * is close to now", and if inode_change_ok fails, then we + * is close to now", and if setattr_prepare fails, then we * convert to "set to now" instead of "set to explicit time" * - * We only call inode_change_ok as the last test as technically + * We only call setattr_prepare as the last test as technically * it is not an interface that we should be using. 
*/ #define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET) @@ -328,7 +330,7 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap) if (delta < 0) delta = -delta; if (delta < MAX_TOUCH_TIME_ERROR && - inode_change_ok(inode, iap) != 0) { + setattr_prepare(dentry, iap) != 0) { /* * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME. * This will cause notify_change to set these times @@ -435,7 +437,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, if (!iap->ia_valid) goto out; - nfsd_sanitize_attrs(inode, iap); + nfsd_sanitize_attrs(dentry, iap); /* * The size case is special, it changes the file in addition to the diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 31b6b2953c59..80e61bad215d 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -839,7 +839,7 @@ int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) struct super_block *sb = inode->i_sb; int err; - err = inode_change_ok(inode, iattr); + err = setattr_prepare(dentry, iattr); if (err) return err; diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 7e948ffba461..0439f6cdb931 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group, pr_debug("%s: group=%p event=%p\n", __func__, group, event); - wait_event(group->fanotify_data.access_waitq, event->response || - atomic_read(&group->fanotify_data.bypass_perm)); - - if (!event->response) { /* bypass_perm set */ - /* - * Event was canceled because group is being destroyed. Remove - * it from group's event list because we are responsible for - * freeing the permission event. - */ - fsnotify_remove_event(group, &event->fae.fse); - return 0; - } + wait_event(group->fanotify_data.access_waitq, event->response); /* userspace responded, convert to something usable */ switch (event->response) { diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index ec50a8385b13..394da4dc45e1 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file) #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS struct fanotify_perm_event_info *event, *next; + struct fsnotify_event *fsn_event; /* - * There may be still new events arriving in the notification queue - * but since userspace cannot use fanotify fd anymore, no event can - * enter or leave access_list by now. + * Stop new events from arriving in the notification queue. since + * userspace cannot use fanotify fd anymore, no event can enter or + * leave access_list by now either. */ - spin_lock(&group->fanotify_data.access_lock); - - atomic_inc(&group->fanotify_data.bypass_perm); + fsnotify_group_stop_queueing(group); + /* + * Process all permission events on access_list and notification queue + * and simulate reply from userspace. + */ + spin_lock(&group->fanotify_data.access_lock); list_for_each_entry_safe(event, next, &group->fanotify_data.access_list, fae.fse.list) { pr_debug("%s: found group=%p event=%p\n", __func__, group, @@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file) spin_unlock(&group->fanotify_data.access_lock); /* - * Since bypass_perm is set, newly queued events will not wait for - * access response. Wake up the already sleeping ones now. 
- * synchronize_srcu() in fsnotify_destroy_group() will wait for all - * processes sleeping in fanotify_handle_event() waiting for access - * response and thus also for all permission events to be freed. + * Destroy all non-permission events. For permission events just + * dequeue them and set the response. They will be freed once the + * response is consumed and fanotify_get_response() returns. */ + mutex_lock(&group->notification_mutex); + while (!fsnotify_notify_queue_is_empty(group)) { + fsn_event = fsnotify_remove_notify_event(group); + if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) + fsnotify_destroy_event(group, fsn_event); + else + FANOTIFY_PE(fsn_event)->response = FAN_ALLOW; + } + mutex_unlock(&group->notification_mutex); + + /* Response for all permission events it set, wakeup waiters */ wake_up(&group->fanotify_data.access_waitq); #endif @@ -742,7 +755,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) spin_lock_init(&group->fanotify_data.access_lock); init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); - atomic_set(&group->fanotify_data.bypass_perm, 0); #endif switch (flags & FAN_ALL_CLASS_BITS) { case FAN_CLASS_NOTIF: diff --git a/fs/notify/group.c b/fs/notify/group.c index ad1995980456..f3e3188f3568 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -40,6 +40,17 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group) } /* + * Stop queueing new events for this group. Once this function returns + * fsnotify_add_event() will not add any new events to the group's queue. + */ +void fsnotify_group_stop_queueing(struct fsnotify_group *group) +{ + mutex_lock(&group->notification_mutex); + group->shutdown = true; + mutex_unlock(&group->notification_mutex); +} + +/* * Trying to get rid of a group. Remove all marks, flush all events and release * the group reference. * Note that another thread calling fsnotify_clear_marks_by_group() may still @@ -47,6 +58,14 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group) */ void fsnotify_destroy_group(struct fsnotify_group *group) { + /* + * Stop queueing new events. The code below is careful enough to not + * require this but fanotify needs to stop queuing events even before + * fsnotify_destroy_group() is called and this makes the other callers + * of fsnotify_destroy_group() to see the same behavior. + */ + fsnotify_group_stop_queueing(group); + /* clear all inode marks for this group */ fsnotify_clear_marks_by_group(group); diff --git a/fs/notify/notification.c b/fs/notify/notification.c index 25a07c70f1c9..1646ff5dca5c 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group, * Add an event to the group notification queue. The group can later pull this * event off the queue to deal with. The function returns 0 if the event was * added to the queue, 1 if the event was merged with some other queued event, - * 2 if the queue of events has overflown. + * 2 if the event was not queued - either the queue of events has overflown + * or the group is shutting down. 
*/ int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, @@ -96,6 +97,11 @@ int fsnotify_add_notify_event(struct fsnotify_group *group, mutex_lock(&group->notification_mutex); + if (group->shutdown) { + mutex_unlock(&group->notification_mutex); + return 2; + } + if (group->q_len >= group->max_events) { ret = 2; /* Queue overflow event only if it isn't already queued */ @@ -126,21 +132,6 @@ queue: } /* - * Remove @event from group's notification queue. It is the responsibility of - * the caller to destroy the event. - */ -void fsnotify_remove_event(struct fsnotify_group *group, - struct fsnotify_event *event) -{ - mutex_lock(&group->notification_mutex); - if (!list_empty(&event->list)) { - list_del_init(&event->list); - group->q_len--; - } - mutex_unlock(&group->notification_mutex); -} - -/* * Remove and return the first event from the notification list. It is the * responsibility of the caller to destroy the obtained event */ diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index f47af5e6e230..a5589470373f 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -2891,7 +2891,7 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr) int err; unsigned int ia_valid = attr->ia_valid; - err = inode_change_ok(vi, attr); + err = setattr_prepare(dentry, attr); if (err) goto out; /* We do not support NTFS ACLs yet. */ diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 7f9e4484c6f6..293ab80f3fe2 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -241,14 +241,11 @@ int ocfs2_set_acl(handle_t *handle, case ACL_TYPE_ACCESS: name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { - umode_t mode = inode->i_mode; - ret = posix_acl_equiv_mode(acl, &mode); - if (ret < 0) + umode_t mode; + ret = posix_acl_update_mode(inode, &mode, &acl); + if (ret) return ret; else { - if (ret == 0) - acl = NULL; - ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); if (ret) diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index f90931335c6b..2e11658676eb 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock *lock, int flags, int type) { enum dlm_status status; - u8 old_owner = res->owner; mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); @@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; - lock->convert_pending = 0; /* if it failed, move it back to granted queue. 
* if master returns DLM_NORMAL and then down before sending ast, * it may have already been moved to granted queue, reset to @@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, if (status != DLM_NOTQUEUED) dlm_error(status); dlm_revert_pending_convert(res, lock); - } else if ((res->state & DLM_LOCK_RES_RECOVERING) || - (old_owner != res->owner)) { - mlog(0, "res %.*s is in recovering or has been recovered.\n", - res->lockname.len, res->lockname.name); + } else if (!lock->convert_pending) { + mlog(0, "%s: res %.*s, owner died and lock has been moved back " + "to granted list, retry convert.\n", + dlm->name, res->lockname.len, res->lockname.name); status = DLM_RECOVERING; } + + lock->convert_pending = 0; bail: spin_unlock(&res->spinlock); diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 09b7d9dac71d..4d129dfcc1c7 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -211,7 +211,7 @@ static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; attr->ia_valid &= ~ATTR_SIZE; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index fd106d136983..cd3b06f600a2 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1144,7 +1144,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) return 0; - status = inode_change_ok(inode, attr); + status = setattr_prepare(dentry, attr); if (status) return status; @@ -1516,7 +1516,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, u64 start, u64 len) { int ret = 0; - u64 tmpend, end = start + len; + u64 tmpend = 0; + u64 end = start + len; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); unsigned int csize = osb->s_clustersize; handle_t *handle; @@ -1548,18 +1549,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode, } /* - * We want to get the byte offset of the end of the 1st cluster. + * If start is on a cluster boundary and end is somewhere in another + * cluster, we have not COWed the cluster starting at start, unless + * end is also within the same cluster. So, in this case, we skip this + * first call to ocfs2_zero_range_for_truncate() truncate and move on + * to the next one. */ - tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1)); - if (tmpend > end) - tmpend = end; + if ((start & (csize - 1)) != 0) { + /* + * We want to get the byte offset of the end of the 1st + * cluster. 
+ */ + tmpend = (u64)osb->s_clustersize + + (start & ~(osb->s_clustersize - 1)); + if (tmpend > end) + tmpend = end; - trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start, - (unsigned long long)tmpend); + trace_ocfs2_zero_partial_clusters_range1( + (unsigned long long)start, + (unsigned long long)tmpend); - ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); - if (ret) - mlog_errno(ret); + ret = ocfs2_zero_range_for_truncate(inode, handle, start, + tmpend); + if (ret) + mlog_errno(ret); + } if (tmpend < end) { /* diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 902e88527fce..b53f0602cc03 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -351,7 +351,7 @@ static int omfs_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/posix_acl.c b/fs/posix_acl.c index b1c249115aaa..a767c594f17a 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -594,6 +594,37 @@ no_acl: } EXPORT_SYMBOL_GPL(posix_acl_create); +/** + * posix_acl_update_mode - update mode in set_acl + * + * Update the file mode when setting an ACL: compute the new file permission + * bits based on the ACL. In addition, if the ACL is equivalent to the new + * file mode, set *acl to NULL to indicate that no ACL should be set. + * + * As with chmod, clear the setgid bit if the caller is not in the owning group + * or capable of CAP_FSETID (see inode_change_ok). + * + * Called from set_acl inode operations. + */ +int posix_acl_update_mode(struct inode *inode, umode_t *mode_p, + struct posix_acl **acl) +{ + umode_t mode = inode->i_mode; + int error; + + error = posix_acl_equiv_mode(*acl, &mode); + if (error < 0) + return error; + if (error == 0) + *acl = NULL; + if (!in_group_p(inode->i_gid) && + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) + mode &= ~S_ISGID; + *mode_p = mode; + return 0; +} +EXPORT_SYMBOL(posix_acl_update_mode); + /* * Fix up the uids and gids in posix acl extended attributes in place.
*/ diff --git a/fs/proc/base.c b/fs/proc/base.c index 0897f5cb4957..944c6be58199 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -536,7 +536,7 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr) if (attr->ia_valid & ATTR_MODE) return -EPERM; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index c18d9b05cea9..d95c6868b5f5 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -41,7 +41,7 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr) struct proc_dir_entry *de = PDE(inode); int error; - error = inode_change_ok(inode, iattr); + error = setattr_prepare(dentry, iattr); if (error) return error; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index b45db1ddb8e0..728c4657188f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -753,7 +753,7 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr) if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) return -EPERM; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index dda012ad4208..a6b6083176d3 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -163,7 +163,7 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia) int ret = 0; /* POSIX UID/GID verification for setting inode attributes */ - ret = inode_change_ok(inode, ia); + ret = setattr_prepare(dentry, ia); if (ret) return ret; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 63b2b0ec49e6..27b402e05aa2 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3312,7 +3312,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) unsigned int ia_valid; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 44503e293790..7071c2230a92 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -246,13 +246,9 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { - error = posix_acl_equiv_mode(acl, &inode->i_mode); - if (error < 0) + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) return error; - else { - if (error == 0) - acl = NULL; - } } break; case ACL_TYPE_DEFAULT: diff --git a/fs/seq_file.c b/fs/seq_file.c index 3857b720cb1b..fbb1688bff87 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -219,8 +219,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) size -= n; buf += n; copied += n; - if (!m->count) + if (!m->count) { + m->from = 0; m->index++; + } if (!size) goto Done; } diff --git a/fs/sysv/file.c b/fs/sysv/file.c index b00811c75b24..3bb4ccd69731 100644 --- a/fs/sysv/file.c +++ b/fs/sysv/file.c @@ -35,7 +35,7 @@ static int sysv_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index d37140e5b722..68d7fe857ba9 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1262,7 +1262,7 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr) dbg_gen("ino %lu, mode %#x, ia_valid %#x", inode->i_ino, inode->i_mode, attr->ia_valid); - 
err = inode_change_ok(inode, attr); + err = setattr_prepare(dentry, attr); if (err) return err; diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 3600994f8411..1512f3490848 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) p = c->gap_lebs; do { - ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs); + ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs); written = layout_leb_in_gaps(c, p); if (written < 0) { err = written; diff --git a/fs/udf/file.c b/fs/udf/file.c index d80738fdf424..1aaf63a5f32f 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -269,7 +269,7 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index f04f89fbd4d9..92cde998aead 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c @@ -496,7 +496,7 @@ int ufs_setattr(struct dentry *dentry, struct iattr *attr) unsigned int ia_valid = attr->ia_valid; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/fs/utimes.c b/fs/utimes.c index aa138d64560a..61abc3051377 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -81,7 +81,7 @@ static int utimes_common(struct path *path, struct timespec *times) newattrs.ia_valid |= ATTR_MTIME_SET; } /* - * Tell inode_change_ok(), that this is an explicit time + * Tell setattr_prepare(), that this is an explicit time * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET * were used. */ @@ -90,7 +90,7 @@ static int utimes_common(struct path *path, struct timespec *times) /* * If times is NULL (or both times are UTIME_NOW), * then we need to check permissions, because - * inode_change_ok() won't do it. + * setattr_prepare() won't do it. 
*/ error = -EACCES; if (IS_IMMUTABLE(inode)) diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 6888ad886ff6..d1229033a22f 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -244,7 +244,8 @@ xfs_set_mode(struct inode *inode, umode_t mode) iattr.ia_mode = mode; iattr.ia_ctime = current_fs_time(inode->i_sb); - error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); + error = -xfs_setattr_nonsize(NULL, XFS_I(inode), &iattr, + XFS_ATTR_NOACL); } return error; @@ -286,16 +287,11 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) return error; if (type == ACL_TYPE_ACCESS) { - umode_t mode = inode->i_mode; - error = posix_acl_equiv_mode(acl, &mode); - - if (error <= 0) { - acl = NULL; - - if (error < 0) - return error; - } + umode_t mode; + error = posix_acl_update_mode(inode, &mode, &acl); + if (error) + return error; error = xfs_set_mode(inode, mode); if (error) return error; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index d2f4cb598b46..eb04981b03b4 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -862,7 +862,7 @@ xfs_file_fallocate( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = new_size; - error = xfs_setattr_size(ip, &iattr); + error = xfs_setattr_size(file->f_dentry, &iattr); } out_unlock: diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index afc859f44d01..cee62a5afbee 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -717,7 +717,7 @@ xfs_ioc_space( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = bf->l_start; - error = xfs_setattr_size(ip, &iattr); + error = xfs_setattr_size(filp->f_dentry, &iattr); if (!error) clrprealloc = true; break; diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 125da8969c72..cfc5c260ecbe 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -527,6 +527,7 @@ xfs_setattr_time( int xfs_setattr_nonsize( + struct dentry *dentry, struct xfs_inode *ip, struct iattr *iattr, int flags) @@ -551,7 +552,7 @@ xfs_setattr_nonsize( if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); - error = -inode_change_ok(inode, iattr); + error = -setattr_prepare(dentry, iattr); if (error) return XFS_ERROR(error); } @@ -734,11 +735,12 @@ out_dqrele: */ int xfs_setattr_size( - struct xfs_inode *ip, + struct dentry *dentry, struct iattr *iattr) { + struct inode *inode = dentry->d_inode; + struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - struct inode *inode = VFS_I(ip); xfs_off_t oldsize, newsize; struct xfs_trans *tp; int error; @@ -754,7 +756,7 @@ xfs_setattr_size( if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); - error = -inode_change_ok(inode, iattr); + error = -setattr_prepare(dentry, iattr); if (error) return XFS_ERROR(error); @@ -778,7 +780,7 @@ xfs_setattr_size( * Use the regular setattr path to update the timestamps. 
*/ iattr->ia_valid &= ~ATTR_SIZE; - return xfs_setattr_nonsize(ip, iattr, 0); + return xfs_setattr_nonsize(dentry, ip, iattr, 0); } /* @@ -939,10 +941,10 @@ xfs_vn_setattr( if (iattr->ia_valid & ATTR_SIZE) { xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL); - error = xfs_setattr_size(ip, iattr); + error = xfs_setattr_size(dentry, iattr); xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL); } else { - error = xfs_setattr_nonsize(ip, iattr, 0); + error = xfs_setattr_nonsize(dentry, ip, iattr, 0); } return -error; diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h index 1c34e4335920..f66a37c0a88a 100644 --- a/fs/xfs/xfs_iops.h +++ b/fs/xfs/xfs_iops.h @@ -32,8 +32,8 @@ extern void xfs_setup_inode(struct xfs_inode *); */ #define XFS_ATTR_NOACL 0x01 /* Don't call posix_acl_chmod */ -extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, - int flags); -extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap); +extern int xfs_setattr_nonsize(struct dentry *dentry, struct xfs_inode *ip, + struct iattr *vap, int flags); +extern int xfs_setattr_size(struct dentry *dentry, struct iattr *vap); #endif /* __XFS_IOPS_H__ */ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 72d8803832ff..ae88865a3d67 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -228,14 +228,18 @@ extern int __put_user_bad(void) __attribute__((noreturn)); might_fault(); \ access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \ __get_user(x, ptr) : \ - -EFAULT; \ + ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ }) #ifndef __get_user_fn static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) { - size = __copy_from_user(x, ptr, size); - return size ? -EFAULT : size; + size_t n = __copy_from_user(x, ptr, size); + if (unlikely(n)) { + memset(x + (size - n), 0, n); + return -EFAULT; + } + return 0; } #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) @@ -255,11 +259,13 @@ extern int __get_user_bad(void) __attribute__((noreturn)); static inline long copy_from_user(void *to, const void __user * from, unsigned long n) { + unsigned long res = n; might_fault(); - if (access_ok(VERIFY_READ, from, n)) - return __copy_from_user(to, from, n); - else - return n; + if (likely(access_ok(VERIFY_READ, from, n))) + res = __copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } static inline long copy_to_user(void __user *to, diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e488e9459a93..f0069a94d9cc 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -100,6 +100,7 @@ struct backing_dev_info { struct list_head work_list; struct device *dev; + struct device *owner; struct timer_list laptop_mode_wb_timer; @@ -116,6 +117,7 @@ __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 0b3bb16c705a..b19c7ed88cff 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -153,6 +153,7 @@ struct bcma_host_ops { #define BCMA_CORE_DEFAULT 0xFFF #define BCMA_MAX_NR_CORES 16 +#define 
BCMA_CORE_SIZE 0x1000 /* Chip IDs of PCIe devices */ #define BCMA_CHIP_ID_BCM4313 0x4313 diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 6992afc6ba7f..e90bdfc4d0b7 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -31,6 +31,7 @@ enum can_mode { * CAN common private data */ struct can_priv { + struct net_device *dev; struct can_device_stats can_stats; struct can_bittiming bittiming, data_bittiming; @@ -43,7 +44,7 @@ struct can_priv { u32 ctrlmode_supported; int restart_ms; - struct timer_list restart_timer; + struct delayed_work restart_work; int (*do_set_bittiming)(struct net_device *dev); int (*do_set_data_bittiming)(struct net_device *dev); diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index bb2cdcd929c6..633716ef19b0 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -177,7 +177,7 @@ #define unreachable() __builtin_unreachable() /* Mark a function definition as prohibited from being cloned. */ -#define __noclone __attribute__((__noclone__)) +#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) #endif /* GCC_VERSION >= 40500 */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 8cb09c9d81ef..1431089c54dc 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -117,6 +117,15 @@ typedef struct { u32 imagesize; } efi_capsule_header_t; +struct efi_boot_memmap { + efi_memory_desc_t **map; + unsigned long *map_size; + unsigned long *desc_size; + u32 *desc_ver; + unsigned long *key_ptr; + unsigned long *buff_size; +}; + /* * Allocation types for calls to boottime->allocate_pages. */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 545837db9468..3b69a82bbdd9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2627,7 +2627,7 @@ extern int buffer_migrate_page(struct address_space *, #define buffer_migrate_page NULL #endif -extern int inode_change_ok(const struct inode *, struct iattr *); +extern int setattr_prepare(struct dentry *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); extern void setattr_copy(struct inode *inode, const struct iattr *attr); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index d2be2526ec48..e36f4e76ba94 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -150,6 +150,7 @@ struct fsnotify_group { #define FS_PRIO_1 1 /* fanotify content based access control */ #define FS_PRIO_2 2 /* fanotify pre-content access */ unsigned int priority; + bool shutdown; /* group is being shut down, don't queue more events */ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ @@ -181,7 +182,6 @@ struct fsnotify_group { spinlock_t access_lock; struct list_head access_list; wait_queue_head_t access_waitq; - atomic_t bypass_perm; #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ int f_flags; unsigned int max_marks; @@ -314,6 +314,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); +/* group destruction begins, stop queuing new events */ +extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); /* destroy group */ extern void fsnotify_destroy_group(struct fsnotify_group *group); /* fasync handler function */ @@ -326,8 +328,6 @@ 
extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)); -/* Remove passed event from groups notification queue */ -extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event); /* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ diff --git a/include/linux/i8042.h b/include/linux/i8042.h index 0f9bafa17a02..d98780ca9604 100644 --- a/include/linux/i8042.h +++ b/include/linux/i8042.h @@ -62,7 +62,6 @@ struct serio; void i8042_lock_chip(void); void i8042_unlock_chip(void); int i8042_command(unsigned char *param, int command); -bool i8042_check_port_owner(const struct serio *); int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)); int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str, @@ -83,11 +82,6 @@ static inline int i8042_command(unsigned char *param, int command) return -ENODEV; } -static inline bool i8042_check_port_owner(const struct serio *serio) -{ - return false; -} - static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str, struct serio *serio)) { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index ab1c14a56e6d..0efa883e6508 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -442,9 +442,9 @@ struct mlx5_destroy_qp_mbox_out { struct mlx5_modify_qp_mbox_in { struct mlx5_inbox_hdr hdr; __be32 qpn; - u8 rsvd1[4]; - __be32 optparam; u8 rsvd0[4]; + __be32 optparam; + u8 rsvd1[4]; struct mlx5_qp_context ctx; u8 rsvd2[16]; }; diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 79aaa9fc1a15..d5277fc3ce2e 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -103,5 +103,5 @@ struct mfc_cache { struct rtmsg; extern int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 66982e764051..f831155dc7d1 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -115,7 +115,7 @@ struct mfc6_cache { struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, - struct rtmsg *rtm, int nowait); + struct rtmsg *rtm, int nowait, u32 portid); #ifdef CONFIG_IPV6_MROUTE extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 7741efa43b35..cc615e273f80 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset); +unsigned int *xt_alloc_entry_offsets(unsigned int size); +bool xt_find_jump_offset(const unsigned int *offsets, + unsigned int target, unsigned int size); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 25f106b0375b..10b57940dc62 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -599,56 +599,56 @@ static inline int fault_in_pages_readable(const char __user 
*uaddr, int size) */ static inline int fault_in_multipages_writeable(char __user *uaddr, int size) { - int ret = 0; char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; + if (unlikely(uaddr > end)) + return -EFAULT; /* * Writing zeroes into userspace here is OK, because we know that if * the zero gets there, we'll be overwriting it. */ - while (uaddr <= end) { - ret = __put_user(0, uaddr); - if (ret != 0) - return ret; + do { + if (unlikely(__put_user(0, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) - ret = __put_user(0, end); + return __put_user(0, end); - return ret; + return 0; } static inline int fault_in_multipages_readable(const char __user *uaddr, int size) { volatile char c; - int ret = 0; const char __user *end = uaddr + size - 1; if (unlikely(size == 0)) - return ret; + return 0; - while (uaddr <= end) { - ret = __get_user(c, uaddr); - if (ret != 0) - return ret; + if (unlikely(uaddr > end)) + return -EFAULT; + + do { + if (unlikely(__get_user(c, uaddr) != 0)) + return -EFAULT; uaddr += PAGE_SIZE; - } + } while (uaddr <= end); /* Check whether the range spilled into the next page. */ if (((unsigned long)uaddr & PAGE_MASK) == ((unsigned long)end & PAGE_MASK)) { - ret = __get_user(c, end); - (void)c; + return __get_user(c, end); } - return ret; + return 0; } int add_to_page_cache_locked(struct page *page, struct address_space *mapping, diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 3e96a6a76103..d1a8ad7e5ae4 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -95,6 +95,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *); extern int posix_acl_chmod(struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, struct posix_acl **); +extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); extern int simple_set_acl(struct inode *, struct posix_acl *, int); extern int simple_acl_create(struct inode *, struct inode *); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 5d5174b59802..673dee29a9b9 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -382,6 +382,7 @@ static inline __must_check void **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; + iter->tags = 0; return NULL; } diff --git a/include/linux/serio.h b/include/linux/serio.h index 9f779c7a2da4..27ae809edd70 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -29,7 +29,8 @@ struct serio { struct serio_device_id id; - spinlock_t lock; /* protects critical sections from port's interrupt handler */ + /* Protects critical sections from port's interrupt handler */ + spinlock_t lock; int (*write)(struct serio *, unsigned char); int (*open)(struct serio *); @@ -38,16 +39,29 @@ struct serio { void (*stop)(struct serio *); struct serio *parent; - struct list_head child_node; /* Entry in parent->children list */ + /* Entry in parent->children list */ + struct list_head child_node; struct list_head children; - unsigned int depth; /* level of nesting in serio hierarchy */ + /* Level of nesting in serio hierarchy */ + unsigned int depth; - struct serio_driver *drv; /* accessed from interrupt, must be protected by serio->lock and serio->sem */ - struct mutex drv_mutex; /* protects serio->drv so attributes can pin 
driver */ + /* + * serio->drv is accessed from interrupt handlers; when modifying + * caller should acquire serio->drv_mutex and serio->lock. + */ + struct serio_driver *drv; + /* Protects serio->drv so attributes can pin current driver */ + struct mutex drv_mutex; struct device dev; struct list_head node; + + /* + * For use by PS/2 layer when several ports share hardware and + * may get indigestion when exposed to concurrent access (i8042). + */ + struct mutex *ps2_cmd_mutex; }; #define to_serio_port(d) container_of(d, struct serio, dev) diff --git a/include/linux/swap.h b/include/linux/swap.h index 4bdbee80eede..3bc5cd6ee4bb 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -274,6 +274,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node) static inline void workingset_node_pages_dec(struct radix_tree_node *node) { + VM_BUG_ON(!workingset_node_pages(node)); node->count--; } @@ -289,6 +290,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node) static inline void workingset_node_shadows_dec(struct radix_tree_node *node) { + VM_BUG_ON(!workingset_node_shadows(node)); node->count -= 1U << RADIX_TREE_COUNT_SHIFT; } diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 0dc0a51da38f..dce2d586d9ce 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) to = from | htonl(INET_ECN_CE << 20); *(__be32 *)iph = to; if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_add(csum_sub(skb->csum, from), to); + skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), + (__force __wsum)to); return 1; } diff --git a/include/net/tcp.h b/include/net/tcp.h index d587ff0f8828..60e2dc5cef26 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1413,6 +1413,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli { if (sk->sk_send_head == skb_unlinked) sk->sk_send_head = NULL; + if (tcp_sk(sk)->highest_sack == skb_unlinked) + tcp_sk(sk)->highest_sack = NULL; } static inline void tcp_init_send_head(struct sock *sk) diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index e006e10e08a9..a09a83603c52 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -97,6 +97,6 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, void array_free(void *array, int n); sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, - struct request_queue *q, int block_size); + struct request_queue *q); #endif /* TARGET_CORE_BACKEND_H */ diff --git a/ipc/msg.c b/ipc/msg.c index cfc8b388332d..02e72d3db498 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -678,7 +678,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, rcu_read_lock(); ipc_lock_object(&msq->q_perm); - ipc_rcu_putref(msq, ipc_rcu_free); + ipc_rcu_putref(msq, msg_rcu_free); /* raced with RMID? 
*/ if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; diff --git a/ipc/sem.c b/ipc/sem.c index 541cb0ff9bd6..fb0c4c96e50a 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -442,7 +442,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns static inline void sem_lock_and_putref(struct sem_array *sma) { sem_lock(sma, NULL, -1); - ipc_rcu_putref(sma, ipc_rcu_free); + ipc_rcu_putref(sma, sem_rcu_free); } static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) @@ -1385,7 +1385,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, rcu_read_unlock(); sem_io = ipc_alloc(sizeof(ushort)*nsems); if (sem_io == NULL) { - ipc_rcu_putref(sma, ipc_rcu_free); + ipc_rcu_putref(sma, sem_rcu_free); return -ENOMEM; } @@ -1419,20 +1419,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, if (nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); if (sem_io == NULL) { - ipc_rcu_putref(sma, ipc_rcu_free); + ipc_rcu_putref(sma, sem_rcu_free); return -ENOMEM; } } if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { - ipc_rcu_putref(sma, ipc_rcu_free); + ipc_rcu_putref(sma, sem_rcu_free); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { - ipc_rcu_putref(sma, ipc_rcu_free); + ipc_rcu_putref(sma, sem_rcu_free); err = -ERANGE; goto out_free; } @@ -1722,7 +1722,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) /* step 2: allocate new undo structure */ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { - ipc_rcu_putref(sma, ipc_rcu_free); + ipc_rcu_putref(sma, sem_rcu_free); return ERR_PTR(-ENOMEM); } diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 524a8eef2965..533494236c12 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1963,6 +1963,20 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) kfree(cs); } +/* + * Make sure the new task conform to the current state of its parent, + * which could have been changed by cpuset just after it inherits the + * state from the parent and before it sits on the cgroup's task list. + */ +void cpuset_fork(struct task_struct *task) +{ + if (task_css_is_root(task, cpuset_cgrp_id)) + return; + + set_cpus_allowed_ptr(task, &current->cpus_allowed); + task->mems_allowed = current->mems_allowed; +} + struct cgroup_subsys cpuset_cgrp_subsys = { .css_alloc = cpuset_css_alloc, .css_online = cpuset_css_online, @@ -1971,6 +1985,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = { .can_attach = cpuset_can_attach, .cancel_attach = cpuset_cancel_attach, .attach = cpuset_attach, + .fork = cpuset_fork, .base_cftypes = files, .early_init = 1, }; diff --git a/kernel/fork.c b/kernel/fork.c index a4ca06e80972..602dbac0b201 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -777,14 +777,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) deactivate_mm(tsk, mm); /* - * If we're exiting normally, clear a user-space tid field if - * requested. We leave this alone when dying by signal, to leave - * the value intact in a core dump, and to save the unnecessary - * trouble, say, a killed vfork parent shouldn't touch this mm. - * Userland only wants this done for a sys_exit. + * Signal userspace if we're not exiting with a core dump + * because we want to leave the value intact for debugging + * purposes.
*/ if (tsk->clear_child_tid) { - if (!(tsk->flags & PF_SIGNALED) && + if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has diff --git a/kernel/module.c b/kernel/module.c index c3ca760edaf0..8c3baf05f7bd 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2435,13 +2435,18 @@ static inline void kmemleak_load_module(const struct module *mod, #endif #ifdef CONFIG_MODULE_SIG -static int module_sig_check(struct load_info *info) +static int module_sig_check(struct load_info *info, int flags) { int err = -ENOKEY; const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; const void *mod = info->hdr; - if (info->len > markerlen && + /* + * Require flags == 0, as a module with version information + * removed is no longer the module that was signed + */ + if (flags == 0 && + info->len > markerlen && memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { /* We truncate the module to discard the signature */ info->len -= markerlen; @@ -2463,7 +2468,7 @@ static int module_sig_check(struct load_info *info) return err; } #else /* !CONFIG_MODULE_SIG */ -static int module_sig_check(struct load_info *info) +static int module_sig_check(struct load_info *info, int flags) { return 0; } @@ -3200,7 +3205,7 @@ static int load_module(struct load_info *info, const char __user *uargs, long err; char *after_dashes; - err = module_sig_check(info); + err = module_sig_check(info, flags); if (err) goto free_copy; diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 3b8946416a5f..4bd1dd7db1a1 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -798,6 +798,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) timer->it.cpu.expires = 0; sample_to_timespec(timer->it_clock, timer->it.cpu.expires, &itp->it_value); + return; } else { cpu_timer_sample_group(timer->it_clock, p, &now); unlock_task_sighand(p, &flags); diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 286ef6072714..2daeb2ec1979 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -301,12 +301,12 @@ static int create_image(int platform_mode) save_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true); error = swsusp_arch_suspend(); + /* Restore control flow magically appears here */ + restore_processor_state(); trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false); if (error) printk(KERN_ERR "PM: Error %d creating hibernation image\n", error); - /* Restore control flow magically appears here */ - restore_processor_state(); if (!in_suspend) events_check_enabled = false; diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c index 276762f3a460..d5760c42f042 100644 --- a/kernel/printk/braille.c +++ b/kernel/printk/braille.c @@ -9,10 +9,10 @@ char *_braille_console_setup(char **str, char **brl_options) { - if (!memcmp(*str, "brl,", 4)) { + if (!strncmp(*str, "brl,", 4)) { *brl_options = ""; *str += 4; - } else if (!memcmp(str, "brl=", 4)) { + } else if (!strncmp(*str, "brl=", 4)) { *brl_options = *str + 4; *str = strchr(*brl_options, ','); if (!*str) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7c90f805e457..f2c0bcc4ba6c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1696,6 +1696,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) success = 1; /* we're going to change ->state */ cpu = task_cpu(p); + /* + * Ensure we load p->on_rq 
_after_ p->state, otherwise it would + * be possible to, falsely, observe p->on_rq == 0 and get stuck + * in smp_cond_load_acquire() below. + * + * sched_ttwu_pending() try_to_wake_up() + * [S] p->on_rq = 1; [L] P->state + * UNLOCK rq->lock -----. + * \ + * +--- RMB + * schedule() / + * LOCK rq->lock -----' + * UNLOCK rq->lock + * + * [task p] + * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq + * + * Pairs with the UNLOCK+LOCK on rq->lock from the + * last wakeup of our task and the schedule that got our task + * current. + */ + smp_rmb(); if (p->on_rq && ttwu_remote(p, wake_flags)) goto stat; @@ -5147,7 +5169,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_UP_PREPARE: rq->calc_load_update = calc_load_update; - account_reset_rq(rq); break; case CPU_ONLINE: diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3b45ba18f7f4..88379724fc5b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1563,16 +1563,3 @@ static inline u64 irq_time_read(int cpu) } #endif /* CONFIG_64BIT */ #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ - -static inline void account_reset_rq(struct rq *rq) -{ -#ifdef CONFIG_IRQ_TIME_ACCOUNTING - rq->prev_irq_time = 0; -#endif -#ifdef CONFIG_PARAVIRT - rq->prev_steal_time = 0; -#endif -#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - rq->prev_steal_time_rq = 0; -#endif -} diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index 4d54f97558df..bbda623aafa7 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c @@ -23,7 +23,9 @@ #include "timekeeping_internal.h" -static unsigned int sleep_time_bin[32] = {0}; +#define NUM_BINS 32 + +static unsigned int sleep_time_bin[NUM_BINS] = {0}; static int tk_debug_show_sleep_time(struct seq_file *s, void *data) { @@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init); void tk_debug_account_sleep_time(struct timespec *t) { - sleep_time_bin[fls(t->tv_sec)]++; + /* Cap bin index so we don't overflow the array */ + int bin = min(fls(t->tv_sec), NUM_BINS-1); + + sleep_time_bin[bin]++; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6d3c38966eda..8df0ed9af291 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4435,13 +4435,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, struct trace_array *tr = iter->tr; ssize_t sret; - /* return any leftover data */ - sret = trace_seq_to_user(&iter->seq, ubuf, cnt); - if (sret != -EBUSY) - return sret; - - trace_seq_init(&iter->seq); - /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); if (unlikely(iter->trace->name != tr->current_trace->name)) @@ -4454,6 +4447,14 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, * is protected. 
*/ mutex_lock(&iter->mutex); + + /* return any leftover data */ + sret = trace_seq_to_user(&iter->seq, ubuf, cnt); + if (sret != -EBUSY) + goto out; + + trace_seq_init(&iter->seq); + if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 4cc6442733f4..fe8b9ffbbb50 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c @@ -48,7 +48,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes) return NULL; } if (nbytes > 0) - nbits -= count_leading_zeros(buffer[0]); + nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8); else nbits = 0; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index afc8593327d6..5df7b79513ea 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -350,6 +350,20 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev) } EXPORT_SYMBOL(bdi_register_dev); +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner) +{ + int rc; + + rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt), + MINOR(owner->devt)); + if (rc) + return rc; + bdi->owner = owner; + get_device(owner); + return 0; +} +EXPORT_SYMBOL(bdi_register_owner); + /* * Remove bdi from the global list and shutdown any threads we have running */ @@ -418,6 +432,11 @@ void bdi_unregister(struct backing_dev_info *bdi) device_unregister(dev); } + + if (bdi->owner) { + put_device(bdi->owner); + bdi->owner = NULL; + } } EXPORT_SYMBOL(bdi_unregister); diff --git a/mm/filemap.c b/mm/filemap.c index 322462d27b99..798324941a87 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -108,6 +108,48 @@ * ->tasklist_lock (memory_failure, collect_procs_ao) */ +static int page_cache_tree_insert(struct address_space *mapping, + struct page *page, void **shadowp) +{ + struct radix_tree_node *node; + void **slot; + int error; + + error = __radix_tree_create(&mapping->page_tree, page->index, + &node, &slot); + if (error) + return error; + if (*slot) { + void *p; + + p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); + if (!radix_tree_exceptional_entry(p)) + return -EEXIST; + if (shadowp) + *shadowp = p; + mapping->nrshadows--; + if (node) + workingset_node_shadows_dec(node); + } + radix_tree_replace_slot(slot, page); + mapping->nrpages++; + if (node) { + workingset_node_pages_inc(node); + /* + * Don't track node that contains actual pages. + * + * Avoid acquiring the list_lru lock if already + * untracked. The list_empty() test is safe as + * node->private_list is protected by + * mapping->tree_lock. 
+ */ + if (!list_empty(&node->private_list)) + list_lru_del(&workingset_shadow_nodes, + &node->private_list); + } + return 0; +} + static void page_cache_tree_delete(struct address_space *mapping, struct page *page, void *shadow) { @@ -494,7 +536,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) spin_lock_irq(&mapping->tree_lock); __delete_from_page_cache(old, NULL); - error = radix_tree_insert(&mapping->page_tree, offset, new); + error = page_cache_tree_insert(mapping, new, NULL); BUG_ON(error); mapping->nrpages++; __inc_zone_page_state(new, NR_FILE_PAGES); @@ -513,48 +555,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) } EXPORT_SYMBOL_GPL(replace_page_cache_page); -static int page_cache_tree_insert(struct address_space *mapping, - struct page *page, void **shadowp) -{ - struct radix_tree_node *node; - void **slot; - int error; - - error = __radix_tree_create(&mapping->page_tree, page->index, - &node, &slot); - if (error) - return error; - if (*slot) { - void *p; - - p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); - if (!radix_tree_exceptional_entry(p)) - return -EEXIST; - if (shadowp) - *shadowp = p; - mapping->nrshadows--; - if (node) - workingset_node_shadows_dec(node); - } - radix_tree_replace_slot(slot, page); - mapping->nrpages++; - if (node) { - workingset_node_pages_inc(node); - /* - * Don't track node that contains actual pages. - * - * Avoid acquiring the list_lru lock if already - * untracked. The list_empty() test is safe as - * node->private_list is protected by - * mapping->tree_lock. - */ - if (!list_empty(&node->private_list)) - list_lru_del(&workingset_shadow_nodes, - &node->private_list); - } - return 0; -} - static int __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c83332d80152..0225f1c6263a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1655,6 +1655,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, * and reducing the surplus. */ spin_unlock(&hugetlb_lock); + + /* yield cpu to avoid soft lockup */ + cond_resched(); + if (hstate_is_gigantic(h)) ret = alloc_fresh_gigantic_page(h, nodes_allowed); else diff --git a/mm/ksm.c b/mm/ksm.c index 691f1bf55858..d8c9c689862c 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void) { struct rmap_item *rmap_item; - rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); + rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | + __GFP_NORETRY | __GFP_NOWARN); if (rmap_item) ksm_rmap_items++; return rmap_item; diff --git a/mm/shmem.c b/mm/shmem.c index a2b87affdc23..efab7a79b05f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -540,7 +540,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) struct inode *inode = dentry->d_inode; int error; - error = inode_change_ok(inode, attr); + error = setattr_prepare(dentry, attr); if (error) return error; diff --git a/mm/workingset.c b/mm/workingset.c index f7216fa7da27..c068f8c84f07 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -340,21 +340,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, * no pages, so we expect to be able to remove them all and * delete and free the empty node afterwards. 
*/ - - BUG_ON(!node->count); - BUG_ON(node->count & RADIX_TREE_COUNT_MASK); + BUG_ON(!workingset_node_shadows(node)); + BUG_ON(workingset_node_pages(node)); for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { if (node->slots[i]) { BUG_ON(!radix_tree_exceptional_entry(node->slots[i])); node->slots[i] = NULL; - BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT)); - node->count -= 1U << RADIX_TREE_COUNT_SHIFT; + workingset_node_shadows_dec(node); BUG_ON(!mapping->nrshadows); mapping->nrshadows--; } } - BUG_ON(node->count); + BUG_ON(workingset_node_shadows(node)); inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM); if (!__radix_tree_delete_node(&mapping->page_tree, node)) BUG(); diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 90673ff8b87b..335401d6351c 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -115,7 +115,18 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw) /* finally deinitialize the claim */ static void batadv_claim_release(struct batadv_bla_claim *claim) { - batadv_backbone_gw_free_ref(claim->backbone_gw); + struct batadv_bla_backbone_gw *old_backbone_gw; + spin_lock_bh(&claim->backbone_lock); + old_backbone_gw = claim->backbone_gw; + claim->backbone_gw = NULL; + spin_unlock_bh(&claim->backbone_lock); + + spin_lock_bh(&old_backbone_gw->crc_lock); + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + spin_unlock_bh(&old_backbone_gw->crc_lock); + + batadv_backbone_gw_free_ref(old_backbone_gw); + kfree_rcu(claim, rcu); } @@ -242,7 +253,9 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) } /* all claims gone, intialize CRC */ + spin_lock_bh(&backbone_gw->crc_lock); backbone_gw->crc = BATADV_BLA_CRC_INIT; + spin_unlock_bh(&backbone_gw->crc_lock); } /** @@ -338,9 +351,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac, break; } - if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb) + goto out; + } skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, soft_iface); @@ -389,6 +405,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, entry->lasttime = jiffies; entry->crc = BATADV_BLA_CRC_INIT; entry->bat_priv = bat_priv; + spin_lock_init(&entry->crc_lock); atomic_set(&entry->request_sent, 0); atomic_set(&entry->wait_periods, 0); ether_addr_copy(entry->orig, orig); @@ -537,7 +554,9 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv, __be16 crc; memcpy(mac, batadv_announce_mac, 4); + spin_lock_bh(&backbone_gw->crc_lock); crc = htons(backbone_gw->crc); + spin_unlock_bh(&backbone_gw->crc_lock); memcpy(&mac[4], &crc, 2); batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid, @@ -555,8 +574,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, const uint8_t *mac, const unsigned short vid, struct batadv_bla_backbone_gw *backbone_gw) { + struct batadv_bla_backbone_gw *old_backbone_gw; struct batadv_bla_claim *claim; struct batadv_bla_claim search_claim; + bool remove_crc = false; int hash_added; ether_addr_copy(search_claim.addr, mac); @@ -570,8 +591,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, return; ether_addr_copy(claim->addr, mac); + spin_lock_init(&claim->backbone_lock); claim->vid = vid; claim->lasttime = jiffies; + atomic_inc(&backbone_gw->refcount); claim->backbone_gw = backbone_gw; 
atomic_set(&claim->refcount, 2); @@ -598,20 +621,55 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, "bla_add_claim(): changing ownership for %pM, vid %d\n", mac, BATADV_PRINT_VID(vid)); - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); - batadv_backbone_gw_free_ref(claim->backbone_gw); + remove_crc = true; } - /* set (new) backbone gw */ + + /* replace backbone_gw atomically and adjust reference counters */ + spin_lock_bh(&claim->backbone_lock); + old_backbone_gw = claim->backbone_gw; atomic_inc(&backbone_gw->refcount); claim->backbone_gw = backbone_gw; + spin_unlock_bh(&claim->backbone_lock); + if (remove_crc) { + /* remove claim address from old backbone_gw */ + spin_lock_bh(&old_backbone_gw->crc_lock); + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + spin_unlock_bh(&old_backbone_gw->crc_lock); + } + + batadv_backbone_gw_free_ref(old_backbone_gw); + + /* add claim address to new backbone_gw */ + spin_lock_bh(&backbone_gw->crc_lock); backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + spin_unlock_bh(&backbone_gw->crc_lock); backbone_gw->lasttime = jiffies; claim_free_ref: batadv_claim_free_ref(claim); } +/** + * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of + * claim + * @claim: claim whose backbone_gw should be returned + * + * Return: valid reference to claim::backbone_gw + */ +static struct batadv_bla_backbone_gw * +batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim) +{ + struct batadv_bla_backbone_gw *backbone_gw; + + spin_lock_bh(&claim->backbone_lock); + backbone_gw = claim->backbone_gw; + atomic_inc(&backbone_gw->refcount); + spin_unlock_bh(&claim->backbone_lock); + + return backbone_gw; +} + /* Delete a claim from the claim hash which has the * given mac address and vid. */ @@ -633,8 +691,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, batadv_choose_claim, claim); batadv_claim_free_ref(claim); /* reference from the hash is gone */ - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); - /* don't need the reference from hash_find() anymore */ batadv_claim_free_ref(claim); } @@ -645,7 +701,7 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; - uint16_t crc; + uint16_t backbone_crc, crc; if (memcmp(an_addr, batadv_announce_mac, 4) != 0) return 0; @@ -665,12 +721,16 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, "handle_announce(): ANNOUNCE vid %d (sent by %pM)... 
CRC = %#.4x\n", BATADV_PRINT_VID(vid), backbone_gw->orig, crc); - if (backbone_gw->crc != crc) { + spin_lock_bh(&backbone_gw->crc_lock); + backbone_crc = backbone_gw->crc; + spin_unlock_bh(&backbone_gw->crc_lock); + + if (backbone_crc != crc) { batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n", backbone_gw->orig, BATADV_PRINT_VID(backbone_gw->vid), - backbone_gw->crc, crc); + backbone_crc, crc); batadv_bla_send_request(backbone_gw); } else { @@ -1041,6 +1101,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, int now) { + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_bla_claim *claim; struct hlist_head *head; struct batadv_hashtable *hash; @@ -1055,14 +1116,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); if (now) goto purge_now; - if (!batadv_compare_eth(claim->backbone_gw->orig, + + if (!batadv_compare_eth(backbone_gw->orig, primary_if->net_dev->dev_addr)) - continue; + goto skip; + if (!batadv_has_timed_out(claim->lasttime, BATADV_BLA_CLAIM_TIMEOUT)) - continue; + goto skip; batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_purge_claims(): %pM, vid %d, time out\n", @@ -1070,8 +1134,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, purge_now: batadv_handle_unclaim(bat_priv, primary_if, - claim->backbone_gw->orig, + backbone_gw->orig, claim->addr, claim->vid); +skip: + batadv_backbone_gw_free_ref(backbone_gw); } rcu_read_unlock(); } @@ -1458,9 +1524,11 @@ void batadv_bla_free(struct batadv_priv *bat_priv) int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, bool is_bcast) { + struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; + bool own_claim; int ret; ethhdr = eth_hdr(skb); @@ -1493,8 +1561,12 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, } /* if it is our own claim ... */ - if (batadv_compare_eth(claim->backbone_gw->orig, - primary_if->net_dev->dev_addr)) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + own_claim = batadv_compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr); + batadv_backbone_gw_free_ref(backbone_gw); + + if (own_claim) { /* ... allow it in any case */ claim->lasttime = jiffies; goto allow; @@ -1557,7 +1629,9 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_hard_iface *primary_if; + bool client_roamed; int ret = 0; primary_if = batadv_primary_if_get_selected(bat_priv); @@ -1587,8 +1661,12 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto allow; /* check if we are responsible. */ - if (batadv_compare_eth(claim->backbone_gw->orig, - primary_if->net_dev->dev_addr)) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + client_roamed = batadv_compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr); + batadv_backbone_gw_free_ref(backbone_gw); + + if (client_roamed) { /* if yes, the client has roamed and we have * to unclaim it. 
*/ @@ -1629,9 +1707,11 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hashtable *hash = bat_priv->bla.claim_hash; + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_bla_claim *claim; struct batadv_hard_iface *primary_if; struct hlist_head *head; + u16 backbone_crc; uint32_t i; bool is_own; uint8_t *primary_addr; @@ -1652,13 +1732,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { - is_own = batadv_compare_eth(claim->backbone_gw->orig, + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + + is_own = batadv_compare_eth(backbone_gw->orig, primary_addr); + + spin_lock_bh(&backbone_gw->crc_lock); + backbone_crc = backbone_gw->crc; + spin_unlock_bh(&backbone_gw->crc_lock); seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", claim->addr, BATADV_PRINT_VID(claim->vid), - claim->backbone_gw->orig, + backbone_gw->orig, (is_own ? 'x' : ' '), - claim->backbone_gw->crc); + backbone_crc); + + batadv_backbone_gw_free_ref(backbone_gw); } rcu_read_unlock(); } @@ -1677,6 +1765,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) struct batadv_hard_iface *primary_if; struct hlist_head *head; int secs, msecs; + u16 backbone_crc; uint32_t i; bool is_own; uint8_t *primary_addr; @@ -1707,10 +1796,14 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) if (is_own) continue; + spin_lock_bh(&backbone_gw->crc_lock); + backbone_crc = backbone_gw->crc; + spin_unlock_bh(&backbone_gw->crc_lock); + seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n", backbone_gw->orig, BATADV_PRINT_VID(backbone_gw->vid), secs, - msecs, backbone_gw->crc); + msecs, backbone_crc); } rcu_read_unlock(); } diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index f49badcfd8a1..dc240a8d99d4 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -959,9 +959,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, if (!skb_new) goto out; - if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb_new) + goto out; + } skb_reset_mac_header(skb_new); skb_new->protocol = eth_type_trans(skb_new, @@ -1039,9 +1042,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv, */ skb_reset_mac_header(skb_new); - if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb_new) + goto out; + } /* To preserve backwards compatibility, the node has choose the outgoing * format based on the incoming request packet type. 
The assumption is diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 6b4adf1810d3..9e6967a1f23e 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -529,6 +529,8 @@ static void batadv_orig_node_release(struct batadv_orig_node *orig_node) struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; struct batadv_orig_ifinfo *orig_ifinfo; + struct batadv_orig_node_vlan *vlan, *vlan_tmp; + struct batadv_orig_ifinfo *last_candidate; spin_lock_bh(&orig_node->neigh_list_lock); @@ -544,8 +546,21 @@ static void batadv_orig_node_release(struct batadv_orig_node *orig_node) hlist_del_rcu(&orig_ifinfo->list); batadv_orig_ifinfo_free_ref(orig_ifinfo); } + + last_candidate = orig_node->last_bonding_candidate; + orig_node->last_bonding_candidate = NULL; spin_unlock_bh(&orig_node->neigh_list_lock); + if (last_candidate) + batadv_orig_ifinfo_free_ref(last_candidate); + + spin_lock_bh(&orig_node->vlan_list_lock); + list_for_each_entry_safe(vlan, vlan_tmp, &orig_node->vlan_list, list) { + list_del_rcu(&vlan->list); + batadv_orig_node_vlan_free_ref(vlan); + } + spin_unlock_bh(&orig_node->vlan_list_lock); + /* Free nc_nodes */ batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL); diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 22bbdd082546..34c20cc0670c 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -425,6 +425,52 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv, } /** + * batadv_last_bonding_get - Get last_bonding_candidate of orig_node + * @orig_node: originator node whose last bonding candidate should be retrieved + * + * Return: last bonding candidate of router or NULL if not found + * + * The object is returned with refcounter increased by 1. + */ +static struct batadv_orig_ifinfo * +batadv_last_bonding_get(struct batadv_orig_node *orig_node) +{ + struct batadv_orig_ifinfo *last_bonding_candidate; + + spin_lock_bh(&orig_node->neigh_list_lock); + last_bonding_candidate = orig_node->last_bonding_candidate; + + if (last_bonding_candidate) + atomic_inc(&last_bonding_candidate->refcount); + spin_unlock_bh(&orig_node->neigh_list_lock); + + return last_bonding_candidate; +} + +/** + * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node + * @orig_node: originator node whose bonding candidates should be replaced + * @new_candidate: new bonding candidate or NULL + */ +static void +batadv_last_bonding_replace(struct batadv_orig_node *orig_node, + struct batadv_orig_ifinfo *new_candidate) +{ + struct batadv_orig_ifinfo *old_candidate; + + spin_lock_bh(&orig_node->neigh_list_lock); + old_candidate = orig_node->last_bonding_candidate; + + if (new_candidate) + atomic_inc(&new_candidate->refcount); + orig_node->last_bonding_candidate = new_candidate; + spin_unlock_bh(&orig_node->neigh_list_lock); + + if (old_candidate) + batadv_orig_ifinfo_free_ref(old_candidate); +} + +/** * batadv_find_router - find a suitable router for this originator * @bat_priv: the bat priv with all the soft interface information * @orig_node: the destination node @@ -469,7 +515,7 @@ batadv_find_router(struct batadv_priv *bat_priv, * router - obviously there are no other candidates. 
*/ rcu_read_lock(); - last_candidate = orig_node->last_bonding_candidate; + last_candidate = batadv_last_bonding_get(orig_node); if (last_candidate) last_cand_router = rcu_dereference(last_candidate->router); @@ -529,10 +575,6 @@ next: } rcu_read_unlock(); - /* last_bonding_candidate is reset below, remove the old reference. */ - if (orig_node->last_bonding_candidate) - batadv_orig_ifinfo_free_ref(orig_node->last_bonding_candidate); - /* After finding candidates, handle the three cases: * 1) there is a next candidate, use that * 2) there is no next candidate, use the first of the list @@ -541,23 +583,33 @@ next: if (next_candidate) { batadv_neigh_node_free_ref(router); - /* remove references to first candidate, we don't need it. */ - if (first_candidate) { - batadv_neigh_node_free_ref(first_candidate_router); - batadv_orig_ifinfo_free_ref(first_candidate); - } + atomic_inc(&next_candidate_router->refcount); router = next_candidate_router; - orig_node->last_bonding_candidate = next_candidate; + batadv_last_bonding_replace(orig_node, next_candidate); } else if (first_candidate) { batadv_neigh_node_free_ref(router); - /* refcounting has already been done in the loop above. */ + atomic_inc(&first_candidate_router->refcount); router = first_candidate_router; - orig_node->last_bonding_candidate = first_candidate; + batadv_last_bonding_replace(orig_node, first_candidate); } else { - orig_node->last_bonding_candidate = NULL; + batadv_last_bonding_replace(orig_node, NULL); } + /* cleanup of candidates */ + if (first_candidate) { + batadv_neigh_node_free_ref(first_candidate_router); + batadv_orig_ifinfo_free_ref(first_candidate); + } + + if (next_candidate) { + batadv_neigh_node_free_ref(next_candidate_router); + batadv_orig_ifinfo_free_ref(next_candidate); + } + + if (last_candidate) + batadv_orig_ifinfo_free_ref(last_candidate); + return router; } diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 6004c2de7b2a..2cfd5ddbb404 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -363,8 +363,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_orig_node *orig_node; orig_node = batadv_gw_get_selected_orig(bat_priv); - return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, - orig_node, vid); + return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, + BATADV_P_DATA, orig_node, vid); } void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 0cc890bab7a4..61f34f889555 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -202,12 +202,12 @@ struct batadv_orig_bat_iv { * @primary_addr: hosts primary interface address * @ifinfo_list: list for routers per outgoing interface * @last_bonding_candidate: pointer to last ifinfo of last used router - * @batadv_dat_addr_t: address of the orig node in the distributed hash + * @dat_addr: address of the orig node in the distributed hash * @last_seen: time when last packet from this node was received * @bcast_seqno_reset: time when the broadcast seqno window was reset * @mcast_handler_lock: synchronizes mcast-capability and -flag changes * @mcast_flags: multicast flags announced by the orig node - * @mcast_want_all_unsnoop_node: a list node for the + * @mcast_want_all_unsnoopables_node: a list node for the * mcast.want_all_unsnoopables list * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list @@ 
-272,7 +272,9 @@ struct batadv_orig_node { DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); uint32_t last_bcast_seqno; struct hlist_head neigh_list; - /* neigh_list_lock protects: neigh_list and router */ + /* neigh_list_lock protects: neigh_list, ifinfo_list, + * last_bonding_candidate and router + */ spinlock_t neigh_list_lock; struct hlist_node hash_entry; struct batadv_priv *bat_priv; @@ -390,7 +392,7 @@ struct batadv_neigh_ifinfo { /** * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression - * @orig[ETH_ALEN]: mac address of orig node orginating the broadcast + * @orig: mac address of orig node orginating the broadcast * @crc: crc32 checksum of broadcast payload * @entrytime: time when the broadcast packet was received */ @@ -538,7 +540,7 @@ struct batadv_priv_tt { /** * struct batadv_priv_bla - per mesh interface bridge loope avoidance data - * @num_requests; number of bla requests in flight + * @num_requests: number of bla requests in flight * @claim_hash: hash table containing mesh nodes this host has claimed * @backbone_hash: hash table containing all detected backbone gateways * @bcast_duplist: recently received broadcast packets array (for broadcast @@ -760,7 +762,7 @@ struct batadv_softif_vlan { * @dat: distributed arp table data * @mcast: multicast data * @network_coding: bool indicating whether network coding is enabled - * @batadv_priv_nc: network coding data + * @nc: network coding data */ struct batadv_priv { atomic_t mesh_state; @@ -871,6 +873,7 @@ struct batadv_socket_packet { * backbone gateway - no bcast traffic is formwared until the situation was * resolved * @crc: crc16 checksum over all claims + * @crc_lock: lock protecting crc * @refcount: number of contexts the object is used * @rcu: struct used for freeing in an RCU-safe manner */ @@ -884,6 +887,7 @@ struct batadv_bla_backbone_gw { atomic_t wait_periods; atomic_t request_sent; uint16_t crc; + spinlock_t crc_lock; /* protects crc */ atomic_t refcount; struct rcu_head rcu; }; @@ -892,7 +896,8 @@ struct batadv_bla_backbone_gw { * struct batadv_bla_claim - claimed non-mesh client structure * @addr: mac address of claimed non-mesh client * @vid: vlan id this client was detected on - * @batadv_bla_backbone_gw: pointer to backbone gw claiming this client + * @backbone_gw: pointer to backbone gw claiming this client + * @backbone_lock: lock protecting backbone_gw pointer * @lasttime: last time we heard of claim (locals only) * @hash_entry: hlist node for batadv_priv_bla::claim_hash * @refcount: number of contexts the object is used @@ -902,6 +907,7 @@ struct batadv_bla_claim { uint8_t addr[ETH_ALEN]; unsigned short vid; struct batadv_bla_backbone_gw *backbone_gw; + spinlock_t backbone_lock; /* protects backbone_gw */ unsigned long lasttime; struct hlist_node hash_entry; struct rcu_head rcu; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index d0fd8b04f2e6..071d35c9f3b4 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -921,7 +921,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (get_user(opt, (u16 __user *) optval)) { err = -EFAULT; break; } diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 8bbbb5ec468c..bcb3160fefb4 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -334,16 +334,19 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock, static int rfcomm_sock_bind(struct socket 
*sock, struct sockaddr *addr, int addr_len) { - struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; + struct sockaddr_rc sa; struct sock *sk = sock->sk; - int chan = sa->rc_channel; - int err = 0; - - BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr); + int len, err = 0; if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; + memset(&sa, 0, sizeof(sa)); + len = min_t(unsigned int, sizeof(sa), addr_len); + memcpy(&sa, addr, len); + + BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr); + lock_sock(sk); if (sk->sk_state != BT_OPEN) { @@ -358,12 +361,13 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr write_lock(&rfcomm_sk_list.lock); - if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) { + if (sa.rc_channel && + __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ - bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr); - rfcomm_pi(sk)->channel = chan; + bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr); + rfcomm_pi(sk)->channel = sa.rc_channel; sk->sk_state = BT_BOUND; } diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 5740f71b609d..98c579243d87 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -1167,6 +1167,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end) } /* + * Encoding order is (new_up_client, new_state, new_weight). Need to + * apply in the (new_weight, new_state, new_up_client) order, because + * an incremental map may look like e.g. + * + * new_up_client: { osd=6, addr=... } # set osd_state and addr + * new_state: { osd=6, xorstate=EXISTS } # clear osd_state + */ +static int decode_new_up_state_weight(void **p, void *end, + struct ceph_osdmap *map) +{ + void *new_up_client; + void *new_state; + void *new_weight_end; + u32 len; + + new_up_client = *p; + ceph_decode_32_safe(p, end, len, e_inval); + len *= sizeof(u32) + sizeof(struct ceph_entity_addr); + ceph_decode_need(p, end, len, e_inval); + *p += len; + + new_state = *p; + ceph_decode_32_safe(p, end, len, e_inval); + len *= sizeof(u32) + sizeof(u8); + ceph_decode_need(p, end, len, e_inval); + *p += len; + + /* new_weight */ + ceph_decode_32_safe(p, end, len, e_inval); + while (len--) { + s32 osd; + u32 w; + + ceph_decode_need(p, end, 2*sizeof(u32), e_inval); + osd = ceph_decode_32(p); + w = ceph_decode_32(p); + BUG_ON(osd >= map->max_osd); + pr_info("osd%d weight 0x%x %s\n", osd, w, + w == CEPH_OSD_IN ? "(in)" : + (w == CEPH_OSD_OUT ? "(out)" : "")); + map->osd_weight[osd] = w; + + /* + * If we are marking in, set the EXISTS, and clear the + * AUTOOUT and NEW bits. 
+ */ + if (w) { + map->osd_state[osd] |= CEPH_OSD_EXISTS; + map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT | + CEPH_OSD_NEW); + } + } + new_weight_end = *p; + + /* new_state (up/down) */ + *p = new_state; + len = ceph_decode_32(p); + while (len--) { + s32 osd; + u8 xorstate; + int ret; + + osd = ceph_decode_32(p); + xorstate = ceph_decode_8(p); + if (xorstate == 0) + xorstate = CEPH_OSD_UP; + BUG_ON(osd >= map->max_osd); + if ((map->osd_state[osd] & CEPH_OSD_UP) && + (xorstate & CEPH_OSD_UP)) + pr_info("osd%d down\n", osd); + if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && + (xorstate & CEPH_OSD_EXISTS)) { + pr_info("osd%d does not exist\n", osd); + map->osd_weight[osd] = CEPH_OSD_IN; + ret = set_primary_affinity(map, osd, + CEPH_OSD_DEFAULT_PRIMARY_AFFINITY); + if (ret) + return ret; + memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); + map->osd_state[osd] = 0; + } else { + map->osd_state[osd] ^= xorstate; + } + } + + /* new_up_client */ + *p = new_up_client; + len = ceph_decode_32(p); + while (len--) { + s32 osd; + struct ceph_entity_addr addr; + + osd = ceph_decode_32(p); + ceph_decode_copy(p, &addr, sizeof(addr)); + ceph_decode_addr(&addr); + BUG_ON(osd >= map->max_osd); + pr_info("osd%d up\n", osd); + map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP; + map->osd_addr[osd] = addr; + } + + *p = new_weight_end; + return 0; + +e_inval: + return -EINVAL; +} + +/* * decode and apply an incremental map update. */ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, @@ -1265,49 +1374,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, __remove_pg_pool(&map->pg_pools, pi); } - /* new_up */ - ceph_decode_32_safe(p, end, len, e_inval); - while (len--) { - u32 osd; - struct ceph_entity_addr addr; - ceph_decode_32_safe(p, end, osd, e_inval); - ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval); - ceph_decode_addr(&addr); - pr_info("osd%d up\n", osd); - BUG_ON(osd >= map->max_osd); - map->osd_state[osd] |= CEPH_OSD_UP; - map->osd_addr[osd] = addr; - } - - /* new_state */ - ceph_decode_32_safe(p, end, len, e_inval); - while (len--) { - u32 osd; - u8 xorstate; - ceph_decode_32_safe(p, end, osd, e_inval); - xorstate = **(u8 **)p; - (*p)++; /* clean flag */ - if (xorstate == 0) - xorstate = CEPH_OSD_UP; - if (xorstate & CEPH_OSD_UP) - pr_info("osd%d down\n", osd); - if (osd < map->max_osd) - map->osd_state[osd] ^= xorstate; - } - - /* new_weight */ - ceph_decode_32_safe(p, end, len, e_inval); - while (len--) { - u32 osd, off; - ceph_decode_need(p, end, sizeof(u32)*2, e_inval); - osd = ceph_decode_32(p); - off = ceph_decode_32(p); - pr_info("osd%d weight 0x%x %s\n", osd, off, - off == CEPH_OSD_IN ? "(in)" : - (off == CEPH_OSD_OUT ? 
"(out)" : "")); - if (osd < map->max_osd) - map->osd_weight[osd] = off; - } + /* new_up_client, new_state, new_weight */ + err = decode_new_up_state_weight(p, end, map); + if (err) + goto bad; /* new_pg_temp */ err = decode_new_pg_temp(p, end, map); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index da9f18156ba9..65a28620006c 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2188,7 +2188,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait) + struct rtmsg *rtm, int nowait, u32 portid) { struct mfc_cache *cache; struct mr_table *mrt; @@ -2233,6 +2233,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, return -ENOMEM; } + NETLINK_CB(skb2).portid = portid; skb_push(skb2, sizeof(struct iphdr)); skb_reset_network_header(skb2); iph = ip_hdr(skb2); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 3bdb46b325ab..106db177a91c 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -367,7 +367,8 @@ static inline bool unconditional(const struct arpt_entry *e) * there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, - unsigned int valid_hooks, void *entry0) + unsigned int valid_hooks, void *entry0, + unsigned int *offsets) { unsigned int hook; @@ -456,6 +457,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + if (!xt_find_jump_offset(offsets, newpos, + newinfo->number)) + return 0; + e = (struct arpt_entry *) + (entry0 + newpos); } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; @@ -615,6 +621,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, const struct arpt_replace *repl) { struct arpt_entry *iter; + unsigned int *offsets; unsigned int i; int ret = 0; @@ -628,8 +635,10 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, } duprintf("translate_table: size %u\n", newinfo->size); + offsets = xt_alloc_entry_offsets(newinfo->number); + if (!offsets) + return -ENOMEM; i = 0; - /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, @@ -638,7 +647,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, repl->underflow, repl->valid_hooks); if (ret != 0) - break; + goto out_free; + if (i < repl->num_entries) + offsets[i] = (void *)iter - entry0; ++i; if (strcmp(arpt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) @@ -646,12 +657,13 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, } duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret); if (ret != 0) - return ret; + goto out_free; + ret = -EINVAL; if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); - return -EINVAL; + goto out_free; } /* Check hooks all assigned */ @@ -662,17 +674,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); - return -EINVAL; + goto out_free; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); - return -EINVAL; + goto out_free; } } - if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) - return -ELOOP; + if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { + ret = -ELOOP; + goto out_free; + } + kvfree(offsets); /* Finally, each sanity check must pass */ i = 0; @@ -699,6 +714,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, } return ret; + out_free: + kvfree(offsets); + return ret; } static void get_counters(const struct xt_table_info *t, diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index baa1f65e5142..9e20b99bf325 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -443,7 +443,8 @@ ipt_do_table(struct sk_buff *skb, there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, - unsigned int valid_hooks, void *entry0) + unsigned int valid_hooks, void *entry0, + unsigned int *offsets) { unsigned int hook; @@ -536,6 +537,11 @@ mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + if (!xt_find_jump_offset(offsets, newpos, + newinfo->number)) + return 0; + e = (struct ipt_entry *) + (entry0 + newpos); } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; @@ -782,6 +788,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ipt_replace *repl) { struct ipt_entry *iter; + unsigned int *offsets; unsigned int i; int ret = 0; @@ -795,6 +802,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, } duprintf("translate_table: size %u\n", newinfo->size); + offsets = xt_alloc_entry_offsets(newinfo->number); + if (!offsets) + return -ENOMEM; i = 0; /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter, entry0, newinfo->size) { @@ -804,17 +814,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, repl->underflow, repl->valid_hooks); if (ret != 0) - return ret; + goto out_free; + if (i < repl->num_entries) + offsets[i] = (void *)iter - entry0; ++i; if (strcmp(ipt_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } + ret = -EINVAL; if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); - return -EINVAL; + goto out_free; } /* Check hooks all assigned */ @@ -825,17 +838,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); - return -EINVAL; + goto out_free; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); - return -EINVAL; + goto out_free; } } - if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) - return -ELOOP; + if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { + ret = -ELOOP; + goto out_free; + } + kvfree(offsets); /* Finally, each sanity check must pass */ i = 0; @@ -862,6 +878,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, } return ret; + out_free: + kvfree(offsets); + return ret; } static void diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 5df7ed8c62d9..2548c2274cb8 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2413,7 +2413,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { int err = ipmr_get_route(net, skb, fl4->saddr, fl4->daddr, - r, nowait); + r, nowait, portid); + if (err <= 0) { if (!nowait) { if (err == 0) diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 6088c58bc54c..2b986576e5ad 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -213,6 +213,7 @@ static bool tcp_fastopen_create_child(struct sock *sk, } } tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq; + tp->rcv_wup = tp->rcv_nxt; sk->sk_data_ready(sk); bh_unlock_sock(child); sock_put(child); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d3b64bfedd18..50023e8af668 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2316,10 +2316,9 @@ static void DBGUNDO(struct sock *sk, const char *msg) } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { - struct ipv6_pinfo *np = inet6_sk(sk); pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, - &np->daddr, ntohs(inet->inet_dport), + &sk->sk_v6_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9d26f4569a2e..8b5c457b40cd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -245,7 +245,8 @@ void tcp_select_initial_window(int __space, __u32 mss, /* Set window scaling on max possible window * See RFC1323 for an explanation of the limit to 14 */ - space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); + space = max_t(u32, space, sysctl_tcp_rmem[2]); + space = max_t(u32, space, sysctl_rmem_max); space = min_t(u32, space, *window_clamp); while (space > 65535 && (*rcv_wscale) < 14) { space >>= 1; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0002aa9ca91a..213fc4f9b265 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -723,7 +723,14 @@ static int addrconf_fixup_forwarding(struct 
ctl_table *table, int *p, int newf) } if (p == &net->ipv6.devconf_all->forwarding) { + int old_dflt = net->ipv6.devconf_dflt->forwarding; + net->ipv6.devconf_dflt->forwarding = newf; + if ((!newf) ^ (!old_dflt)) + inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, + NETCONFA_IFINDEX_DEFAULT, + net->ipv6.devconf_dflt); + addrconf_forward_change(net, newf); if ((!newf) ^ (!old)) inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING, @@ -1679,6 +1686,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) spin_unlock_bh(&ifp->state_lock); addrconf_mod_dad_work(ifp, 0); + in6_ifa_put(ifp); } /* Join to solicited addr multicast group. @@ -3255,6 +3263,7 @@ static void addrconf_dad_work(struct work_struct *w) addrconf_dad_begin(ifp); goto out; } else if (action == DAD_ABORT) { + in6_ifa_hold(ifp); addrconf_dad_stop(ifp, 1); goto out; } diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 1ee013644bb2..473bb5031af2 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -881,7 +881,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) encap_limit = t->parms.encap_limit; memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - fl6.flowi6_proto = skb->protocol; err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu); @@ -939,6 +938,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) fl6->daddr = p->raddr; fl6->flowi6_oif = p->link; fl6->flowlabel = 0; + fl6->flowi6_proto = IPPROTO_GRE; if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 5e739bb6ae90..17049f81c750 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2272,8 +2272,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, return 1; } -int ip6mr_get_route(struct net *net, - struct sk_buff *skb, struct rtmsg *rtm, int nowait) +int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, + int nowait, u32 portid) { int err; struct mr6_table *mrt; @@ -2318,6 +2318,7 @@ int ip6mr_get_route(struct net *net, return -ENOMEM; } + NETLINK_CB(skb2).portid = portid; skb_reset_transport_header(skb2); skb_put(skb2, sizeof(struct ipv6hdr)); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 9802b2469662..d0fe5a92b2f1 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -453,7 +453,8 @@ ip6t_do_table(struct sk_buff *skb, there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, - unsigned int valid_hooks, void *entry0) + unsigned int valid_hooks, void *entry0, + unsigned int *offsets) { unsigned int hook; @@ -546,6 +547,11 @@ mark_source_chains(const struct xt_table_info *newinfo, /* This a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); + if (!xt_find_jump_offset(offsets, newpos, + newinfo->number)) + return 0; + e = (struct ip6t_entry *) + (entry0 + newpos); } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; @@ -792,6 +798,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ip6t_replace *repl) { struct ip6t_entry *iter; + unsigned int *offsets; unsigned int i; int ret = 0; @@ -805,6 +812,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, } duprintf("translate_table: size %u\n", newinfo->size); + offsets = xt_alloc_entry_offsets(newinfo->number); + if (!offsets) + return -ENOMEM; i = 0; /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter, entry0, newinfo->size) { @@ -814,17 +824,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, repl->underflow, repl->valid_hooks); if (ret != 0) - return ret; + goto out_free; + if (i < repl->num_entries) + offsets[i] = (void *)iter - entry0; ++i; if (strcmp(ip6t_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } + ret = -EINVAL; if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); - return -EINVAL; + goto out_free; } /* Check hooks all assigned */ @@ -835,17 +848,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); - return -EINVAL; + goto out_free; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); - return -EINVAL; + goto out_free; } } - if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) - return -ELOOP; + if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { + ret = -ELOOP; + goto out_free; + } + kvfree(offsets); /* Finally, each sanity check must pass */ i = 0; @@ -872,6 +888,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, } return ret; + out_free: + kvfree(offsets); + return ret; } static void diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4967b50cdf52..889e06626c26 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2618,7 +2618,9 @@ static int rt6_fill_node(struct net *net, if (iif) { #ifdef CONFIG_IPV6_MROUTE if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { - int err = ip6mr_get_route(net, skb, rtm, nowait); + int err = ip6mr_get_route(net, skb, rtm, nowait, + portid); + if (err <= 0) { if (!nowait) { if (err == 0) diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index f945293c17f0..a34379892d85 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -843,7 +843,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) struct sock *sk = sock->sk; struct irda_sock *new, *self = irda_sk(sk); struct sock *newsk; - struct sk_buff *skb; + struct sk_buff *skb = NULL; int err; IRDA_DEBUG(2, "%s()\n", __func__); @@ -913,7 +913,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) err = -EPERM; /* value does not seem to make sense. 
-arnd */ if (!new->tsap) { IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); - kfree_skb(skb); goto out; } @@ -932,7 +931,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) /* Clean up the original one to keep it in listen state */ irttp_listen(self->tsap); - kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; @@ -940,6 +938,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) irda_connect_response(new); err = 0; out: + kfree_skb(skb); release_sock(sk); return err; } @@ -1037,8 +1036,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, } /* Check if we have opened a local TSAP */ - if (!self->tsap) - irda_open_tsap(self, LSAP_ANY, addr->sir_name); + if (!self->tsap) { + err = irda_open_tsap(self, LSAP_ANY, addr->sir_name); + if (err) + goto out; + } /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 0c6a8f18b0fc..76f4c5c9a466 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1892,6 +1892,9 @@ static __net_exit void l2tp_exit_net(struct net *net) (void)l2tp_tunnel_delete(tunnel); } rcu_read_unlock_bh(); + + flush_workqueue(l2tp_wq); + rcu_barrier(); } static struct pernet_operations l2tp_net_ops = { diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index b704a9356208..1ae69a4695de 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -883,10 +883,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, pls = l2tp_session_priv(session); tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock); - if (tunnel == NULL) { - error = -EBADF; + if (tunnel == NULL) goto end_put_sess; - } inet = inet_sk(tunnel->sock); if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) { @@ -964,12 +962,11 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, } *usockaddr_len = len; + error = 0; sock_put(pls->tunnel_sock); end_put_sess: sock_put(sk); - error = 0; - end: return error; } diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 592f4b152ba8..708bc2074d90 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1186,7 +1186,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) /* free all potentially still buffered bcast frames */ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); - skb_queue_purge(&sdata->u.ap.ps.bc_buf); + ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf); mutex_lock(&local->mtx); ieee80211_vif_copy_chanctx_to_vlans(sdata, true); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c344e0bc00b7..ca6131c452b5 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -351,7 +351,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) skb = skb_dequeue(&ps->bc_buf); if (skb) { purged++; - dev_kfree_skb(skb); + ieee80211_free_txskb(&local->hw, skb); } total += skb_queue_len(&ps->bc_buf); } @@ -434,7 +434,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { ps_dbg(tx->sdata, "BC TX buffer full - dropping the oldest frame\n"); - dev_kfree_skb(skb_dequeue(&ps->bc_buf)); + ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); } else tx->local->total_ps_buffered++; @@ -2989,7 +2989,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); if (!ieee80211_tx_prepare(sdata, &tx, skb)) break; - dev_kfree_skb_any(skb); + 
ieee80211_free_txskb(hw, skb); } info = IEEE80211_SKB_CB(skb); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index c45186f91cc8..16120a636bd4 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -556,7 +556,7 @@ static int exp_seq_show(struct seq_file *s, void *v) helper = rcu_dereference(nfct_help(expect->master)->helper); if (helper) { seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); - if (helper->expect_policy[expect->class].name) + if (helper->expect_policy[expect->class].name[0]) seq_printf(s, "/%s", helper->expect_policy[expect->class].name); } diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 7a83e57ad3f1..bd55f0c31795 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -985,10 +985,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, struct net *net = sock_net(ctnl); struct nfnl_queue_net *q = nfnl_queue_pernet(net); - queue = instance_lookup(q, queue_num); - if (!queue) - queue = verdict_instance_lookup(q, queue_num, - NETLINK_CB(skb).portid); + queue = verdict_instance_lookup(q, queue_num, + NETLINK_CB(skb).portid); if (IS_ERR(queue)) return PTR_ERR(queue); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 1393af786e15..3ca6db13af92 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -721,6 +721,56 @@ int xt_check_entry_offsets(const void *base, } EXPORT_SYMBOL(xt_check_entry_offsets); +/** + * xt_alloc_entry_offsets - allocate array to store rule head offsets + * + * @size: number of entries + * + * Return: NULL or kmalloc'd or vmalloc'd array + */ +unsigned int *xt_alloc_entry_offsets(unsigned int size) +{ + unsigned int *off; + + off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN); + + if (off) + return off; + + if (size < (SIZE_MAX / sizeof(unsigned int))) + off = vmalloc(size * sizeof(unsigned int)); + + return off; +} +EXPORT_SYMBOL(xt_alloc_entry_offsets); + +/** + * xt_find_jump_offset - check if target is a valid jump offset + * + * @offsets: array containing all valid rule start offsets of a rule blob + * @target: the jump target to search for + * @size: entries in @offset + */ +bool xt_find_jump_offset(const unsigned int *offsets, + unsigned int target, unsigned int size) +{ + int m, low = 0, hi = size; + + while (hi > low) { + m = (low + hi) / 2u; + + if (offsets[m] > target) + hi = m; + else if (offsets[m] < target) + low = m + 1; + else + return true; + } + + return false; +} +EXPORT_SYMBOL(xt_find_jump_offset); + int xt_check_target(struct xt_tgchk_param *par, unsigned int size, u_int8_t proto, bool inv_proto) { diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 3045a964f39c..8473d34f2e3a 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -699,7 +699,11 @@ socket_setattr_return: */ void netlbl_sock_delattr(struct sock *sk) { - cipso_v4_sock_delattr(sk); + switch (sk->sk_family) { + case AF_INET: + cipso_v4_sock_delattr(sk); + break; + } } /** @@ -862,7 +866,11 @@ req_setattr_return: */ void netlbl_req_delattr(struct request_sock *req) { - cipso_v4_req_delattr(req); + switch (req->rsk_ops->family) { + case AF_INET: + cipso_v4_req_delattr(req); + break; + } } /** diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index e1543b03e39d..c0bdd3bce189 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -590,18 +590,19 @@ struct Qdisc 
*qdisc_create_dflt(struct netdev_queue *dev_queue, struct Qdisc *sch; if (!try_module_get(ops->owner)) - goto errout; + return NULL; sch = qdisc_alloc(dev_queue, ops); - if (IS_ERR(sch)) - goto errout; + if (IS_ERR(sch)) { + module_put(ops->owner); + return NULL; + } sch->parent = parentid; if (!ops->init || ops->init(sch, NULL) == 0) return sch; qdisc_destroy(sch); -errout: return NULL; } EXPORT_SYMBOL(qdisc_create_dflt); diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index b6e440baccc3..265be00a4ac5 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -326,12 +326,14 @@ gss_release_msg(struct gss_upcall_msg *gss_msg) } static struct gss_upcall_msg * -__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid) +__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth) { struct gss_upcall_msg *pos; list_for_each_entry(pos, &pipe->in_downcall, list) { if (!uid_eq(pos->uid, uid)) continue; + if (auth && pos->auth->service != auth->service) + continue; atomic_inc(&pos->count); dprintk("RPC: %s found msg %p\n", __func__, pos); return pos; @@ -351,7 +353,7 @@ gss_add_msg(struct gss_upcall_msg *gss_msg) struct gss_upcall_msg *old; spin_lock(&pipe->lock); - old = __gss_find_upcall(pipe, gss_msg->uid); + old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth); if (old == NULL) { atomic_inc(&gss_msg->count); list_add(&gss_msg->list, &pipe->in_downcall); @@ -700,7 +702,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) err = -ENOENT; /* Find a matching upcall */ spin_lock(&pipe->lock); - gss_msg = __gss_find_upcall(pipe, uid); + gss_msg = __gss_find_upcall(pipe, uid, NULL); if (gss_msg == NULL) { spin_unlock(&pipe->lock); goto err_put_ctx; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 78c809c1f819..a7e5624a1cf2 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1189,11 +1189,16 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv) *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); /* Encode reply */ - if (rqstp->rq_dropme) { + if (*statp == rpc_drop_reply || rqstp->rq_dropme) { if (procp->pc_release) procp->pc_release(rqstp, NULL, rqstp->rq_resp); goto dropit; } + if (*statp == rpc_autherr_badcred) { + if (procp->pc_release) + procp->pc_release(rqstp, NULL, rqstp->rq_resp); + goto err_bad_auth; + } if (*statp == rpc_success && (xdr = procp->pc_encode) && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 56918f89a1d6..d4fb275c4c20 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5980,7 +5980,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info) params.n_counter_offsets_presp = len / sizeof(u16); if (rdev->wiphy.max_num_csa_counters && - (params.n_counter_offsets_beacon > + (params.n_counter_offsets_presp > rdev->wiphy.max_num_csa_counters)) return -EINVAL; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 86f381b09d8d..725cf243860c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -339,6 +339,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) { tasklet_hrtimer_cancel(&x->mtimer); del_timer_sync(&x->rtimer); + kfree(x->aead); kfree(x->aalg); kfree(x->ealg); kfree(x->calg); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index d4db6ebb089d..8059e412c9f3 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -559,9 +559,12 @@ static struct xfrm_state 
*xfrm_state_construct(struct net *net, if (err) goto error; - if (attrs[XFRMA_SEC_CTX] && - security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX]))) - goto error; + if (attrs[XFRMA_SEC_CTX]) { + err = security_xfrm_state_alloc(x, + nla_data(attrs[XFRMA_SEC_CTX])); + if (err) + goto error; + } if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, attrs[XFRMA_REPLAY_ESN_VAL]))) @@ -869,7 +872,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb) struct sock *sk = cb->skb->sk; struct net *net = sock_net(sk); - xfrm_state_walk_done(walk, net); + if (cb->args[0]) + xfrm_state_walk_done(walk, net); return 0; } @@ -894,8 +898,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) u8 proto = 0; int err; - cb->args[0] = 1; - err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX, xfrma_policy); if (err < 0) @@ -914,6 +916,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) proto = nla_get_u8(attrs[XFRMA_PROTO]); xfrm_state_walk_init(walk, proto, filter); + cb->args[0] = 1; } (void) xfrm_state_walk(net, walk, dump_one_state, &info); diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 56f497b27c49..a97ae04f5d2a 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -33,10 +33,17 @@ #include <string.h> #include <unistd.h> +/* + * glibc synced up and added the metag number but didn't add the relocations. + * Work around this in a crude manner for now. + */ #ifndef EM_METAG -/* Remove this when these make it to the standard system elf.h. */ #define EM_METAG 174 +#endif +#ifndef R_METAG_ADDR32 #define R_METAG_ADDR32 2 +#endif +#ifndef R_METAG_NONE #define R_METAG_NONE 3 #endif diff --git a/security/keys/proc.c b/security/keys/proc.c index d3f6f2fd21db..62a86e8cb907 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -187,7 +187,7 @@ static int proc_keys_show(struct seq_file *m, void *v) struct timespec now; unsigned long timo; key_ref_t key_ref, skey_ref; - char xbuf[12]; + char xbuf[16]; int rc; struct keyring_search_context ctx = { diff --git a/sound/core/control.c b/sound/core/control.c index 1d395d865f1b..665020b7c82e 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -150,6 +150,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask, if (snd_BUG_ON(!card || !id)) return; + if (card->shutdown) + return; read_lock(&card->ctl_files_rwlock); #if IS_ENABLED(CONFIG_SND_MIXER_OSS) card->mixer_oss_change_count++; diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 43932e8dce66..2c9dc3106340 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -806,6 +806,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device, } EXPORT_SYMBOL(snd_pcm_new_internal); +static void free_chmap(struct snd_pcm_str *pstr) +{ + if (pstr->chmap_kctl) { + snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl); + pstr->chmap_kctl = NULL; + } +} + static void snd_pcm_free_stream(struct snd_pcm_str * pstr) { struct snd_pcm_substream *substream, *substream_next; @@ -828,6 +836,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr) kfree(setup); } #endif + free_chmap(pstr); } static int snd_pcm_free(struct snd_pcm *pcm) @@ -1142,10 +1151,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device) break; } snd_unregister_device(devtype, pcm->card, pcm->device); - if (pcm->streams[cidx].chmap_kctl) { - snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl); - pcm->streams[cidx].chmap_kctl = NULL; - } + free_chmap(&pcm->streams[cidx]); } 
mutex_unlock(&pcm->open_mutex); unlock: diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index 857741431fc6..b5c88e08f3fe 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -1637,12 +1637,14 @@ static int snd_rawmidi_dev_register(struct snd_device *device) return -EBUSY; } list_add_tail(&rmidi->list, &snd_rawmidi_devices); + mutex_unlock(&register_mutex); sprintf(name, "midiC%iD%i", rmidi->card->number, rmidi->device); if ((err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device, &snd_rawmidi_f_ops, rmidi, name)) < 0) { rmidi_err(rmidi, "unable to register rawmidi device %i:%i\n", rmidi->card->number, rmidi->device); + mutex_lock(&register_mutex); list_del(&rmidi->list); mutex_unlock(&register_mutex); return err; @@ -1650,6 +1652,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device) if (rmidi->ops && rmidi->ops->dev_register && (err = rmidi->ops->dev_register(rmidi)) < 0) { snd_unregister_device(SNDRV_DEVICE_TYPE_RAWMIDI, rmidi->card, rmidi->device); + mutex_lock(&register_mutex); list_del(&rmidi->list); mutex_unlock(&register_mutex); return err; @@ -1682,7 +1685,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device) } } #endif /* CONFIG_SND_OSSEMUL */ - mutex_unlock(&register_mutex); sprintf(name, "midi%d", rmidi->device); entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root); if (entry) { diff --git a/sound/core/timer.c b/sound/core/timer.c index cdc22503fdb8..d32c631cca6b 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -35,6 +35,9 @@ #include <sound/initval.h> #include <linux/kmod.h> +/* internal flags */ +#define SNDRV_TIMER_IFLG_PAUSED 0x00010000 + #if IS_ENABLED(CONFIG_SND_HRTIMER) #define DEFAULT_TIMER_LIMIT 4 #elif IS_ENABLED(CONFIG_SND_RTCTIMER) @@ -296,8 +299,21 @@ int snd_timer_open(struct snd_timer_instance **ti, get_device(&timer->card->card_dev); timeri->slave_class = tid->dev_sclass; timeri->slave_id = slave_id; - if (list_empty(&timer->open_list_head) && timer->hw.open) - timer->hw.open(timer); + + if (list_empty(&timer->open_list_head) && timer->hw.open) { + int err = timer->hw.open(timer); + if (err) { + kfree(timeri->owner); + kfree(timeri); + + if (timer->card) + put_device(&timer->card->card_dev); + module_put(timer->module); + mutex_unlock(&register_mutex); + return err; + } + } + list_add_tail(&timeri->open_list, &timer->open_list_head); snd_timer_check_master(timeri); mutex_unlock(&register_mutex); @@ -556,6 +572,10 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event) } } timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START); + if (event == SNDRV_TIMER_EVENT_STOP) + timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED; + else + timeri->flags |= SNDRV_TIMER_IFLG_PAUSED; spin_unlock_irqrestore(&timer->lock, flags); __end: if (event != SNDRV_TIMER_EVENT_RESOLUTION) @@ -598,6 +618,10 @@ int snd_timer_continue(struct snd_timer_instance *timeri) if (timeri == NULL) return result; + /* timer can continue only after pause */ + if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED)) + return -EINVAL; + if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) return snd_timer_start_slave(timeri); timer = timeri->timer; @@ -839,6 +863,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, timer->tmr_subdevice = tid->subdevice; if (id) strlcpy(timer->id, id, sizeof(timer->id)); + timer->sticks = 1; INIT_LIST_HEAD(&timer->device_list); INIT_LIST_HEAD(&timer->open_list_head); INIT_LIST_HEAD(&timer->active_list_head); @@ -1830,6 +1855,9 @@ static int
snd_timer_user_continue(struct file *file) tu = file->private_data; if (!tu->timeri) return -EBADFD; + /* start timer instead of continue if it's not used before */ + if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED)) + return snd_timer_user_start(file); tu->timeri->lost = 0; return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0; } @@ -1971,6 +1999,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, tu->qused--; spin_unlock_irq(&tu->qlock); + mutex_lock(&tu->ioctl_lock); if (tu->tread) { if (copy_to_user(buffer, &tu->tqueue[qhead], sizeof(struct snd_timer_tread))) @@ -1980,6 +2009,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, sizeof(struct snd_timer_read))) err = -EFAULT; } + mutex_unlock(&tu->ioctl_lock); spin_lock_irq(&tu->qlock); if (err < 0) diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h index 084d414b228c..b431c34b2d74 100644 --- a/sound/firewire/fireworks/fireworks.h +++ b/sound/firewire/fireworks/fireworks.h @@ -106,7 +106,6 @@ struct snd_efw { u8 *resp_buf; u8 *pull_ptr; u8 *push_ptr; - unsigned int resp_queues; }; int snd_efw_transaction_cmd(struct fw_unit *unit, diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c index 33df8655fe81..2e1d9a23920c 100644 --- a/sound/firewire/fireworks/fireworks_hwdep.c +++ b/sound/firewire/fireworks/fireworks_hwdep.c @@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained, { unsigned int length, till_end, type; struct snd_efw_transaction *t; + u8 *pull_ptr; long count = 0; if (remained < sizeof(type) + sizeof(struct snd_efw_transaction)) @@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained, buf += sizeof(type); /* write into buffer as many responses as possible */ - while (efw->resp_queues > 0) { - t = (struct snd_efw_transaction *)(efw->pull_ptr); + spin_lock_irq(&efw->lock); + + /* + * When another task reaches here during this task's access to user + * space, it picks up current position in buffer and can read the same + * series of responses. + */ + pull_ptr = efw->pull_ptr; + + while (efw->push_ptr != pull_ptr) { + t = (struct snd_efw_transaction *)(pull_ptr); length = be32_to_cpu(t->length) * sizeof(__be32); /* confirm enough space for this response */ @@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained, /* copy from ring buffer to user buffer */ while (length > 0) { till_end = snd_efw_resp_buf_size - - (unsigned int)(efw->pull_ptr - efw->resp_buf); + (unsigned int)(pull_ptr - efw->resp_buf); till_end = min_t(unsigned int, length, till_end); - if (copy_to_user(buf, efw->pull_ptr, till_end)) + spin_unlock_irq(&efw->lock); + + if (copy_to_user(buf, pull_ptr, till_end)) return -EFAULT; - efw->pull_ptr += till_end; - if (efw->pull_ptr >= efw->resp_buf + - snd_efw_resp_buf_size) - efw->pull_ptr -= snd_efw_resp_buf_size; + spin_lock_irq(&efw->lock); + + pull_ptr += till_end; + if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size) + pull_ptr -= snd_efw_resp_buf_size; length -= till_end; buf += till_end; count += till_end; remained -= till_end; } - - efw->resp_queues--; } + /* + * All of tasks can read from the buffer nearly simultaneously, but the + * last position for each task is different depending on the length of + * given buffer. Here, for simplicity, a position of buffer is set by + * the latest task. 
It's better for a listening application to allow one + * thread to read from the buffer. Unless, each task can read different + * sequence of responses depending on variation of buffer length. + */ + efw->pull_ptr = pull_ptr; + + spin_unlock_irq(&efw->lock); + return count; } @@ -76,14 +99,17 @@ static long hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count, loff_t *offset) { - union snd_firewire_event event; + union snd_firewire_event event = { + .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS, + }; - memset(&event, 0, sizeof(event)); + spin_lock_irq(&efw->lock); - event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS; event.lock_status.status = (efw->dev_lock_count > 0); efw->dev_lock_changed = false; + spin_unlock_irq(&efw->lock); + count = min_t(long, count, sizeof(event.lock_status)); if (copy_to_user(buf, &event, count)) @@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, { struct snd_efw *efw = hwdep->private_data; DEFINE_WAIT(wait); + bool dev_lock_changed; + bool queued; spin_lock_irq(&efw->lock); - while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) { + dev_lock_changed = efw->dev_lock_changed; + queued = efw->push_ptr != efw->pull_ptr; + + while (!dev_lock_changed && !queued) { prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE); spin_unlock_irq(&efw->lock); schedule(); @@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count, if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irq(&efw->lock); + dev_lock_changed = efw->dev_lock_changed; + queued = efw->push_ptr != efw->pull_ptr; } - if (efw->dev_lock_changed) + spin_unlock_irq(&efw->lock); + + if (dev_lock_changed) count = hwdep_read_locked(efw, buf, count, offset); - else if (efw->resp_queues > 0) + else if (queued) count = hwdep_read_resp_buf(efw, buf, count, offset); - spin_unlock_irq(&efw->lock); - return count; } @@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait) poll_wait(file, &efw->hwdep_wait, wait); spin_lock_irq(&efw->lock); - if (efw->dev_lock_changed || (efw->resp_queues > 0)) + if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr) events = POLLIN | POLLRDNORM; else events = 0; diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c index f29d4aaf56a1..5b3753b05006 100644 --- a/sound/firewire/fireworks/fireworks_proc.c +++ b/sound/firewire/fireworks/fireworks_proc.c @@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry, else consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr); - snd_iprintf(buffer, "%d %d/%d\n", - efw->resp_queues, consumed, snd_efw_resp_buf_size); + snd_iprintf(buffer, "%d/%d\n", + consumed, snd_efw_resp_buf_size); } static void diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c index 2a85e4209f0b..8e9abbb6b68c 100644 --- a/sound/firewire/fireworks/fireworks_transaction.c +++ b/sound/firewire/fireworks/fireworks_transaction.c @@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode) size_t capacity, till_end; struct snd_efw_transaction *t; - spin_lock_irq(&efw->lock); - t = (struct snd_efw_transaction *)data; length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); + spin_lock_irq(&efw->lock); + if (efw->push_ptr < efw->pull_ptr) capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); else @@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void 
*data, size_t length, int *rcode) } /* for hwdep */ - efw->resp_queues++; wake_up(&efw->hwdep_wait); *rcode = RCODE_COMPLETE; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 68bea67c3ca3..22b7038cff0d 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -5793,13 +5793,15 @@ void *snd_array_new(struct snd_array *array) return NULL; if (array->used >= array->alloced) { int num = array->alloced + array->alloc_align; + int oldsize = array->alloced * array->elem_size; int size = (num + 1) * array->elem_size; void *nlist; if (snd_BUG_ON(num >= 4096)) return NULL; - nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO); + nlist = krealloc(array->list, size, GFP_KERNEL); if (!nlist) return NULL; + memset(nlist + oldsize, 0, size - oldsize); array->list = nlist; array->alloced = num; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 2f523b08f576..8ff15d834ace 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -950,8 +950,10 @@ static int azx_free(struct azx *chip) if (use_vga_switcheroo(chip)) { if (chip->disabled && chip->bus) snd_hda_unlock_devices(chip->bus); - if (chip->vga_switcheroo_registered) + if (chip->vga_switcheroo_registered) { vga_switcheroo_unregister_client(chip->pci); + vga_switcheroo_fini_domain_pm_ops(chip->card->dev); + } } if (chip->initialized) { diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4dd1538c1257..187d33729b50 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5837,6 +5837,7 @@ enum { ALC668_FIXUP_DELL_XPS13, ALC662_FIXUP_ASUS_Nx50, ALC668_FIXUP_ASUS_Nx51, + ALC662_FIXUP_ACER_VERITON, }; static const struct hda_fixup alc662_fixups[] = { @@ -6078,6 +6079,13 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC662_FIXUP_BASS_CHMAP, }, + [ALC662_FIXUP_ACER_VERITON] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x15, 0x50170120 }, /* no internal speaker */ + { } + } + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -6113,6 +6121,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68), + SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON), SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), #if 0 diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c index f0e2ebeab02b..301acfe5aab7 100644 --- a/sound/soc/omap/omap-mcpdm.c +++ b/sound/soc/omap/omap-mcpdm.c @@ -390,8 +390,8 @@ static int omap_mcpdm_probe(struct snd_soc_dai *dai) pm_runtime_get_sync(mcpdm->dev); omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00); - ret = devm_request_irq(mcpdm->dev, mcpdm->irq, omap_mcpdm_irq_handler, - 0, "McPDM", (void *)mcpdm); + ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler, 0, "McPDM", + (void *)mcpdm); pm_runtime_put_sync(mcpdm->dev); @@ -416,6 +416,7 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai) { struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai); + free_irq(mcpdm->irq, (void *)mcpdm); pm_runtime_disable(mcpdm->dev); return 0; diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c index 808d5a9d5dcf..bcc6125657e5 100644 --- a/tools/vm/slabinfo.c +++ b/tools/vm/slabinfo.c @@ -493,10 +493,11 @@ static void slab_stats(struct slabinfo *s) 
s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total); } - if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) + if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) { printf("\nCmpxchg_double Looping\n------------------------\n"); printf("Locked Cmpxchg Double redos %lu\nUnlocked Cmpxchg Double redos %lu\n", s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail); + } } static void report(struct slabinfo *s) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a63127acc67d..6103f9238774 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -137,6 +137,7 @@ int vcpu_load(struct kvm_vcpu *vcpu) put_cpu(); return 0; } +EXPORT_SYMBOL_GPL(vcpu_load); void vcpu_put(struct kvm_vcpu *vcpu) { @@ -146,6 +147,7 @@ void vcpu_put(struct kvm_vcpu *vcpu) preempt_enable(); mutex_unlock(&vcpu->mutex); } +EXPORT_SYMBOL_GPL(vcpu_put); static void ack_flush(void *_completed) {
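
A note on the x_tables hunk above: the new xt_find_jump_offset() helper validates a jump target by binary-searching the sorted array of rule start offsets that callers build with xt_alloc_entry_offsets(), so a verdict can only jump to the beginning of a known rule. The following is a minimal user-space sketch of that search, not the kernel code itself; the test harness and sample offsets are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/*
 * Check whether 'target' is one of the entries in a sorted array of rule
 * start offsets, in the same way xt_find_jump_offset() above does.
 */
static bool find_jump_offset(const unsigned int *offsets,
                             unsigned int target, unsigned int size)
{
        unsigned int low = 0, hi = size;

        while (hi > low) {
                unsigned int m = (low + hi) / 2;

                if (offsets[m] > target)
                        hi = m;
                else if (offsets[m] < target)
                        low = m + 1;
                else
                        return true;    /* target starts a known rule */
        }
        return false;                   /* jump would land inside a rule */
}

int main(void)
{
        /* hypothetical rule start offsets, ascending as the kernel builds them */
        const unsigned int offsets[] = { 0, 112, 260, 404 };

        printf("%d %d\n",
               find_jump_offset(offsets, 260, 4),       /* 1: valid target */
               find_jump_offset(offsets, 300, 4));      /* 0: rejected */
        return 0;
}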
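
A note on the fireworks_hwdep.c rework above: the read path now takes a local snapshot of efw->pull_ptr, drops the spinlock across each copy_to_user() (which may fault and sleep), and publishes the advanced position under the lock only at the end. Below is a condensed user-space sketch of the same pattern, with a pthread mutex standing in for the spinlock and memcpy() for copy_to_user(); struct ring, ring_read() and the producer invariant noted in the comment are assumptions of the sketch, not kernel API.

#include <pthread.h>
#include <string.h>

struct ring {
        pthread_mutex_t lock;
        char buf[256];
        char *push_ptr;                 /* producer cursor, advanced under lock */
        char *pull_ptr;                 /* consumer cursor, advanced under lock */
};

/* Drain up to 'len' queued bytes into 'dst' without holding the lock across memcpy(). */
static size_t ring_read(struct ring *r, char *dst, size_t len)
{
        size_t count = 0;
        char *pull;

        pthread_mutex_lock(&r->lock);
        pull = r->pull_ptr;             /* local snapshot of the read position */

        while (count < len && r->push_ptr != pull) {
                size_t avail;

                if (r->push_ptr > pull)         /* contiguous data up to push_ptr */
                        avail = (size_t)(r->push_ptr - pull);
                else                            /* data wraps: copy up to buffer end first */
                        avail = sizeof(r->buf) - (size_t)(pull - r->buf);
                if (avail > len - count)
                        avail = len - count;

                /*
                 * Assumed invariant: the producer never writes past the
                 * published r->pull_ptr, which has not moved yet, so the
                 * snapshotted region stays stable while the lock is dropped.
                 */
                pthread_mutex_unlock(&r->lock);
                memcpy(dst + count, pull, avail);
                pthread_mutex_lock(&r->lock);

                pull += avail;
                if (pull == r->buf + sizeof(r->buf))
                        pull = r->buf;
                count += avail;
        }

        r->pull_ptr = pull;             /* publish the new position */
        pthread_mutex_unlock(&r->lock);
        return count;
}

int main(void)
{
        struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
        char out[16];

        r.push_ptr = r.buf;
        r.pull_ptr = r.buf;
        memcpy(r.buf, "hello", 5);
        r.push_ptr += 5;                /* pretend a producer queued 5 bytes */

        return ring_read(&r, out, sizeof(out)) == 5 ? 0 : 1;
}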
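
A note on the hda_codec.c hunk above: krealloc() with __GFP_ZERO does not reliably zero the newly extended part of the buffer, so the fix drops the flag and clears the grown region with an explicit memset(). The same pitfall applies to realloc() in user space; here is a small illustrative helper mirroring that sequence (grow_zeroed() is a hypothetical name, not an existing API).

#include <stdlib.h>
#include <string.h>

/*
 * Grow a heap buffer and zero only the newly added tail, mirroring the
 * krealloc() + memset() sequence used in the snd_array_new() fix above.
 */
static void *grow_zeroed(void *buf, size_t old_size, size_t new_size)
{
        void *nbuf = realloc(buf, new_size);

        if (!nbuf)
                return NULL;            /* the old buffer is still valid */
        if (new_size > old_size)
                memset((char *)nbuf + old_size, 0, new_size - old_size);
        return nbuf;
}

int main(void)
{
        int *a = calloc(4, sizeof(int));
        int *b;

        if (!a)
                return 1;
        b = grow_zeroed(a, 4 * sizeof(int), 8 * sizeof(int));
        if (!b) {
                free(a);
                return 1;
        }
        /* b[0..3] keep their old values; b[4..7] now read as zero */
        free(b);
        return 0;
}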