diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 36ecc26c7433..d958ed640cee 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -656,7 +656,8 @@ The batch value of each per cpu pagelist is also updated as a result. It is set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8) The initial value is zero. Kernel does not use this value at boot time to set -the high water marks for each per cpu page list. +the high water marks for each per cpu page list. If the user writes '0' to this +sysctl, it will revert to this default behavior. ============================================================== diff --git a/Makefile b/Makefile index 355d48131ece..305f1d382b99 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 11 SUBLEVEL = 10 -EXTRAVERSION = .13 +EXTRAVERSION = .14 NAME = Linux for Workgroups # *DOCUMENTATION* diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 2618cc13ba75..76a7739aab1c 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -11,6 +11,7 @@ #ifndef _UAPI__ASM_ARC_PTRACE_H #define _UAPI__ASM_ARC_PTRACE_H +#define PTRACE_GET_THREAD_AREA 25 #ifndef __ASSEMBLY__ /* diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c index 5d76706139dd..13b3ffb27a38 100644 --- a/arch/arc/kernel/ptrace.c +++ b/arch/arc/kernel/ptrace.c @@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request, pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data); switch (request) { + case PTRACE_GET_THREAD_AREA: + ret = put_user(task_thread_info(child)->thr_ptr, + (unsigned long __user *)data); + break; default: ret = ptrace_request(child, request, addr, data); break; diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index f82cf878d6af..94c2f6d17dae 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition, m0_entry = mux->muxnames[0]; /* First check for full name in mode0.muxmode format */ - if (mode0_len && strncmp(muxname, m0_entry, mode0_len)) - continue; + if (mode0_len) + if (strncmp(muxname, m0_entry, mode0_len) || + (strlen(m0_entry) != mode0_len)) + continue; /* Then check for muxmode only */ for (i = 0; i < OMAP_MUX_NR_MODES; i++) { diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 20925bcf4e2a..e1134d04e07e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -51,6 +51,8 @@ #define TASK_SIZE_32 UL(0x100000000) #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? 
\ + TASK_SIZE_32 : TASK_SIZE_64) #else #define TASK_SIZE TASK_SIZE_64 #endif /* CONFIG_COMPAT */ diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index e4193e3adc7f..0d64089d28b5 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr) return; if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { - __flush_dcache_area(page_address(page), PAGE_SIZE); + __flush_dcache_area(page_address(page), + PAGE_SIZE << compound_order(page)); __flush_icache_all(); } else if (icache_is_aivivt()) { __flush_icache_all(); diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index fab40f7d2e03..ac9facc08694 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma board_bind_eic_interrupt = &msc_bind_eic_interrupt; - for (; nirq >= 0; nirq--, imp++) { + for (; nirq > 0; nirq--, imp++) { int n = imp->im_irq; switch (imp->im_type) { diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index 426345ac6f6e..2c7b3ade8ec0 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c @@ -149,9 +149,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm) if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); } - - if (kvm->arch.guest_pmap) - kfree(kvm->arch.guest_pmap); + kfree(kvm->arch.guest_pmap); kvm_for_each_vcpu(i, vcpu, kvm) { kvm_arch_vcpu_free(vcpu); @@ -384,12 +382,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvm_mips_dump_stats(vcpu); - if (vcpu->arch.guest_ebase) - kfree(vcpu->arch.guest_ebase); - - if (vcpu->arch.kseg0_commpage) - kfree(vcpu->arch.kseg0_commpage); - + kfree(vcpu->arch.guest_ebase); + kfree(vcpu->arch.kseg0_commpage); + kfree(vcpu); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 8b2492644754..10d37ae9ab5b 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -60,8 +60,7 @@ struct power_pmu { #define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ -#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ -#define PPMU_EBB 0x00000100 /* supports event based branch */ +#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ /* * Values for flags to get_alternatives() diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index d1f90d3d6f07..93ce00413e25 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -512,7 +512,7 @@ void timer_interrupt(struct pt_regs * regs) __get_cpu_var(irq_stat).timer_irqs++; -#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) +#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); #endif diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 99c7fc16dc0d..fd4a24541a4b 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1395,7 +1395,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) regs->gpr[rd] = byterev_4(val); goto ldst_done; -#ifdef CONFIG_PPC_CPU +#ifdef CONFIG_PPC_FPU case 535: /* lfsx */ case 567: /* lfsux */ if (!(regs->msr & MSR_FP)) diff --git a/arch/powerpc/perf/core-book3s.c 
b/arch/powerpc/perf/core-book3s.c index eeae308cf982..c05f10e95a9e 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -483,7 +483,7 @@ static bool is_ebb_event(struct perf_event *event) * check that the PMU supports EBB, meaning those that don't can still * use bit 63 of the event code for something else if they wish. */ - return (ppmu->flags & PPMU_EBB) && + return (ppmu->flags & PPMU_ARCH_207S) && ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); } @@ -851,7 +851,22 @@ static void power_pmu_read(struct perf_event *event) } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); local64_add(delta, &event->count); - local64_sub(delta, &event->hw.period_left); + + /* + * A number of places program the PMC with (0x80000000 - period_left). + * We never want period_left to be less than 1 because we will program + * the PMC with a value >= 0x800000000 and an edge detected PMC will + * roll around to 0 before taking an exception. We have seen this + * on POWER8. + * + * To fix this, clamp the minimum value of period_left to 1. + */ + do { + prev = local64_read(&event->hw.period_left); + val = prev - delta; + if (val < 1) + val = 1; + } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); } /* @@ -1149,6 +1164,9 @@ static void power_pmu_enable(struct pmu *pmu) mb(); write_mmcr0(cpuhw, mmcr0); + if (ppmu->flags & PPMU_ARCH_207S) + mtspr(SPRN_MMCR2, 0); + /* * Enable instruction sampling if necessary */ @@ -1547,7 +1565,7 @@ static int power_pmu_event_init(struct perf_event *event) if (has_branch_stack(event)) { /* PMU has BHRB enabled */ - if (!(ppmu->flags & PPMU_BHRB)) + if (!(ppmu->flags & PPMU_ARCH_207S)) return -EOPNOTSUPP; } diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index a3f7abd2f13f..79b7e200c0e7 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ b/arch/powerpc/perf/power8-pmu.c @@ -608,7 +608,7 @@ static struct power_pmu power8_pmu = { .get_constraint = power8_get_constraint, .get_alternatives = power8_get_alternatives, .disable_pmc = power8_disable_pmc, - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, + .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, .attr_groups = power8_pmu_attr_groups, diff --git a/arch/score/Kconfig b/arch/score/Kconfig index 5fc237581caf..305f7ee1f382 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig @@ -111,3 +111,6 @@ source "security/Kconfig" source "crypto/Kconfig" source "lib/Kconfig" + +config NO_IOMEM + def_bool y diff --git a/arch/score/Makefile b/arch/score/Makefile index 974aefe86123..9e3e060290e0 100644 --- a/arch/score/Makefile +++ b/arch/score/Makefile @@ -20,8 +20,8 @@ cflags-y += -G0 -pipe -mel -mnhwloop -D__SCOREEL__ \ # KBUILD_AFLAGS += $(cflags-y) KBUILD_CFLAGS += $(cflags-y) -KBUILD_AFLAGS_MODULE += -mlong-calls -KBUILD_CFLAGS_MODULE += -mlong-calls +KBUILD_AFLAGS_MODULE += +KBUILD_CFLAGS_MODULE += LDFLAGS += --oformat elf32-littlescore LDFLAGS_vmlinux += -G0 -static -nostdlib diff --git a/arch/score/include/asm/checksum.h b/arch/score/include/asm/checksum.h index f909ac3144a4..961bd64015a8 100644 --- a/arch/score/include/asm/checksum.h +++ b/arch/score/include/asm/checksum.h @@ -184,48 +184,57 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, __wsum sum) { __asm__ __volatile__( - ".set\tnoreorder\t\t\t# csum_ipv6_magic\n\t" - ".set\tnoat\n\t" - "addu\t%0, %5\t\t\t# proto (long in network byte order)\n\t" 
- "sltu\t$1, %0, %5\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %6\t\t\t# csum\n\t" - "sltu\t$1, %0, %6\n\t" - "lw\t%1, 0(%2)\t\t\t# four words source address\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "lw\t%1, 4(%2)\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "lw\t%1, 8(%2)\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "lw\t%1, 12(%2)\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "lw\t%1, 0(%3)\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "lw\t%1, 4(%3)\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "lw\t%1, 8(%3)\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "lw\t%1, 12(%3)\n\t" - "addu\t%0, $1\n\t" - "addu\t%0, %1\n\t" - "sltu\t$1, %0, %1\n\t" - "addu\t%0, $1\t\t\t# Add final carry\n\t" - ".set\tnoat\n\t" - ".set\tnoreorder" + ".set\tvolatile\t\t\t# csum_ipv6_magic\n\t" + "add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t" + "cmp.c\t%5, %0\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %6\t\t\t# csum\n\t" + "cmp.c\t%6, %0\n\t" + "lw\t%1, [%2, 0]\t\t\t# four words source address\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "1:lw\t%1, [%2, 4]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%2,8]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%2, 12]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0,%1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 0]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 4]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 8]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "lw\t%1, [%3, 12]\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:add\t%0, %0, %1\n\t" + "cmp.c\t%1, %0\n\t" + "bleu 1f\n\t" + "addi\t%0, 0x1\n\t" + "1:\n\t" + ".set\toptimize" : "=r" (sum), "=r" (proto) : "r" (saddr), "r" (daddr), "0" (htonl(len)), "1" (htonl(proto)), "r" (sum)); diff --git a/arch/score/include/asm/io.h b/arch/score/include/asm/io.h index fbbfd7132e3b..574c8827abe2 100644 --- a/arch/score/include/asm/io.h +++ b/arch/score/include/asm/io.h @@ -5,5 +5,4 @@ #define virt_to_bus virt_to_phys #define bus_to_virt phys_to_virt - #endif /* _ASM_SCORE_IO_H */ diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h index 059a61b7071b..716b3fd1d863 100644 --- a/arch/score/include/asm/pgalloc.h +++ b/arch/score/include/asm/pgalloc.h @@ -2,7 +2,7 @@ #define _ASM_SCORE_PGALLOC_H #include <linux/mm.h> - +#include <linux/highmem.h> static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S index 7234ed09b7b7..befb87d30a89 100644 --- a/arch/score/kernel/entry.S +++ b/arch/score/kernel/entry.S @@ -264,7 +264,7 @@ resume_kernel: disable_irq lw r8, [r28, TI_PRE_COUNT] cmpz.c r8 - bne r8, restore_all + bne restore_all need_resched: lw r8, [r28, TI_FLAGS] andri.c r9, r8, _TIF_NEED_RESCHED @@ -415,7 +415,7 @@ ENTRY(handle_sys) sw r9, [r0, PT_EPC] cmpi.c r27, __NR_syscalls # check syscall number - bgeu illegal_syscall + bcs illegal_syscall slli r8, r27, 2 # get syscall routine la r11, sys_call_table diff --git a/arch/score/kernel/process.c 
b/arch/score/kernel/process.c index f4c6d02421d3..a1519ad3d49d 100644 --- a/arch/score/kernel/process.c +++ b/arch/score/kernel/process.c @@ -78,8 +78,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.reg0 = (unsigned long) childregs; if (unlikely(p->flags & PF_KTHREAD)) { memset(childregs, 0, sizeof(struct pt_regs)); - p->thread->reg12 = usp; - p->thread->reg13 = arg; + p->thread.reg12 = usp; + p->thread.reg13 = arg; p->thread.reg3 = (unsigned long) ret_from_kernel_thread; } else { *childregs = *current_pt_regs(); diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index f30cd10293f0..8626b03e83b7 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) /* save number of bits */ bits[1] = cpu_to_be64(sctx->count[0] << 3); - bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); /* Pad out to 112 mod 128 and append length */ index = sctx->count[0] & 0x7f; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f87f7fcefa0a..e2986788dc6b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -92,7 +92,7 @@ #define KVM_REFILL_PAGES 25 #define KVM_MAX_CPUID_ENTRIES 80 #define KVM_NR_FIXED_MTRR_REGION 88 -#define KVM_NR_VAR_MTRR 8 +#define KVM_NR_VAR_MTRR 10 #define ASYNC_PF_PER_VCPU 64 @@ -453,7 +453,7 @@ struct kvm_vcpu_arch { bool nmi_injected; /* Trying to inject an NMI this entry */ struct mtrr_state_type mtrr_state; - u32 pat; + u64 pat; int switch_db_regs; unsigned long db[KVM_NR_DB_REGS]; diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 942a08623a1a..68e9f007cd4a 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -232,6 +232,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, #define ARCH_HAS_USER_SINGLE_STEP_INFO +/* + * When hitting ptrace_stop(), we cannot return using SYSRET because + * that does not restore the full CPU state, only a minimal set. The + * ptracer can change arbitrary register values, which is usually okay + * because the usual ptrace stops run off the signal delivery path which + * forces IRET; however, ptrace_event() stops happen in arbitrary places + * in the kernel and don't force IRET path. + * + * So force IRET path after a ptrace stop. + */ +#define arch_ptrace_stop_needed(code, info) \ +({ \ + set_thread_flag(TIF_NOTIFY_RESUME); \ + false; \ +}) + struct user_desc; extern int do_get_thread_area(struct task_struct *p, int idx, struct user_desc __user *info); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 0215e2c563ef..6c0262a44524 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -50,6 +50,21 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, return err; } +static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, + void *arg) +{ + unsigned long i; + + for (i = 0; i < nr_pages; ++i) + if (pfn_valid(start_pfn + i) && + !PageReserved(pfn_to_page(start_pfn + i))) + return 1; + + WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); + + return 0; +} + /* * Remap an arbitrary physical address space into the kernel virtual * address space. 
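 * (Aside: walk_system_ram_range() invokes the callback only for chunks registered as System RAM, stops as soon as the callback returns non-zero, and returns -1 if the callback never ran; that is why the caller below compares the result against exactly 1, meaning "a non-reserved RAM page lies in the requested range, refuse to remap".)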
Needed when the kernel wants to access high addresses @@ -93,14 +108,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, /* * Don't allow anybody to remap normal RAM that we're using.. */ + pfn = phys_addr >> PAGE_SHIFT; last_pfn = last_addr >> PAGE_SHIFT; - for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { - int is_ram = page_is_ram(pfn); - - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) - return NULL; - WARN_ON_ONCE(is_ram); - } + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) + return NULL; /* * Mappings have to be page-aligned diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 290792a13e3c..354efcdad847 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, blkg->q = q; INIT_LIST_HEAD(&blkg->q_node); blkg->blkcg = blkcg; - blkg->refcnt = 1; + atomic_set(&blkg->refcnt, 1); /* root blkg uses @q->root_rl, init rl only for !root blkgs */ if (blkcg != &blkcg_root) { @@ -392,11 +392,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head) /* release the blkcg and parent blkg refs this blkg has been holding */ css_put(&blkg->blkcg->css); - if (blkg->parent) { - spin_lock_irq(blkg->q->queue_lock); + if (blkg->parent) blkg_put(blkg->parent); - spin_unlock_irq(blkg->q->queue_lock); - } blkg_free(blkg); } diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index f50082d1e155..c20deb138239 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -18,6 +18,7 @@ #include <linux/seq_file.h> #include <linux/radix-tree.h> #include <linux/blkdev.h> +#include <linux/atomic.h> /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX @@ -104,7 +105,7 @@ struct blkcg_gq { struct request_list rl; /* reference count */ - int refcnt; + atomic_t refcnt; /* is this blkg online? protected by both blkcg and q locks */ bool online; @@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) * blkg_get - get a blkg reference * @blkg: blkg to get * - * The caller should be holding queue_lock and an existing reference. + * The caller should be holding an existing reference. */ static inline void blkg_get(struct blkcg_gq *blkg) { - lockdep_assert_held(blkg->q->queue_lock); - WARN_ON_ONCE(!blkg->refcnt); - blkg->refcnt++; + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + atomic_inc(&blkg->refcnt); } void __blkg_release_rcu(struct rcu_head *rcu); @@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu); /** * blkg_put - put a blkg reference * @blkg: blkg to put - * - * The caller should be holding queue_lock. 
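 * (With the refcount now an atomic_t, any holder of an existing reference can drop it from any context; that is what lets __blkg_release_rcu() above put the parent blkg without taking queue_lock from an RCU callback.)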
*/ static inline void blkg_put(struct blkcg_gq *blkg) { - lockdep_assert_held(blkg->q->queue_lock); - WARN_ON_ONCE(blkg->refcnt <= 0); - if (!--blkg->refcnt) + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + if (atomic_dec_and_test(&blkg->refcnt)) call_rcu(&blkg->rcu_head, __blkg_release_rcu); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index cb2ba88fcac3..06523d5743b6 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -34,6 +34,7 @@ #include <linux/dmi.h> #include <linux/slab.h> #include <linux/suspend.h> +#include <linux/delay.h> #include <asm/unaligned.h> #ifdef CONFIG_ACPI_PROCFS_POWER @@ -1081,6 +1082,28 @@ static struct dmi_system_id bat_dmi_table[] = { {}, }; +/* + * Some machines' (e.g. the Lenovo Z480) have ECs that are not stable + * during boot-up, which sometimes causes the battery driver to fail to + * probe because battery information cannot be read from the EC. After + * several retries the operation may succeed, so retry it here with a + * 20ms sleep between retries. + */ +static int acpi_battery_update_retry(struct acpi_battery *battery) +{ + int retry, ret; + + for (retry = 5; retry; retry--) { + ret = acpi_battery_update(battery); + if (!ret) + break; + + msleep(20); + } + return ret; +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -1100,9 +1123,11 @@ static int acpi_battery_add(struct acpi_device *device) if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, "_BIX", &handle))) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); - result = acpi_battery_update(battery); + + result = acpi_battery_update_retry(battery); if (result) goto fail; + #ifdef CONFIG_ACPI_PROCFS_POWER result = acpi_battery_add_fs(device); #endif diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 3ed713698d3d..8bcc70cdb1a0 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -81,6 +81,9 @@ enum { EC_FLAGS_BLOCKED, /* Transactions are blocked */ }; +#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ +#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ + /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); @@ -116,7 +119,7 @@ struct transaction { u8 ri; u8 wlen; u8 rlen; - bool done; + u8 flags; }; struct acpi_ec *boot_ec, *first_ec; @@ -157,60 +160,74 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) outb(data, ec->data_addr); } -static int ec_transaction_done(struct acpi_ec *ec) +static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; int ret = 0; spin_lock_irqsave(&ec->lock, flags); - if (!ec->curr || ec->curr->done) + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; spin_unlock_irqrestore(&ec->lock, flags); return ret; } -static void start_transaction(struct acpi_ec *ec) +static bool advance_transaction(struct acpi_ec *ec) { - ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; - ec->curr->done = false; - acpi_ec_write_cmd(ec, ec->curr->command); -} - -static void advance_transaction(struct acpi_ec *ec, u8 status) -{ - unsigned long flags; struct transaction *t; + u8 status; + bool wakeup = false; - spin_lock_irqsave(&ec->lock, flags); + pr_debug("===== %s =====\n", in_interrupt() ? 
"IRQ" : "TASK"); + status = acpi_ec_read_status(ec); t = ec->curr; if (!t) - goto unlock; - if (t->wlen > t->wi) { - if ((status & ACPI_EC_FLAG_IBF) == 0) - acpi_ec_write_data(ec, - t->wdata[t->wi++]); - else - goto err; - } else if (t->rlen > t->ri) { - if ((status & ACPI_EC_FLAG_OBF) == 1) { - t->rdata[t->ri++] = acpi_ec_read_data(ec); - if (t->rlen == t->ri) - t->done = true; + goto err; + if (t->flags & ACPI_EC_COMMAND_POLL) { + if (t->wlen > t->wi) { + if ((status & ACPI_EC_FLAG_IBF) == 0) + acpi_ec_write_data(ec, t->wdata[t->wi++]); + else + goto err; + } else if (t->rlen > t->ri) { + if ((status & ACPI_EC_FLAG_OBF) == 1) { + t->rdata[t->ri++] = acpi_ec_read_data(ec); + if (t->rlen == t->ri) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + } else + goto err; + } else if (t->wlen == t->wi && + (status & ACPI_EC_FLAG_IBF) == 0) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + return wakeup; + } else { + if ((status & ACPI_EC_FLAG_IBF) == 0) { + acpi_ec_write_cmd(ec, t->command); + t->flags |= ACPI_EC_COMMAND_POLL; } else goto err; - } else if (t->wlen == t->wi && - (status & ACPI_EC_FLAG_IBF) == 0) - t->done = true; - goto unlock; + return wakeup; + } err: /* * If SCI bit is set, then don't think it's a false IRQ * otherwise will take a not handled IRQ as a false one. */ - if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) - ++t->irq_count; + if (!(status & ACPI_EC_FLAG_SCI)) { + if (in_interrupt() && t) + ++t->irq_count; + } + return wakeup; +} -unlock: - spin_unlock_irqrestore(&ec->lock, flags); +static void start_transaction(struct acpi_ec *ec) +{ + ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; + ec->curr->flags = 0; + (void)advance_transaction(ec); } static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); @@ -235,15 +252,17 @@ static int ec_poll(struct acpi_ec *ec) /* don't sleep with disabled interrupts */ if (EC_FLAGS_MSI || irqs_disabled()) { udelay(ACPI_EC_MSI_UDELAY); - if (ec_transaction_done(ec)) + if (ec_transaction_completed(ec)) return 0; } else { if (wait_event_timeout(ec->wait, - ec_transaction_done(ec), + ec_transaction_completed(ec), msecs_to_jiffies(1))) return 0; } - advance_transaction(ec, acpi_ec_read_status(ec)); + spin_lock_irqsave(&ec->lock, flags); + (void)advance_transaction(ec); + spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->lock, flags); @@ -275,23 +294,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, return ret; } -static int ec_check_ibf0(struct acpi_ec *ec) -{ - u8 status = acpi_ec_read_status(ec); - return (status & ACPI_EC_FLAG_IBF) == 0; -} - -static int ec_wait_ibf0(struct acpi_ec *ec) -{ - unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); - /* interrupt wait manually if GPE mode is not active */ - while (time_before(jiffies, delay)) - if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), - msecs_to_jiffies(1))) - return 0; - return -ETIME; -} - static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) { int status; @@ -312,12 +314,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) goto unlock; } } - if (ec_wait_ibf0(ec)) { - pr_err(PREFIX "input buffer is not empty, " - "aborting transaction\n"); - status = -ETIME; - goto end; - } pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n", t->command, t->wdata ? 
t->wdata[0] : 0); /* disable GPE during transaction if storm is detected */ @@ -341,7 +337,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) set_bit(EC_FLAGS_GPE_STORM, &ec->flags); } pr_debug(PREFIX "transaction end\n"); -end: if (ec->global_lock) acpi_release_global_lock(glk); unlock: @@ -661,17 +656,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *data) { + unsigned long flags; struct acpi_ec *ec = data; - u8 status = acpi_ec_read_status(ec); - pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status); - - advance_transaction(ec, status); - if (ec_transaction_done(ec) && - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { + spin_lock_irqsave(&ec->lock, flags); + if (advance_transaction(ec)) wake_up(&ec->wait); - ec_check_sci(ec, acpi_ec_read_status(ec)); - } + spin_unlock_irqrestore(&ec->lock, flags); + ec_check_sci(ec, acpi_ec_read_status(ec)); return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index eebf3d6ec8db..9568cacdd6a4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1375,6 +1375,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request) return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0; } +static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request) +{ + struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; + + return obj_request->img_offset < + round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header)); +} + static void rbd_obj_request_get(struct rbd_obj_request *obj_request) { dout("%s: obj %p (was %d)\n", __func__, obj_request, @@ -1391,6 +1399,13 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request) kref_put(&obj_request->kref, rbd_obj_request_destroy); } +static void rbd_img_request_get(struct rbd_img_request *img_request) +{ + dout("%s: img %p (was %d)\n", __func__, img_request, + atomic_read(&img_request->kref.refcount)); + kref_get(&img_request->kref); +} + static bool img_request_child_test(struct rbd_img_request *img_request); static void rbd_parent_request_destroy(struct kref *kref); static void rbd_img_request_destroy(struct kref *kref); @@ -2144,6 +2159,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) img_request->next_completion = which; out: spin_unlock_irq(&img_request->completion_lock); + rbd_img_request_put(img_request); if (!more) rbd_img_request_complete(img_request); @@ -2240,6 +2256,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, goto out_partial; obj_request->osd_req = osd_req; obj_request->callback = rbd_img_obj_callback; + rbd_img_request_get(img_request); osd_req_op_extent_init(osd_req, 0, opcode, offset, length, 0, 0); @@ -2662,7 +2679,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) */ if (!img_request_write_test(img_request) || !img_request_layered_test(img_request) || - rbd_dev->parent_overlap <= obj_request->img_offset || + !obj_request_overlaps_parent(obj_request) || ((known = obj_request_known_test(obj_request)) && obj_request_exists_test(obj_request))) { diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c index 080c3c5e33f6..1fe259021747 100644 --- a/drivers/clk/spear/spear3xx_clock.c +++ b/drivers/clk/spear/spear3xx_clock.c @@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { } /* array of all spear 320 clock lookups */ #ifdef CONFIG_MACH_SPEAR320 
-#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) +#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) #define SPEAR320_UARTX_PCLK_MASK 0x1 diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index d345b5a7aa71..5a782e35c2e9 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -50,7 +50,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o # LITTLE drivers, so that it is probed last. obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e52e5420033a..96a4c6e4d526 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -550,6 +550,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu = all_cpu_data[cpunum]; + cpu->cpu = cpunum; intel_pstate_get_cpu_pstates(cpu); if (!cpu->pstate.current_pstate) { all_cpu_data[cpunum] = NULL; @@ -557,7 +558,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) return -ENODATA; } - cpu->cpu = cpunum; cpu->pstate_policy = (struct pstate_adjust_policy *)id->driver_data; init_timer_deferrable(&cpu->timer); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index b438a3a4d0d8..14e83e955182 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -39,7 +39,7 @@ struct pstore_read_data { static inline u64 generic_id(unsigned long timestamp, unsigned int part, int count) { - return (timestamp * 100 + part) * 1000 + count; + return ((u64) timestamp * 100 + part) * 1000 + count; } static int efi_pstore_read_func(struct efivar_entry *entry, void *data) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c old mode 100644 new mode 100755 index 6dd71735cab4..d71e1a46c4d4 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -460,8 +460,9 @@ long drm_ioctl(struct file *filp, retcode = -EFAULT; goto err_i1; } - } else + } else if (cmd & IOC_OUT) { memset(kdata, 0, usize); + } if (ioctl->flags & DRM_UNLOCKED) retcode = func(dev, kdata, file_priv); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 302d2b756e94..02846db501b5 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -1123,12 +1123,12 @@ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ #define EOP_TCL1_ACTION_EN (1 << 16) #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ +#define EOP_TCL2_VOLATILE (1 << 24) #define EOP_CACHE_POLICY(x) ((x) << 25) /* 0 - LRU * 1 - Stream * 2 - Bypass */ -#define EOP_TCL2_VOLATILE (1 << 27) #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 7e5d0b570a30..06338db6d7f1 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -1549,7 +1549,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } return 0; diff --git a/drivers/gpu/drm/radeon/evergreen.c 
b/drivers/gpu/drm/radeon/evergreen.c index f5eb157f69cd..a34e20921711 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -183,7 +183,7 @@ static const u32 evergreen_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002, 0x913c, 0x0000000f, 0x0000000a }; @@ -470,7 +470,7 @@ static const u32 cedar_golden_registers[] = 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002 }; @@ -629,7 +629,7 @@ static const u32 juniper_mgcg_init[] = static const u32 supersumo_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -713,7 +713,7 @@ static const u32 sumo_golden_registers[] = static const u32 wrestler_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 4c64254abd0f..596cc12bf356 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -1294,7 +1294,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev, table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } } diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index a923ba966a78..a4c4f25c93e6 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2333,12 +2333,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev) pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); - /* disable ss, causes hangs on some cayman boards */ - if (rdev->family == CHIP_CAYMAN) { - pi->sclk_ss = false; - pi->mclk_ss = false; - } - if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 021b5227e783..1b0f34bd3a03 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info) vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); - vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); } diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 8e8522d85509..f4ebce7a897e 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -317,9 +317,13 @@ static void process_chn_event(u32 relid) */ do { - hv_begin_read(&channel->inbound); + if (read_state) + hv_begin_read(&channel->inbound); channel->onchannel_callback(arg); - bytes_to_read = hv_end_read(&channel->inbound); + if (read_state) + bytes_to_read = hv_end_read(&channel->inbound); + else + bytes_to_read = 0; } while 
(read_state && (bytes_to_read != 0)); } else { pr_err("no channel callback for relid - %u\n", relid); diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 29dd9f746dfa..233b374334ed 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev, struct i2c_client *client = to_i2c_client(dev); struct adm1021_data *data = i2c_get_clientdata(client); long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev, temp /= 1000; mutex_lock(&data->update_lock); - data->temp_max[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_max[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), - data->temp_max[index]); + reg_val); mutex_unlock(&data->update_lock); return count; @@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev, struct i2c_client *client = to_i2c_client(dev); struct adm1021_data *data = i2c_get_clientdata(client); long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev, temp /= 1000; mutex_lock(&data->update_lock); - data->temp_min[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_min[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), - data->temp_min[index]); + reg_val); mutex_unlock(&data->update_lock); return count; diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index 9ee5e066423b..39441e5d922c 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev, /* Update the value */ reg = (reg & 0x3F) | (val << 6); + /* Update the cache */ + data->fan_div[attr->index] = reg; + /* Write value */ i2c_smbus_write_byte_data(client, ADM1029_REG_FAN_DIV[attr->index], reg); diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 253ea396106d..bdceca0d7e22 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr, if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), @@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr, if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]); @@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), @@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 
127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), @@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr, if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index 4fe49d2bfe1d..09d2d78d482b 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -707,7 +707,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_MAX); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_CRIT); -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp, NULL, IDX_TEMP2_INPUT); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP2_MIN); diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index b07305622087..5790246a7e1d 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -248,11 +248,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da, int result = kstrtol(buf, 10, &val); if (result < 0) - return -EINVAL; + return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_min[nr] = val; @@ -272,11 +270,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da, int result = kstrtol(buf, 10, &val); if (result < 0) - return -EINVAL; + return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_max[nr] = val; @@ -320,7 +316,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da, int status = kstrtol(buf, 10, &new_div); if (status < 0) - return -EINVAL; + return status; if (new_div == old_div) /* No change */ return count; @@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, { struct emc2103_data *data = emc2103_update_device(dev); struct i2c_client *client = to_i2c_client(dev); - long rpm_target; + unsigned long rpm_target; - int result = kstrtol(buf, 10, &rpm_target); + int result = kstrtoul(buf, 10, &rpm_target); if (result < 0) - return -EINVAL; + return result; /* Datasheet states 16384 as maximum RPM target (table 3.2) */ - if ((rpm_target < 0) || (rpm_target > 16384)) - return -EINVAL; + rpm_target = clamp_val(rpm_target, 0, 16384); mutex_lock(&data->update_lock); @@ -440,7 +435,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da, int result = kstrtol(buf, 10, &new_value); if (result < 0) - return -EINVAL; + return result; mutex_lock(&data->update_lock); switch (new_value) { diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 3ceac3e91dde..3d2d48969fed 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -120,6 +120,7 @@ static int tiadc_channel_init(struct iio_dev *indio_dev, int channels) chan->channel = adc_dev->channel_line[i]; chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); 
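/* The scan_index assignment added below must record the probe-order slot i, not the AIN line number: tiadc_read_raw() (changed further down in this patch) resolves the hardware step as adc_dev->channel_step[chan->scan_index], and the driver fills channel_step[] in probe order. For example, with channel_line[] = {4, 1}, channel "AIN4" gets scan_index 0 and is read back through channel_step[0]. */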
chan->datasheet_name = chan_name_ain[chan->channel]; + chan->scan_index = i; chan->scan_type.sign = 'u'; chan->scan_type.realbits = 12; chan->scan_type.storagebits = 32; @@ -155,7 +156,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, if (time_after(jiffies, timeout)) return -EAGAIN; } - map_val = chan->channel + TOTAL_CHANNELS; + map_val = adc_dev->channel_step[chan->scan_index]; /* * When the sub-system is first enabled, diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 0cf5f8e06cfc..1e8e94d4db7d 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, else if (name && index >= 0) { pr_err("ERROR: could not get IIO channel %s:%s(%i)\n", np->full_name, name ? name : "", index); - return chan; + return NULL; } /* @@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, */ np = np->parent; if (np && !of_get_property(np, "io-channel-ranges", NULL)) - break; + return NULL; } + return chan; } @@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev, if (channel != NULL) return channel; } + return iio_channel_get_sys(name, channel_name); } EXPORT_SYMBOL_GPL(iio_channel_get); diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c index 8527743b5cef..391b9cea73ed 100644 --- a/drivers/irqchip/spear-shirq.c +++ b/drivers/irqchip/spear-shirq.c @@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = { }; static struct spear_shirq spear320_shirq_ras3 = { - .irq_nr = 3, + .irq_nr = 7, .irq_bit_off = 0, .invalid_irq = 1, .regs = { diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index ea49834377c8..d1de1626a9d2 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -10,6 +10,7 @@ #include <linux/device-mapper.h> #include <linux/bio.h> +#include <linux/completion.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/sched.h> @@ -34,7 +35,7 @@ struct dm_io_client { struct io { unsigned long error_bits; atomic_t count; - struct task_struct *sleeper; + struct completion *wait; struct dm_io_client *client; io_notify_fn callback; void *context; @@ -122,8 +123,8 @@ static void dec_count(struct io *io, unsigned int region, int error) invalidate_kernel_vmap_range(io->vma_invalidate_address, io->vma_invalidate_size); - if (io->sleeper) - wake_up_process(io->sleeper); + if (io->wait) + complete(io->wait); else { unsigned long r = io->error_bits; @@ -386,6 +387,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, */ volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); + DECLARE_COMPLETION_ONSTACK(wait); if (num_regions > 1 && (rw & RW_MASK) != WRITE) { WARN_ON(1); @@ -394,7 +396,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = current; + io->wait = &wait; io->client = client; io->vma_invalidate_address = dp->vma_invalidate_address; @@ -402,15 +404,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, dispatch_io(rw, num_regions, where, dp, io, 1); - while (1) { - set_current_state(TASK_UNINTERRUPTIBLE); - - if (!atomic_read(&io->count)) - break; - - io_schedule(); - } - set_current_state(TASK_RUNNING); + wait_for_completion_io(&wait); if (error_bits) *error_bits = io->error_bits; @@ -433,7 +427,7 @@ static int async_io(struct 
dm_io_client *client, unsigned int num_regions, io = mempool_alloc(client->pool, GFP_NOIO); io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = NULL; + io->wait = NULL; io->client = client; io->callback = fn; io->context = context; diff --git a/drivers/md/md.c b/drivers/md/md.c index 2d3111ba445c..e29e434a5742 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7466,6 +7466,19 @@ void md_do_sync(struct md_thread *thread) rdev->recovery_offset < j) j = rdev->recovery_offset; rcu_read_unlock(); + + /* If there is a bitmap, we need to make sure all + * writes that started before we added a spare + * complete before we start doing a recovery. + * Otherwise the write might complete and (via + * bitmap_endwrite) set a bit in the bitmap after the + * recovery has checked that bit and skipped that + * region. + */ + if (mddev->bitmap) { + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } } printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index e2ca1574d22a..8ed62292926f 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -164,6 +164,9 @@ static void mei_me_hw_reset_release(struct mei_device *dev) hcsr |= H_IG; hcsr &= ~H_RST; mei_hcsr_set(hw, hcsr); + + /* complete this write before we set host ready on another CPU */ + mmiowb(); } /** * mei_me_hw_reset - resets fw via mei csr register. @@ -214,6 +217,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) static void mei_me_host_set_ready(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); + hw->host_hw_state = mei_hcsr_read(hw); hw->host_hw_state |= H_IE | H_IG | H_RDY; mei_hcsr_set(hw, hw->host_hw_state); } @@ -506,19 +510,15 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if we need to start the dev */ if (!mei_host_is_ready(dev)) { if (mei_hw_is_ready(dev)) { + mei_me_hw_reset_release(dev); dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); dev->recvd_hw_ready = true; wake_up_interruptible(&dev->wait_hw_ready); - - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; } else { - dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); - mei_me_hw_reset_release(dev); - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; + dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n"); } + goto end; } /* check slots available for reading */ slots = mei_count_full_read_slots(dev); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 46dfb1378c17..81576c6c31e0 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -726,6 +726,7 @@ static int emac_open(struct net_device *dev) ret = emac_mdio_probe(dev); if (ret < 0) { + free_irq(dev->irq, dev); netdev_err(dev, "cannot probe MDIO bus\n"); return ret; } diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index e85d34b76039..ebcce00ce067 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -810,9 +810,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) break; case B43_PHYTYPE_G: status.band = IEEE80211_BAND_2GHZ; - /* chanid is the radio channel cookie value as used - * to tune the radio. */ - status.freq = chanid + 2400; + /* Somewhere between 478.104 and 508.1084 firmware for G-PHY + * has been modified to be compatible with N-PHY and others. 
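 * (Concretely: from firmware rev 508 on, the cookie carries the 802.11 channel number, so chanid 6 becomes 2437 MHz via ieee80211_channel_to_frequency(); older firmware reported the frequency offset in MHz directly, e.g. chanid 37 meant 2400 + 37 = 2437 MHz.)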
+ */ + if (dev->fw.rev >= 508) + status.freq = ieee80211_channel_to_frequency(chanid, status.band); + else + status.freq = chanid + 2400; break; case B43_PHYTYPE_N: case B43_PHYTYPE_LP: diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 0ac5c589ddce..13f557a44a62 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1684,8 +1684,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Detect if this device has an hardware controlled radio. */ - if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) { __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); + /* + * On this device RFKILL initialized during probe does not work. + */ + __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags); + } /* * Check if the BBP tuning should be enabled. diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index ee3fc570b11d..4f17cad0d9a9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -704,6 +704,7 @@ enum rt2x00_capability_flags { REQUIRE_SW_SEQNO, REQUIRE_HT_TX_DESC, REQUIRE_PS_AUTOWAKE, + REQUIRE_DELAYED_RFKILL, /* * Capabilities diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index e418d32882e8..cd4b1590db5c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -1128,9 +1128,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) return; /* - * Unregister extra components. + * Stop rfkill polling. */ - rt2x00rfkill_unregister(rt2x00dev); + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_unregister(rt2x00dev); /* * Allow the HW to uninitialize. @@ -1168,6 +1169,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); + /* + * Start rfkill polling. + */ + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_register(rt2x00dev); + return 0; } @@ -1377,7 +1384,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) rt2x00link_register(rt2x00dev); rt2x00leds_register(rt2x00dev); rt2x00debug_register(rt2x00dev); - rt2x00rfkill_register(rt2x00dev); + + /* + * Start rfkill polling. + */ + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_register(rt2x00dev); return 0; @@ -1393,6 +1405,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); /* + * Stop rfkill polling. + */ + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_unregister(rt2x00dev); + + /* * Disable radio. 
*/ rt2x00lib_disable_radio(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 2b724fc4e306..c03748dafd49 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -489,6 +489,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, crypto.cipher = rt2x00crypto_key_to_cipher(key); if (crypto.cipher == CIPHER_NONE) return -EOPNOTSUPP; + if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev)) + return -EOPNOTSUPP; crypto.cmd = cmd; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b5352fcf5df7..b25b53e41a7e 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3669,7 +3669,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, u16 cmd; int rc; - WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); + WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); /* ARCH specific VGA enables */ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index d0fa4b6c551f..c62b3e5d44bd 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + rmb(); } else crq = NULL; spin_unlock_irqrestore(&queue->lock, flags); @@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, { struct vio_dev *vdev = to_vio_dev(hostdata->dev); + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the VIOS to prevent it from fetching any stale data. 
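 * (Read-side counterpart: the rmb() added to crq_queue_next_crq() above orders the load of the CRQ valid bit before the loads of the rest of the entry, while the full mb() here orders the stores to the command buffer before the H_SEND_CRQ hypercall hands it to firmware.)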
+ */ + mb(); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } @@ -794,7 +804,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) evt->hostdata->dev); if (evt->cmnd_done) evt->cmnd_done(evt->cmnd); - } else if (evt->done) + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) evt->done(evt); free_event_struct(&evt->hostdata->pool, evt); spin_lock_irqsave(hostdata->host->host_lock, flags); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 21505962f539..309f2e1d254d 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -933,6 +933,15 @@ int scsi_eh_get_sense(struct list_head *work_q, SCSI_SENSE_VALID(scmd)) continue; + if (status_byte(scmd->result) != CHECK_CONDITION) + /* + * don't request sense if there's no check condition + * status because the error we're processing isn't one + * that has a sense code (and some devices get + * confused by sense requests out of the blue) + */ + continue; + SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, "%s: requesting sense\n", current->comm)); diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index d92fe4037e94..6b349e301869 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); +#else + sym_set_cam_status(cp->cmd, DID_REQUEUE); +#endif sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 95a5d73e675c..11f5326f449f 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -270,6 +270,16 @@ static void virtscsi_req_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; @@ -288,6 +298,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; +static void virtscsi_handle_event(struct work_struct *work); + static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { @@ -295,6 +307,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct scatterlist sg; unsigned long flags; + INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); @@ -412,7 +425,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_event_node *event_node = buf; - INIT_WORK(&event_node->work, virtscsi_handle_event); schedule_work(&event_node->work); } @@ -602,6 +614,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; + /* + * The spec guarantees that all requests related to the TMF have + 
* been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. + * + * In the abort case, sc->scsi_done will do nothing, because + * the block layer must have detected a timeout and as a result + * REQ_ATOM_COMPLETE has been set. + */ + virtscsi_poll_requests(vscsi); + out: mempool_free(cmd, virtscsi_cmd_pool); return ret; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index a5825ea2488e..5283359ef299 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1317,7 +1317,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf, if (cmd->data_direction != DMA_TO_DEVICE) { pr_err("Command ITT: 0x%08x received DataOUT for a" " NON-WRITE command.\n", cmd->init_task_tag); - return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); + return iscsit_dump_data_payload(conn, payload_length, 1); } se_cmd = &cmd->se_cmd; iscsit_mod_dataout_timer(cmd); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index d93bc6b1d001..5763073ebd48 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1289,6 +1289,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta login->login_failed = 1; iscsit_collect_login_stats(conn, status_class, status_detail); + memset(&login->rsp[0], 0, ISCSI_HDR_LEN); + hdr = (struct iscsi_login_rsp *)&login->rsp[0]; hdr->opcode = ISCSI_OP_LOGIN_RSP; hdr->status_class = status_class; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 2640f2c65329..8a6b0e9f526a 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -614,6 +614,7 @@ void core_dev_unexport( dev->export_count--; spin_unlock(&hba->device_lock); + lun->lun_sep = NULL; lun->lun_se_dev = NULL; } diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 1f02e8edb45c..3c972c49f2cf 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -971,6 +971,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, return NULL; } +static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz) +{ + unsigned long temp; + return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp); +} + static int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { @@ -1021,21 +1027,18 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) if (result) goto free_temp_mem; - if (tz->ops->get_crit_temp) { - unsigned long temperature; - if (!tz->ops->get_crit_temp(tz, &temperature)) { - snprintf(temp->temp_crit.name, - sizeof(temp->temp_crit.name), + if (thermal_zone_crit_temp_valid(tz)) { + snprintf(temp->temp_crit.name, + sizeof(temp->temp_crit.name), "temp%d_crit", hwmon->count); - temp->temp_crit.attr.attr.name = temp->temp_crit.name; - temp->temp_crit.attr.attr.mode = 0444; - temp->temp_crit.attr.show = temp_crit_show; - sysfs_attr_init(&temp->temp_crit.attr.attr); - result = device_create_file(hwmon->device, - &temp->temp_crit.attr); - if (result) - goto unregister_input; - } + temp->temp_crit.attr.attr.name = temp->temp_crit.name; + temp->temp_crit.attr.attr.mode = 0444; + temp->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&temp->temp_crit.attr.attr); + result = device_create_file(hwmon->device, + &temp->temp_crit.attr); + if (result) + goto unregister_input; } 
mutex_lock(&thermal_list_lock); @@ -1083,7 +1086,7 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) } device_remove_file(hwmon->device, &temp->temp_input.attr); - if (tz->ops->get_crit_temp) + if (thermal_zone_crit_temp_valid(tz)) device_remove_file(hwmon->device, &temp->temp_crit.attr); mutex_lock(&thermal_list_lock); diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index b13fc29966ac..592022c92652 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1322,6 +1322,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); unsigned long flags; + struct td_node *node, *tmpnode; if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || hwep->ep.desc == NULL || list_empty(&hwreq->queue) || @@ -1332,6 +1333,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) hw_ep_flush(hwep->ci, hwep->num, hwep->dir); + list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { + dma_pool_free(hwep->td_pool, node->ptr, node->dma); + list_del(&node->td); + kfree(node); + } + /* pop request */ list_del_init(&hwreq->queue); diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index b6e9d917221e..84219f656051 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -1389,11 +1389,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) ffs->ep0req->context = ffs; lang = ffs->stringtabs; - for (lang = ffs->stringtabs; *lang; ++lang) { - struct usb_string *str = (*lang)->strings; - int id = first_id; - for (; str->s; ++id, ++str) - str->id = id; + if (lang) { + for (; *lang; ++lang) { + struct usb_string *str = (*lang)->strings; + int id = first_id; + for (; str->s; ++id, ++str) + str->id = id; + } } ffs->gadget = cdev->gadget; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 1f582d969f97..8786b9b5a631 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -21,6 +21,7 @@ */ #include <linux/gfp.h> +#include <linux/device.h> #include <asm/unaligned.h> #include "xhci.h" @@ -1078,7 +1079,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd) * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME * is enabled, so also enable remote wake here. 
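+ * Checking device_may_wakeup() as well keeps the wakeup policy the + * user configured for the host controller (sysfs power/wakeup) + * honoured.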
*/ - if (hcd->self.root_hub->do_remote_wakeup) { + if (hcd->self.root_hub->do_remote_wakeup + && device_may_wakeup(hcd->self.controller)) { + if (t1 & PORT_CONNECT) { t2 |= PORT_WKOC_E | PORT_WKDISC_E; t2 &= ~PORT_WKCONN_E; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1cef7b36a187..bf2857b95c20 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3590,7 +3590,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, return 0; max_burst = urb->ep->ss_ep_comp.bMaxBurst; - return roundup(total_packet_count, max_burst + 1) - 1; + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; } /* diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 0411eb9ea64c..969a02993d97 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -973,7 +973,7 @@ int xhci_suspend(struct xhci_hcd *xhci) */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0; + u32 command, temp = 0, status; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1097,8 +1097,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) done: if (retval == 0) { - usb_hcd_resume_root_hub(hcd); - usb_hcd_resume_root_hub(xhci->shared_hcd); + /* Resume root hubs only when there are pending events. */ + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) { + usb_hcd_resume_root_hub(hcd); + usb_hcd_resume_root_hub(xhci->shared_hcd); + } } /* diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index fce71b605936..027a35b003d9 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -275,7 +275,6 @@ static int ux500_probe(struct platform_device *pdev) musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &pdev->dev.coherent_dma_mask; musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; - musb->dev.of_node = pdev->dev.of_node; glue->dev = &pdev->dev; glue->musb = musb; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index a26b8c77e7c5..238497e9ae7b 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index b9e663ac9a35..9e75e3eaea4f 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -723,7 +723,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, - { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, @@ -947,6 +948,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, { USB_DEVICE(BRAINBOXES_VID,
BRAINBOXES_US_842_4_PID) }, + /* Infineon Devices */ + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1577,14 +1580,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) struct usb_device *udev = serial->dev; struct usb_interface *interface = serial->interface; - struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; + struct usb_endpoint_descriptor *ep_desc; unsigned num_endpoints; - int i; + unsigned i; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); + if (!num_endpoints) + return; + /* NOTE: some customers have programmed FT232R/FT245R devices * with an endpoint size of 0 - not good. In this case, we * want to override the endpoint descriptor setting and use a diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 500474c48f4b..c4777bc6aee0 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -584,6 +584,12 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 /* + * Infineon Technologies + */ +#define INFINEON_VID 0x058b +#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ + +/* * Acton Research Corp. */ #define ACTON_VID 0x0647 /* Vendor ID */ @@ -798,7 +804,8 @@ * Submitted by Colin Leroy */ #define TESTO_VID 0x128D -#define TESTO_USB_INTERFACE_PID 0x0001 +#define TESTO_1_PID 0x0001 +#define TESTO_3_PID 0x0003 /* * Mobility Electronics products. diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 70ede84f4f6b..9da566a3f5c8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb); /* Zoom */ #define ZOOM_PRODUCT_4597 0x9607 +/* SpeedUp SU9800 usb 3g modem */ +#define SPEEDUP_PRODUCT_SU9800 0x9800 + /* Haier products */ #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 @@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb); /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +#define OLIVETTI_PRODUCT_OLICARD120 0xc001 +#define OLIVETTI_PRODUCT_OLICARD140 0xc002 #define OLIVETTI_PRODUCT_OLICARD145 0xc003 +#define OLIVETTI_PRODUCT_OLICARD155 0xc004 #define OLIVETTI_PRODUCT_OLICARD200 0xc005 +#define OLIVETTI_PRODUCT_OLICARD160 0xc00a #define OLIVETTI_PRODUCT_OLICARD500 0xc00b /* Celot products */ @@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, @@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { 
USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, @@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, - - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist - }, + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), - .driver_info = (kernel_ulong_t)&net_intf4_blacklist - }, + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 8bc5e8ccb091..75b85578d98a 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1369,9 +1369,10 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, * returns <0 on error */ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb, - struct btrfs_extent_item *ei, u32 item_size, - struct btrfs_extent_inline_ref **out_eiref, - int *out_type) + struct btrfs_key *key, + struct btrfs_extent_item *ei, u32 item_size, + struct btrfs_extent_inline_ref **out_eiref, + int *out_type) { unsigned long end; u64 flags; @@ -1381,19 +1382,26 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb, /* first call */ flags = btrfs_extent_flags(eb, ei); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { - info = (struct btrfs_tree_block_info *)(ei + 1); - *out_eiref = - (struct btrfs_extent_inline_ref *)(info + 1); + if (key->type == BTRFS_METADATA_ITEM_KEY) { + /* a skinny metadata extent */ + *out_eiref = + (struct btrfs_extent_inline_ref *)(ei + 1); + } else { + WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); + info = (struct btrfs_tree_block_info *)(ei + 1); + *out_eiref = + (struct btrfs_extent_inline_ref *)(info + 1); + } } else { *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1); } *ptr = (unsigned long)*out_eiref; - if ((void *)*ptr >= (void *)ei + item_size) + if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size) return -ENOENT; } end = (unsigned long)ei + item_size; - *out_eiref = (struct btrfs_extent_inline_ref *)*ptr; + *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr); *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref); *ptr += 
btrfs_extent_inline_ref_size(*out_type); @@ -1412,8 +1420,8 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb, * <0 on error. */ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, - struct btrfs_extent_item *ei, u32 item_size, - u64 *out_root, u8 *out_level) + struct btrfs_key *key, struct btrfs_extent_item *ei, + u32 item_size, u64 *out_root, u8 *out_level) { int ret; int type; @@ -1424,8 +1432,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, return 1; while (1) { - ret = __get_extent_inline_ref(ptr, eb, ei, item_size, - &eiref, &type); + ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size, + &eiref, &type); if (ret < 0) return ret; diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 8f2e76702932..a3bd63b06044 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -40,8 +40,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, u64 *flags); int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, - struct btrfs_extent_item *ei, u32 item_size, - u64 *out_root, u8 *out_level); + struct btrfs_key *key, struct btrfs_extent_item *ei, + u32 item_size, u64 *out_root, u8 *out_level); int iterate_extent_inodes(struct btrfs_fs_info *fs_info, u64 extent_item_objectid, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3ce443662607..9001c263bc79 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3554,6 +3554,11 @@ int close_ctree(struct btrfs_root *root) btrfs_free_block_groups(fs_info); + /* + * we must make sure there are no read requests submitted + * after we stop all workers. + */ + invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); del_fs_roots(fs_info); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6c80e0801e58..4a96d26bc6fd 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2387,7 +2387,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end) { int uptodate = (err == 0); struct extent_io_tree *tree; - int ret; + int ret = 0; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -2401,6 +2401,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end) if (!uptodate) { ClearPageUptodate(page); SetPageError(page); + ret = ret < 0 ?
ret : -EIO; + mapping_set_error(page->mapping, ret); } return 0; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index b21a3cd667d8..cd9d696c489d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -835,7 +835,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, if (!matched) { __btrfs_remove_free_space_cache(ctl); - btrfs_err(fs_info, "block group %llu has wrong amount of free space", + btrfs_warn(fs_info, "block group %llu has wrong amount of free space", block_group->key.objectid); ret = -1; } @@ -847,7 +847,7 @@ out: spin_unlock(&block_group->lock); ret = 0; - btrfs_err(fs_info, "failed to load free space cache for block group %llu", + btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now", block_group->key.objectid); } diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 64a157becbe5..8a56d4432522 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -545,8 +545,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { do { - ret = tree_backref_for_extent(&ptr, eb, ei, item_size, - &ref_root, &ref_level); + ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, + item_size, &ref_root, + &ref_level); printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev %s, " "sector %llu: metadata %s (level %d) in tree " diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index d4c34c8471a4..4bde74798d2b 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1547,6 +1547,10 @@ static int lookup_dir_item_inode(struct btrfs_root *root, goto out; } btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); + if (key.type == BTRFS_ROOT_ITEM_KEY) { + ret = -ENOENT; + goto out; + } *found_inode = key.objectid; *found_type = btrfs_dir_type(path->nodes[0], di); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index feb93fc9863d..03f47b09f37f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1403,6 +1403,22 @@ out: return ret; } +/* + * Function to update ctime/mtime for a given device path. + * Mainly used for ctime/mtime based probe like libblkid. 
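+ * libblkid validates its cache against the device node's timestamps, + * so touching them here prompts userspace to re-probe the device.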
+ */ +static void update_dev_time(char *path_name) +{ + struct file *filp; + + filp = filp_open(path_name, O_RDWR, 0); + if (IS_ERR(filp)) + return; + file_update_time(filp); + filp_close(filp, NULL); + return; +} + static int btrfs_rm_dev_item(struct btrfs_root *root, struct btrfs_device *device) { @@ -1619,11 +1635,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) struct btrfs_fs_devices *fs_devices; fs_devices = root->fs_info->fs_devices; while (fs_devices) { - if (fs_devices->seed == cur_devices) + if (fs_devices->seed == cur_devices) { + fs_devices->seed = cur_devices->seed; break; + } fs_devices = fs_devices->seed; } - fs_devices->seed = cur_devices->seed; cur_devices->seed = NULL; lock_chunks(root); __btrfs_close_devices(cur_devices); @@ -1649,10 +1666,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) ret = 0; - /* Notify udev that device has changed */ - if (bdev) + if (bdev) { + /* Notify udev that device has changed */ btrfs_kobject_uevent(bdev, KOBJ_CHANGE); + /* Update ctime/mtime for device path for libblkid */ + update_dev_time(device_path); + } + error_brelse: brelse(bh); if (bdev) @@ -1824,7 +1845,6 @@ static int btrfs_prepare_sprout(struct btrfs_root *root) fs_devices->seeding = 0; fs_devices->num_devices = 0; fs_devices->open_devices = 0; - fs_devices->total_devices = 0; fs_devices->seed = seed_devices; generate_random_uuid(fs_devices->fsid); @@ -2096,6 +2116,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) ret = btrfs_commit_transaction(trans, root); } + /* Update ctime/mtime for libblkid */ + update_dev_time(device_path); return ret; error_trans: @@ -5708,10 +5730,14 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info) struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; - mutex_lock(&fs_devices->device_list_mutex); - list_for_each_entry(device, &fs_devices->devices, dev_list) - device->dev_root = fs_info->dev_root; - mutex_unlock(&fs_devices->device_list_mutex); + while (fs_devices) { + mutex_lock(&fs_devices->device_list_mutex); + list_for_each_entry(device, &fs_devices->devices, dev_list) + device->dev_root = fs_info->dev_root; + mutex_unlock(&fs_devices->device_list_mutex); + + fs_devices = fs_devices->seed; + } } static void __btrfs_reset_dev_stats(struct btrfs_device *dev) diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 0227b45ef00a..15e9505aa35f 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -290,7 +290,8 @@ int cifsConvertToUTF16(__le16 *target, const char *source, int srclen, const struct nls_table *cp, int mapChars) { - int i, j, charlen; + int i, charlen; + int j = 0; char src_char; __le16 dst_char; wchar_t tmp; @@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, if (!mapChars) return cifs_strtoUTF16(target, source, PATH_MAX, cp); - for (i = 0, j = 0; i < srclen; j++) { + for (i = 0; i < srclen; j++) { src_char = source[i]; charlen = 1; switch (src_char) { case 0: - put_unaligned(0, &target[j]); goto ctoUTF16_out; case ':': dst_char = cpu_to_le16(UNI_COLON); @@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, } ctoUTF16_out: + put_unaligned(0, &target[j]); /* Null terminate target unicode string */ return j; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 8bf5999875ee..0ecc14028477 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -781,6 +781,13 @@ got: goto out; } + BUFFER_TRACE(group_desc_bh, "get_write_access"); + err =
ext4_journal_get_write_access(handle, group_desc_bh); + if (err) { + ext4_std_error(sb, err); + goto out; + } + /* We may have to initialize the block bitmap if it isn't already */ if (ext4_has_group_desc_csum(sb) && gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { @@ -817,13 +824,6 @@ got: } } - BUFFER_TRACE(group_desc_bh, "get_write_access"); - err = ext4_journal_get_write_access(handle, group_desc_bh); - if (err) { - ext4_std_error(sb, err); - goto out; - } - /* Update the relevant bg descriptor fields */ if (ext4_has_group_desc_csum(sb)) { int free; diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 87b30cd357e7..0ade936f2b83 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -390,7 +390,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, return 0; failed: for (; i >= 0; i--) { - if (i != indirect_blks && branch[i].bh) + /* + * We want to ext4_forget() only freshly allocated indirect + * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and + * buffer at branch[0].bh is indirect block / inode already + * existing before ext4_alloc_branch() was called. + */ + if (i > 0 && i != indirect_blks && branch[i].bh) ext4_forget(handle, 1, inode, branch[i].bh, branch[i].bh->b_blocknr); ext4_free_blocks(handle, inode, NULL, new_blocks[i], @@ -1313,16 +1319,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode, blk = *i_data; if (level > 0) { ext4_lblk_t first2; + ext4_lblk_t count2; + bh = sb_bread(inode->i_sb, le32_to_cpu(blk)); if (!bh) { EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk), "Read failure"); return -EIO; } - first2 = (first > offset) ? first - offset : 0; + if (first > offset) { + first2 = first - offset; + count2 = count; + } else { + first2 = 0; + count2 = count - (offset - first); + } ret = free_hole_blocks(handle, inode, bh, (__le32 *)bh->b_data, level - 1, - first2, count - offset, + first2, count2, inode->i_sb->s_blocksize >> 2); if (ret) { brelse(bh); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 09d4762ffe91..e38c344a521e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1501,8 +1501,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, arg = JBD2_DEFAULT_MAX_COMMIT_AGE; sbi->s_commit_interval = HZ * arg; } else if (token == Opt_max_batch_time) { - if (arg == 0) - arg = EXT4_DEF_MAX_BATCH_TIME; sbi->s_max_batch_time = arg; } else if (token == Opt_min_batch_time) { sbi->s_min_batch_time = arg; @@ -2725,10 +2723,11 @@ static void print_daily_error_info(unsigned long arg) es = sbi->s_es; if (es->s_error_count) - ext4_msg(sb, KERN_NOTICE, "error count: %u", + /* fsck newer than v1.41.13 is needed to clean this condition. 
*/ + ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", le32_to_cpu(es->s_error_count)); if (es->s_first_error_time) { - printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d", + printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_first_error_time), (int) sizeof(es->s_first_error_func), es->s_first_error_func, @@ -2742,7 +2741,7 @@ static void print_daily_error_info(unsigned long arg) printk("\n"); } if (es->s_last_error_time) { - printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d", + printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_last_error_time), (int) sizeof(es->s_last_error_func), es->s_last_error_func, diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 7272cc6977ec..ab3815c856dc 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1590,9 +1590,12 @@ int jbd2_journal_stop(handle_t *handle) * to perform a synchronous write. We do this to detect the * case where a single process is doing a stream of sync * writes. No point in waiting for joiners in that case. + * + * Setting max_batch_time to 0 disables this completely. */ pid = current->pid; - if (handle->h_sync && journal->j_last_sync_writer != pid) { + if (handle->h_sync && journal->j_last_sync_writer != pid && + journal->j_max_batch_time) { u64 commit_time, trans_time; journal->j_last_sync_writer = pid; diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index b9e784486729..08c8e023c157 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -610,15 +610,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, switch (create->cr_type) { case NF4LNK: - /* ugh! we have to null-terminate the linktext, or - * vfs_symlink() will choke. it is always safe to - * null-terminate by brute force, since at worst we - * will overwrite the first byte of the create namelen - * in the XDR buffer, which has already been extracted - * during XDR decode. 
- */ - create->cr_linkname[create->cr_linklen] = 0; - status = nfsd_symlink(rqstp, &cstate->current_fh, create->cr_name, create->cr_namelen, create->cr_linkname, create->cr_linklen, diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 9e250c229007..84d741b3dc3b 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -594,7 +594,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create READ_BUF(4); READ32(create->cr_linklen); READ_BUF(create->cr_linklen); - SAVEMEM(create->cr_linkname, create->cr_linklen); + /* + * The VFS will want a null-terminated string, and + * null-terminating in place isn't safe since this might + * end on a page boundary: + */ + create->cr_linkname = + kmalloc(create->cr_linklen + 1, GFP_KERNEL); + if (!create->cr_linkname) + return nfserr_jukebox; + memcpy(create->cr_linkname, p, create->cr_linklen); + create->cr_linkname[create->cr_linklen] = '\0'; + defer_free(argp, kfree, create->cr_linkname); break; case NF4BLK: case NF4CHR: diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 1cf86c0e8689..ccc657e5ace1 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v) static int stat_open(struct inode *inode, struct file *file) { - size_t size = 1024 + 128 * num_possible_cpus(); - char *buf; - struct seq_file *m; - int res; + size_t size = 1024 + 128 * num_online_cpus(); /* minimum size to display an interrupt count : 2 bytes */ size += 2 * nr_irqs; - - /* don't ask for more than the kmalloc() max size */ - if (size > KMALLOC_MAX_SIZE) - size = KMALLOC_MAX_SIZE; - buf = kmalloc(size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - res = single_open(file, show_stat, NULL); - if (!res) { - m = file->private_data; - m->buf = buf; - m->size = ksize(buf); - } else - kfree(buf); - return res; + return single_open_size(file, show_stat, NULL, size); } static const struct file_operations proc_stat_operations = { diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 077904c8b70d..cc79eff4a1ad 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk, * calling arch_ptrace_stop() when it would be superfluous. For example, * if the thread has not been back to user mode since the last stop, the * thread state might indicate that nothing needs to be done. + * + * This is guaranteed to be invoked once before a task stops for ptrace and + * may include arch-specific operations necessary prior to a ptrace stop. 
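+ * sparc, for example, uses this hook to flush the task's register + * windows to the stack so the tracer sees a consistent view.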
*/ #define arch_ptrace_stop_needed(code, info) (0) #endif diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index d69cf637a15a..49a4d6f59108 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) -void ring_buffer_wait(struct ring_buffer *buffer, int cpu); +int ring_buffer_wait(struct ring_buffer *buffer, int cpu); int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table); diff --git a/include/trace/syscall.h b/include/trace/syscall.h index fed853f3d7aa..9674145e2f6a 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -4,6 +4,7 @@ #include <linux/tracepoint.h> #include <linux/unistd.h> #include <linux/ftrace_event.h> +#include <linux/thread_info.h> #include <asm/ptrace.h> @@ -32,4 +33,18 @@ struct syscall_metadata { struct ftrace_event_call *exit_event; }; +#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS) +static inline void syscall_tracepoint_update(struct task_struct *p) +{ + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT); + else + clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT); +} +#else +static inline void syscall_tracepoint_update(struct task_struct *p) +{ +} +#endif + #endif /* _TRACE_SYSCALL_H */ diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 26e7a5da9e84..8708f0c68623 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1247,7 +1247,13 @@ done: int current_cpuset_is_being_rebound(void) { - return task_cs(current) == cpuset_being_rebound; + int ret; + + rcu_read_lock(); + ret = task_cs(current) == cpuset_being_rebound; + rcu_read_unlock(); + + return ret; } static int update_relax_domain_level(struct cpuset *cs, s64 val) diff --git a/kernel/fork.c b/kernel/fork.c index 2782836d8adc..8c8cb29ebc95 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1491,7 +1491,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, total_forks++; spin_unlock(&current->sighand->siglock); + syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); + proc_fork_connector(p); cgroup_post_fork(p); if (clone_flags & CLONE_THREAD) diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 1029a85b2c64..51a83343df68 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -82,6 +82,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) owner = *p; } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner); } + +/* + * Safe fastpath aware unlock: + * 1) Clear the waiters bit + * 2) Drop lock->wait_lock + * 3) Try to unlock the lock with cmpxchg + */ +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) + __releases(lock->wait_lock) +{ + struct task_struct *owner = rt_mutex_owner(lock); + + clear_rt_mutex_waiters(lock); + raw_spin_unlock(&lock->wait_lock); + /* + * If a new waiter comes in between the unlock and the cmpxchg + * we have two situations: + * + * unlock(wait_lock); + * lock(wait_lock); + * cmpxchg(p, owner, 0) == owner + * mark_rt_mutex_waiters(lock); + * acquire(lock); + * or: + * + * unlock(wait_lock); + * lock(wait_lock); + * mark_rt_mutex_waiters(lock); + * + * cmpxchg(p, owner, 0) != owner + * enqueue_waiter(); + * unlock(wait_lock); + * lock(wait_lock); + * wake waiter(); + * unlock(wait_lock); + * lock(wait_lock); + * acquire(lock); + */ + return rt_mutex_cmpxchg(lock, owner, NULL); +} + #else # define
rt_mutex_cmpxchg(l,c,n) (0) static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) @@ -89,6 +130,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) lock->owner = (struct task_struct *) ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); } + +/* + * Simple slow path only version: lock->owner is protected by lock->wait_lock. + */ +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) + __releases(lock->wait_lock) +{ + lock->owner = NULL; + raw_spin_unlock(&lock->wait_lock); + return true; +} #endif /* @@ -142,27 +194,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task) */ int max_lock_depth = 1024; +static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) +{ + return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; +} + /* * Adjust the priority chain. Also used for deadlock detection. * Decreases task's usage by one - may thus free the task. * - * @task: the task owning the mutex (owner) for which a chain walk is probably - * needed + * @task: the task owning the mutex (owner) for which a chain walk is + * probably needed * @deadlock_detect: do we have to carry out deadlock detection? - * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck - * things for a task that has just got its priority adjusted, and - * is waiting on a mutex) + * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck + * things for a task that has just got its priority adjusted, and + * is waiting on a mutex) + * @next_lock: the mutex on which the owner of @orig_lock was blocked before + * we dropped its pi_lock. Is never dereferenced, only used for + * comparison to detect lock chain changes. * @orig_waiter: rt_mutex_waiter struct for the task that has just donated - * its priority to the mutex owner (can be NULL in the case - * depicted above or if the top waiter is gone away and we are - * actually deboosting the owner) - * @top_task: the current top waiter + * its priority to the mutex owner (can be NULL in the case + * depicted above or if the top waiter is gone away and we are + * actually deboosting the owner) + * @top_task: the current top waiter * * Returns 0 or -EDEADLK. */ static int rt_mutex_adjust_prio_chain(struct task_struct *task, int deadlock_detect, struct rt_mutex *orig_lock, + struct rt_mutex *next_lock, struct rt_mutex_waiter *orig_waiter, struct task_struct *top_task) { @@ -221,6 +282,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, goto out_unlock_pi; /* + * We dropped all locks after taking a refcount on @task, so + * the task might have moved on in the lock chain or even left + * the chain completely and blocks now on an unrelated lock or + * on @orig_lock. + * + * We stored the lock on which @task was blocked in @next_lock, + * so we can detect the chain change. + */ + if (next_lock != waiter->lock) + goto out_unlock_pi; + + /* * Drop out, when the task has no waiters. Note, * top_waiter can be NULL, when we are in the deboosting * mode! @@ -306,11 +379,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, __rt_mutex_adjust_prio(task); } + /* + * Check whether the task which owns the current lock is pi + * blocked itself. If yes we store a pointer to the lock for + * the lock chain change detection above. After we dropped + * task->pi_lock next_lock cannot be dereferenced anymore. 
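+ * It is never dereferenced after this point, only compared against + * waiter->lock, so a stale pointer here is harmless.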
+ */ + next_lock = task_blocked_on_lock(task); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); top_waiter = rt_mutex_top_waiter(lock); raw_spin_unlock(&lock->wait_lock); + /* + * We reached the end of the lock chain. Stop right here. No + * point in going back just to figure that out. + */ + if (!next_lock) + goto out_put_task; + if (!detect_deadlock && waiter != top_waiter) goto out_put_task; @@ -421,8 +509,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; - unsigned long flags; + struct rt_mutex *next_lock; int chain_walk = 0, res; + unsigned long flags; /* * Early deadlock detection. We really don't want the task to @@ -455,20 +544,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, if (!owner) return 0; + raw_spin_lock_irqsave(&owner->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { - raw_spin_lock_irqsave(&owner->pi_lock, flags); plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); if (owner->pi_blocked_on) chain_walk = 1; - raw_spin_unlock_irqrestore(&owner->pi_lock, flags); - } - else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) + } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) { chain_walk = 1; + } + + /* Store the lock on which owner is blocked or NULL */ + next_lock = task_blocked_on_lock(owner); - if (!chain_walk) + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); + /* + * Even if full deadlock detection is on, if the owner is not + * blocked itself, we can avoid finding this out in the chain + * walk. + */ + if (!chain_walk || !next_lock) return 0; /* @@ -480,8 +577,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, raw_spin_unlock(&lock->wait_lock); - res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, - task); + res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, + next_lock, waiter, task); raw_spin_lock(&lock->wait_lock); @@ -491,7 +588,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, /* * Wake up the next waiter on the lock. * - * Remove the top waiter from the current tasks waiter list and wake it up. + * Remove the top waiter from the current task's pi waiter list and + * wake it up. * * Called with lock->wait_lock held. */ @@ -512,10 +610,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock) */ plist_del(&waiter->pi_list_entry, &current->pi_waiters); - rt_mutex_set_owner(lock, NULL); + /* + * As we are waking up the top waiter, and the waiter stays + * queued on the lock until it gets the lock, this lock + * obviously has waiters. Just set the bit here and this has + * the added benefit of forcing all new tasks into the + * slow path making sure no task of lower priority than + * the top waiter can steal this lock. + */ + lock->owner = (void *) RT_MUTEX_HAS_WAITERS; raw_spin_unlock_irqrestore(&current->pi_lock, flags); + /* + * It's safe to dereference waiter as it cannot go away as + * long as we hold lock->wait_lock. The waiter task needs to + * acquire it in order to dequeue the waiter.
+ */ wake_up_process(waiter->task); } @@ -530,8 +641,8 @@ static void remove_waiter(struct rt_mutex *lock, { int first = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); + struct rt_mutex *next_lock = NULL; unsigned long flags; - int chain_walk = 0; raw_spin_lock_irqsave(&current->pi_lock, flags); plist_del(&waiter->list_entry, &lock->wait_list); @@ -555,15 +666,15 @@ static void remove_waiter(struct rt_mutex *lock, } __rt_mutex_adjust_prio(owner); - if (owner->pi_blocked_on) - chain_walk = 1; + /* Store the lock on which owner is blocked or NULL */ + next_lock = task_blocked_on_lock(owner); raw_spin_unlock_irqrestore(&owner->pi_lock, flags); } WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); - if (!chain_walk) + if (!next_lock) return; /* gets dropped in rt_mutex_adjust_prio_chain()! */ @@ -571,7 +682,7 @@ static void remove_waiter(struct rt_mutex *lock, raw_spin_unlock(&lock->wait_lock); - rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); + rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current); raw_spin_lock(&lock->wait_lock); } @@ -584,6 +695,7 @@ static void remove_waiter(struct rt_mutex *lock, void rt_mutex_adjust_pi(struct task_struct *task) { struct rt_mutex_waiter *waiter; + struct rt_mutex *next_lock; unsigned long flags; raw_spin_lock_irqsave(&task->pi_lock, flags); @@ -593,12 +705,13 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } - + next_lock = waiter->lock; raw_spin_unlock_irqrestore(&task->pi_lock, flags); /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); - rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); + + rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task); } /** @@ -766,12 +879,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock) rt_mutex_deadlock_account_unlock(current); - if (!rt_mutex_has_waiters(lock)) { - lock->owner = NULL; - raw_spin_unlock(&lock->wait_lock); - return; + /* + * We must be careful here if the fast path is enabled. If we + * have no waiters queued we cannot set owner to NULL here + * because of: + * + * foo->lock->owner = NULL; + * rtmutex_lock(foo->lock); <- fast path + * free = atomic_dec_and_test(foo->refcnt); + * rtmutex_unlock(foo->lock); <- fast path + * if (free) + * kfree(foo); + * raw_spin_unlock(foo->lock->wait_lock); + * + * So for the fastpath enabled kernel: + * + * Nothing can set the waiters bit as long as we hold + * lock->wait_lock. So we do the following sequence: + * + * owner = rt_mutex_owner(lock); + * clear_rt_mutex_waiters(lock); + * raw_spin_unlock(&lock->wait_lock); + * if (cmpxchg(&lock->owner, owner, 0) == owner) + * return; + * goto retry; + * + * The fastpath disabled variant is simple as all access to + * lock->owner is serialized by lock->wait_lock: + * + * lock->owner = NULL; + * raw_spin_unlock(&lock->wait_lock); + */ + while (!rt_mutex_has_waiters(lock)) { + /* Drops lock->wait_lock ! */ + if (unlock_rt_mutex_safe(lock) == true) + return; + /* Relock the rtmutex and try again */ + raw_spin_lock(&lock->wait_lock); } + /* + * The wakeup next waiter path does not suffer from the above + * race. See the comments there.
+ */ wakeup_next_waiter(lock); raw_spin_unlock(&lock->wait_lock); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8515e5f93fc4..134216d752df 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -138,7 +138,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; static int minolduid; -static int min_percpu_pagelist_fract = 8; static int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; @@ -1293,7 +1292,7 @@ static struct ctl_table vm_table[] = { .maxlen = sizeof(percpu_pagelist_fraction), .mode = 0644, .proc_handler = percpu_pagelist_fraction_sysctl_handler, - .extra1 = &min_percpu_pagelist_fract, + .extra1 = &zero, }, #ifdef CONFIG_MMU { diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 0e337eedb909..15c4ae203885 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -543,7 +543,7 @@ static void rb_wake_up_waiters(struct irq_work *work) * as data is added to any of the @buffer's cpu buffers. Otherwise * it will wait for data to be added to a specific cpu buffer. */ -void ring_buffer_wait(struct ring_buffer *buffer, int cpu) +int ring_buffer_wait(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; DEFINE_WAIT(wait); @@ -557,6 +557,8 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) if (cpu == RING_BUFFER_ALL_CPUS) work = &buffer->irq_work; else { + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return -ENODEV; cpu_buffer = buffer->buffers[cpu]; work = &cpu_buffer->irq_work; } @@ -591,6 +593,7 @@ void ring_buffer_wait(struct ring_buffer *buffer, int cpu) schedule(); finish_wait(&work->waiters, &wait); + return 0; } /** diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f788ecfee618..c8b8717afc23 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1044,13 +1044,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) } #endif /* CONFIG_TRACER_MAX_TRACE */ -static void default_wait_pipe(struct trace_iterator *iter) +static int default_wait_pipe(struct trace_iterator *iter) { /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) - return; + return 0; - ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); + return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); } #ifdef CONFIG_FTRACE_STARTUP_TEST @@ -1323,7 +1323,6 @@ void tracing_start(void) arch_spin_unlock(&ftrace_max_lock); - ftrace_start(); out: raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); } @@ -1370,7 +1369,6 @@ void tracing_stop(void) struct ring_buffer *buffer; unsigned long flags; - ftrace_stop(); raw_spin_lock_irqsave(&global_trace.start_lock, flags); if (global_trace.stop_count++) goto out; @@ -1417,12 +1415,12 @@ static void tracing_stop_tr(struct trace_array *tr) void trace_stop_cmdline_recording(void); -static void trace_save_cmdline(struct task_struct *tsk) +static int trace_save_cmdline(struct task_struct *tsk) { unsigned pid, idx; if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) - return; + return 0; /* * It's not the end of the world if we don't get @@ -1431,7 +1429,7 @@ static void trace_save_cmdline(struct task_struct *tsk) * so if we miss here, then better luck next time. 
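+ * Returning 0 keeps trace_cmdline_save set, so we will be asked to + * save the cmdline again on the next trace event.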
*/ if (!arch_spin_trylock(&trace_cmdline_lock)) - return; + return 0; idx = map_pid_to_cmdline[tsk->pid]; if (idx == NO_CMDLINE_MAP) { @@ -1456,6 +1454,8 @@ static void trace_save_cmdline(struct task_struct *tsk) memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); arch_spin_unlock(&trace_cmdline_lock); + + return 1; } void trace_find_cmdline(int pid, char comm[]) @@ -1497,9 +1497,8 @@ void tracing_record_cmdline(struct task_struct *tsk) if (!__this_cpu_read(trace_cmdline_save)) return; - __this_cpu_write(trace_cmdline_save, false); - - trace_save_cmdline(tsk); + if (trace_save_cmdline(tsk)) + __this_cpu_write(trace_cmdline_save, false); } void @@ -4063,17 +4062,19 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) * * Anyway, this is really very primitive wakeup. */ -void poll_wait_pipe(struct trace_iterator *iter) +int poll_wait_pipe(struct trace_iterator *iter) { set_current_state(TASK_INTERRUPTIBLE); /* sleep for 100 msecs, and try again. */ schedule_timeout(HZ / 10); + return 0; } /* Must be called with trace_types_lock mutex held. */ static int tracing_wait_pipe(struct file *filp) { struct trace_iterator *iter = filp->private_data; + int ret; while (trace_empty(iter)) { @@ -4083,10 +4084,13 @@ static int tracing_wait_pipe(struct file *filp) mutex_unlock(&iter->mutex); - iter->trace->wait_pipe(iter); + ret = iter->trace->wait_pipe(iter); mutex_lock(&iter->mutex); + if (ret) + return ret; + if (signal_pending(current)) return -EINTR; @@ -5020,8 +5024,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, goto out_unlock; } mutex_unlock(&trace_types_lock); - iter->trace->wait_pipe(iter); + ret = iter->trace->wait_pipe(iter); mutex_lock(&trace_types_lock); + if (ret) { + size = ret; + goto out_unlock; + } if (signal_pending(current)) { size = -EINTR; goto out_unlock; @@ -5233,8 +5241,10 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, goto out; } mutex_unlock(&trace_types_lock); - iter->trace->wait_pipe(iter); + ret = iter->trace->wait_pipe(iter); mutex_lock(&trace_types_lock); + if (ret) + goto out; if (signal_pending(current)) { ret = -EINTR; goto out; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index afaae41b0a02..61fbe61ed3a0 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -333,7 +333,7 @@ struct tracer { void (*stop)(struct trace_array *tr); void (*open)(struct trace_iterator *iter); void (*pipe_open)(struct trace_iterator *iter); - void (*wait_pipe)(struct trace_iterator *iter); + int (*wait_pipe)(struct trace_iterator *iter); void (*close)(struct trace_iterator *iter); void (*pipe_close)(struct trace_iterator *iter); ssize_t (*read)(struct trace_iterator *iter, @@ -548,7 +548,7 @@ void trace_init_global_iter(struct trace_iterator *iter); void tracing_iter_reset(struct trace_iterator *iter, int cpu); -void poll_wait_pipe(struct trace_iterator *iter); +int poll_wait_pipe(struct trace_iterator *iter); void tracing_sched_switch_trace(struct trace_array *tr, struct task_struct *prev, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 23071c9acdc1..7ae565366ba9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3391,6 +3391,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) } } + dev_set_uevent_suppress(&wq_dev->dev, false); kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); return 0; } @@ -4996,7 +4997,7 @@ static void __init wq_numa_init(void) BUG_ON(!tbl); for_each_node(node) - BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, + BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, 
node_online(node) ? node : NUMA_NO_NODE)); for_each_possible_cpu(cpu) { diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 411be80ddb46..b6ce2cf35d5d 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize) len = *ip++; for (; len == 255; length += 255) len = *ip++; + if (unlikely(length > (size_t)(length + len))) + goto _output_error; length += len; } @@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize) if (length == ML_MASK) { for (; *ip == 255; length += 255) ip++; + if (unlikely(length > (size_t)(length + *ip))) + goto _output_error; length += *ip++; } @@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize) /* write overflow error detected */ _output_error: - return (int) (-(((char *)ip) - source)); + return -1; } static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, @@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, int s = 255; while ((ip < iend) && (s == 255)) { s = *ip++; + if (unlikely(length > (size_t)(length + s))) + goto _output_error; length += s; } } @@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, if (length == ML_MASK) { while (ip < iend) { int s = *ip++; + if (unlikely(length > (size_t)(length + s))) + goto _output_error; length += s; if (s == 255) continue; @@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, /* write overflow error detected */ _output_error: - return (int) (-(((char *) ip) - source)); + return -1; } int lz4_decompress(const char *src, size_t *src_len, char *dest, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f8e292add228..7c574fb35df3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2321,6 +2321,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, update_mmu_cache(vma, address, ptep); } +static int is_hugetlb_entry_migration(pte_t pte) +{ + swp_entry_t swp; + + if (huge_pte_none(pte) || pte_present(pte)) + return 0; + swp = pte_to_swp_entry(pte); + if (non_swap_entry(swp) && is_migration_entry(swp)) + return 1; + else + return 0; +} + +static int is_hugetlb_entry_hwpoisoned(pte_t pte) +{ + swp_entry_t swp; + + if (huge_pte_none(pte) || pte_present(pte)) + return 0; + swp = pte_to_swp_entry(pte); + if (non_swap_entry(swp) && is_hwpoison_entry(swp)) + return 1; + else + return 0; +} int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) @@ -2348,7 +2373,24 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, spin_lock(&dst->page_table_lock); spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); - if (!huge_pte_none(huge_ptep_get(src_pte))) { + entry = huge_ptep_get(src_pte); + if (huge_pte_none(entry)) { /* skip none entry */ + ; + } else if (unlikely(is_hugetlb_entry_migration(entry) || + is_hugetlb_entry_hwpoisoned(entry))) { + swp_entry_t swp_entry = pte_to_swp_entry(entry); + + if (is_write_migration_entry(swp_entry) && cow) { + /* + * COW mappings require pages in both + * parent and child to be set to read. 
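+ * Making the migration entry read-only here means the pte installed + * when migration completes is read-only as well, so a later write + * faults and performs the COW copy.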
+ */ + make_migration_entry_read(&swp_entry); + entry = swp_entry_to_pte(swp_entry); + set_huge_pte_at(src, addr, src_pte, entry); + } + set_huge_pte_at(dst, addr, dst_pte, entry); + } else { if (cow) huge_ptep_set_wrprotect(src, addr, src_pte); entry = huge_ptep_get(src_pte); @@ -2366,32 +2408,6 @@ nomem: return -ENOMEM; } -static int is_hugetlb_entry_migration(pte_t pte) -{ - swp_entry_t swp; - - if (huge_pte_none(pte) || pte_present(pte)) - return 0; - swp = pte_to_swp_entry(pte); - if (non_swap_entry(swp) && is_migration_entry(swp)) - return 1; - else - return 0; -} - -static int is_hugetlb_entry_hwpoisoned(pte_t pte) -{ - swp_entry_t swp; - - if (huge_pte_none(pte) || pte_present(pte)) - return 0; - swp = pte_to_swp_entry(pte); - if (non_swap_entry(swp) && is_hwpoison_entry(swp)) - return 1; - else - return 0; -} - void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bfb32380190b..41fa5ec8a99e 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -608,19 +608,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma, * If pagelist != NULL then isolate pages from the LRU and * put them on the pagelist. */ -static struct vm_area_struct * +static int check_range(struct mm_struct *mm, unsigned long start, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { - int err; - struct vm_area_struct *first, *vma, *prev; - + int err = 0; + struct vm_area_struct *vma, *prev; - first = find_vma(mm, start); - if (!first) - return ERR_PTR(-EFAULT); + vma = find_vma(mm, start); + if (!vma) + return -EFAULT; prev = NULL; - for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { + for (; vma && vma->vm_start < end; vma = vma->vm_next) { unsigned long endvma = vma->vm_end; if (endvma > end) @@ -630,9 +629,9 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, if (!(flags & MPOL_MF_DISCONTIG_OK)) { if (!vma->vm_next && vma->vm_end < end) - return ERR_PTR(-EFAULT); + return -EFAULT; if (prev && prev->vm_end < vma->vm_start) - return ERR_PTR(-EFAULT); + return -EFAULT; } if (is_vm_hugetlb_page(vma)) @@ -649,15 +648,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, err = check_pgd_range(vma, start, endvma, nodes, flags, private); - if (err) { - first = ERR_PTR(err); + if (err) break; - } } next: prev = vma; } - return first; + return err; } /* @@ -1138,16 +1135,17 @@ out: /* * Allocate a new page for page migration based on vma policy. - * Start assuming that page is mapped by vma pointed to by @private. + * Start by assuming the page is mapped by the same vma as contains @start. * Search forward from there, if not. N.B., this assumes that the * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. 
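+ * Walking the vma list forward from @start therefore finds each + * page's vma without restarting the search from the beginning.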
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-	struct vm_area_struct *vma = (struct vm_area_struct *)private;
+	struct vm_area_struct *vma;
 	unsigned long uninitialized_var(address);
 
+	vma = find_vma(current->mm, start);
 	while (vma) {
 		address = page_address_in_vma(page, vma);
 		if (address != -EFAULT)
@@ -1173,7 +1171,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
 	return NULL;
 }
@@ -1183,7 +1181,6 @@ static long do_mbind(unsigned long start, unsigned long len,
 		     unsigned short mode, unsigned short mode_flags,
 		     nodemask_t *nmask, unsigned long flags)
 {
-	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
@@ -1249,11 +1246,9 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (err)
 		goto mpol_out;
 
-	vma = check_range(mm, start, end, nmask,
+	err = check_range(mm, start, end, nmask,
 			  flags | MPOL_MF_INVERT, &pagelist);
-
-	err = PTR_ERR(vma);	/* maybe ... */
-	if (!IS_ERR(vma))
+	if (!err)
 		err = mbind_range(mm, start, end, new);
 
 	if (!err) {
@@ -1261,9 +1256,8 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 		if (!list_empty(&pagelist)) {
 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-			nr_failed = migrate_pages(&pagelist, new_vma_page,
-					(unsigned long)vma,
-					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+			nr_failed = migrate_pages(&pagelist, new_page,
+					start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}
@@ -2092,7 +2086,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 	} else
 		*new = *old;
 
-	rcu_read_lock();
 	if (current_cpuset_is_being_rebound()) {
 		nodemask_t mems = cpuset_mems_allowed(current);
 		if (new->flags & MPOL_F_REBINDING)
@@ -2100,7 +2093,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
 		else
 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
 	}
-	rcu_read_unlock();
 	atomic_set(&new->refcnt, 1);
 	return new;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4b545f5595e4..1b5e9fb49ed4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -68,6 +68,7 @@
 
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
+#define MIN_PERCPU_PAGELIST_FRACTION	(8)
 
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
@@ -779,9 +780,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
 		set_page_count(p, 0);
 	} while (++p, --i);
 
-	set_page_refcounted(page);
 	set_pageblock_migratetype(page, MIGRATE_CMA);
-	__free_pages(page, pageblock_order);
+
+	if (pageblock_order >= MAX_ORDER) {
+		i = pageblock_nr_pages;
+		p = page;
+		do {
+			set_page_refcounted(p);
+			__free_pages(p, MAX_ORDER - 1);
+			p += MAX_ORDER_NR_PAGES;
+		} while (i -= MAX_ORDER_NR_PAGES);
+	} else {
+		set_page_refcounted(page);
+		__free_pages(page, pageblock_order);
+	}
+
 	adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
@@ -3988,7 +4001,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __meminit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
 	int batch;
@@ -4104,8 +4117,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
 	pageset_update(&p->pcp, high,
		       batch);
 }
 
-static void __meminit pageset_set_high_and_batch(struct zone *zone,
-		struct per_cpu_pageset *pcp)
+static void pageset_set_high_and_batch(struct zone *zone,
+		struct per_cpu_pageset *pcp)
 {
 	if (percpu_pagelist_fraction)
 		pageset_set_high(pcp,
@@ -5689,23 +5702,38 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	struct zone *zone;
-	unsigned int cpu;
+	int old_percpu_pagelist_fraction;
 	int ret;
 
+	mutex_lock(&pcp_batch_high_lock);
+	old_percpu_pagelist_fraction = percpu_pagelist_fraction;
+
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (!write || (ret < 0))
-		return ret;
+	if (!write || ret < 0)
+		goto out;
+
+	/* Sanity checking to avoid pcp imbalance */
+	if (percpu_pagelist_fraction &&
+	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
+		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* No change? */
+	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
+		goto out;
 
-	mutex_lock(&pcp_batch_high_lock);
 	for_each_populated_zone(zone) {
-		unsigned long  high;
-		high = zone->managed_pages / percpu_pagelist_fraction;
+		unsigned int cpu;
+
 		for_each_possible_cpu(cpu)
-			pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
-					 high);
+			pageset_set_high_and_batch(zone,
+					per_cpu_ptr(zone->pageset, cpu));
 	}
+out:
 	mutex_unlock(&pcp_batch_high_lock);
-	return 0;
+	return ret;
 }
 
 int hashdist = HASHDIST_DEFAULT;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b4241b7d7f07..370c16d21fb2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1468,19 +1468,18 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		 * If dirty pages are scanned that are not queued for IO, it
 		 * implies that flushers are not keeping up. In this case, flag
 		 * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
-		 * pages from reclaim context. It will forcibly stall in the
-		 * next check.
+		 * pages from reclaim context.
 		 */
 		if (nr_unqueued_dirty == nr_taken)
 			zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
 
 		/*
-		 * In addition, if kswapd scans pages marked marked for
-		 * immediate reclaim and under writeback (nr_immediate), it
-		 * implies that pages are cycling through the LRU faster than
+		 * If kswapd scans pages marked for immediate
+		 * reclaim and under writeback (nr_immediate), it implies
+		 * that pages are cycling through the LRU faster than
 		 * they are written so also forcibly stall.
 		 */
-		if (nr_unqueued_dirty == nr_taken || nr_immediate)
+		if (nr_immediate)
 			congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 4c51c055d00f..8e7290aea8f8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -659,7 +659,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 		/* If we're already encrypted set the REAUTH_PEND flag,
 		 * otherwise set the ENCRYPT_PEND.
		 */
-		if (conn->key_type != 0xff)
+		if (conn->link_mode & HCI_LM_ENCRYPT)
 			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
 		else
 			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b767ed9969c4..f4537e26caad 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -47,6 +47,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
+	hci_dev_lock(hdev);
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	hci_dev_unlock(hdev);
+
 	hci_conn_check_pending(hdev);
 }
 
@@ -3159,8 +3163,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 
 		/* If we're not the initiators request authorization to
 		 * proceed from user space (mgmt_user_confirm with
-		 * confirm_hint set to 1). */
-		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+		 * confirm_hint set to 1). The exception is if neither
+		 * side had MITM in which case we do auto-accept.
+		 */
+		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+		    (loc_mitm || rem_mitm)) {
 			BT_DBG("Confirming auto-accept as acceptor");
 			confirm_hint = 1;
 			goto confirm;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 07c9aea21244..a3a81d96314b 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -631,11 +631,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 
 		/*change security for LE channels */
 		if (chan->scid == L2CAP_CID_ATT) {
-			if (!conn->hcon->out) {
-				err = -EINVAL;
-				break;
-			}
-
 			if (smp_conn_security(conn->hcon, sec.level))
 				break;
 			sk->sk_state = BT_CONFIG;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index fedc5399d465..211fffb5dca8 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2319,8 +2319,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
 	}
 
 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-		/* Continue with pairing via SMP */
+		/* Continue with pairing via SMP. The hdev lock must be
+		 * released as SMP may try to reacquire it for crypto
+		 * purposes.
+		 */
+		hci_dev_unlock(hdev);
 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+		hci_dev_lock(hdev);
 
 		if (!err)
 			err = cmd_complete(sk, hdev->id, mgmt_op,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 2d45643c964e..97a4b5b9d506 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1055,6 +1055,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 	sdata->u.ibss.privacy = params->privacy;
 	sdata->u.ibss.control_port = params->control_port;
 	sdata->u.ibss.basic_rates = params->basic_rates;
+	sdata->u.ibss.last_scan_completed = jiffies;
 
 	memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
 	       sizeof(params->mcast_rate));
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c8148e487386..c9de95e8faa6 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3781,6 +3781,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
 	cancel_delayed_work_sync(&ipvs->defense_work);
 	cancel_work_sync(&ipvs->defense_work.work);
 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
+	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 }
 
 #else
@@ -3841,7 +3842,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
 	 */
 	rcu_barrier();
 	ip_vs_trash_cleanup(net);
-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
 	ip_vs_control_net_cleanup_sysctl(net);
 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
 	remove_proc_entry("ip_vs_stats", net->proc_net);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 038eee5c8f85..2bb801e3ee8c 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -487,6 +487,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
 	return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+	struct nf_conn_nat *nat = nfct_nat(ct);
+
+	if (nf_nat_proto_remove(ct, data))
+		return 1;
+
+	if (!nat || !nat->ct)
+		return 0;
+
+	/* This netns is being destroyed, and conntrack has nat null binding.
+	 * Remove it from bysource hash, as the table will be freed soon.
+	 *
+	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
+	 * will delete entry from already-freed table.
+	 */
+	if (!del_timer(&ct->timeout))
+		return 1;
+
+	spin_lock_bh(&nf_nat_lock);
+	hlist_del_rcu(&nat->bysource);
+	ct->status &= ~IPS_NAT_DONE_MASK;
+	nat->ct = NULL;
+	spin_unlock_bh(&nf_nat_lock);
+
+	add_timer(&ct->timeout);
+
+	/* don't delete conntrack. Although that would make things a lot
+	 * simpler, we'd end up flushing all conntracks on nat rmmod.
+	 */
+	return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
 	struct nf_nat_proto_clean clean = {
@@ -749,7 +782,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
 	struct nf_nat_proto_clean clean = {};
 
-	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+	nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
 	synchronize_rcu();
 	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 9d1421e63ff8..49b582a225b0 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
 
 static int MIPS_is_fake_mcount(Elf_Rel const *rp)
 {
-	static Elf_Addr old_r_offset;
+	static Elf_Addr old_r_offset = ~(Elf_Addr)0;
 	Elf_Addr current_r_offset = _w(rp->r_offset);
 	int is_fake;
 
-	is_fake = old_r_offset &&
+	is_fake = (old_r_offset != ~(Elf_Addr)0) &&
 		(current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
 	old_r_offset = current_r_offset;
 
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 64952e2d3ed1..fda227e3bbac 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 
 static int snd_usb_audio_free(struct snd_usb_audio *chip)
 {
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &chip->ep_list)
+		snd_usb_endpoint_free(p);
+
 	mutex_destroy(&chip->mutex);
 	kfree(chip);
 	return 0;
@@ -583,7 +588,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
 				     struct snd_usb_audio *chip)
 {
 	struct snd_card *card;
-	struct list_head *p, *n;
+	struct list_head *p;
 
 	if (chip == (void *)-1L)
 		return;
@@ -596,14 +601,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
 	mutex_lock(&register_mutex);
 	chip->num_interfaces--;
 	if (chip->num_interfaces <= 0) {
+		struct snd_usb_endpoint *ep;
+
 		snd_card_disconnect(card);
 		/* release the pcm resources */
 		list_for_each(p, &chip->pcm_list) {
 			snd_usb_stream_disconnect(p);
 		}
 		/* release the endpoint resources */
-		list_for_each_safe(p, n, &chip->ep_list) {
-			snd_usb_endpoint_free(p);
+		list_for_each_entry(ep, &chip->ep_list, list) {
+			snd_usb_endpoint_release(ep);
 		}
 		/* release the midi resources */
 		list_for_each(p, &chip->midi_list) {
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 308c02b2a597..e3e8560c2e46 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -957,19 +957,30 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
 }
 
 /**
+ * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
+ *
+ * @ep: the endpoint to release
+ *
+ * This function does not care for the endpoint's use count but will tear
+ * down all the streaming URBs immediately.
+ */
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
+{
+	release_urbs(ep, 1);
+}
+
+/**
  * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
  *
  * @ep: the list header of the endpoint to free
 *
- * This function does not care for the endpoint's use count but will tear
- * down all the streaming URBs immediately and free all resources.
+ * This frees all resources of the given ep.
  */
 void snd_usb_endpoint_free(struct list_head *head)
 {
 	struct snd_usb_endpoint *ep;
 
 	ep = list_entry(head, struct snd_usb_endpoint, list);
-	release_urbs(ep, 1);
 	kfree(ep);
 }
 
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 2287adf5ca59..fe65a38ba387 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -21,6 +21,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
 
 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index fe1e66b6ef40..a87e99f37c52 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -116,8 +116,8 @@ static const struct {
 	.header = {
 		.magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
 		.length = cpu_to_le32(sizeof descriptors),
-		.fs_count = 3,
-		.hs_count = 3,
+		.fs_count = cpu_to_le32(3),
+		.hs_count = cpu_to_le32(3),
 	},
 	.fs_descs = {
 		.intf = {
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
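
A note for reviewers on the lz4_decompress.c hunks above: they all apply
the same guard while accumulating a run length from the compressed byte
stream. Because the sum is computed in an unsigned type, a sum that has
wrapped around is strictly smaller than the value it started from, so the
comparison "length > length + s" detects the overflow before the corrupted
length can drive a copy. A minimal standalone sketch of that pattern
follows; the helper name is hypothetical and it is not part of the patch:

	#include <stddef.h>

	/*
	 * Accumulate one more length byte 's' into '*length'.
	 * Returns 0 on success, -1 if the addition would wrap around.
	 */
	static int add_len_byte(size_t *length, unsigned char s)
	{
		/* An unsigned sum that wrapped is smaller than either addend. */
		if (*length > (size_t)(*length + s))
			return -1;
		*length += s;
		return 0;
	}

Rejecting the input at this point (rather than clamping) mirrors the
patch's goto _output_error, which now reports a plain -1 instead of a
pointer-derived offset.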