diff --git a/Makefile b/Makefile index 5526edb8f0e8..02a30af3aad0 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 18 -SUBLEVEL = 63 +SUBLEVEL = 64 EXTRAVERSION = NAME = Diseased Newt diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h index 6b340d0f1521..902e6ab00a06 100644 --- a/arch/alpha/include/asm/word-at-a-time.h +++ b/arch/alpha/include/asm/word-at-a-time.h @@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits) #endif } +#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index b8fffc1a2ac2..8e76154b35b7 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -50,4 +50,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index ae630a928acd..6e1cea55d0ec 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -582,6 +582,8 @@ vmmc_aux-supply = <&vsim>; bus-width = <8>; non-removable; + no-sdio; + no-sd; }; &mmc3 { diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index bfe2a2f5a644..22b73112b75f 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level) #define ftrace_return_address(n) return_address(n) +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME + +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + if (!strcmp(sym, "sys_mmap2")) + sym = "sys_mmap_pgoff"; + else if (!strcmp(sym, "sys_statfs64_wrapper")) + sym = "sys_statfs64"; + else if (!strcmp(sym, "sys_fstatfs64_wrapper")) + sym = "sys_fstatfs64"; + else if (!strcmp(sym, "sys_arm_fadvise64_64")) + sym = "sys_fadvise64_64"; + + /* Ignore case since sym may start with "SyS" instead of "sys" */ + return !strcasecmp(sym, name); +} + #endif /* ifndef __ASSEMBLY__ */ #endif /* _ASM_ARM_FTRACE */ diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 2a71b1cb9848..7aad3b5dd852 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -21,4 +21,5 @@ generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 46ed6bb9c679..deb02520a4f2 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -47,4 +47,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index e77e0c1dbe75..af4e2f9824b9 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -59,4 +59,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 2ca489eaadd3..c258bc5b1fc9 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -18,4 +18,5 @@ generic-y += scatterlist.h generic-y += sections.h generic-y += trace_clock.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff 
--git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 3caf05cabfc5..735bb20a0cff 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += scatterlist.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 5f234a5a2320..abc5d995bea9 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -59,4 +59,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 747320be9d0e..1dfb4ae9edc6 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += preempt.h generic-y += scatterlist.h generic-y += trace_clock.h generic-y += vtime.h +generic-y += word-at-a-time.h diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index 3796801d6e0c..de2c9633380f 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += preempt.h generic-y += scatterlist.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index 7b8111c8f937..c3f75d9da968 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -55,4 +55,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 448143b8cabd..43f340c5c83a 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -11,3 +11,4 @@ generic-y += preempt.h generic-y += scatterlist.h generic-y += syscalls.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 72e1cf1cab00..4c8b7e206ef4 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -18,4 +18,5 @@ generic-y += serial.h generic-y += trace_clock.h generic-y += ucontext.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 54a062cb9f2c..1b217e60f358 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += preempt.h generic-y += scatterlist.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 31e8f59aff38..f1b68061bec1 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += rwsem.h generic-y += scatterlist.h generic-y += trace_clock.h generic-y += vtime.h +generic-y += word-at-a-time.h diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index a69d091d31ff..5f1048eaa5b6 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -44,22 +44,8 @@ extern void __init dump_numa_cpu_topology(void); extern int sysfs_add_device_to_node(struct device *dev, int nid); extern void sysfs_remove_device_from_node(struct device *dev, int nid); -static inline int early_cpu_to_node(int cpu) -{ - int nid; - - nid = 
numa_cpu_lookup_table[cpu]; - - /* - * Fall back to node 0 if nid is unset (it should be, except bugs). - * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)). - */ - return (nid < 0) ? 0 : nid; -} #else -static inline int early_cpu_to_node(int cpu) { return 0; } - static inline void dump_numa_cpu_topology(void) {} static inline int sysfs_add_device_to_node(struct device *dev, int nid) diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 7043539e0248..4afe66aa1400 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +static inline unsigned long zero_bytemask(unsigned long mask) +{ + return ~1ul << __fls(mask); +} + #else #ifdef CONFIG_64BIT diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 85d6a23a8539..3dac0e66f915 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -754,7 +754,7 @@ void ppc64_boot_msg(unsigned int src, const char *msg) static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) { - return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align, + return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, __pa(MAX_DMA_ADDRESS)); } @@ -765,7 +765,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size) static int pcpu_cpu_distance(unsigned int from, unsigned int to) { - if (early_cpu_to_node(from) == early_cpu_to_node(to)) + if (cpu_to_node(from) == cpu_to_node(to)) return LOCAL_DISTANCE; else return REMOTE_DISTANCE; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a3021e6ee14d..060880ac7826 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1951,6 +1951,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) int r; int srcu_idx; unsigned long ebb_regs[3] = {}; /* shut up GCC */ + unsigned long user_tar = 0; + unsigned long proc_fscr = 0; + unsigned int user_vrsave; if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -1971,10 +1974,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) run->fail_entry.hardware_entry_failure_reason = 0; return -EINVAL; } + /* Enable TM so we can read the TM SPRs */ + mtmsr(mfmsr() | MSR_TM); current->thread.tm_tfhar = mfspr(SPRN_TFHAR); current->thread.tm_tfiar = mfspr(SPRN_TFIAR); current->thread.tm_texasr = mfspr(SPRN_TEXASR); - current->thread.regs->msr &= ~MSR_TM; } #endif @@ -2001,12 +2005,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) flush_altivec_to_thread(current); flush_vsx_to_thread(current); - /* Save userspace EBB register values */ + /* Save userspace EBB and other register values */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) { ebb_regs[0] = mfspr(SPRN_EBBHR); ebb_regs[1] = mfspr(SPRN_EBBRR); ebb_regs[2] = mfspr(SPRN_BESCR); + user_tar = mfspr(SPRN_TAR); + proc_fscr = mfspr(SPRN_FSCR); } + user_vrsave = mfspr(SPRN_VRSAVE); vcpu->arch.wqp = &vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; @@ -2027,12 +2034,28 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) } } while (is_kvmppc_resume_guest(r)); - /* Restore userspace EBB register values */ + /* Restore userspace EBB and other register values */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) { mtspr(SPRN_EBBHR, ebb_regs[0]); 
mtspr(SPRN_EBBRR, ebb_regs[1]); mtspr(SPRN_BESCR, ebb_regs[2]); + mtspr(SPRN_TAR, user_tar); + mtspr(SPRN_FSCR, proc_fscr); } + mtspr(SPRN_VRSAVE, user_vrsave); + + /* + * Since we don't do lazy TM reload, we need to reload + * the TM registers here. + */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && + (current->thread.regs->msr & MSR_TM)) { + mtspr(SPRN_TFHAR, current->thread.tm_tfhar); + mtspr(SPRN_TFIAR, current->thread.tm_tfiar); + mtspr(SPRN_TEXASR, current->thread.tm_texasr); + } +#endif out: vcpu->arch.state = KVMPPC_VCPU_NOTREADY; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 791ad037749c..c9aae3988d64 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -36,6 +36,12 @@ #define NAPPING_CEDE 1 #define NAPPING_NOVCPU 2 +/* Stack frame offsets for kvmppc_hv_entry */ +#define SFS 112 +#define STACK_SLOT_CIABR (SFS-16) +#define STACK_SLOT_DAWR (SFS-24) +#define STACK_SLOT_DAWRX (SFS-32) + /* * Call kvmppc_hv_entry in real mode. * Must be called with interrupts hard-disabled. @@ -360,7 +366,7 @@ kvmppc_hv_entry: */ mflr r0 std r0, PPC_LR_STKOFF(r1) - stdu r1, -112(r1) + stdu r1, -SFS(r1) /* Save R1 in the PACA */ std r1, HSTATE_HOST_R1(r13) @@ -618,6 +624,16 @@ BEGIN_FTR_SECTION mtspr SPRN_SPURR,r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) + /* Save host values of some registers */ +BEGIN_FTR_SECTION + mfspr r5, SPRN_CIABR + mfspr r6, SPRN_DAWR + mfspr r7, SPRN_DAWRX + std r5, STACK_SLOT_CIABR(r1) + std r6, STACK_SLOT_DAWR(r1) + std r7, STACK_SLOT_DAWRX(r1) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ @@ -1180,8 +1196,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) */ li r0, 0 mtspr SPRN_IAMR, r0 - mtspr SPRN_CIABR, r0 - mtspr SPRN_DAWRX, r0 + mtspr SPRN_PSPB, r0 mtspr SPRN_TCSCR, r0 mtspr SPRN_WORT, r0 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ @@ -1198,6 +1213,7 @@ BEGIN_FTR_SECTION std r6,VCPU_UAMOR(r9) li r6,0 mtspr SPRN_AMR,r6 + mtspr SPRN_UAMOR, r6 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) /* Switch DSCR back to host value */ @@ -1356,6 +1372,17 @@ hdec_soon: /* r12 = trap, r13 = paca */ BEGIN_FTR_SECTION b 32f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) + + /* Restore host values of some registers */ +BEGIN_FTR_SECTION + ld r5, STACK_SLOT_CIABR(r1) + ld r6, STACK_SLOT_DAWR(r1) + ld r7, STACK_SLOT_DAWRX(r1) + mtspr SPRN_CIABR, r5 + mtspr SPRN_DAWR, r6 + mtspr SPRN_DAWRX, r7 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + /* * POWER7 guest -> host partition switch code. 
* We don't have to lock against tlbies but we do @@ -1582,8 +1609,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) - ld r0, 112+PPC_LR_STKOFF(r1) - addi r1, r1, 112 + ld r0, SFS+PPC_LR_STKOFF(r1) + addi r1, r1, SFS mtlr r0 blr diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 0f319521e002..14392b4e4693 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -112,7 +112,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np) of_detach_node(np); of_node_put(parent); - of_node_put(np); /* Must decrement the refcount */ return 0; } diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 773f86676588..989e15dbfe50 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += scatterlist.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 46461c19f284..85d80eb7b5da 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -14,3 +14,4 @@ generic-y += sections.h generic-y += trace_clock.h generic-y += xor.h generic-y += serial.h +generic-y += word-at-a-time.h diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 7cca41842a9e..c6b5f3a8337c 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -27,6 +27,8 @@ config TILE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select HAVE_DEBUG_STACKOVERFLOW select ARCH_WANT_FRAME_POINTERS + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER # FIXME: investigate whether we need/want these options. # select HAVE_IOREMAP_PROT diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index 320ff5e6e61e..7f7acddd3237 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/string.h> #include <gxio/iorpc_globals.h> #include <gxio/iorpc_mpipe.h> @@ -29,32 +30,6 @@ /* HACK: Avoid pointless "shadow" warnings. */ #define link link_shadow -/** - * strscpy - Copy a C-string into a sized buffer, but only if it fits - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @size: size of destination buffer - * - * Use this routine to avoid copying too-long strings. - * The routine returns the total number of bytes copied - * (including the trailing NUL) or zero if the buffer wasn't - * big enough. To ensure that programmers pay attention - * to the return code, the destination has a single NUL - * written at the front (if size is non-zero) when the - * buffer is not big enough. 
- */ -static size_t strscpy(char *dest, const char *src, size_t size) -{ - size_t len = strnlen(src, size) + 1; - if (len > size) { - if (size) - dest[0] = '\0'; - return 0; - } - memcpy(dest, src, len); - return len; -} - int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) { char file[32]; @@ -537,7 +512,7 @@ int gxio_mpipe_link_instance(const char *link_name) if (!context) return GXIO_ERR_NO_DEVICE; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; return gxio_mpipe_info_instance_aux(context, name); @@ -555,7 +530,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); if (rv >= 0) { - if (strscpy(link_name, name.name, sizeof(name.name)) == 0) + if (strscpy(link_name, name.name, sizeof(name.name)) < 0) return GXIO_ERR_INVAL_MEMORY_SIZE; memcpy(link_mac, mac.mac, sizeof(mac.mac)); } @@ -572,7 +547,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link, _gxio_mpipe_link_name_t name; int rv; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; rv = gxio_mpipe_link_open_aux(context, name, flags); diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index e6462b8a6284..f59b1cf545e0 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -41,4 +41,5 @@ generic-y += termbits.h generic-y += termios.h generic-y += trace_clock.h generic-y += types.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index b6cde3209b96..b65f20404df8 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -64,6 +64,13 @@ static inline int is_arch_mappable_range(unsigned long addr, #define is_arch_mappable_range(addr, size) 0 #endif +/* + * Note that using this definition ignores is_arch_mappable_range(), + * so on tilepro code that uses user_addr_max() is constrained not + * to reference the tilepro user-interrupt region. + */ +#define user_addr_max() (current_thread_info()->addr_limit.seg) + /* * Test whether a block of memory is a valid user space address. * Returns 0 if the range is valid, nonzero otherwise. @@ -462,62 +469,9 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n) #endif -/** - * strlen_user: - Get the size of a string in user space. - * @str: The string to measure. - * - * Context: User context only. This function may sleep. - * - * Get the size of a NUL-terminated string in user space. - * - * Returns the size of the string INCLUDING the terminating NUL. - * On exception, returns 0. - * - * If there is a limit on the length of a valid string, you may wish to - * consider using strnlen_user() instead. - */ -extern long strnlen_user_asm(const char __user *str, long n); -static inline long __must_check strnlen_user(const char __user *str, long n) -{ - might_fault(); - return strnlen_user_asm(str, n); -} -#define strlen_user(str) strnlen_user(str, LONG_MAX) - -/** - * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. - * @dst: Destination address, in kernel space. This buffer must be at - * least @count bytes long. - * @src: Source address, in user space. - * @count: Maximum number of bytes to copy, including the trailing NUL. 
- * - * Copies a NUL-terminated string from userspace to kernel space. - * Caller must check the specified block with access_ok() before calling - * this function. - * - * On success, returns the length of the string (not including the trailing - * NUL). - * - * If access to userspace fails, returns -EFAULT (some data may have been - * copied). - * - * If @count is smaller than the length of the string, copies @count bytes - * and returns @count. - */ -extern long strncpy_from_user_asm(char *dst, const char __user *src, long); -static inline long __must_check __strncpy_from_user( - char *dst, const char __user *src, long count) -{ - might_fault(); - return strncpy_from_user_asm(dst, src, count); -} -static inline long __must_check strncpy_from_user( - char *dst, const char __user *src, long count) -{ - if (access_ok(VERIFY_READ, src, 1)) - return __strncpy_from_user(dst, src, count); - return -EFAULT; -} +extern long strnlen_user(const char __user *str, long n); +extern long strlen_user(const char __user *str); +extern long strncpy_from_user(char *dst, const char __user *src, long); /** * clear_user: - Zero a block of memory in user space. diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..b66a693c2c34 --- /dev/null +++ b/arch/tile/include/asm/word-at-a-time.h @@ -0,0 +1,42 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include <asm/byteorder.h> + +struct word_at_a_time { /* unused */ }; +#define WORD_AT_A_TIME_CONSTANTS {} + +/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */ +static inline unsigned long has_zero(unsigned long val, unsigned long *data, + const struct word_at_a_time *c) +{ +#ifdef __tilegx__ + unsigned long mask = __insn_v1cmpeqi(val, 0); +#else /* tilepro */ + unsigned long mask = __insn_seqib(val, 0); +#endif + *data = mask; + return mask; +} + +/* These operations are both nops. */ +#define prep_zero_mask(val, data, c) (data) +#define create_zero_mask(data) (data) + +/* And this operation just depends on endianness. */ +static inline long find_zero(unsigned long mask) +{ +#ifdef __BIG_ENDIAN + return __builtin_clzl(mask) >> 3; +#else + return __builtin_ctzl(mask) >> 3; +#endif +} + +#ifdef __BIG_ENDIAN +#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask))) +#else +#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1) +#endif + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index 82733c87d67e..16326f288177 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c @@ -18,8 +18,6 @@ /* arch/tile/lib/usercopy.S */ #include <linux/uaccess.h> -EXPORT_SYMBOL(strnlen_user_asm); -EXPORT_SYMBOL(strncpy_from_user_asm); EXPORT_SYMBOL(clear_user_asm); EXPORT_SYMBOL(flush_user_asm); EXPORT_SYMBOL(finv_user_asm); diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S index 1bc162224638..db93ad5fae25 100644 --- a/arch/tile/lib/usercopy_32.S +++ b/arch/tile/lib/usercopy_32.S @@ -19,52 +19,6 @@ /* Access user memory, but use MMU to avoid propagating kernel exceptions. */ -/* - * strnlen_user_asm takes the pointer in r0, and the length bound in r1. - * It returns the length, including the terminating NUL, or zero on exception. - * If length is greater than the bound, returns one plus the bound. 
- */ -STD_ENTRY(strnlen_user_asm) - { bz r1, 2f; addi r3, r0, -1 } /* bias down to include NUL */ -1: { lb_u r4, r0; addi r1, r1, -1 } - bz r4, 2f - { bnzt r1, 1b; addi r0, r0, 1 } -2: { sub r0, r0, r3; jrp lr } - STD_ENDPROC(strnlen_user_asm) - .pushsection .fixup,"ax" -strnlen_user_fault: - { move r0, zero; jrp lr } - ENDPROC(strnlen_user_fault) - .section __ex_table,"a" - .align 4 - .word 1b, strnlen_user_fault - .popsection - -/* - * strncpy_from_user_asm takes the kernel target pointer in r0, - * the userspace source pointer in r1, and the length bound (including - * the trailing NUL) in r2. On success, it returns the string length - * (not including the trailing NUL), or -EFAULT on failure. - */ -STD_ENTRY(strncpy_from_user_asm) - { bz r2, 2f; move r3, r0 } -1: { lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 } - { sb r0, r4; addi r0, r0, 1 } - bz r4, 2f - bnzt r2, 1b - { sub r0, r0, r3; jrp lr } -2: addi r0, r0, -1 /* don't count the trailing NUL */ - { sub r0, r0, r3; jrp lr } - STD_ENDPROC(strncpy_from_user_asm) - .pushsection .fixup,"ax" -strncpy_from_user_fault: - { movei r0, -EFAULT; jrp lr } - ENDPROC(strncpy_from_user_fault) - .section __ex_table,"a" - .align 4 - .word 1b, strncpy_from_user_fault - .popsection - /* * clear_user_asm takes the user target address in r0 and the * number of bytes to zero in r1. diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S index b3b31a3306f8..9322dc551e91 100644 --- a/arch/tile/lib/usercopy_64.S +++ b/arch/tile/lib/usercopy_64.S @@ -19,52 +19,6 @@ /* Access user memory, but use MMU to avoid propagating kernel exceptions. */ -/* - * strnlen_user_asm takes the pointer in r0, and the length bound in r1. - * It returns the length, including the terminating NUL, or zero on exception. - * If length is greater than the bound, returns one plus the bound. - */ -STD_ENTRY(strnlen_user_asm) - { beqz r1, 2f; addi r3, r0, -1 } /* bias down to include NUL */ -1: { ld1u r4, r0; addi r1, r1, -1 } - beqz r4, 2f - { bnezt r1, 1b; addi r0, r0, 1 } -2: { sub r0, r0, r3; jrp lr } - STD_ENDPROC(strnlen_user_asm) - .pushsection .fixup,"ax" -strnlen_user_fault: - { move r0, zero; jrp lr } - ENDPROC(strnlen_user_fault) - .section __ex_table,"a" - .align 8 - .quad 1b, strnlen_user_fault - .popsection - -/* - * strncpy_from_user_asm takes the kernel target pointer in r0, - * the userspace source pointer in r1, and the length bound (including - * the trailing NUL) in r2. On success, it returns the string length - * (not including the trailing NUL), or -EFAULT on failure. - */ -STD_ENTRY(strncpy_from_user_asm) - { beqz r2, 2f; move r3, r0 } -1: { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 } - { st1 r0, r4; addi r0, r0, 1 } - beqz r4, 2f - bnezt r2, 1b - { sub r0, r0, r3; jrp lr } -2: addi r0, r0, -1 /* don't count the trailing NUL */ - { sub r0, r0, r3; jrp lr } - STD_ENDPROC(strncpy_from_user_asm) - .pushsection .fixup,"ax" -strncpy_from_user_fault: - { movei r0, -EFAULT; jrp lr } - ENDPROC(strncpy_from_user_fault) - .section __ex_table,"a" - .align 8 - .quad 1b, strncpy_from_user_fault - .popsection - /* * clear_user_asm takes the user target address in r0 and the * number of bytes to zero in r1. 
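The tile hunks above drop the hand-rolled strnlen_user_asm/strncpy_from_user_asm in favour of the generic GENERIC_STRNCPY_FROM_USER and GENERIC_STRNLEN_USER implementations, which build on the per-arch <asm/word-at-a-time.h> primitives (has_zero(), find_zero(), zero_bytemask()) that this patch also wires up for the remaining architectures. For orientation, here is a minimal little-endian userspace sketch of that idiom; it is illustrative only, and the constants and helper names (ONES, HIGHS, has_zero_byte, first_zero_byte) are local to the sketch, not kernel symbols:

/* Find the NUL terminator one word at a time (little-endian, illustrative). */
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101UL
#define HIGHS 0x8080808080808080UL

/* Non-zero iff v contains a zero byte; the lowest set 0x80 bit marks the first one. */
static unsigned long has_zero_byte(unsigned long v)
{
	return (v - ONES) & ~v & HIGHS;
}

/* Byte index of the first zero byte, given the mask from has_zero_byte(). */
static unsigned int first_zero_byte(unsigned long mask)
{
	return __builtin_ctzl(mask) >> 3;
}

int main(void)
{
	char buf[sizeof(unsigned long)] = "abc";	/* remaining bytes are NUL */
	unsigned long word, mask;

	memcpy(&word, buf, sizeof(word));		/* one word-sized load */
	mask = has_zero_byte(word);
	if (mask)
		printf("terminator at byte %u\n", first_zero_byte(mask));
	return 0;
}

zero_bytemask(), which the patch adds for alpha and powerpc and includes in the new tile header, turns that same mask into a byte mask covering the valid bytes of the word below the terminator; the dentry-cache name hashing uses it to zero out the garbage past the end of the last word of a path component.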
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 244b12c8cb39..dde4d43f4a76 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -27,4 +27,5 @@ generic-y += sections.h generic-y += switch_to.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 5a2bb53faa42..4058e6c31636 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -63,4 +63,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c index 493f3fd9f139..847f56c734b4 100644 --- a/arch/x86/boot/string.c +++ b/arch/x86/boot/string.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include "ctype.h" +#include "string.h" int memcmp(const void *s1, const void *s2, size_t len) { diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h index 725e820602b1..113588ddb43f 100644 --- a/arch/x86/boot/string.h +++ b/arch/x86/boot/string.h @@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len); #define memset(d,c,l) __builtin_memset(d,c,l) #define memcmp __builtin_memcmp +extern int strcmp(const char *str1, const char *str2); +extern int strncmp(const char *cs, const char *ct, size_t count); +extern size_t strlen(const char *s); +extern char *strstr(const char *s1, const char *s2); +extern size_t strnlen(const char *s, size_t maxlen); +extern unsigned int atou(const char *s); +extern unsigned long long simple_strtoull(const char *cp, char **endp, + unsigned int base); + #endif /* BOOT_STRING_H */ diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index be121fb8d88d..82e588f72992 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -573,6 +573,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) const char *name = th_names[bank]; int err = 0; + if (!dev) + return -ENODEV; + if (is_shared_bank(bank)) { nb = node_to_amd_nb(amd_get_nb_id(cpu)); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index cb1a9b268e18..dc3d52a245cf 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token) if (hlist_unhashed(&n.link)) break; + rcu_irq_exit(); + if (!n.halted) { local_irq_enable(); schedule(); @@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token) /* * We cannot reschedule. So halt. 
*/ - rcu_irq_exit(); native_safe_halt(); local_irq_disable(); - rcu_irq_enter(); } + + rcu_irq_enter(); } if (!n.halted) finish_wait(&n.wq, &wait); diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 105d38922c44..fb5285c8f8dd 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -30,4 +30,5 @@ generic-y += statfs.h generic-y += termios.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index f96be389cf98..8efa864c02e5 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2798,10 +2798,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) { if (!sata_pmp_attached(ap)) { - if (likely(devno < ata_link_max_devices(&ap->link))) + if (likely(devno >= 0 && + devno < ata_link_max_devices(&ap->link))) return &ap->link.device[devno]; } else { - if (likely(devno < ap->nr_pmp_links)) + if (likely(devno >= 0 && + devno < ap->nr_pmp_links)) return &ap->pmp_link[devno].device[0]; } diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 37b8be7cba95..f335fcee09af 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -1156,10 +1156,11 @@ static int wdog_reboot_handler(struct notifier_block *this, ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { - /* Set a long timer to let the reboot happens, but - reboot if it hangs, but only if the watchdog + /* Set a long timer to let the reboot happen or + reset if it hangs, but only if the watchdog timer was already running. */ - timeout = 120; + if (timeout < 120) + timeout = 120; pretimeout = 0; ipmi_watchdog_state = WDOG_TIMEOUT_RESET; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 01730a27ae07..7206f417779b 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -57,6 +57,8 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, struct tpm_chip *chip = dev_get_drvdata(dev); + memset(&tpm_cmd, 0, sizeof(tpm_cmd)); + tpm_cmd.header.in = tpm_readpubek_header; err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, "attempting to read the PUBEK"); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 6afa29167fee..05d974ec35cb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -199,7 +199,14 @@ void adreno_flush(struct msm_gpu *gpu) void adreno_idle(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - uint32_t wptr = get_wptr(gpu->rb); + uint32_t wptr; + + /* + * Mask wptr value that we calculate to fit in the HW range. 
This is + * to account for the possibility that the last command fit exactly into + * the ringbuffer and rb->next hasn't wrapped to zero yet + */ + wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1); /* wait for CP to drain ringbuffer: */ if (spin_until(adreno_gpu->memptrs->rptr == wptr)) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 4ff8c334e7c8..4a45ae01cc3e 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, pagefault_disable(); } - if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { + if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) || + !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) { DRM_ERROR("invalid flags: %x\n", submit_bo.flags); ret = -EINVAL; goto out_unlock; diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 8171537dd7d1..40a5c0fe4396 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) struct msm_ringbuffer *ring; int ret; - size = ALIGN(size, 4); /* size should be dword aligned */ + if (WARN_ON(!is_power_of_2(size))) + return ERR_PTR(-EINVAL); ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 8ad66bbd4f28..a70f782ec661 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -285,7 +285,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - return capable(CAP_SYS_ADMIN) ? : -EINVAL; + return -EINVAL; } static int vmw_cmd_ok(struct vmw_private *dev_priv, diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 9b856e1890d1..e4c43a17b333 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; + bname[sizeof(bname)-1] = 0; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 94affa5e6f28..6d1173dbccba 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c @@ -2644,10 +2644,9 @@ isdn_net_newslave(char *parm) char newname[10]; if (p) { - /* Slave-Name MUST not be empty */ - if (!strlen(p + 1)) + /* Slave-Name MUST not be empty or overflow 'newname' */ + if (strscpy(newname, p + 1, sizeof(newname)) <= 0) return NULL; - strcpy(newname, p + 1); *p = 0; /* Master must already exist */ if (!(n = isdn_net_findif(parm))) diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 9c1e8adaf4fc..bf3fbd00a091 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s id); return NULL; } else { - rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); + rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC); if (!rs) return NULL; rs->state = CCPResetIdle; diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index afcb430508ec..e464ff084e82 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -99,11 +99,14 @@ static void tx_tick(struct mbox_chan *chan, int 
r) /* Submit next message */ msg_submit(chan); + if (!mssg) + return; + /* Notify the client */ - if (mssg && chan->cl->tx_done) + if (chan->cl->tx_done) chan->cl->tx_done(chan->cl, mssg, r); - if (chan->cl->tx_block) + if (r != -ETIME && chan->cl->tx_block) complete(&chan->tx_complete); } @@ -255,7 +258,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg) if (chan->txdone_method == TXDONE_BY_POLL) poll_txdone((unsigned long)chan->mbox); - if (chan->cl->tx_block && chan->active_req) { + if (chan->cl->tx_block) { unsigned long wait; int ret; @@ -266,8 +269,8 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg) ret = wait_for_completion_timeout(&chan->tx_complete, wait); if (ret == 0) { - t = -EIO; - tx_tick(chan, -EIO); + t = -ETIME; + tx_tick(chan, t); } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 604c5dc95ffc..094f36064ff0 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5224,6 +5224,8 @@ static void raid5_do_work(struct work_struct *work) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); + + async_tx_issue_pending_all(); blk_finish_plug(&plug); pr_debug("--- raid5worker inactive\n"); diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c index 8001cde1db1e..503135a4f47a 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c @@ -211,7 +211,7 @@ static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl) } if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS) - ret = s5c73m3_af_run(state, ~af_lock); + ret = s5c73m3_af_run(state, !af_lock); return ret; } diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index de55f47a77db..e30efa9fe56b 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -1706,27 +1706,9 @@ static long vpfe_param_handler(struct file *file, void *priv, switch (cmd) { case VPFE_CMD_S_CCDC_RAW_PARAMS: + ret = -EINVAL; v4l2_warn(&vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); - if (ccdc_dev->hw_ops.set_params) { - ret = ccdc_dev->hw_ops.set_params(param); - if (ret) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Error setting parameters in CCDC\n"); - goto unlock_out; - } - ret = vpfe_get_ccdc_image_format(vpfe_dev, - &vpfe_dev->fmt); - if (ret < 0) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Invalid image format at CCDC\n"); - goto unlock_out; - } - } else { - ret = -EINVAL; - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); - } + "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); break; default: ret = -ENOTTY; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 63c297c56ad4..a79e73781c0b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8705,11 +8705,14 @@ static void tg3_free_consistent(struct tg3 *tp) tg3_mem_rx_release(tp); tg3_mem_tx_release(tp); + /* Protect tg3_get_stats64() from reading freed tp->hw_stats. 
*/ + tg3_full_lock(tp, 0); if (tp->hw_stats) { dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } + tg3_full_unlock(tp); } /* diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 97c9b1db1d27..27f22cd3b071 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -117,8 +117,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, if (!buf) return -ENOMEM; + if (offset_in_page(buf)) { + dma_free_coherent(dev, PAGE_SIZE << order, + buf, sg_dma_address(mem)); + return -ENOMEM; + } + sg_set_buf(mem, buf, PAGE_SIZE << order); - BUG_ON(mem->offset); sg_dma_len(mem) = PAGE_SIZE << order; return 0; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 54390b3e0344..e95cb1faf43e 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -326,6 +326,7 @@ enum cfg_version { static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c44bae495804..24e5d2135663 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -781,6 +781,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, + .hw_crc = 1, .tsu = 1, .select_mii = 1, .shift_rd0 = 1, @@ -1832,6 +1833,9 @@ static int sh_eth_get_settings(struct net_device *ndev, unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); ret = phy_ethtool_gset(mdp->phydev, ecmd); spin_unlock_irqrestore(&mdp->lock, flags); @@ -1846,6 +1850,9 @@ static int sh_eth_set_settings(struct net_device *ndev, unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); /* disable tx and rx */ @@ -1880,6 +1887,9 @@ static int sh_eth_nway_reset(struct net_device *ndev) unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); ret = phy_start_aneg(mdp->phydev); spin_unlock_irqrestore(&mdp->lock, flags); @@ -2189,6 +2199,7 @@ static int sh_eth_close(struct net_device *ndev) if (mdp->phydev) { phy_stop(mdp->phydev); phy_disconnect(mdp->phydev); + mdp->phydev = NULL; } free_irq(ndev->irq, ndev); diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 16f8ffb50e04..aa688dc4d351 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val) static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) { struct usb_device *dev = mcs->usbdev; - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, - MCS_RD_RTYPE, 0, reg, val, 2, - msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + void *dmabuf; + int ret; + + dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, + MCS_RD_RTYPE, 0, reg, dmabuf, 2, + 
msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + + memcpy(val, dmabuf, sizeof(__u16)); + kfree(dmabuf); return ret; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7ae062743bad..caf4d3469d4a 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -509,6 +509,9 @@ void phy_stop_machine(struct phy_device *phydev) if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); + + /* Now we can run the state machine synchronously */ + phy_state_machine(&phydev->state_queue.work); } /** diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 70a0d88de654..a1c0d6986148 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1267,6 +1267,8 @@ static int phy_remove(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); + cancel_delayed_work_sync(&phydev->state_queue); + mutex_lock(&phydev->lock); phydev->state = PHY_DOWN; mutex_unlock(&phydev->lock); @@ -1341,7 +1343,7 @@ static struct phy_driver genphy_driver[] = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, .name = "Generic PHY", - .soft_reset = genphy_soft_reset, + .soft_reset = genphy_no_soft_reset, .config_init = genphy_config_init, .features = PHY_GBIT_FEATURES | SUPPORTED_MII | SUPPORTED_AUI | SUPPORTED_FIBRE | diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index dcb6d33141e0..40f3fbf2ca01 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1009,6 +1009,7 @@ static int kaweth_probe( struct net_device *netdev; const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int result = 0; + int rv = -EIO; dev_dbg(dev, "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", @@ -1029,6 +1030,7 @@ static int kaweth_probe( kaweth = netdev_priv(netdev); kaweth->dev = udev; kaweth->net = netdev; + kaweth->intf = intf; spin_lock_init(&kaweth->device_lock); init_waitqueue_head(&kaweth->term_wait); @@ -1048,6 +1050,10 @@ static int kaweth_probe( /* Download the firmware */ dev_info(dev, "Downloading firmware...\n"); kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); + if (!kaweth->firmware_buf) { + rv = -ENOMEM; + goto err_free_netdev; + } if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code.bin", 100, @@ -1139,8 +1145,6 @@ err_fw: dev_dbg(dev, "Initializing net device.\n"); - kaweth->intf = intf; - kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->tx_urb) goto err_free_netdev; @@ -1210,7 +1214,7 @@ err_only_tx: err_free_netdev: free_netdev(netdev); - return -EIO; + return rv; } /**************************************************************** diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 6500caf8d609..c5676841fd12 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -240,18 +240,19 @@ static void wil_fw_error_worker(struct work_struct *work) wil->last_fw_recovery = jiffies; + wil_info(wil, "fw error recovery requested (try %d)...\n", + wil->recovery_count); + if (!no_fw_recovery) + wil->recovery_state = fw_recovery_running; + if (wil_wait_for_recovery(wil) != 0) + return; + mutex_lock(&wil->mutex); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_MONITOR: - wil_info(wil, "fw error recovery requested (try %d)...\n", - wil->recovery_count); - if (!no_fw_recovery) - wil->recovery_state = fw_recovery_running; - if (0 != wil_wait_for_recovery(wil)) - break; - + /* silent 
recovery, upper layers will see disconnect */ __wil_down(wil); __wil_up(wil); break; diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 5f1fda44882b..bf515614f131 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -195,6 +195,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ unsigned long remaining_credit; struct timer_list credit_timeout; u64 credit_window_start; + bool rate_limited; /* Statistics */ struct xenvif_stats stats; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index d752d1c5c8bd..d9f786cf7d5b 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -99,7 +99,11 @@ int xenvif_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete(napi); - xenvif_napi_schedule_or_enable_events(queue); + /* If the queue is rate-limited, it shall be + * rescheduled in the timer callback. + */ + if (likely(!queue->rate_limited)) + xenvif_napi_schedule_or_enable_events(queue); } return work_done; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 63f94134dfce..7a85ff54a39f 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -819,6 +819,7 @@ static void tx_add_credit(struct xenvif_queue *queue) max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ queue->remaining_credit = min(max_credit, max_burst); + queue->rate_limited = false; } static void tx_credit_callback(unsigned long data) @@ -1336,8 +1337,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) msecs_to_jiffies(queue->credit_usec / 1000); /* Timer could already be pending in rare cases. */ - if (timer_pending(&queue->credit_timeout)) + if (timer_pending(&queue->credit_timeout)) { + queue->rate_limited = true; return true; + } /* Passed the point where we can replenish credit? */ if (time_after_eq64(now, next_credit)) { @@ -1354,6 +1357,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) mod_timer(&queue->credit_timeout, next_credit); queue->credit_window_start = next_credit; + queue->rate_limited = true; return true; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 1560115079c7..52d409408aa3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -4378,14 +4378,13 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, static int _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) { - int r, i; + int r, i, index; unsigned long flags; u32 reply_address; u16 smid; struct _tr_list *delayed_tr, *delayed_tr_next; struct adapter_reply_queue *reply_q; - long reply_post_free; - u32 reply_post_free_sz, index = 0; + Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__)); @@ -4456,27 +4455,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) _base_assign_reply_queues(ioc); /* initialize Reply Post Free Queue */ - reply_post_free_sz = ioc->reply_post_queue_depth * - sizeof(Mpi2DefaultReplyDescriptor_t); - reply_post_free = (long)ioc->reply_post[index].reply_post_free; + index = 0; + reply_post_free_contig = ioc->reply_post[0].reply_post_free; list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + /* + * If RDPQ is enabled, switch to the next allocation. + * Otherwise advance within the contiguous region. 
+ */ + if (ioc->rdpq_array_enable) { + reply_q->reply_post_free = + ioc->reply_post[index++].reply_post_free; + } else { + reply_q->reply_post_free = reply_post_free_contig; + reply_post_free_contig += ioc->reply_post_queue_depth; + } + reply_q->reply_post_host_index = 0; - reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) - reply_post_free; for (i = 0; i < ioc->reply_post_queue_depth; i++) reply_q->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX); if (!_base_is_controller_msix_enabled(ioc)) goto skip_init_reply_post_free_queue; - /* - * If RDPQ is enabled, switch to the next allocation. - * Otherwise advance within the contiguous region. - */ - if (ioc->rdpq_array_enable) - reply_post_free = (long) - ioc->reply_post[++index].reply_post_free; - else - reply_post_free += reply_post_free_sz; } skip_init_reply_post_free_queue: diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 82b92c414a9c..c1b2e86839ae 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -329,12 +329,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; ssize_t rval = 0; + mutex_lock(&ha->optrom_mutex); + if (ha->optrom_state != QLA_SREADING) - return 0; + goto out; - mutex_lock(&ha->optrom_mutex); rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, ha->optrom_region_size); + +out: mutex_unlock(&ha->optrom_mutex); return rval; @@ -349,14 +352,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; - if (ha->optrom_state != QLA_SWRITING) + mutex_lock(&ha->optrom_mutex); + + if (ha->optrom_state != QLA_SWRITING) { + mutex_unlock(&ha->optrom_mutex); return -EINVAL; - if (off > ha->optrom_region_size) + } + if (off > ha->optrom_region_size) { + mutex_unlock(&ha->optrom_mutex); return -ERANGE; + } if (off + count > ha->optrom_region_size) count = ha->optrom_region_size - off; - mutex_lock(&ha->optrom_mutex); memcpy(&ha->optrom_buffer[off], buf, count); mutex_unlock(&ha->optrom_mutex); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 1a0f266c4268..0f08d11d86d2 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -118,7 +118,10 @@ static const struct file_operations dw_spi_regs_ops = { static int dw_spi_debugfs_init(struct dw_spi *dws) { - dws->debugfs = debugfs_create_dir("dw_spi", NULL); + char name[128]; + + snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev)); + dws->debugfs = debugfs_create_dir(name, NULL); if (!dws->debugfs) return -ENOMEM; diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 87142ebc5517..f3f5d2b3faa8 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2590,9 +2590,6 @@ static int __init comedi_init(void) comedi_class->dev_groups = comedi_dev_groups; - /* XXX requires /proc interface */ - comedi_proc_init(); - /* create devices files for legacy/manual use */ for (i = 0; i < comedi_num_legacy_minors; i++) { struct comedi_device *dev; @@ -2610,6 +2607,9 @@ static int __init comedi_init(void) mutex_unlock(&dev->mutex); } + /* XXX requires /proc interface */ + comedi_proc_init(); + return 0; } module_init(comedi_init); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 450fbad835e4..ce844e980990 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3931,6 +3931,8 @@ int 
iscsi_target_tx_thread(void *arg) { int ret = 0; struct iscsi_conn *conn = arg; + bool conn_freed = false; + /* * Allow ourselves to be interrupted by SIGINT so that a * connection recovery / failure event can be triggered externally. @@ -3956,12 +3958,14 @@ get_immediate: goto transport_err; ret = iscsit_handle_response_queue(conn); - if (ret == 1) + if (ret == 1) { goto get_immediate; - else if (ret == -ECONNRESET) + } else if (ret == -ECONNRESET) { + conn_freed = true; goto out; - else if (ret < 0) + } else if (ret < 0) { goto transport_err; + } } transport_err: @@ -3971,8 +3975,13 @@ transport_err: * responsible for cleaning up the early connection failure. */ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) - iscsit_take_action_for_connection_exit(conn); + iscsit_take_action_for_connection_exit(conn, &conn_freed); out: + if (!conn_freed) { + while (!kthread_should_stop()) { + msleep(100); + } + } return 0; } @@ -4073,6 +4082,7 @@ int iscsi_target_rx_thread(void *arg) u32 checksum = 0, digest = 0; struct iscsi_conn *conn = arg; struct kvec iov; + bool conn_freed = false; /* * Allow ourselves to be interrupted by SIGINT so that a * connection recovery / failure event can be triggered externally. @@ -4084,7 +4094,7 @@ int iscsi_target_rx_thread(void *arg) */ rc = wait_for_completion_interruptible(&conn->rx_login_comp); if (rc < 0 || iscsi_target_check_conn_state(conn)) - return 0; + goto out; if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { struct completion comp; @@ -4169,7 +4179,13 @@ int iscsi_target_rx_thread(void *arg) transport_err: if (!signal_pending(current)) atomic_set(&conn->transport_failed, 1); - iscsit_take_action_for_connection_exit(conn); + iscsit_take_action_for_connection_exit(conn, &conn_freed); +out: + if (!conn_freed) { + while (!kthread_should_stop()) { + msleep(100); + } + } return 0; } @@ -4545,8 +4561,11 @@ static void iscsit_logout_post_handler_closesession( * always sleep waiting for RX/TX thread shutdown to complete * within iscsit_close_connection(). 
*/ - if (conn->conn_transport->transport_type == ISCSI_TCP) + if (conn->conn_transport->transport_type == ISCSI_TCP) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4562,8 +4581,11 @@ static void iscsit_logout_post_handler_samecid( { int sleep = 1; - if (conn->conn_transport->transport_type == ISCSI_TCP) + if (conn->conn_transport->transport_type == ISCSI_TCP) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 62eb11329c71..1863de28ce46 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -562,6 +562,7 @@ struct iscsi_conn { #define LOGIN_FLAGS_READ_ACTIVE 1 #define LOGIN_FLAGS_CLOSED 2 #define LOGIN_FLAGS_READY 4 +#define LOGIN_FLAGS_INITIAL_PDU 8 unsigned long login_flags; struct delayed_work login_work; struct delayed_work login_cleanup_work; diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 87f687258729..f900de64e267 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -931,8 +931,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) } } -void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) +void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) { + *conn_freed = false; + spin_lock_bh(&conn->state_lock); if (atomic_read(&conn->connection_exit)) { spin_unlock_bh(&conn->state_lock); @@ -943,6 +945,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { spin_unlock_bh(&conn->state_lock); iscsit_close_connection(conn); + *conn_freed = true; return; } @@ -956,6 +959,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) spin_unlock_bh(&conn->state_lock); iscsit_handle_connection_cleanup(conn); + *conn_freed = true; } /* diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h index 21acc9a06376..42673fb44790 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.h +++ b/drivers/target/iscsi/iscsi_target_erl0.h @@ -9,7 +9,7 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *); extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); extern void iscsit_fall_back_to_erl0(struct iscsi_session *); -extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); +extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *); #endif /*** ISCSI_TARGET_ERL0_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index b06d3c0d2e66..4608d8dac04c 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1492,5 +1492,9 @@ int iscsi_target_login_thread(void *arg) break; } + while (!kthread_should_stop()) { + msleep(100); + } + return 0; } diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index dcb63481325f..5c4bab4a0c03 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ 
b/drivers/target/iscsi/iscsi_target_nego.c @@ -494,14 +494,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); -static bool iscsi_target_sk_state_check(struct sock *sk) +static bool __iscsi_target_sk_check_close(struct sock *sk) { if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { - pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," "returning FALSE\n"); - return false; + return true; } - return true; + return false; +} + +static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = test_bit(flag, &conn->login_flags); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + if (!state) + clear_bit(flag, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + return state; } static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) @@ -541,6 +587,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work) pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", conn, current->comm, current->pid); + /* + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() + * before initial PDU processing in iscsi_target_start_negotiation() + * has completed, go ahead and retry until it's cleared. + * + * Otherwise if the TCP connection drops while this is occuring, + * iscsi_target_start_negotiation() will detect the failure, call + * cancel_delayed_work_sync(&conn->login_work), and cleanup the + * remaining iscsi connection resources from iscsi_np process context. 
+ */ + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); + return; + } spin_lock(&tpg->tpg_state_lock); state = (tpg->tpg_state == TPG_STATE_ACTIVE); @@ -548,26 +608,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work) if (!state) { pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; + goto err; } - if (conn->sock) { - struct sock *sk = conn->sock->sk; - - read_lock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - read_unlock_bh(&sk->sk_callback_lock); - - if (!state) { - pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; - } + if (iscsi_target_sk_check_close(conn)) { + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); + goto err; } conn->login_kworker = current; @@ -585,34 +631,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work) flush_signals(current); conn->login_kworker = NULL; - if (rc < 0) { - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; - } + if (rc < 0) + goto err; pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", conn, current->comm, current->pid); rc = iscsi_target_do_login(conn, login); if (rc < 0) { - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); + goto err; } else if (!rc) { - if (conn->sock) { - struct sock *sk = conn->sock->sk; - - write_lock_bh(&sk->sk_callback_lock); - clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) + goto err; } else if (rc == 1) { iscsi_target_nego_release(conn); iscsi_post_login_handler(np, conn, zero_tsih); iscsit_deaccess_np(np, tpg, tpg_np); } + return; + +err: + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); } static void iscsi_target_do_cleanup(struct work_struct *work) @@ -660,31 +701,54 @@ static void iscsi_target_sk_state_change(struct sock *sk) orig_state_change(sk); return; } + state = __iscsi_target_sk_check_close(sk); + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" " conn: %p\n", conn); + if (state) + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); write_unlock_bh(&sk->sk_callback_lock); orig_state_change(sk); return; } - if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", conn); write_unlock_bh(&sk->sk_callback_lock); orig_state_change(sk); return; } + /* + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, + * but only queue conn->login_work -> iscsi_target_do_login_rx() + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. + * + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() + * will detect the dropped TCP connection from delayed workqueue context. 
+ * + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial + * iscsi_target_start_negotiation() is running, iscsi_target_do_login() + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() + * via iscsi_target_sk_check_and_clear() is responsible for detecting the + * dropped TCP connection in iscsi_np process context, and cleaning up + * the remaining iscsi connection resources. + */ + if (state) { + pr_debug("iscsi_target_sk_state_change got failed state\n"); + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - write_unlock_bh(&sk->sk_callback_lock); - - pr_debug("iscsi_target_sk_state_change: state: %d\n", state); + orig_state_change(sk); - if (!state) { - pr_debug("iscsi_target_sk_state_change got failed state\n"); - schedule_delayed_work(&conn->login_cleanup_work, 0); + if (!state) + schedule_delayed_work(&conn->login_work, 0); return; } + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); } @@ -947,6 +1011,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo if (iscsi_target_handle_csg_one(conn, login) < 0) return -1; if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { + /* + * Check to make sure the TCP connection has not + * dropped asynchronously while session reinstatement + * was occuring in this kthread context, before + * transitioning to full feature phase operation. + */ + if (iscsi_target_sk_check_close(conn)) + return -1; + login->tsih = conn->sess->tsih; login->login_complete = 1; iscsi_target_restore_sock_callbacks(conn); @@ -973,21 +1046,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo break; } - if (conn->sock) { - struct sock *sk = conn->sock->sk; - bool state; - - read_lock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - read_unlock_bh(&sk->sk_callback_lock); - - if (!state) { - pr_debug("iscsi_target_do_login() failed state for" - " conn: %p\n", conn); - return -1; - } - } - return 0; } @@ -1251,16 +1309,28 @@ int iscsi_target_start_negotiation( { int ret; + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + /* + * If iscsi_target_do_login returns zero to signal more PDU + * exchanges are required to complete the login, go ahead and + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection + * is still active. + * + * Otherwise if TCP connection dropped asynchronously, go ahead + * and perform connection cleanup now. 
+ */ ret = iscsi_target_do_login(conn, login); - if (!ret) { - if (conn->sock) { - struct sock *sk = conn->sock->sk; + if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) + ret = -1; - write_lock_bh(&sk->sk_callback_lock); - set_bit(LOGIN_FLAGS_READY, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } - } else if (ret < 0) { + if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_cleanup_work); iscsi_target_restore_sock_callbacks(conn); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 0c3f90130b7d..54319d7f382d 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -84,6 +84,11 @@ static int target_fabric_mappedlun_link( "_tpg does not exist\n"); return -EINVAL; } + if (lun->lun_shutdown) { + pr_err("Unable to create mappedlun symlink because" + " lun->lun_shutdown=true\n"); + return -EINVAL; + } se_tpg = lun->lun_sep->sep_tpg; nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 0696de9553d3..51681866449a 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -843,6 +843,8 @@ void core_tpg_remove_lun( struct se_portal_group *tpg, struct se_lun *lun) { + lun->lun_shutdown = true; + core_clear_lun_from_tpg(lun, tpg); transport_clear_lun_ref(lun); @@ -850,6 +852,7 @@ void core_tpg_remove_lun( spin_lock(&tpg->tpg_lun_lock); lun->lun_status = TRANSPORT_LUN_STATUS_FREE; + lun->lun_shutdown = false; spin_unlock(&tpg->tpg_lun_lock); percpu_ref_exit(&lun->lun_ref); diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 210db24d2204..4d39f7959adf 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -190,7 +190,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, if (!vdev->has_vga) return -EINVAL; - switch (pos) { + if (pos > 0xbfffful) + return -EINVAL; + + switch ((u32)pos) { case 0xa0000 ... 
0xbffff: count = min(count, (size_t)(0xc0000 - pos)); iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1); diff --git a/fs/dcache.c b/fs/dcache.c index 47434f2c66b2..ae7106123047 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3549,3 +3549,30 @@ void __init vfs_caches_init(unsigned long mempages) bdev_cache_init(); chrdev_init(); } + +void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + if (unlikely(dname_external(dentry))) { + struct external_name *p = external_name(dentry); + atomic_inc(&p->u.count); + spin_unlock(&dentry->d_lock); + name->name = p->name; + } else { + memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN); + spin_unlock(&dentry->d_lock); + name->name = name->inline_name; + } +} +EXPORT_SYMBOL(take_dentry_name_snapshot); + +void release_dentry_name_snapshot(struct name_snapshot *name) +{ + if (unlikely(name->name != name->inline_name)) { + struct external_name *p; + p = container_of(name->name, struct external_name, name[0]); + if (unlikely(atomic_dec_and_test(&p->u.count))) + kfree_rcu(p, u.head); + } +} +EXPORT_SYMBOL(release_dentry_name_snapshot); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 6f0ce531e221..1488e13d6221 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -620,7 +620,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, { int error; struct dentry *dentry = NULL, *trap; - const char *old_name; + struct name_snapshot old_name; trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? */ @@ -635,19 +635,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, if (IS_ERR(dentry) || dentry == trap || dentry->d_inode) goto exit; - old_name = fsnotify_oldname_init(old_dentry->d_name.name); + take_dentry_name_snapshot(&old_name, old_dentry); error = simple_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, dentry); if (error) { - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); goto exit; } d_move(old_dentry, dentry); - fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, + fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name, S_ISDIR(old_dentry->d_inode->i_mode), NULL, old_dentry); - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); unlock_rename(new_dir, old_dir); dput(dentry); return old_dentry; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index ed2a57a3bb50..9dfb324de119 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -340,6 +340,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, lastoff = page_offset(page); bh = head = page_buffers(page); do { + if (lastoff + bh->b_size <= startoff) + goto next; if (buffer_uptodate(bh) || buffer_unwritten(bh)) { if (whence == SEEK_DATA) @@ -354,6 +356,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, unlock_page(page); goto out; } +next: lastoff += bh->b_size; bh = bh->b_this_page; } while (bh != head); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 77c81c64a47e..7015aa385cd9 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1929,7 +1929,8 @@ retry: n_desc_blocks = o_desc_blocks + le16_to_cpu(es->s_reserved_gdt_blocks); n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); - n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb); + n_blocks_count = (ext4_fsblk_t)n_group * + EXT4_BLOCKS_PER_GROUP(sb); n_group--; /* set to last group number */ } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 341466bcf180..8d275fad465d 100644 
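/*
 * Illustrative sketch of the ext4_resize_fs() change just above
 * (fs/ext4/resize.c): without the (ext4_fsblk_t) cast, n_group *
 * EXT4_BLOCKS_PER_GROUP() is evaluated as a 32-bit multiplication and
 * wraps before it is assigned to the 64-bit block count.  This is a
 * standalone illustration, not part of the patch; the typedefs and
 * numbers below are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t ext4_group_t;	/* group numbers are 32-bit */
typedef uint64_t ext4_fsblk_t;	/* block numbers are 64-bit */

int main(void)
{
	ext4_group_t n_group = 0x60000;		/* hypothetical group count */
	uint32_t blocks_per_group = 32768;	/* typical 4K-block group size */

	/* Old code: the product wraps at 2^32 before the u64 assignment. */
	ext4_fsblk_t wrong = n_group * blocks_per_group;

	/* Fixed code: widen one operand so the multiply is done in 64 bits. */
	ext4_fsblk_t right = (ext4_fsblk_t)n_group * blocks_per_group;

	printf("truncated: %llu  widened: %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}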
--- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -932,6 +932,8 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) unsigned int total, fsmeta; struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + unsigned int main_segs, blocks_per_seg; + int i; total = le32_to_cpu(raw_super->segment_count); fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); @@ -943,6 +945,20 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) if (unlikely(fsmeta >= total)) return 1; + main_segs = le32_to_cpu(raw_super->segment_count_main); + blocks_per_seg = sbi->blocks_per_seg; + + for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { + if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || + le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) + return 1; + } + for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { + if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs || + le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) + return 1; + } + if (unlikely(f2fs_cp_error(sbi))) { f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); return 1; diff --git a/fs/namei.c b/fs/namei.c index e1976450a1e2..ea306e1d95f5 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -4088,11 +4088,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, { int error; bool is_dir = d_is_dir(old_dentry); - const unsigned char *old_name; struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; + struct name_snapshot old_name; if (source == target) return 0; @@ -4142,7 +4142,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (error) return error; - old_name = fsnotify_oldname_init(old_dentry->d_name.name); + take_dentry_name_snapshot(&old_name, old_dentry); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); @@ -4203,14 +4203,14 @@ out: mutex_unlock(&target->i_mutex); dput(new_dentry); if (!error) { - fsnotify_move(old_dir, new_dir, old_name, is_dir, + fsnotify_move(old_dir, new_dir, old_name.name, is_dir, !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, new_is_dir, NULL, new_dentry); } } - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); return error; } diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 41e39102743a..e1f24ede210e 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -105,16 +105,20 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask) if (unlikely(!fsnotify_inode_watches_children(p_inode))) __fsnotify_update_child_dentry_flags(p_inode); else if (p_inode->i_fsnotify_mask & mask) { + struct name_snapshot name; + /* we are notifying a parent so come up with the new mask which * specifies these are events which came from a child. 
*/ mask |= FS_EVENT_ON_CHILD; + take_dentry_name_snapshot(&name, dentry); if (path) ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH, - dentry->d_name.name, 0); + name.name, 0); else ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE, - dentry->d_name.name, 0); + name.name, 0); + release_dentry_name_snapshot(&name); } dput(parent); diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 5fa34243b1ae..eaca4cf4cf61 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -365,7 +365,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, cxt->przs[i] = persistent_ram_new(*paddr, sz, 0, &cxt->ecc_info, - cxt->memtype); + cxt->memtype, 0); if (IS_ERR(cxt->przs[i])) { err = PTR_ERR(cxt->przs[i]); dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", @@ -395,7 +395,8 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt, return -ENOMEM; } - *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype); + *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, + cxt->memtype, 0); if (IS_ERR(*prz)) { int err = PTR_ERR(*prz); diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index 76c3f80efdfa..57b165ba849c 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -80,16 +80,15 @@ static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a) } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old); } -static DEFINE_RAW_SPINLOCK(buffer_lock); - /* increase and wrap the start pointer, returning the old value */ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) { int old; int new; - unsigned long flags; + unsigned long flags = 0; - raw_spin_lock_irqsave(&buffer_lock, flags); + if (!(prz->flags & PRZ_FLAG_NO_LOCK)) + raw_spin_lock_irqsave(&prz->buffer_lock, flags); old = atomic_read(&prz->buffer->start); new = old + a; @@ -97,7 +96,8 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) new -= prz->buffer_size; atomic_set(&prz->buffer->start, new); - raw_spin_unlock_irqrestore(&buffer_lock, flags); + if (!(prz->flags & PRZ_FLAG_NO_LOCK)) + raw_spin_unlock_irqrestore(&prz->buffer_lock, flags); return old; } @@ -107,9 +107,10 @@ static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a) { size_t old; size_t new; - unsigned long flags; + unsigned long flags = 0; - raw_spin_lock_irqsave(&buffer_lock, flags); + if (!(prz->flags & PRZ_FLAG_NO_LOCK)) + raw_spin_lock_irqsave(&prz->buffer_lock, flags); old = atomic_read(&prz->buffer->size); if (old == prz->buffer_size) @@ -121,7 +122,8 @@ static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a) atomic_set(&prz->buffer->size, new); exit: - raw_spin_unlock_irqrestore(&buffer_lock, flags); + if (!(prz->flags & PRZ_FLAG_NO_LOCK)) + raw_spin_unlock_irqrestore(&prz->buffer_lock, flags); } static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic; @@ -487,6 +489,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig, prz->buffer->sig); } + /* Rewind missing or invalid memory area. 
*/ prz->buffer->sig = sig; persistent_ram_zap(prz); @@ -513,7 +516,7 @@ void persistent_ram_free(struct persistent_ram_zone *prz) struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, - unsigned int memtype) + unsigned int memtype, u32 flags) { struct persistent_ram_zone *prz; int ret = -ENOMEM; @@ -524,6 +527,10 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, goto err; } + /* Initialize general buffer state. */ + raw_spin_lock_init(&prz->buffer_lock); + prz->flags = flags; + ret = persistent_ram_buffer_map(start, size, prz, memtype); if (ret) goto err; diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -1,15 +1,10 @@ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H -/* - * This says "generic", but it's actually big-endian only. - * Little-endian can use more efficient versions of these - * interfaces, see for example - * arch/x86/include/asm/word-at-a-time.h - * for those. - */ - #include <linux/kernel.h> +#include <asm/byteorder.h> + +#ifdef __BIG_ENDIAN struct word_at_a_time { const unsigned long high_bits, low_bits; @@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct #define zero_bytemask(mask) (~1ul << __fls(mask)) #endif +#else + +/* + * The optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. 
+ */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 340ee0dae93b..493926ff1717 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -527,4 +527,11 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) return upper; } +struct name_snapshot { + const char *name; + char inline_name[DNAME_INLINE_LEN]; +}; +void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); +void release_dentry_name_snapshot(struct name_snapshot *); + #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 7ee1774edee5..a7789559078b 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -310,35 +310,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) } } -#if defined(CONFIG_FSNOTIFY) /* notify helpers */ - -/* - * fsnotify_oldname_init - save off the old filename before we change it - */ -static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) -{ - return kstrdup(name, GFP_KERNEL); -} - -/* - * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init - */ -static inline void fsnotify_oldname_free(const unsigned char *old_name) -{ - kfree(old_name); -} - -#else /* CONFIG_FSNOTIFY */ - -static inline const char *fsnotify_oldname_init(const unsigned char *name) -{ - return NULL; -} - -static inline void fsnotify_oldname_free(const unsigned char *old_name) -{ -} - -#endif /* CONFIG_FSNOTIFY */ - #endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index d090cfcaa167..fbdacd1278e3 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -737,6 +737,10 @@ int genphy_read_status(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_soft_reset(struct phy_device *phydev); +static inline int genphy_no_soft_reset(struct phy_device *phydev) +{ + return 0; +} void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver); diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 4af3fdc85b01..ff5e247928c4 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -23,6 +23,13 @@ #include <linux/types.h> #include <linux/init.h> +/* 
+ * Choose whether access to the RAM zone requires locking or not. If a zone + * can be written to from different CPUs like with ftrace for example, then + * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. + */ +#define PRZ_FLAG_NO_LOCK BIT(0) + struct persistent_ram_buffer; struct rs_control; @@ -39,6 +46,8 @@ struct persistent_ram_zone { void *vaddr; struct persistent_ram_buffer *buffer; size_t buffer_size; + u32 flags; + raw_spinlock_t buffer_lock; /* ECC correction */ char *par_buffer; @@ -54,7 +63,7 @@ struct persistent_ram_zone { struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, u32 sig, struct persistent_ram_ecc_info *ecc_info, - unsigned int memtype); + unsigned int memtype, u32 flags); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); diff --git a/include/linux/sched.h b/include/linux/sched.h index ebfd7a5ea6d4..4863d72acae4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -729,6 +729,16 @@ struct signal_struct { #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + /* If true, all threads except ->group_exit_task have pending SIGKILL */ static inline int signal_group_exit(const struct signal_struct *sig) { diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57d..5474847bee86 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -186,7 +186,7 @@ size_t ksize(const void *); * (PAGE_SIZE*2). Larger requests are passed to the page allocator. */ #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif @@ -199,7 +199,7 @@ size_t ksize(const void *); * be allocated from the same page. */ #define KMALLOC_SHIFT_HIGH PAGE_SHIFT -#define KMALLOC_SHIFT_MAX 30 +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif diff --git a/include/linux/string.h b/include/linux/string.h index 2e22a2e58f3a..b0a4f0de8b3e 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); #ifndef __HAVE_ARCH_STRLCPY size_t strlcpy(char *, const char *, size_t); #endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t __must_check strscpy(char *, const char *, size_t); +#endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9eb54f41623e..fa25f353f985 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -315,6 +315,7 @@ enum { __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ + __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ @@ -412,7 +413,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, * Pointer to the allocated workqueue on success, %NULL on failure. 
*/ #define alloc_ordered_workqueue(fmt, flags, args...) \ - alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) + alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ + __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) #define create_workqueue(name) \ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index e8aa72eaded5..53127a94e219 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -545,7 +545,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, memcpy(stream + lcp_len, ((char *) &iwe->u) + IW_EV_POINT_OFF, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - memcpy(stream + point_len, extra, iwe->u.data.length); + if (iwe->u.data.length && extra) + memcpy(stream + point_len, extra, iwe->u.data.length); stream += event_len; } return stream; diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 230775f5952a..b1e96fff0255 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -441,6 +441,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) #define _sctp_walk_params(pos, chunk, end, member)\ for (pos.v = chunk->member;\ + (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\ + (void *)chunk + end) &&\ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ pos.v += WORD_ROUND(ntohs(pos.p->length))) @@ -451,6 +453,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) #define _sctp_walk_errors(err, chunk_hdr, end)\ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ sizeof(sctp_chunkhdr_t));\ + ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\ + (void *)chunk_hdr + end) &&\ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ ntohs(err->length) >= sizeof(sctp_errhdr_t); \ err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ad9ce86428a6..d1e7cb952168 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -719,6 +719,7 @@ struct se_lun { u32 lun_access; u32 lun_flags; u32 unpacked_lun; + bool lun_shutdown; atomic_t lun_acl_count; spinlock_t lun_acl_lock; spinlock_t lun_sep_lock; diff --git a/kernel/signal.c b/kernel/signal.c index 046c7f77cdcb..2e1c5d375a0f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task) * fresh group stop. Read comment in do_signal_stop() for details. */ if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { - sig->flags = SIGNAL_STOP_STOPPED; + signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); return true; } return false; @@ -896,7 +896,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) * will take ->siglock, notice SIGNAL_CLD_MASK, and * notify its parent. See get_signal_to_deliver(). */ - signal->flags = why | SIGNAL_STOP_CONTINUED; + signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); signal->group_stop_count = 0; signal->group_exit_code = 0; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0963b7fbc9a6..a636e38c0813 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3255,7 +3255,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) * attributes breaks ordering guarantee. Disallow exposing ordered * workqueues. 
*/ - if (WARN_ON(wq->flags & __WQ_ORDERED)) + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return -EINVAL; wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); @@ -3830,8 +3830,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, return -EINVAL; /* creating multiple pwqs breaks ordering guarantee */ - if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) - return -EINVAL; + if (!list_empty(&wq->pwqs)) { + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + return -EINVAL; + + wq->flags &= ~__WQ_ORDERED; + } pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL); new_attrs = alloc_workqueue_attrs(GFP_KERNEL); @@ -4075,6 +4079,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, struct workqueue_struct *wq; struct pool_workqueue *pwq; + /* + * Unbound && max_active == 1 used to imply ordered, which is no + * longer the case on NUMA machines due to per-node pools. While + * alloc_ordered_workqueue() is the right way to create an ordered + * workqueue, keep the previous behavior to avoid subtle breakages + * on NUMA. + */ + if ((flags & WQ_UNBOUND) && max_active == 1) + flags |= __WQ_ORDERED; + /* see the comment above the definition of WQ_POWER_EFFICIENT */ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) flags |= WQ_UNBOUND; @@ -4267,13 +4281,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) struct pool_workqueue *pwq; /* disallow meddling with max_active for ordered workqueues */ - if (WARN_ON(wq->flags & __WQ_ORDERED)) + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return; max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); mutex_lock(&wq->mutex); + wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; for_each_pwq(pwq, wq) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 4e35a5d767ed..aab836755e21 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED config DEBUG_INFO_SPLIT bool "Produce split debuginfo in .dwo files" - depends on DEBUG_INFO + depends on DEBUG_INFO && !FRV help Generate debug info into separate .dwo files. This significantly reduces the build directory size for builds with DEBUG_INFO, diff --git a/lib/string.c b/lib/string.c index 643b0a90802c..8e8a2e9e9522 100644 --- a/lib/string.c +++ b/lib/string.c @@ -27,6 +27,10 @@ #include <linux/bug.h> #include <linux/errno.h> +#include <asm/byteorder.h> +#include <asm/word-at-a-time.h> +#include <asm/page.h> + #ifndef __HAVE_ARCH_STRNCASECMP /** * strncasecmp - Case insensitive, length-limited string comparison @@ -154,6 +158,91 @@ size_t strlcpy(char *dest, const char *src, size_t size) EXPORT_SYMBOL(strlcpy); #endif +#ifndef __HAVE_ARCH_STRSCPY +/** + * strscpy - Copy a C-string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: Size of destination buffer + * + * Copy the string, or as much of it as fits, into the dest buffer. + * The routine returns the number of characters copied (not including + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. + * The behavior is undefined if the string buffers overlap. + * The destination buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since + * the return value is easier to error-check than strlcpy()'s. 
+ * In addition, the implementation is robust to the string changing out + * from underneath it, unlike the current strlcpy() implementation. + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be + * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() + * with an overflow test, then just memset() the tail of the dest buffer. + */ +ssize_t strscpy(char *dest, const char *src, size_t count) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + size_t max = count; + long res = 0; + + if (count == 0) + return -E2BIG; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* + * If src is unaligned, don't cross a page boundary, + * since we don't know if the next page is mapped. + */ + if ((long)src & (sizeof(long) - 1)) { + size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); + if (limit < max) + max = limit; + } +#else + /* If src or dest is unaligned, don't do word-at-a-time. */ + if (((long) dest | (long) src) & (sizeof(long) - 1)) + max = 0; +#endif + + while (max >= sizeof(unsigned long)) { + unsigned long c, data; + + c = *(unsigned long *)(src+res); + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + *(unsigned long *)(dest+res) = c & zero_bytemask(data); + return res + find_zero(data); + } + *(unsigned long *)(dest+res) = c; + res += sizeof(unsigned long); + count -= sizeof(unsigned long); + max -= sizeof(unsigned long); + } + + while (count) { + char c; + + c = src[res]; + dest[res] = c; + if (!c) + return res; + res++; + count--; + } + + /* Hit buffer length without finding a NUL; force NUL-termination. */ + if (res) + dest[res-1] = '\0'; + + return -E2BIG; +} +EXPORT_SYMBOL(strscpy); +#endif + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ded3507dc793..7a8d1b808efc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1029,14 +1029,14 @@ int move_freepages(struct zone *zone, #endif for (page = start_page; page <= end_page;) { - /* Make sure we are not inadvertently changing nodes */ - VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); - if (!pfn_valid_within(page_to_pfn(page))) { page++; continue; } + /* Make sure we are not inadvertently changing nodes */ + VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); + if (!PageBuddy(page)) { page++; continue; @@ -5467,8 +5467,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s) } if (pages && s) - pr_info("Freeing %s memory: %ldK (%p - %p)\n", - s, pages << (PAGE_SHIFT - 10), start, end); + pr_info("Freeing %s memory: %ldK\n", + s, pages << (PAGE_SHIFT - 10)); return pages; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index c168b1f991f2..4ccf021b85d5 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -292,6 +292,10 @@ static void vlan_sync_address(struct net_device *dev, if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) return; + /* vlan continues to inherit address of lower device */ + if (vlan_dev_inherit_address(vlandev, dev)) + goto out; + /* vlan address was different from the old address and is equal to * the new address */ if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && @@ -304,6 +308,7 @@ static void vlan_sync_address(struct net_device *dev, !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_add(dev, vlandev->dev_addr); +out: 
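/*
 * Illustrative sketch of how the strscpy() helper added to lib/string.c
 * above is meant to be called: the return value is either the copied
 * length or -E2BIG, so truncation is a single comparison.  This is a
 * simplified byte-at-a-time stand-in (the word-at-a-time fast path from
 * the patch is omitted) so the sketch builds as an ordinary C program,
 * and the buffer and source names are made up; it is not part of the
 * patch itself.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

static ssize_t strscpy_demo(char *dest, const char *src, size_t count)
{
	size_t res = 0;

	if (count == 0)
		return -E2BIG;

	while (res + 1 < count && src[res]) {
		dest[res] = src[res];
		res++;
	}
	dest[res] = '\0';			/* always NUL terminated */

	return src[res] ? -E2BIG : (ssize_t)res;
}

int main(void)
{
	char ifname[8];
	ssize_t len;

	len = strscpy_demo(ifname, "eth0", sizeof(ifname));
	printf("copied %zd bytes: \"%s\"\n", len, ifname);	/* 4, "eth0" */

	len = strscpy_demo(ifname, "much-too-long-name", sizeof(ifname));
	if (len == -E2BIG)		/* truncated, but still NUL terminated */
		printf("truncated to \"%s\"\n", ifname);
	return 0;
}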
ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); } diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 9d010a09ab98..cc1557978066 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev, void vlan_setup(struct net_device *dev); int register_vlan_dev(struct net_device *dev); void unregister_vlan_dev(struct net_device *dev, struct list_head *head); +bool vlan_dev_inherit_address(struct net_device *dev, + struct net_device *real_dev); static inline u32 vlan_get_ingress_priority(struct net_device *dev, u16 vlan_tci) diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 0d441ec8763e..1ab6797be8a3 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -276,6 +276,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); } +bool vlan_dev_inherit_address(struct net_device *dev, + struct net_device *real_dev) +{ + if (dev->addr_assign_type != NET_ADDR_STOLEN) + return false; + + ether_addr_copy(dev->dev_addr, real_dev->dev_addr); + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); + return true; +} + static int vlan_dev_open(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); @@ -286,7 +297,8 @@ static int vlan_dev_open(struct net_device *dev) !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) return -ENETDOWN; - if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) && + !vlan_dev_inherit_address(dev, real_dev)) { err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) goto out; @@ -591,8 +603,10 @@ static int vlan_dev_init(struct net_device *dev) /* ipv6 shared card related stuff */ dev->dev_id = real_dev->dev_id; - if (is_zero_ether_addr(dev->dev_addr)) - eth_hw_addr_inherit(dev, real_dev); + if (is_zero_ether_addr(dev->dev_addr)) { + ether_addr_copy(dev->dev_addr, real_dev->dev_addr); + dev->addr_assign_type = NET_ADDR_STOLEN; + } if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 85bcc21e84d2..05f57e491ccb 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -511,13 +511,12 @@ static int bnep_session(void *arg) static struct device *bnep_get_device(struct bnep_session *session) { - struct hci_conn *conn; + struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn; - conn = l2cap_pi(session->sock->sk)->chan->conn->hcon; - if (!conn) + if (!conn || !conn->hcon) return NULL; - return &conn->dev; + return &conn->hcon->dev; } static struct device_type bnep_type = { @@ -533,6 +532,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) BT_DBG(""); + if (!l2cap_is_socket(sock)) + return -EBADFD; + baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 67fe5e84e68f..278a194e6af4 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) BT_DBG(""); + if (!l2cap_is_socket(sock)) + return -EBADFD; + session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); if (!session) return -ENOMEM; diff --git a/net/core/dev.c b/net/core/dev.c index 276e2f93fc6a..99b703e45c59 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2435,9 +2435,10 @@ 
EXPORT_SYMBOL(skb_mac_gso_segment); static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) - return skb->ip_summed != CHECKSUM_PARTIAL; - else - return skb->ip_summed == CHECKSUM_NONE; + return skb->ip_summed != CHECKSUM_PARTIAL && + skb->ip_summed != CHECKSUM_NONE; + + return skb->ip_summed == CHECKSUM_NONE; } /** @@ -2454,11 +2455,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path) { + struct sk_buff *segs; + if (unlikely(skb_needs_check(skb, tx_path))) { int err; - skb_warn_bad_offload(skb); - + /* We're going to init ->check field in TCP or UDP header */ err = skb_cow_head(skb, 0); if (err < 0) return ERR_PTR(err); @@ -2470,7 +2472,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb, skb_reset_mac_header(skb); skb_reset_mac_len(skb); - return skb_mac_gso_segment(skb, features); + segs = skb_mac_gso_segment(skb, features); + + if (unlikely(skb_needs_check(skb, tx_path))) + skb_warn_bad_offload(skb); + + return segs; } EXPORT_SYMBOL(__skb_gso_segment); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 72e899a3efda..6f5e04732631 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; + ifr.ifr_name[IFNAMSIZ-1] = 0; error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex); if (error) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e1574edf4e43..ff864cefd688 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1554,7 +1554,8 @@ static int do_setlink(const struct sk_buff *skb, struct sockaddr *sa; int len; - len = sizeof(sa_family_t) + dev->addr_len; + len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, + sizeof(*sa)); sa = kmalloc(len, GFP_KERNEL); if (!sa) { err = -ENOMEM; diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 9733ddbc96cb..bf767d27cfbe 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk) * singleton values (which always leads to failure). * These settings can still (later) be overridden via sockopts. 
*/ - if (ccid_get_builtin_ccids(&tx.val, &tx.len) || - ccid_get_builtin_ccids(&rx.val, &rx.len)) + if (ccid_get_builtin_ccids(&tx.val, &tx.len)) return -ENOBUFS; + if (ccid_get_builtin_ccids(&rx.val, &rx.len)) { + kfree(tx.val); + return -ENOBUFS; + } if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 1c77a94c0cbf..db8a9840ffac 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1174,13 +1174,14 @@ static struct pernet_operations fib_net_ops = { void __init ip_fib_init(void) { - rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); - rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); - rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); + fib_trie_init(); register_pernet_subsys(&fib_net_ops); + register_netdevice_notifier(&fib_netdev_notifier); register_inetaddr_notifier(&fib_inetaddr_notifier); - fib_trie_init(); + rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); + rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); + rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 357c2a914e77..ba7f71169132 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -888,7 +888,7 @@ static int __ip_append_data(struct sock *sk, csummode = CHECKSUM_PARTIAL; cork->length += length; - if (((length > mtu) || (skb && skb_is_gso(skb))) && + if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && (sk->sk_type == SOCK_DGRAM)) { diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 3dd7d4ebd7cd..a837f2065d2a 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -172,7 +172,7 @@ static void __net_exit ip6_fl_purge(struct net *net) { int i; - spin_lock(&ip6_fl_lock); + spin_lock_bh(&ip6_fl_lock); for (i = 0; i <= FL_HASH_MASK; i++) { struct ip6_flowlabel *fl; struct ip6_flowlabel __rcu **flp; @@ -190,7 +190,7 @@ static void __net_exit ip6_fl_purge(struct net *net) flp = &fl->next; } } - spin_unlock(&ip6_fl_lock); + spin_unlock_bh(&ip6_fl_lock); } static struct ip6_flowlabel *fl_intern(struct net *net, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a3154b8ddd07..650e7c53c3c8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1305,7 +1305,7 @@ emsgsize: skb = skb_peek_tail(&sk->sk_write_queue); cork->length += length; - if (((length > mtu) || + if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO) && diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 20569d6087c4..c471baa92b9a 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -44,7 +44,7 @@ EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { - u16 offset = sizeof(struct ipv6hdr); + unsigned int offset = sizeof(struct ipv6hdr); unsigned int packet_len = skb_tail_pointer(skb) - skb_network_header(skb); int found_rhdr = 0; @@ -52,6 +52,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) while (offset <= packet_len) { struct ipv6_opt_hdr *exthdr; + unsigned int len; switch (**nexthdr) { @@ -77,7 +78,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) exthdr = (struct 
ipv6_opt_hdr *)(skb_network_header(skb) + offset); - offset += ipv6_optlen(exthdr); + len = ipv6_optlen(exthdr); + if (len + offset >= IPV6_MAXPLEN) + return -EINVAL; + offset += len; *nexthdr = &exthdr->nexthdr; } diff --git a/net/key/af_key.c b/net/key/af_key.c index 08e2c6159e03..a1bf6d825d4f 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -63,6 +63,7 @@ struct pfkey_sock { } u; struct sk_buff *skb; } dump; + struct mutex dump_lock; }; static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, @@ -143,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol, { struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); struct sock *sk; + struct pfkey_sock *pfk; int err; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) @@ -157,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol, if (sk == NULL) goto out; + pfk = pfkey_sk(sk); + mutex_init(&pfk->dump_lock); + sock->ops = &pfkey_ops; sock_init_data(sock, sk); @@ -285,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) struct sadb_msg *hdr; int rc; + mutex_lock(&pfk->dump_lock); + if (!pfk->dump.dump) { + rc = 0; + goto out; + } + rc = pfk->dump.dump(pfk); - if (rc == -ENOBUFS) - return 0; + if (rc == -ENOBUFS) { + rc = 0; + goto out; + } if (pfk->dump.skb) { - if (!pfkey_can_dump(&pfk->sk)) - return 0; + if (!pfkey_can_dump(&pfk->sk)) { + rc = 0; + goto out; + } hdr = (struct sadb_msg *) pfk->dump.skb->data; hdr->sadb_msg_seq = 0; @@ -302,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) } pfkey_terminate_dump(pfk); + +out: + mutex_unlock(&pfk->dump_lock); return rc; } @@ -1805,19 +1823,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms struct xfrm_address_filter *filter = NULL; struct pfkey_sock *pfk = pfkey_sk(sk); - if (pfk->dump.dump != NULL) + mutex_lock(&pfk->dump_lock); + if (pfk->dump.dump != NULL) { + mutex_unlock(&pfk->dump_lock); return -EBUSY; + } proto = pfkey_satype2proto(hdr->sadb_msg_satype); - if (proto == 0) + if (proto == 0) { + mutex_unlock(&pfk->dump_lock); return -EINVAL; + } if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; filter = kmalloc(sizeof(*filter), GFP_KERNEL); - if (filter == NULL) + if (filter == NULL) { + mutex_unlock(&pfk->dump_lock); return -ENOMEM; + } memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr, sizeof(xfrm_address_t)); @@ -1833,6 +1858,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms pfk->dump.dump = pfkey_dump_sa; pfk->dump.done = pfkey_dump_sa_done; xfrm_state_walk_init(&pfk->dump.u.state, proto, filter); + mutex_unlock(&pfk->dump_lock); return pfkey_do_dump(pfk); } @@ -2692,14 +2718,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb { struct pfkey_sock *pfk = pfkey_sk(sk); - if (pfk->dump.dump != NULL) + mutex_lock(&pfk->dump_lock); + if (pfk->dump.dump != NULL) { + mutex_unlock(&pfk->dump_lock); return -EBUSY; + } pfk->dump.msg_version = hdr->sadb_msg_version; pfk->dump.msg_portid = hdr->sadb_msg_pid; pfk->dump.dump = pfkey_dump_sp; pfk->dump.done = pfkey_dump_sp_done; xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN); + mutex_unlock(&pfk->dump_lock); return pfkey_do_dump(pfk); } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 05cfee756898..eafac12b39a1 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3895,7 +3895,7 @@ static int packet_set_ring(struct sock *sk, union 
tpacket_req_u *req_u, register_prot_hook(sk); } spin_unlock(&po->bind_lock); - if (closing && (po->tp_version > TPACKET_V2)) { + if (pg_vec && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index fae88709aaa2..e50272592724 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -137,13 +137,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) unsigned long cl; unsigned long fh; int err; - int tp_created = 0; + int tp_created; if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) return -EPERM; replay: + tp_created = 0; + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); if (err < 0) return err; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 92c920c9cfa6..92c6eac72ea6 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1604,7 +1604,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, sctp_assoc_t associd = 0; sctp_cmsgs_t cmsgs = { NULL }; sctp_scope_t scope; - bool fill_sinfo_ttl = false; + bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; __u16 sinfo_flags = 0; @@ -1944,6 +1944,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, if (err < 0) goto out_free; + wait_connect = true; pr_debug("%s: we associated primitively\n", __func__); } @@ -1981,6 +1982,11 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, sctp_datamsg_put(datamsg); err = msg_len; + if (unlikely(wait_connect)) { + timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); + sctp_wait_for_connect(asoc, &timeo); + } + /* If we are already past ASSOCIATE, the lower * layers are responsible for association cleanup. 
*/ diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 00266fa359d2..3a82b320b1c6 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1211,15 +1211,14 @@ static inline int policy_to_flow_dir(int dir) } static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, - const struct flowi *fl) + const struct flowi *fl, u16 family) { struct xfrm_policy *pol; struct net *net = sock_net(sk); read_lock_bh(&net->xfrm.xfrm_policy_lock); if ((pol = sk->sk_policy[dir]) != NULL) { - bool match = xfrm_selector_match(&pol->selector, fl, - sk->sk_family); + bool match = xfrm_selector_match(&pol->selector, fl, family); int err = 0; if (match) { @@ -2150,7 +2149,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { num_pols = 1; - pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); + pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family); err = xfrm_expand_policies(fl, family, pols, &num_pols, &num_xfrms); if (err < 0) @@ -2428,7 +2427,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, pol = NULL; if (sk && sk->sk_policy[dir]) { - pol = xfrm_sk_policy_lookup(sk, dir, &fl); + pol = xfrm_sk_policy_lookup(sk, dir, &fl, family); if (IS_ERR(pol)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); return 0; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0e4908b6163d..09686203052c 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2203,6 +2203,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index f7c2a575a892..04b51844efc4 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -122,6 +122,16 @@ static const struct reg_default aic3x_reg[] = { { 108, 0x00 }, { 109, 0x00 }, }; +static bool aic3x_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case AIC3X_RESET: + return true; + default: + return false; + } +} + static const struct regmap_config aic3x_regmap = { .reg_bits = 8, .val_bits = 8, @@ -129,6 +139,9 @@ static const struct regmap_config aic3x_regmap = { .max_register = DAC_ICC_ADJ, .reg_defaults = aic3x_reg, .num_reg_defaults = ARRAY_SIZE(aic3x_reg), + + .volatile_reg = aic3x_volatile_reg, + .cache_type = REGCACHE_RBTREE, }; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index f8893f51d47b..2ee3b9dbe4f4 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -163,6 +163,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n", be->dai_link->name, event, dir); + if ((event == SND_SOC_DAPM_STREAM_STOP) && + (be->dpcm[dir].users >= 1)) + continue; + snd_soc_dapm_stream_event(be, dir, event); }
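/*
 * Illustrative sketch of the little-endian word-at-a-time scheme added to
 * include/asm-generic/word-at-a-time.h earlier in this patch (and used by
 * the new strscpy()): (a - 0x01..01) & ~a & 0x80..80 sets the high bit of
 * every byte of 'a' that is zero, and the lowest such bit marks the first
 * NUL.  This standalone version locates that byte with __builtin_ctzl()
 * rather than the kernel's multiply-and-shift, which the in-tree comment
 * notes is an equally valid choice; the test string is made up and this
 * code is not part of the patch.
 */
#include <stdio.h>
#include <string.h>

static size_t first_zero_byte(unsigned long a)
{
	unsigned long ones = ~0UL / 0xff;	/* 0x0101...01 */
	unsigned long highs = ones << 7;	/* 0x8080...80 */
	unsigned long mask = (a - ones) & ~a & highs;

	if (!mask)
		return sizeof(a);		/* no zero byte in this word */
	return (size_t)__builtin_ctzl(mask) / 8;
}

int main(void)
{
	char buf[sizeof(unsigned long)] = "abc";	/* bytes from index 3 on are zero */
	unsigned long word;

	memcpy(&word, buf, sizeof(word));
	/* Prints 3 on a little-endian machine, the same byte index the
	 * patch's prep_zero_mask()/create_zero_mask()/find_zero() chain
	 * would report for this word. */
	printf("first zero byte at index %zu\n", first_zero_byte(word));
	return 0;
}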