diff --git a/Makefile b/Makefile index 9429aa5e89de..6141df04fcb5 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 10 -SUBLEVEL = 54 +SUBLEVEL = 55 EXTRAVERSION = NAME = TOSSUG Baby Fish diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 2a75ff249e71..6430e7acb1eb 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -463,6 +463,18 @@ static void octeon_halt(void) octeon_kill_core(NULL); } +static char __read_mostly octeon_system_type[80]; + +static int __init init_octeon_system_type(void) +{ + snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)", + cvmx_board_type_to_string(octeon_bootinfo->board_type), + octeon_model_get_string(read_c0_prid())); + + return 0; +} +early_initcall(init_octeon_system_type); + /** * Handle all the error condition interrupts that might occur. * @@ -482,11 +494,7 @@ static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id) */ const char *octeon_board_type_string(void) { - static char name[80]; - sprintf(name, "%s (%s)", - cvmx_board_type_to_string(octeon_bootinfo->board_type), - octeon_model_get_string(read_c0_prid())); - return name; + return octeon_system_type; } const char *get_system_type(void) diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h index 910e71a12466..b8343ccbc989 100644 --- a/arch/mips/include/asm/reg.h +++ b/arch/mips/include/asm/reg.h @@ -12,116 +12,194 @@ #ifndef __ASM_MIPS_REG_H #define __ASM_MIPS_REG_H - -#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H) - -#define EF_R0 6 -#define EF_R1 7 -#define EF_R2 8 -#define EF_R3 9 -#define EF_R4 10 -#define EF_R5 11 -#define EF_R6 12 -#define EF_R7 13 -#define EF_R8 14 -#define EF_R9 15 -#define EF_R10 16 -#define EF_R11 17 -#define EF_R12 18 -#define EF_R13 19 -#define EF_R14 20 -#define EF_R15 21 -#define EF_R16 22 -#define EF_R17 23 -#define EF_R18 24 -#define EF_R19 25 -#define EF_R20 26 -#define EF_R21 27 -#define EF_R22 28 -#define EF_R23 29 -#define EF_R24 30 -#define EF_R25 31 +#define MIPS32_EF_R0 6 +#define MIPS32_EF_R1 7 +#define MIPS32_EF_R2 8 +#define MIPS32_EF_R3 9 +#define MIPS32_EF_R4 10 +#define MIPS32_EF_R5 11 +#define MIPS32_EF_R6 12 +#define MIPS32_EF_R7 13 +#define MIPS32_EF_R8 14 +#define MIPS32_EF_R9 15 +#define MIPS32_EF_R10 16 +#define MIPS32_EF_R11 17 +#define MIPS32_EF_R12 18 +#define MIPS32_EF_R13 19 +#define MIPS32_EF_R14 20 +#define MIPS32_EF_R15 21 +#define MIPS32_EF_R16 22 +#define MIPS32_EF_R17 23 +#define MIPS32_EF_R18 24 +#define MIPS32_EF_R19 25 +#define MIPS32_EF_R20 26 +#define MIPS32_EF_R21 27 +#define MIPS32_EF_R22 28 +#define MIPS32_EF_R23 29 +#define MIPS32_EF_R24 30 +#define MIPS32_EF_R25 31 /* * k0/k1 unsaved */ -#define EF_R26 32 -#define EF_R27 33 +#define MIPS32_EF_R26 32 +#define MIPS32_EF_R27 33 -#define EF_R28 34 -#define EF_R29 35 -#define EF_R30 36 -#define EF_R31 37 +#define MIPS32_EF_R28 34 +#define MIPS32_EF_R29 35 +#define MIPS32_EF_R30 36 +#define MIPS32_EF_R31 37 /* * Saved special registers */ -#define EF_LO 38 -#define EF_HI 39 - -#define EF_CP0_EPC 40 -#define EF_CP0_BADVADDR 41 -#define EF_CP0_STATUS 42 -#define EF_CP0_CAUSE 43 -#define EF_UNUSED0 44 - -#define EF_SIZE 180 - -#endif - -#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H) - -#define EF_R0 0 -#define EF_R1 1 -#define EF_R2 2 -#define EF_R3 3 -#define EF_R4 4 -#define EF_R5 5 -#define EF_R6 6 -#define EF_R7 7 -#define EF_R8 8 -#define EF_R9 9 -#define EF_R10 10 -#define EF_R11 11 -#define EF_R12 12 -#define EF_R13 13 -#define 
EF_R14 14 -#define EF_R15 15 -#define EF_R16 16 -#define EF_R17 17 -#define EF_R18 18 -#define EF_R19 19 -#define EF_R20 20 -#define EF_R21 21 -#define EF_R22 22 -#define EF_R23 23 -#define EF_R24 24 -#define EF_R25 25 +#define MIPS32_EF_LO 38 +#define MIPS32_EF_HI 39 + +#define MIPS32_EF_CP0_EPC 40 +#define MIPS32_EF_CP0_BADVADDR 41 +#define MIPS32_EF_CP0_STATUS 42 +#define MIPS32_EF_CP0_CAUSE 43 +#define MIPS32_EF_UNUSED0 44 + +#define MIPS32_EF_SIZE 180 + +#define MIPS64_EF_R0 0 +#define MIPS64_EF_R1 1 +#define MIPS64_EF_R2 2 +#define MIPS64_EF_R3 3 +#define MIPS64_EF_R4 4 +#define MIPS64_EF_R5 5 +#define MIPS64_EF_R6 6 +#define MIPS64_EF_R7 7 +#define MIPS64_EF_R8 8 +#define MIPS64_EF_R9 9 +#define MIPS64_EF_R10 10 +#define MIPS64_EF_R11 11 +#define MIPS64_EF_R12 12 +#define MIPS64_EF_R13 13 +#define MIPS64_EF_R14 14 +#define MIPS64_EF_R15 15 +#define MIPS64_EF_R16 16 +#define MIPS64_EF_R17 17 +#define MIPS64_EF_R18 18 +#define MIPS64_EF_R19 19 +#define MIPS64_EF_R20 20 +#define MIPS64_EF_R21 21 +#define MIPS64_EF_R22 22 +#define MIPS64_EF_R23 23 +#define MIPS64_EF_R24 24 +#define MIPS64_EF_R25 25 /* * k0/k1 unsaved */ -#define EF_R26 26 -#define EF_R27 27 +#define MIPS64_EF_R26 26 +#define MIPS64_EF_R27 27 -#define EF_R28 28 -#define EF_R29 29 -#define EF_R30 30 -#define EF_R31 31 +#define MIPS64_EF_R28 28 +#define MIPS64_EF_R29 29 +#define MIPS64_EF_R30 30 +#define MIPS64_EF_R31 31 /* * Saved special registers */ -#define EF_LO 32 -#define EF_HI 33 - -#define EF_CP0_EPC 34 -#define EF_CP0_BADVADDR 35 -#define EF_CP0_STATUS 36 -#define EF_CP0_CAUSE 37 - -#define EF_SIZE 304 /* size in bytes */ +#define MIPS64_EF_LO 32 +#define MIPS64_EF_HI 33 + +#define MIPS64_EF_CP0_EPC 34 +#define MIPS64_EF_CP0_BADVADDR 35 +#define MIPS64_EF_CP0_STATUS 36 +#define MIPS64_EF_CP0_CAUSE 37 + +#define MIPS64_EF_SIZE 304 /* size in bytes */ + +#if defined(CONFIG_32BIT) + +#define EF_R0 MIPS32_EF_R0 +#define EF_R1 MIPS32_EF_R1 +#define EF_R2 MIPS32_EF_R2 +#define EF_R3 MIPS32_EF_R3 +#define EF_R4 MIPS32_EF_R4 +#define EF_R5 MIPS32_EF_R5 +#define EF_R6 MIPS32_EF_R6 +#define EF_R7 MIPS32_EF_R7 +#define EF_R8 MIPS32_EF_R8 +#define EF_R9 MIPS32_EF_R9 +#define EF_R10 MIPS32_EF_R10 +#define EF_R11 MIPS32_EF_R11 +#define EF_R12 MIPS32_EF_R12 +#define EF_R13 MIPS32_EF_R13 +#define EF_R14 MIPS32_EF_R14 +#define EF_R15 MIPS32_EF_R15 +#define EF_R16 MIPS32_EF_R16 +#define EF_R17 MIPS32_EF_R17 +#define EF_R18 MIPS32_EF_R18 +#define EF_R19 MIPS32_EF_R19 +#define EF_R20 MIPS32_EF_R20 +#define EF_R21 MIPS32_EF_R21 +#define EF_R22 MIPS32_EF_R22 +#define EF_R23 MIPS32_EF_R23 +#define EF_R24 MIPS32_EF_R24 +#define EF_R25 MIPS32_EF_R25 +#define EF_R26 MIPS32_EF_R26 +#define EF_R27 MIPS32_EF_R27 +#define EF_R28 MIPS32_EF_R28 +#define EF_R29 MIPS32_EF_R29 +#define EF_R30 MIPS32_EF_R30 +#define EF_R31 MIPS32_EF_R31 +#define EF_LO MIPS32_EF_LO +#define EF_HI MIPS32_EF_HI +#define EF_CP0_EPC MIPS32_EF_CP0_EPC +#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR +#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS +#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE +#define EF_UNUSED0 MIPS32_EF_UNUSED0 +#define EF_SIZE MIPS32_EF_SIZE + +#elif defined(CONFIG_64BIT) + +#define EF_R0 MIPS64_EF_R0 +#define EF_R1 MIPS64_EF_R1 +#define EF_R2 MIPS64_EF_R2 +#define EF_R3 MIPS64_EF_R3 +#define EF_R4 MIPS64_EF_R4 +#define EF_R5 MIPS64_EF_R5 +#define EF_R6 MIPS64_EF_R6 +#define EF_R7 MIPS64_EF_R7 +#define EF_R8 MIPS64_EF_R8 +#define EF_R9 MIPS64_EF_R9 +#define EF_R10 MIPS64_EF_R10 +#define EF_R11 MIPS64_EF_R11 +#define EF_R12 MIPS64_EF_R12 +#define EF_R13 
MIPS64_EF_R13 +#define EF_R14 MIPS64_EF_R14 +#define EF_R15 MIPS64_EF_R15 +#define EF_R16 MIPS64_EF_R16 +#define EF_R17 MIPS64_EF_R17 +#define EF_R18 MIPS64_EF_R18 +#define EF_R19 MIPS64_EF_R19 +#define EF_R20 MIPS64_EF_R20 +#define EF_R21 MIPS64_EF_R21 +#define EF_R22 MIPS64_EF_R22 +#define EF_R23 MIPS64_EF_R23 +#define EF_R24 MIPS64_EF_R24 +#define EF_R25 MIPS64_EF_R25 +#define EF_R26 MIPS64_EF_R26 +#define EF_R27 MIPS64_EF_R27 +#define EF_R28 MIPS64_EF_R28 +#define EF_R29 MIPS64_EF_R29 +#define EF_R30 MIPS64_EF_R30 +#define EF_R31 MIPS64_EF_R31 +#define EF_LO MIPS64_EF_LO +#define EF_HI MIPS64_EF_HI +#define EF_CP0_EPC MIPS64_EF_CP0_EPC +#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR +#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS +#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE +#define EF_SIZE MIPS64_EF_SIZE #endif /* CONFIG_64BIT */ diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 895320e25662..e6e5d9162213 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -131,6 +131,8 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_FPUBOUND (1<<TIF_FPUBOUND) #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) +#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP) + /* work to do in syscall_trace_leave() */ #define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 202e581e6096..7fdf1de0447f 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -58,12 +58,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #include <asm/processor.h> -/* - * When this file is selected, we are definitely running a 64bit kernel. 
- * So using the right regs define in asm/reg.h - */ -#define WANT_COMPAT_REG_H - /* These MUST be defined before elf.h gets included */ extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs); #define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs); @@ -135,21 +129,21 @@ void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs) { int i; - for (i = 0; i < EF_R0; i++) + for (i = 0; i < MIPS32_EF_R0; i++) grp[i] = 0; - grp[EF_R0] = 0; + grp[MIPS32_EF_R0] = 0; for (i = 1; i <= 31; i++) - grp[EF_R0 + i] = (elf_greg_t) regs->regs[i]; - grp[EF_R26] = 0; - grp[EF_R27] = 0; - grp[EF_LO] = (elf_greg_t) regs->lo; - grp[EF_HI] = (elf_greg_t) regs->hi; - grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc; - grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr; - grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status; - grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause; -#ifdef EF_UNUSED0 - grp[EF_UNUSED0] = 0; + grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i]; + grp[MIPS32_EF_R26] = 0; + grp[MIPS32_EF_R27] = 0; + grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo; + grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi; + grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc; + grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr; + grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status; + grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause; +#ifdef MIPS32_EF_UNUSED0 + grp[MIPS32_EF_UNUSED0] = 0; #endif } diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index c01b307317a9..bffbbc557879 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -256,11 +256,13 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu, /* Setup Intr to Pin mapping */ if (pin & GIC_MAP_TO_NMI_MSK) { + int i; + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); /* FIXME: hack to route NMI to all cpu's */ - for (cpu = 0; cpu < NR_CPUS; cpu += 32) { + for (i = 0; i < NR_CPUS; i += 32) { GICWRITE(GIC_REG_ADDR(SHARED, - GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)), + GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)), 0xffffffff); } } else { diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 9c6299c733a3..1b95b2443221 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -161,6 +161,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) __get_user(fregs[i], i + (__u64 __user *) data); __get_user(child->thread.fpu.fcr31, data + 64); + child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* FIR may not be written. */ @@ -451,7 +452,7 @@ long arch_ptrace(struct task_struct *child, long request, break; #endif case FPC_CSR: - child->thread.fpu.fcr31 = data; + child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X; break; case DSP_BASE ... DSP_BASE + 5: { dspreg_t *dregs; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 9b36424b03c5..ed5bafb5d637 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp) stack_done: lw t0, TI_FLAGS($28) # syscall tracing enabled? 
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 97a5909a61cf..be6627ead619 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, syscall_trace_entry diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index edcb6594e7b5..cab150789c8d 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, n32_syscall_trace_entry diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 74f485d3c0ef..37605dc8eef7 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp) PTR 4b, bad_stack .previous - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 bnez t0, trace_a_syscall diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 203d8857070d..2c81265bcf46 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -604,7 +604,6 @@ static void emulate_load_store_insn(struct pt_regs *regs, case sdc1_op: die_if_kernel("Unaligned FP access in kernel code", regs); BUG_ON(!used_math()); - BUG_ON(!is_fpu_owner()); lose_fpu(1); /* Save FPU state for the emulator. 
*/ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 21813beec7a5..5495101d32c8 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -12,6 +12,7 @@ #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/linkage.h> +#include <linux/preempt.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> @@ -601,6 +602,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) /* Catch bad driver code */ BUG_ON(size == 0); + preempt_disable(); if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); @@ -621,6 +623,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) R4600_HIT_CACHEOP_WAR_IMPL; blast_dcache_range(addr, addr + size); } + preempt_enable(); bc_wback_inv(addr, size); __sync(); @@ -631,6 +634,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) /* Catch bad driver code */ BUG_ON(size == 0); + preempt_disable(); if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); @@ -655,6 +659,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) R4600_HIT_CACHEOP_WAR_IMPL; blast_inv_dcache_range(addr, addr + size); } + preempt_enable(); bc_inv(addr, size); __sync(); diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index afeef93f81a7..0e17e1352718 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1329,6 +1329,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) } #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT uasm_l_tlb_huge_update(&l, p); + UASM_i_LW(&p, K0, 0, K1); build_huge_update_entries(&p, htlb_info.huge_pte, K1); build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, htlb_info.restore_scratch); diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index d8a455ede5a7..fec8bf97d806 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -853,37 +853,44 @@ UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00) /* ========================================================[ return ] === */ +_resume_userspace: + DISABLE_INTERRUPTS(r3,r4) + l.lwz r4,TI_FLAGS(r10) + l.andi r13,r4,_TIF_WORK_MASK + l.sfeqi r13,0 + l.bf _restore_all + l.nop + _work_pending: - /* - * if (current_thread_info->flags & _TIF_NEED_RESCHED) - * schedule(); - */ - l.lwz r5,TI_FLAGS(r10) - l.andi r3,r5,_TIF_NEED_RESCHED - l.sfnei r3,0 - l.bnf _work_notifysig + l.lwz r5,PT_ORIG_GPR11(r1) + l.sfltsi r5,0 + l.bnf 1f l.nop - l.jal schedule + l.andi r5,r5,0 +1: + l.jal do_work_pending + l.ori r3,r1,0 /* pt_regs */ + + l.sfeqi r11,0 + l.bf _restore_all l.nop - l.j _resume_userspace + l.sfltsi r11,0 + l.bnf 1f l.nop - -/* Handle pending signals and notify-resume requests. * do_notify_resume must be passed the latest pushed pt_regs, not * necessarily the "userspace" ones. Also, pt_regs->syscallno * must be set so that the syscall restart functionality works.
- */ -_work_notifysig: - l.jal do_notify_resume - l.ori r3,r1,0 /* pt_regs */ - -_resume_userspace: - DISABLE_INTERRUPTS(r3,r4) - l.lwz r3,TI_FLAGS(r10) - l.andi r3,r3,_TIF_WORK_MASK - l.sfnei r3,0 - l.bf _work_pending + l.and r11,r11,r0 + l.ori r11,r11,__NR_restart_syscall + l.j _syscall_check_trace_enter l.nop +1: + l.lwz r11,PT_ORIG_GPR11(r1) + /* Restore arg registers */ + l.lwz r3,PT_GPR3(r1) + l.lwz r4,PT_GPR4(r1) + l.lwz r5,PT_GPR5(r1) + l.lwz r6,PT_GPR6(r1) + l.lwz r7,PT_GPR7(r1) + l.j _syscall_check_trace_enter + l.lwz r8,PT_GPR8(r1) _restore_all: RESTORE_ALL diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index ae167f7e081a..c277ec82783d 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -28,24 +28,24 @@ #include <linux/tracehook.h> #include <asm/processor.h> +#include <asm/syscall.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #define DEBUG_SIG 0 struct rt_sigframe { - struct siginfo *pinfo; - void *puc; struct siginfo info; struct ucontext uc; unsigned char retcode[16]; /* trampoline code */ }; -static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) +static int restore_sigcontext(struct pt_regs *regs, + struct sigcontext __user *sc) { - unsigned int err = 0; + int err = 0; - /* Alwys make any pending restarted system call return -EINTR */ + /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* @@ -53,25 +53,21 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc) * (sc is already checked for VERIFY_READ since the sigframe was * checked in sys_sigreturn previously) */ - if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long))) - goto badframe; - if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long))) - goto badframe; - if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long))) - goto badframe; + err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)); + err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)); + err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)); /* make sure the SM-bit is cleared so user-mode cannot fool us */ regs->sr &= ~SPR_SR_SM; + regs->orig_gpr11 = -1; /* Avoid syscall restart checks */ + /* TODO: the other ports use regs->orig_XX to disable syscall checks * after this completes, but we don't use that mechanism. maybe we can * use it now ? */ return err; - -badframe: - return 1; } asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs) @@ -111,21 +107,18 @@ badframe: * Set up a signal frame. */ -static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, - unsigned long mask) +static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; /* copy the regs */ - + /* There should be no need to save callee-saved registers here... + * ...but we save them anyway.
Revisit this + */ err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long)); err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long)); - /* then some other stuff */ - - err |= __put_user(mask, &sc->oldmask); - return err; } @@ -181,24 +174,18 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, int err = 0; frame = get_sigframe(ka, regs, sizeof(*frame)); - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; - err |= __put_user(&frame->info, &frame->pinfo); - err |= __put_user(&frame->uc, &frame->puc); - + /* Create siginfo. */ if (ka->sa.sa_flags & SA_SIGINFO) err |= copy_siginfo_to_user(&frame->info, info); - if (err) - goto give_sigsegv; - /* Clear all the bits of the ucontext we don't use. */ - err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext)); + /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, regs->sp); - err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); + err |= setup_sigcontext(regs, &frame->uc.uc_mcontext); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); @@ -207,9 +194,12 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* trampoline - the desired return ip is the retcode itself */ return_ip = (unsigned long)&frame->retcode; - /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ - err |= __put_user(0xa960, (short *)(frame->retcode + 0)); - err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2)); + /* This is: + l.ori r11,r0,__NR_sigreturn + l.sys 1 + */ + err |= __put_user(0xa960, (short *)(frame->retcode + 0)); + err |= __put_user(__NR_rt_sigreturn, (short *)(frame->retcode + 2)); err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); @@ -262,82 +252,106 @@ handle_signal(unsigned long sig, * mode below. */ -void do_signal(struct pt_regs *regs) +int do_signal(struct pt_regs *regs, int syscall) { siginfo_t info; int signr; struct k_sigaction ka; - - /* - * We want the common case to go fast, which - * is why we may in certain cases get here from - * kernel mode. Just return without doing anything - * if so. - */ - if (!user_mode(regs)) - return; - - signr = get_signal_to_deliver(&info, &ka, regs, NULL); - - /* If we are coming out of a syscall then we need - * to check if the syscall was interrupted and wants to be - * restarted after handling the signal. If so, the original - * syscall number is put back into r11 and the PC rewound to - * point at the l.sys instruction that resulted in the - * original syscall. Syscall results other than the four - * below mean that the syscall executed to completion and no - * restart is necessary. - */ - if (regs->orig_gpr11) { - int restart = 0; - - switch (regs->gpr[11]) { + unsigned long continue_addr = 0; + unsigned long restart_addr = 0; + unsigned long retval = 0; + int restart = 0; + + if (syscall) { + continue_addr = regs->pc; + restart_addr = continue_addr - 4; + retval = regs->gpr[11]; + + /* + * Setup syscall restart here so that a debugger will + * see the already changed PC.
+ */ + switch (retval) { case -ERESTART_RESTARTBLOCK: + restart = -2; + /* Fall through */ case -ERESTARTNOHAND: - /* Restart if there is no signal handler */ - restart = (signr <= 0); - break; case -ERESTARTSYS: - /* Restart if there no signal handler or - * SA_RESTART flag is set */ - restart = (signr <= 0 || (ka.sa.sa_flags & SA_RESTART)); - break; case -ERESTARTNOINTR: - /* Always restart */ - restart = 1; + restart++; + regs->gpr[11] = regs->orig_gpr11; + regs->pc = restart_addr; break; } + } - if (restart) { - if (regs->gpr[11] == -ERESTART_RESTARTBLOCK) - regs->gpr[11] = __NR_restart_syscall; - else - regs->gpr[11] = regs->orig_gpr11; - regs->pc -= 4; - } else { - regs->gpr[11] = -EINTR; + /* + * Get the signal to deliver. When running under ptrace, at this + * point the debugger may change all our registers ... + */ + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + /* + * Depending on the signal settings we may need to revert the + * decision to restart the system call. But skip this if a + * debugger has chosen to restart at a different PC. + */ + if (signr > 0) { + if (unlikely(restart) && regs->pc == restart_addr) { + if (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK + || (retval == -ERESTARTSYS + && !(ka.sa.sa_flags & SA_RESTART))) { + /* No automatic restart */ + regs->gpr[11] = -EINTR; + regs->pc = continue_addr; + } } - } - if (signr <= 0) { - /* no signal to deliver so we just put the saved sigmask - * back */ - restore_saved_sigmask(); - } else { /* signr > 0 */ - /* Whee! Actually deliver the signal. */ handle_signal(signr, &info, &ka, regs); + } else { + /* no handler */ + restore_saved_sigmask(); + /* + * Restore pt_regs PC as syscall restart will be handled by + * kernel without return to userspace + */ + if (unlikely(restart) && regs->pc == restart_addr) { + regs->pc = continue_addr; + return restart; + } } - return; + return 0; } -asmlinkage void do_notify_resume(struct pt_regs *regs) +asmlinkage int +do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { - if (current_thread_info()->flags & _TIF_SIGPENDING) - do_signal(regs); - - if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } + do { + if (likely(thread_flags & _TIF_NEED_RESCHED)) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) + return 0; + local_irq_enable(); + if (thread_flags & _TIF_SIGPENDING) { + int restart = do_signal(regs, syscall); + if (unlikely(restart)) { + /* + * Restart without handlers. + * Deal with it without leaving + * the kernel space. + */ + return restart; + } + syscall = 0; + } else { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } + } + local_irq_disable(); + thread_flags = current_thread_info()->flags; + } while (thread_flags & _TIF_WORK_MASK); + return 0; } diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index d836d945068d..063fcadd1a00 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h @@ -40,17 +40,39 @@ #ifndef __ASSEMBLY__ +#include <asm/barrier.h> /* for smp_rmb() */ + /* * With 64K pages on hash table, we have a special PTE format that * uses a second "half" of the page table to encode sub-page information * in order to deal with 64K made of 4K HW pages. Thus we override the * generic accessors and iterators here */ -#define __real_pte(e,p) ((real_pte_t) { \ - (e), (pte_val(e) & _PAGE_COMBO) ? 
\ - (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) -#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ - (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) +#define __real_pte __real_pte +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) +{ + real_pte_t rpte; + + rpte.pte = pte; + rpte.hidx = 0; + if (pte_val(pte) & _PAGE_COMBO) { + /* + * Make sure we order the hidx load against the _PAGE_COMBO + * check. The store side ordering is done in __hash_page_4K + */ + smp_rmb(); + rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE)); + } + return rpte; +} + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + if ((pte_val(rpte.pte) & _PAGE_COMBO)) + return (rpte.hidx >> (index<<2)) & 0xf; + return (pte_val(rpte.pte) >> 12) & 0xf; +} + #define __rpte_to_pte(r) ((r).pte) #define __rpte_sub_valid(rpte, index) \ (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b7293bba0062..08c6f3185d45 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -586,8 +586,8 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: unmap_cpu_from_node(lcpu); - break; ret = NOTIFY_OK; + break; #endif } return ret; diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 9a432de363b8..bebe64ed5dc3 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -158,7 +158,7 @@ static int pseries_remove_memory(struct device_node *np) static inline int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) { - return -EOPNOTSUPP; + return 0; } static inline int pseries_remove_memory(struct device_node *np) { diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index b95219d2168d..1ff8e97f853a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -883,6 +883,13 @@ void blkcg_drain_queue(struct request_queue *q) if (!q->root_blkg) return; + /* + * @q could be exiting and already have destroyed all blkgs as + * indicated by NULL root_blkg. If so, don't confuse policies. 
+ */ + if (!q->root_blkg) + return; + blk_throtl_drain(q); } diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index e4c9291fc0a3..a63a4cdd2ce8 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -998,5 +998,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc, status = acpi_ut_copy_simple_object(source_desc, *dest_desc); } + /* Delete the allocated object if copy failed */ + + if (ACPI_FAILURE(status)) { + acpi_ut_remove_reference(*dest_desc); + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 4056d3175178..a88894190e41 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1101,9 +1101,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { - cpuidle_pause_and_lock(); /* Protect against cpu-hotplug */ get_online_cpus(); + cpuidle_pause_and_lock(); /* Disable all cpuidle devices */ for_each_online_cpu(cpu) { @@ -1130,8 +1130,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) cpuidle_enable_device(dev); } } - put_online_cpus(); cpuidle_resume_and_unlock(); + put_online_cpus(); } return 0; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index cca761e80d89..091682fb1617 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -769,12 +769,17 @@ static void acpi_device_notify(acpi_handle handle, u32 event, void *data) device->driver->ops.notify(device, event); } -static acpi_status acpi_device_notify_fixed(void *data) +static void acpi_device_notify_fixed(void *data) { struct acpi_device *device = data; /* Fixed hardware devices have no handles */ acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device); +} + +static acpi_status acpi_device_fixed_event(void *data) +{ + acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data); return AE_OK; } @@ -785,12 +790,12 @@ static int acpi_device_install_notify_handler(struct acpi_device *device) if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_notify_fixed, + acpi_device_fixed_event, device); else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) status = acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_notify_fixed, + acpi_device_fixed_event, device); else status = acpi_install_notify_handler(device->handle, @@ -807,10 +812,10 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device) { if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, - acpi_device_notify_fixed); + acpi_device_fixed_event); else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, - acpi_device_notify_fixed); + acpi_device_fixed_event); else acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, acpi_device_notify); diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 7c3b3dcbfbc8..f659a571ad23 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -533,11 +533,10 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type) int tpm_get_timeouts(struct tpm_chip *chip) { struct tpm_cmd_t tpm_cmd; - struct timeout_t *timeout_cap; + unsigned long new_timeout[4]; + unsigned long old_timeout[4]; struct duration_t *duration_cap; ssize_t rc; - u32 timeout; - unsigned int scale = 1; tpm_cmd.header.in = 
tpm_getcap_header; tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP; @@ -571,25 +570,46 @@ int tpm_get_timeouts(struct tpm_chip *chip) != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32)) return -EINVAL; - timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout; - /* Don't overwrite default if value is 0 */ - timeout = be32_to_cpu(timeout_cap->a); - if (timeout && timeout < 1000) { - /* timeouts in msec rather usec */ - scale = 1000; - chip->vendor.timeout_adjusted = true; + old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a); + old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b); + old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c); + old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d); + memcpy(new_timeout, old_timeout, sizeof(new_timeout)); + + /* + * Provide ability for vendor overrides of timeout values in case + * of misreporting. + */ + if (chip->vendor.update_timeouts != NULL) + chip->vendor.timeout_adjusted = + chip->vendor.update_timeouts(chip, new_timeout); + + if (!chip->vendor.timeout_adjusted) { + /* Don't overwrite default if value is 0 */ + if (new_timeout[0] != 0 && new_timeout[0] < 1000) { + int i; + + /* timeouts in msec rather usec */ + for (i = 0; i != ARRAY_SIZE(new_timeout); i++) + new_timeout[i] *= 1000; + chip->vendor.timeout_adjusted = true; + } } - if (timeout) - chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale); - timeout = be32_to_cpu(timeout_cap->b); - if (timeout) - chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale); - timeout = be32_to_cpu(timeout_cap->c); - if (timeout) - chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale); - timeout = be32_to_cpu(timeout_cap->d); - if (timeout) - chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale); + + /* Report adjusted timeouts */ + if (chip->vendor.timeout_adjusted) { + dev_info(chip->dev, + HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", + old_timeout[0], new_timeout[0], + old_timeout[1], new_timeout[1], + old_timeout[2], new_timeout[2], + old_timeout[3], new_timeout[3]); + } + + chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]); + chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]); + chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]); + chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]); duration: tpm_cmd.header.in = tpm_getcap_header; @@ -1423,13 +1443,13 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) int err, total = 0, retries = 5; u8 *dest = out; + if (!out || !num_bytes || max > TPM_MAX_RNG_DATA) + return -EINVAL; + chip = tpm_chip_find_get(chip_num); if (chip == NULL) return -ENODEV; - if (!out || !num_bytes || max > TPM_MAX_RNG_DATA) - return -EINVAL; - do { tpm_cmd.header.in = tpm_getrandom_header; tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes); @@ -1448,6 +1468,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) num_bytes -= recd; } while (retries-- && total < max); + tpm_chip_put(chip); return total ? 
total : -EIO; } EXPORT_SYMBOL_GPL(tpm_get_random); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 0770d1d79366..deffda7678a0 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -95,6 +95,9 @@ struct tpm_vendor_specific { int (*send) (struct tpm_chip *, u8 *, size_t); void (*cancel) (struct tpm_chip *); u8 (*status) (struct tpm_chip *); + bool (*update_timeouts)(struct tpm_chip *chip, + unsigned long *timeout_cap); + void (*release) (struct device *); struct miscdevice miscdev; struct attribute_group *attr_group; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 8a41b6be23a0..72f21377fa02 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -373,6 +373,36 @@ out_err: return rc; } +struct tis_vendor_timeout_override { + u32 did_vid; + unsigned long timeout_us[4]; +}; + +static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { + /* Atmel 3204 */ + { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000), + (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, +}; + +static bool tpm_tis_update_timeouts(struct tpm_chip *chip, + unsigned long *timeout_cap) +{ + int i; + u32 did_vid; + + did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); + + for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { + if (vendor_timeout_overrides[i].did_vid != did_vid) + continue; + memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, + sizeof(vendor_timeout_overrides[i].timeout_us)); + return true; + } + + return false; +} + /* * Early probing for iTPM with STS_DATA_EXPECT flaw. * Try sending command without itpm flag set and if that @@ -475,6 +505,7 @@ static struct tpm_vendor_specific tpm_tis = { .recv = tpm_tis_recv, .send = tpm_tis_send, .cancel = tpm_tis_ready, + .update_timeouts = tpm_tis_update_timeouts, .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID, .req_canceled = tpm_tis_req_canceled, diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 391c67b182d9..7dbc319e1cf5 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove); */ static void efivar_entry_list_del_unlock(struct efivar_entry *entry) { - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); list_del(&entry->list); spin_unlock_irq(&__efivars->lock); @@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry) const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); status = ops->set_variable(entry->var.VariableName, &entry->var.VendorGuid, @@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, int strsize1, strsize2; bool found = false; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); list_for_each_entry_safe(entry, n, head, list) { strsize1 = ucs2_strsize(name, 1024); @@ -731,7 +731,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); status = ops->get_variable(entry->var.VariableName, &entry->var.VendorGuid, diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index c47c2034ca71..4293e89bbbdd 100644 --- 
a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -46,6 +46,7 @@ #include <linux/completion.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/sysctl.h> #include <rdma/iw_cm.h> #include <rdma/ib_addr.h> @@ -65,6 +66,20 @@ struct iwcm_work { struct list_head free_list; }; +static unsigned int default_backlog = 256; + +static struct ctl_table_header *iwcm_ctl_table_hdr; +static struct ctl_table iwcm_ctl_table[] = { + { + .procname = "default_backlog", + .data = &default_backlog, + .maxlen = sizeof(default_backlog), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { } +}; + /* * The following services provide a mechanism for pre-allocating iwcm_work * elements. The design pre-allocates them based on the cm_id type: @@ -419,6 +434,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + if (!backlog) + backlog = default_backlog; + ret = alloc_work_entries(cm_id_priv, backlog); if (ret) return ret; @@ -1024,11 +1042,20 @@ static int __init iw_cm_init(void) if (!iwcm_wq) return -ENOMEM; + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm", + iwcm_ctl_table); + if (!iwcm_ctl_table_hdr) { + pr_err("iw_cm: couldn't register sysctl paths\n"); + destroy_workqueue(iwcm_wq); + return -ENOMEM; + } + return 0; } static void __exit iw_cm_cleanup(void) { + unregister_net_sysctl_table(iwcm_ctl_table_hdr); destroy_workqueue(iwcm_wq); } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 1954daac0b59..35dd5ff662f1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -93,6 +93,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr); static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); static struct scsi_transport_template *ib_srp_transport_template; +static struct workqueue_struct *srp_remove_wq; static struct ib_client srp_client = { .name = "srp", @@ -456,7 +457,7 @@ static bool srp_queue_remove_work(struct srp_target_port *target) spin_unlock_irq(&target->lock); if (changed) - queue_work(system_long_wq, &target->remove_work); + queue_work(srp_remove_wq, &target->remove_work); return changed; } @@ -2530,9 +2531,10 @@ static void srp_remove_one(struct ib_device *device) spin_unlock(&host->target_lock); /* - * Wait for target port removal tasks. + * Wait for tl_err and target port removal tasks. 
*/ flush_workqueue(system_long_wq); + flush_workqueue(srp_remove_wq); kfree(host); } @@ -2577,16 +2579,22 @@ static int __init srp_init_module(void) indirect_sg_entries = cmd_sg_entries; } + srp_remove_wq = create_workqueue("srp_remove"); + if (IS_ERR(srp_remove_wq)) { + ret = PTR_ERR(srp_remove_wq); + goto out; + } + + ret = -ENOMEM; ib_srp_transport_template = srp_attach_transport(&ib_srp_transport_functions); if (!ib_srp_transport_template) - return -ENOMEM; + goto destroy_wq; ret = class_register(&srp_class); if (ret) { pr_err("couldn't register class infiniband_srp\n"); - srp_release_transport(ib_srp_transport_template); - return ret; + goto release_tr; } ib_sa_register_client(&srp_sa_client); @@ -2594,13 +2602,22 @@ static int __init srp_init_module(void) ret = ib_register_client(&srp_client); if (ret) { pr_err("couldn't register IB client\n"); - srp_release_transport(ib_srp_transport_template); - ib_sa_unregister_client(&srp_sa_client); - class_unregister(&srp_class); - return ret; + goto unreg_sa; } - return 0; +out: + return ret; + +unreg_sa: + ib_sa_unregister_client(&srp_sa_client); + class_unregister(&srp_class); + +release_tr: + srp_release_transport(ib_srp_transport_template); + +destroy_wq: + destroy_workqueue(srp_remove_wq); + goto out; } static void __exit srp_cleanup_module(void) @@ -2609,6 +2626,7 @@ static void __exit srp_cleanup_module(void) ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); srp_release_transport(ib_srp_transport_template); + destroy_workqueue(srp_remove_wq); } module_init(srp_init_module); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 6f849cbcac6f..dfb401cba733 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3187,14 +3187,16 @@ free_domains: static void cleanup_domain(struct protection_domain *domain) { - struct iommu_dev_data *dev_data, *next; + struct iommu_dev_data *entry; unsigned long flags; write_lock_irqsave(&amd_iommu_devtable_lock, flags); - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { - __detach_device(dev_data); - atomic_set(&dev_data->bind, 0); + while (!list_empty(&domain->dev_list)) { + entry = list_first_entry(&domain->dev_list, + struct iommu_dev_data, list); + __detach_device(entry); + atomic_set(&entry->bind, 0); } write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 75771b2077c0..a176791509f6 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1406,12 +1406,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) mddev->degraded++; set_bit(Faulty, &rdev->flags); spin_unlock_irqrestore(&conf->device_lock, flags); - /* - * if recovery is running, make sure it aborts. - */ - set_bit(MD_RECOVERY_INTR, &mddev->recovery); } else set_bit(Faulty, &rdev->flags); + /* + * if recovery is running, make sure it aborts. + */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_CHANGE_DEVS, &mddev->flags); printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n" diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d2f8cd332b4a..a1ea2a753912 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1681,11 +1681,11 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded++; spin_unlock_irqrestore(&conf->device_lock, flags); - /* - * if recovery is running, make sure it aborts. 
- */ - set_bit(MD_RECOVERY_INTR, &mddev->recovery); } + /* + * If recovery is running, make sure it aborts. + */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags); @@ -2948,6 +2948,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, */ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { end_reshape(conf); + close_sync(conf); return 0; } @@ -4398,7 +4399,7 @@ read_more: read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_sync_read; read_bio->bi_rw = READ; - read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); + read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); read_bio->bi_flags |= 1 << BIO_UPTODATE; read_bio->bi_vcnt = 0; read_bio->bi_size = 0; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 5e3c25d4562c..774f81423d78 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3561,6 +3561,8 @@ static void handle_stripe(struct stripe_head *sh) set_bit(R5_Wantwrite, &dev->flags); if (prexor) continue; + if (s.failed > 1) + continue; if (!test_bit(R5_Insync, &dev->flags) || ((i == sh->pd_idx || i == sh->qd_idx) && s.failed == 0)) diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index 79715f9feb0a..fdb5840f034b 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -106,8 +106,6 @@ static long media_device_enum_entities(struct media_device *mdev, if (ent->name) { strncpy(u_ent.name, ent->name, sizeof(u_ent.name)); u_ent.name[sizeof(u_ent.name) - 1] = '\0'; - } else { - memset(u_ent.name, 0, sizeof(u_ent.name)); } u_ent.type = ent->type; u_ent.revision = ent->revision; diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index 2018befabb5a..e71decbfd0af 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -93,7 +93,7 @@ struct xc4000_priv { struct firmware_description *firm; int firm_size; u32 if_khz; - u32 freq_hz; + u32 freq_hz, freq_offset; u32 bandwidth; u8 video_standard; u8 rf_mode; @@ -1157,14 +1157,14 @@ static int xc4000_set_params(struct dvb_frontend *fe) case SYS_ATSC: dprintk(1, "%s() VSB modulation\n", __func__); priv->rf_mode = XC_RF_MODE_AIR; - priv->freq_hz = c->frequency - 1750000; + priv->freq_offset = 1750000; priv->video_standard = XC4000_DTV6; type = DTV6; break; case SYS_DVBC_ANNEX_B: dprintk(1, "%s() QAM modulation\n", __func__); priv->rf_mode = XC_RF_MODE_CABLE; - priv->freq_hz = c->frequency - 1750000; + priv->freq_offset = 1750000; priv->video_standard = XC4000_DTV6; type = DTV6; break; @@ -1173,23 +1173,23 @@ static int xc4000_set_params(struct dvb_frontend *fe) dprintk(1, "%s() OFDM\n", __func__); if (bw == 0) { if (c->frequency < 400000000) { - priv->freq_hz = c->frequency - 2250000; + priv->freq_offset = 2250000; } else { - priv->freq_hz = c->frequency - 2750000; + priv->freq_offset = 2750000; } priv->video_standard = XC4000_DTV7_8; type = DTV78; } else if (bw <= 6000000) { priv->video_standard = XC4000_DTV6; - priv->freq_hz = c->frequency - 1750000; + priv->freq_offset = 1750000; type = DTV6; } else if (bw <= 7000000) { priv->video_standard = XC4000_DTV7; - priv->freq_hz = c->frequency - 2250000; + priv->freq_offset = 2250000; type = DTV7; } else { priv->video_standard = XC4000_DTV8; - priv->freq_hz = c->frequency - 2750000; + priv->freq_offset = 2750000; type = DTV8; } priv->rf_mode = XC_RF_MODE_AIR; @@ -1200,6 +1200,8 @@ static int xc4000_set_params(struct dvb_frontend *fe) goto fail; } + priv->freq_hz = c->frequency - 
priv->freq_offset; + dprintk(1, "%s() frequency=%d (compensated)\n", __func__, priv->freq_hz); @@ -1520,7 +1522,7 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc4000_priv *priv = fe->tuner_priv; - *freq = priv->freq_hz; + *freq = priv->freq_hz + priv->freq_offset; if (debug) { mutex_lock(&priv->lock); diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index 5cd09a681b6a..b2d9e9cb97f7 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -55,7 +55,7 @@ struct xc5000_priv { u32 if_khz; u16 xtal_khz; - u32 freq_hz; + u32 freq_hz, freq_offset; u32 bandwidth; u8 video_standard; u8 rf_mode; @@ -755,13 +755,13 @@ static int xc5000_set_params(struct dvb_frontend *fe) case SYS_ATSC: dprintk(1, "%s() VSB modulation\n", __func__); priv->rf_mode = XC_RF_MODE_AIR; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; priv->video_standard = DTV6; break; case SYS_DVBC_ANNEX_B: dprintk(1, "%s() QAM modulation\n", __func__); priv->rf_mode = XC_RF_MODE_CABLE; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; priv->video_standard = DTV6; break; case SYS_ISDBT: @@ -776,15 +776,15 @@ static int xc5000_set_params(struct dvb_frontend *fe) switch (bw) { case 6000000: priv->video_standard = DTV6; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; break; case 7000000: priv->video_standard = DTV7; - priv->freq_hz = freq - 2250000; + priv->freq_offset = 2250000; break; case 8000000: priv->video_standard = DTV8; - priv->freq_hz = freq - 2750000; + priv->freq_offset = 2750000; break; default: printk(KERN_ERR "xc5000 bandwidth not set!\n"); @@ -798,15 +798,15 @@ static int xc5000_set_params(struct dvb_frontend *fe) priv->rf_mode = XC_RF_MODE_CABLE; if (bw <= 6000000) { priv->video_standard = DTV6; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; b = 6; } else if (bw <= 7000000) { priv->video_standard = DTV7; - priv->freq_hz = freq - 2250000; + priv->freq_offset = 2250000; b = 7; } else { priv->video_standard = DTV7_8; - priv->freq_hz = freq - 2750000; + priv->freq_offset = 2750000; b = 8; } dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__, @@ -817,6 +817,8 @@ static int xc5000_set_params(struct dvb_frontend *fe) return -EINVAL; } + priv->freq_hz = freq - priv->freq_offset; + dprintk(1, "%s() frequency=%d (compensated to %d)\n", __func__, freq, priv->freq_hz); @@ -1067,7 +1069,7 @@ static int xc5000_get_frequency(struct dvb_frontend *fe, u32 *freq) { struct xc5000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); - *freq = priv->freq_hz; + *freq = priv->freq_hz + priv->freq_offset; return 0; } diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index 75ac9947cdac..98e1b937b500 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -788,11 +788,27 @@ static int au0828_i2s_init(struct au0828_dev *dev) /* * Auvitek au0828 analog stream enable - * Please set interface0 to AS5 before enable the stream */ static int au0828_analog_stream_enable(struct au0828_dev *d) { + struct usb_interface *iface; + int ret; + dprintk(1, "au0828_analog_stream_enable called\n"); + + iface = usb_ifnum_to_if(d->usbdev, 0); + if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) { + dprintk(1, "Changing intf#0 to alt 5\n"); + /* set au0828 interface0 to AS5 here again */ + ret = usb_set_interface(d->usbdev, 0, 5); + if (ret < 0) { + printk(KERN_INFO "Au0828 can't set alt setting to 5!\n"); + 
return -EBUSY; + } + } + + /* FIXME: size should be calculated using d->width, d->height */ + au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00); au0828_writereg(d, 0x106, 0x00); /* set x position */ @@ -1003,15 +1019,6 @@ static int au0828_v4l2_open(struct file *filp) return -ERESTARTSYS; } if (dev->users == 0) { - /* set au0828 interface0 to AS5 here again */ - ret = usb_set_interface(dev->usbdev, 0, 5); - if (ret < 0) { - mutex_unlock(&dev->lock); - printk(KERN_INFO "Au0828 can't set alternate to 5!\n"); - kfree(fh); - return -EBUSY; - } - au0828_analog_stream_enable(dev); au0828_analog_stream_reset(dev); @@ -1253,13 +1260,6 @@ static int au0828_set_format(struct au0828_dev *dev, unsigned int cmd, } } - /* set au0828 interface0 to AS5 here again */ - ret = usb_set_interface(dev->usbdev, 0, 5); - if (ret < 0) { - printk(KERN_INFO "Au0828 can't set alt setting to 5!\n"); - return -EBUSY; - } - au0828_analog_stream_enable(dev); return 0; diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 759fae3ca7fb..a36f3f282ae7 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap, for (i = 0; i < omap->nports; i++) { if (is_ehci_phy_mode(pdata->port_mode[i])) { - reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; break; } } diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index 19d637266fcd..71e4f6ccae2f 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c @@ -1075,7 +1075,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) return; } - ftl_freepart(partition); kfree(partition); } diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 8c4eb287bbdb..e9b1797cdb5f 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -948,7 +948,7 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u32 val; val = readl(info->reg.gpmc_ecc_config); - if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs) + if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs) return -EINVAL; /* read ecc result */ diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c index 81d8681c3195..b1b35f38d11d 100644 --- a/drivers/regulator/arizona-ldo1.c +++ b/drivers/regulator/arizona-ldo1.c @@ -141,8 +141,6 @@ static struct regulator_ops arizona_ldo1_ops = { .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, - .get_bypass = regulator_get_bypass_regmap, - .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_desc arizona_ldo1 = { diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 23a90e7b7107..a119421cb324 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -72,7 +72,7 @@ struct bfa_sge_s { } while (0) #define bfa_swap_words(_x) ( \ - ((_x) << 32) | ((_x) >> 32)) + ((u64)(_x) << 32) | ((u64)(_x) >> 32)) #ifdef __BIG_ENDIAN #define bfa_sge_to_be(_x) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 91b76cea3e3c..87ca72d36d5b 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -33,6 +33,7 @@ #include <linux/device.h> #include <linux/hyperv.h> #include <linux/mempool.h> +#include <linux/blkdev.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_host.h> @@ -803,6 +804,13 @@ static void storvsc_handle_error(struct 
vmscsi_request *vm_srb, case ATA_12: set_host_byte(scmnd, DID_PASSTHROUGH); break; + /* + * On Some Windows hosts TEST_UNIT_READY command can return + * SRB_STATUS_ERROR, let the upper level code deal with it + * based on the sense information. + */ + case TEST_UNIT_READY: + break; default: set_host_byte(scmnd, DID_TARGET_FAILURE); } @@ -1285,6 +1293,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) return SUCCESS; } +/* + * The host guarantees to respond to each command, although I/O latencies might + * be unbounded on Azure. Reset the timer unconditionally to give the host a + * chance to perform EH. + */ +static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) +{ + return BLK_EH_RESET_TIMER; +} + static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) { bool allowed = true; @@ -1444,6 +1462,7 @@ static struct scsi_host_template scsi_driver = { .bios_param = storvsc_get_chs, .queuecommand = storvsc_queuecommand, .eh_host_reset_handler = storvsc_host_reset_handler, + .eh_timed_out = storvsc_eh_timed_out, .slave_alloc = storvsc_device_alloc, .slave_destroy = storvsc_device_destroy, .slave_configure = storvsc_device_configure, diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 86d2158946bb..798729eb6689 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -136,6 +136,7 @@ struct omap2_mcspi_cs { void __iomem *base; unsigned long phys; int word_len; + u16 mode; struct list_head node; /* Context save and restore shadow register */ u32 chconf0; @@ -801,6 +802,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, mcspi_write_chconf0(spi, l); + cs->mode = spi->mode; + dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", OMAP2_MCSPI_MAX_FREQ >> div, (spi->mode & SPI_CPHA) ? "trailing" : "leading", @@ -871,6 +874,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) return -ENOMEM; cs->base = mcspi->base + spi->chip_select * 0x14; cs->phys = mcspi->phys + spi->chip_select * 0x14; + cs->mode = 0; cs->chconf0 = 0; spi->controller_state = cs; /* Link this to context save list */ @@ -1043,6 +1047,16 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m) mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); } + /* + * The slave driver could have changed spi->mode in which case + * it will be different from cs->mode (the current hardware setup). + * If so, set par_override (even though its not a parity issue) so + * omap2_mcspi_setup_transfer will be called to configure the hardware + * with the correct mode on the first iteration of the loop below. 
+ */ + if (spi->mode != cs->mode) + par_override = 1; + omap2_mcspi_set_enable(spi, 0); m->status = status; diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 66a5f82cf138..183aa80c9017 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -403,8 +403,6 @@ static int orion_spi_probe(struct platform_device *pdev) struct resource *r; unsigned long tclk_hz; int status = 0; - const u32 *iprop; - int size; master = spi_alloc_master(&pdev->dev, sizeof *spi); if (master == NULL) { @@ -415,10 +413,10 @@ static int orion_spi_probe(struct platform_device *pdev) if (pdev->id != -1) master->bus_num = pdev->id; if (pdev->dev.of_node) { - iprop = of_get_property(pdev->dev.of_node, "cell-index", - &size); - if (iprop && size == sizeof(*iprop)) - master->bus_num = *iprop; + u32 cell_index; + if (!of_property_read_u32(pdev->dev.of_node, "cell-index", + &cell_index)) + master->bus_num = cell_index; } /* we support only mode 0, and no options */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index e2c2d96491fa..52480240168e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -74,11 +74,6 @@ #define SERVER_NAME_LENGTH 40 #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1) -/* used to define string lengths for reversing unicode strings */ -/* (256+1)*2 = 514 */ -/* (max path length + 1 for null) * 2 for unicode */ -#define MAX_NAME 514 - /* SMB echo "timeout" -- FIXME: tunable? */ #define SMB_ECHO_INTERVAL (60 * HZ) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8b0c656f2ab2..97b03895ac8c 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2809,7 +2809,7 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server, total_read += result; } - return total_read > 0 ? total_read : result; + return total_read > 0 && result != -EAGAIN ? total_read : result; } static ssize_t @@ -3232,7 +3232,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, total_read += result; } - return total_read > 0 ? total_read : result; + return total_read > 0 && result != -EAGAIN ? 
total_read : result; } static int cifs_readpages(struct file *file, struct address_space *mapping, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 9d463501348f..c9bce9b43855 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1647,6 +1647,12 @@ unlink_target: target_dentry, to_name); } + /* force revalidate to go get info when needed */ + CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; + + source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime = + target_dir->i_mtime = current_fs_time(source_dir->i_sb); + cifs_rename_exit: kfree(info_buf_source); kfree(from_name); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 036279c064ff..87d125f682cd 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -585,8 +585,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) { cfile->invalidHandle = true; spin_unlock(&cifs_file_list_lock); - if (server->ops->close) - server->ops->close(xid, tcon, &cfile->fid); + if (server->ops->close_dir) + server->ops->close_dir(xid, tcon, &cfile->fid); } else spin_unlock(&cifs_file_list_lock); if (cfile->srch_inf.ntwrk_buf_start) { diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 5da1b55a2258..d801f63cddd0 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c @@ -73,7 +73,7 @@ smb2_open_file(const unsigned int xid, struct cifs_tcon *tcon, const char *path, goto out; } - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2, + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2, GFP_KERNEL); if (smb2_data == NULL) { rc = -ENOMEM; diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index fff6dfba6204..6d535797ec76 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -123,7 +123,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, *adjust_tz = false; - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2, + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2, GFP_KERNEL); if (smb2_data == NULL) return -ENOMEM; diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c index 7c2f45c06fc2..824696fb24db 100644 --- a/fs/cifs/smb2maperror.c +++ b/fs/cifs/smb2maperror.c @@ -605,7 +605,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"}, {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"}, {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"}, - {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"}, + {STATUS_CANNOT_DELETE, -EACCES, "STATUS_CANNOT_DELETE"}, {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"}, {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"}, {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"}, diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index e2756bb40b4d..fe7ac989c6c4 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -243,7 +243,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, int rc; struct smb2_file_all_info *smb2_data; - smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2, + smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2, GFP_KERNEL); if (smb2_data == NULL) return -ENOMEM; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index c7a6fd87bb6e..e37790841446 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -809,7 +809,8 @@ tcon_exit: tcon_error_exit: if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) { cifs_dbg(VFS, "BAD_NETWORK_NAME: 
%s\n", tree); - tcon->bad_network_name = true; + if (tcon) + tcon->bad_network_name = true; } goto tcon_exit; } @@ -1203,7 +1204,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, { return query_info(xid, tcon, persistent_fid, volatile_fid, FILE_ALL_INFORMATION, - sizeof(struct smb2_file_all_info) + MAX_NAME * 2, + sizeof(struct smb2_file_all_info) + PATH_MAX * 2, sizeof(struct smb2_file_all_info), data); } diff --git a/fs/dcache.c b/fs/dcache.c index 9a59653d3449..25c0a1b5f6c0 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -96,8 +96,6 @@ static struct kmem_cache *dentry_cache __read_mostly; * This hash-function tries to avoid losing too many bits of hash * information, yet avoid using a prime hash-size or similar. */ -#define D_HASHBITS d_hash_shift -#define D_HASHMASK d_hash_mask static unsigned int d_hash_mask __read_mostly; static unsigned int d_hash_shift __read_mostly; @@ -108,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent, unsigned int hash) { hash += (unsigned long) parent / L1_CACHE_BYTES; - hash = hash + (hash >> D_HASHBITS); - return dentry_hashtable + (hash & D_HASHMASK); + return dentry_hashtable + hash_32(hash, d_hash_shift); } /* Statistics gathering. */ diff --git a/fs/namei.c b/fs/namei.c index 6ac16a37ded2..f7c4393f8535 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -34,6 +34,7 @@ #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <linux/posix_acl.h> +#include <linux/hash.h> #include <asm/uaccess.h> #include "internal.h" @@ -1647,8 +1648,7 @@ static inline int can_lookup(struct inode *inode) static inline unsigned int fold_hash(unsigned long hash) { - hash += hash >> (8*sizeof(int)); - return hash; + return hash_64(hash, 32); } #else /* 32-bit case */ diff --git a/fs/namespace.c b/fs/namespace.c index a45ba4f267fe..00409add4d96 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -828,8 +828,21 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; /* Don't allow unprivileged users to change mount flags */ - if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) - mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; + if (flag & CL_UNPRIVILEGED) { + mnt->mnt.mnt_flags |= MNT_LOCK_ATIME; + + if (mnt->mnt.mnt_flags & MNT_READONLY) + mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; + + if (mnt->mnt.mnt_flags & MNT_NODEV) + mnt->mnt.mnt_flags |= MNT_LOCK_NODEV; + + if (mnt->mnt.mnt_flags & MNT_NOSUID) + mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID; + + if (mnt->mnt.mnt_flags & MNT_NOEXEC) + mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC; + } atomic_inc(&sb->s_active); mnt->mnt.mnt_sb = sb; @@ -1764,9 +1777,6 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags) if (readonly_request == __mnt_is_readonly(mnt)) return 0; - if (mnt->mnt_flags & MNT_LOCK_READONLY) - return -EPERM; - if (readonly_request) error = mnt_make_readonly(real_mount(mnt)); else @@ -1792,6 +1802,33 @@ static int do_remount(struct path *path, int flags, int mnt_flags, if (path->dentry != path->mnt->mnt_root) return -EINVAL; + /* Don't allow changing of locked mnt flags. + * + * No locks need to be held here while testing the various + * MNT_LOCK flags because those flags can never be cleared + * once they are set. 
+ */ + if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) && + !(mnt_flags & MNT_READONLY)) { + return -EPERM; + } + if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) && + !(mnt_flags & MNT_NODEV)) { + return -EPERM; + } + if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) && + !(mnt_flags & MNT_NOSUID)) { + return -EPERM; + } + if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) && + !(mnt_flags & MNT_NOEXEC)) { + return -EPERM; + } + if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) && + ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) { + return -EPERM; + } + err = security_sb_remount(sb, data); if (err) return err; @@ -1805,7 +1842,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags, err = do_remount_sb(sb, flags, data, 0); if (!err) { br_write_lock(&vfsmount_lock); - mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; + mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK; mnt->mnt.mnt_flags = mnt_flags; br_write_unlock(&vfsmount_lock); } @@ -1991,7 +2028,7 @@ static int do_new_mount(struct path *path, const char *fstype, int flags, */ if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) { flags |= MS_NODEV; - mnt_flags |= MNT_NODEV; + mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; } } @@ -2309,6 +2346,14 @@ long do_mount(const char *dev_name, const char *dir_name, if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; + /* The default atime for remount is preservation */ + if ((flags & MS_REMOUNT) && + ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | + MS_STRICTATIME)) == 0)) { + mnt_flags &= ~MNT_ATIME_MASK; + mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; + } + flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); diff --git a/fs/proc/array.c b/fs/proc/array.c index cbd0f1b324b9..09f0d9c374a3 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -304,15 +304,11 @@ static void render_cap_t(struct seq_file *m, const char *header, seq_puts(m, header); CAP_FOR_EACH_U32(__capi) { seq_printf(m, "%08x", - a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]); + a->cap[CAP_LAST_U32 - __capi]); } seq_putc(m, '\n'); } -/* Remove non-existent capabilities */ -#define NORM_CAPS(v) (v.cap[CAP_TO_INDEX(CAP_LAST_CAP)] &= \ - CAP_TO_MASK(CAP_LAST_CAP + 1) - 1) - static inline void task_cap(struct seq_file *m, struct task_struct *p) { const struct cred *cred; @@ -326,11 +322,6 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p) cap_bset = cred->cap_bset; rcu_read_unlock(); - NORM_CAPS(cap_inheritable); - NORM_CAPS(cap_permitted); - NORM_CAPS(cap_effective); - NORM_CAPS(cap_bset); - render_cap_t(m, "CapInh:\t", &cap_inheritable); render_cap_t(m, "CapPrm:\t", &cap_permitted); render_cap_t(m, "CapEff:\t", &cap_effective); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 41a695048be7..cfbb4c1b2f17 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -1661,11 +1661,72 @@ xfs_vm_readpages( return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); } +/* + * This is basically a copy of __set_page_dirty_buffers() with one + * small tweak: buffers beyond EOF do not get marked dirty. If we mark them + * dirty, we'll never be able to clean them because we don't write buffers + * beyond EOF, and that means we can't invalidate pages that span EOF + * that have been marked dirty. Further, the dirty state can leak into + * the file interior if the file is extended, resulting in all sorts of + * bad things happening as the state does not match the underlying data. 
+ * + * XXX: this really indicates that bufferheads in XFS need to die. Warts like + * this only exist because of bufferheads and how the generic code manages them. + */ +STATIC int +xfs_vm_set_page_dirty( + struct page *page) +{ + struct address_space *mapping = page->mapping; + struct inode *inode = mapping->host; + loff_t end_offset; + loff_t offset; + int newly_dirty; + + if (unlikely(!mapping)) + return !TestSetPageDirty(page); + + end_offset = i_size_read(inode); + offset = page_offset(page); + + spin_lock(&mapping->private_lock); + if (page_has_buffers(page)) { + struct buffer_head *head = page_buffers(page); + struct buffer_head *bh = head; + + do { + if (offset < end_offset) + set_buffer_dirty(bh); + bh = bh->b_this_page; + offset += 1 << inode->i_blkbits; + } while (bh != head); + } + newly_dirty = !TestSetPageDirty(page); + spin_unlock(&mapping->private_lock); + + if (newly_dirty) { + /* sigh - __set_page_dirty() is static, so copy it here, too */ + unsigned long flags; + + spin_lock_irqsave(&mapping->tree_lock, flags); + if (page->mapping) { /* Race with truncate? */ + WARN_ON_ONCE(!PageUptodate(page)); + account_page_dirtied(page, mapping); + radix_tree_tag_set(&mapping->page_tree, + page_index(page), PAGECACHE_TAG_DIRTY); + } + spin_unlock_irqrestore(&mapping->tree_lock, flags); + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + } + return newly_dirty; +} + const struct address_space_operations xfs_address_space_operations = { .readpage = xfs_vm_readpage, .readpages = xfs_vm_readpages, .writepage = xfs_vm_writepage, .writepages = xfs_vm_writepages, + .set_page_dirty = xfs_vm_set_page_dirty, .releasepage = xfs_vm_releasepage, .invalidatepage = xfs_vm_invalidatepage, .write_begin = xfs_vm_write_begin, diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 044e97a33c8d..bac3e1635b7d 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -1104,7 +1104,8 @@ xfs_qm_dqflush( * Get the buffer containing the on-disk dquot */ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, - mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL); + mp->m_quotainfo->qi_dqchunklen, 0, &bp, + &xfs_dquot_buf_ops); if (error) goto out_unlock; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index a5f2042aec8b..9f457fedbcfc 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -298,7 +298,16 @@ xfs_file_aio_read( xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); return ret; } - truncate_pagecache_range(VFS_I(ip), pos, -1); + + /* + * Invalidate whole pages. This can return an error if + * we fail to invalidate a page, but this should never + * happen on XFS. Warn if it does fail. + */ + ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, + pos >> PAGE_CACHE_SHIFT, -1); + WARN_ON_ONCE(ret); + ret = 0; } xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); } @@ -677,7 +686,15 @@ xfs_file_dio_aio_write( pos, -1); if (ret) goto out; - truncate_pagecache_range(VFS_I(ip), pos, -1); + /* + * Invalidate whole pages. This can return an error if + * we fail to invalidate a page, but this should never + * happen on XFS. Warn if it does fail. + */ + ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, + pos >> PAGE_CACHE_SHIFT, -1); + WARN_ON_ONCE(ret); + ret = 0; } /* diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index b75c9bb6e71e..29d1ca567ed3 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -935,6 +935,12 @@ xfs_qm_dqiter_bufs( if (error) break; + /* + * A corrupt buffer might not have a verifier attached, so + * make sure we have the correct one attached before writeback + * occurs. 
+ */ + bp->b_ops = &xfs_dquot_buf_ops; xfs_qm_reset_dqcounts(mp, bp, firstid, type); xfs_buf_delwri_queue(bp, buffer_list); xfs_buf_relse(bp); @@ -1018,7 +1024,7 @@ xfs_qm_dqiterate( xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, rablkno), mp->m_quotainfo->qi_dqchunklen, - NULL); + &xfs_dquot_buf_ops); rablkno++; } } diff --git a/include/linux/capability.h b/include/linux/capability.h index 15f90929fb51..9b4378af414c 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set; # error Fix up hand-coded capability macro initializers #else /* HAND-CODED capability initializers */ +#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1) +#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1) + # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) -# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) +# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }}) # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ CAP_FS_MASK_B1 } }) diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 7c1420bb1dce..6ade97de7a85 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -157,7 +157,7 @@ struct ceph_msg { bool front_is_vmalloc; bool more_to_follow; bool needs_out_seq; - int front_max; + int front_alloc_len; unsigned long ack_stamp; /* tx: when we were acked */ struct ceph_msgpool *pool; diff --git a/include/linux/mount.h b/include/linux/mount.h index 73005f9957ea..8eeb8f6ab110 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -42,11 +42,18 @@ struct mnt_namespace; * flag, consider how it interacts with shared mounts. */ #define MNT_SHARED_MASK (MNT_UNBINDABLE) -#define MNT_PROPAGATION_MASK (MNT_SHARED | MNT_UNBINDABLE) +#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \ + | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \ + | MNT_READONLY) +#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) #define MNT_INTERNAL 0x4000 +#define MNT_LOCK_ATIME 0x040000 +#define MNT_LOCK_NOEXEC 0x080000 +#define MNT_LOCK_NOSUID 0x100000 +#define MNT_LOCK_NODEV 0x200000 #define MNT_LOCK_READONLY 0x400000 struct vfsmount { diff --git a/kernel/audit.c b/kernel/audit.c index a6c632757e57..4dd7529b0845 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1412,7 +1412,7 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap) audit_log_format(ab, " %s=", prefix); CAP_FOR_EACH_U32(i) { audit_log_format(ab, "%08x", - cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]); + cap->cap[CAP_LAST_U32 - i]); } } diff --git a/kernel/capability.c b/kernel/capability.c index d52eecc0942b..1339806a8731 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -268,6 +268,10 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) i++; } + effective.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK; + permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK; + inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK; + new = prepare_creds(); if (!new) return -ENOMEM; diff --git a/kernel/smp.c b/kernel/smp.c index 4dba0f7b72ad..88797cb0d23a 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -658,7 +658,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), if (cond_func(cpu, info)) { ret = smp_call_function_single(cpu, func, info, wait); - WARN_ON_ONCE(!ret); + WARN_ON_ONCE(ret); } preempt_enable(); } diff --git a/kernel/trace/ring_buffer.c 
b/kernel/trace/ring_buffer.c index 4063d5fe5e44..5efbc122e5ce 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1980,7 +1980,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta) /** * rb_update_event - update event type and data - * @event: the even to update + * @event: the event to update * @type: the type of event * @length: the size of the event field in the ring buffer * @@ -3353,21 +3353,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; /* Iterator usage is expected to have record disabled */ - if (list_empty(&cpu_buffer->reader_page->list)) { - iter->head_page = rb_set_head_page(cpu_buffer); - if (unlikely(!iter->head_page)) - return; - iter->head = iter->head_page->read; - } else { - iter->head_page = cpu_buffer->reader_page; - iter->head = cpu_buffer->reader_page->read; - } + iter->head_page = cpu_buffer->reader_page; + iter->head = cpu_buffer->reader_page->read; + + iter->cache_reader_page = iter->head_page; + iter->cache_read = iter->head; + if (iter->head) iter->read_stamp = cpu_buffer->read_stamp; else iter->read_stamp = iter->head_page->page->time_stamp; - iter->cache_reader_page = cpu_buffer->reader_page; - iter->cache_read = cpu_buffer->read; } /** @@ -3760,12 +3755,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) return NULL; /* - * We repeat when a time extend is encountered. - * Since the time extend is always attached to a data event, - * we should never loop more than once. - * (We never hit the following condition more than twice). + * We repeat when a time extend is encountered or we hit + * the end of the page. Since the time extend is always attached + * to a data event, we should never loop more than three times. + * Once for going to next page, once on time extend, and + * finally once to get the event. + * (We never hit the following condition more than thrice). */ - if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) + if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) return NULL; if (rb_per_cpu_empty(cpu_buffer)) diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 302d29b3744d..5f36f70ce44d 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -887,7 +887,8 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) l2cap_chan_close(chan, 0); lock_sock(sk); - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index ca957d34b0c8..19ba192e9dbf 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -1857,10 +1857,13 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s) /* Get data directly from socket receive queue without copying it. 
*/ while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); - if (!skb_linearize(skb)) + if (!skb_linearize(skb)) { s = rfcomm_recv_frame(s, skb); - else + if (!s) + break; + } else { kfree_skb(skb); + } } if (s && (sk->sk_state == BT_CLOSED)) diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c1c6028e389a..7ca014daa5ab 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -887,7 +887,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how) sk->sk_shutdown = SHUTDOWN_MASK; __rfcomm_sock_close(sk); - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 2bb1d3a5e76b..c9ae6b703c13 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -858,7 +858,8 @@ static int sco_sock_shutdown(struct socket *sock, int how) sco_sock_clear_timer(sk); __sco_sock_close(sk); - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } @@ -878,7 +879,8 @@ static int sco_sock_release(struct socket *sock) sco_sock_close(sk); - if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) { + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 96238ba95f2b..de6662b14e1f 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -13,8 +13,6 @@ #include "auth_x.h" #include "auth_x_protocol.h" -#define TEMP_TICKET_BUF_LEN 256 - static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); static int ceph_x_is_authenticated(struct ceph_auth_client *ac) @@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret, } static int ceph_x_decrypt(struct ceph_crypto_key *secret, - void **p, void *end, void *obuf, size_t olen) + void **p, void *end, void **obuf, size_t olen) { struct ceph_x_encrypt_header head; size_t head_len = sizeof(head); @@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret, return -EINVAL; dout("ceph_x_decrypt len %d\n", len); - ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, - *p, len); + if (*obuf == NULL) { + *obuf = kmalloc(len, GFP_NOFS); + if (!*obuf) + return -ENOMEM; + olen = len; + } + + ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len); if (ret) return ret; if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) @@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac, kfree(th); } -static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, - struct ceph_crypto_key *secret, - void *buf, void *end) +static int process_one_ticket(struct ceph_auth_client *ac, + struct ceph_crypto_key *secret, + void **p, void *end) { struct ceph_x_info *xi = ac->private; - int num; - void *p = buf; + int type; + u8 tkt_struct_v, blob_struct_v; + struct ceph_x_ticket_handler *th; + void *dbuf = NULL; + void *dp, *dend; + int dlen; + char is_enc; + struct timespec validity; + struct ceph_crypto_key old_key; + void *ticket_buf = NULL; + void *tp, *tpend; + struct ceph_timespec new_validity; + struct ceph_crypto_key new_session_key; + struct ceph_buffer 
*new_ticket_blob; + unsigned long new_expires, new_renew_after; + u64 new_secret_id; int ret; - char *dbuf; - char *ticket_buf; - u8 reply_struct_v; - dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); - if (!dbuf) - return -ENOMEM; + ceph_decode_need(p, end, sizeof(u32) + 1, bad); - ret = -ENOMEM; - ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); - if (!ticket_buf) - goto out_dbuf; + type = ceph_decode_32(p); + dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); - ceph_decode_need(&p, end, 1 + sizeof(u32), bad); - reply_struct_v = ceph_decode_8(&p); - if (reply_struct_v != 1) + tkt_struct_v = ceph_decode_8(p); + if (tkt_struct_v != 1) goto bad; - num = ceph_decode_32(&p); - dout("%d tickets\n", num); - while (num--) { - int type; - u8 tkt_struct_v, blob_struct_v; - struct ceph_x_ticket_handler *th; - void *dp, *dend; - int dlen; - char is_enc; - struct timespec validity; - struct ceph_crypto_key old_key; - void *tp, *tpend; - struct ceph_timespec new_validity; - struct ceph_crypto_key new_session_key; - struct ceph_buffer *new_ticket_blob; - unsigned long new_expires, new_renew_after; - u64 new_secret_id; - - ceph_decode_need(&p, end, sizeof(u32) + 1, bad); - - type = ceph_decode_32(&p); - dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); - - tkt_struct_v = ceph_decode_8(&p); - if (tkt_struct_v != 1) - goto bad; - - th = get_ticket_handler(ac, type); - if (IS_ERR(th)) { - ret = PTR_ERR(th); - goto out; - } - /* blob for me */ - dlen = ceph_x_decrypt(secret, &p, end, dbuf, - TEMP_TICKET_BUF_LEN); - if (dlen <= 0) { - ret = dlen; - goto out; - } - dout(" decrypted %d bytes\n", dlen); - dend = dbuf + dlen; - dp = dbuf; + th = get_ticket_handler(ac, type); + if (IS_ERR(th)) { + ret = PTR_ERR(th); + goto out; + } - tkt_struct_v = ceph_decode_8(&dp); - if (tkt_struct_v != 1) - goto bad; + /* blob for me */ + dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0); + if (dlen <= 0) { + ret = dlen; + goto out; + } + dout(" decrypted %d bytes\n", dlen); + dp = dbuf; + dend = dp + dlen; - memcpy(&old_key, &th->session_key, sizeof(old_key)); - ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); - if (ret) - goto out; + tkt_struct_v = ceph_decode_8(&dp); + if (tkt_struct_v != 1) + goto bad; - ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); - ceph_decode_timespec(&validity, &new_validity); - new_expires = get_seconds() + validity.tv_sec; - new_renew_after = new_expires - (validity.tv_sec / 4); - dout(" expires=%lu renew_after=%lu\n", new_expires, - new_renew_after); + memcpy(&old_key, &th->session_key, sizeof(old_key)); + ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); + if (ret) + goto out; - /* ticket blob for service */ - ceph_decode_8_safe(&p, end, is_enc, bad); - tp = ticket_buf; - if (is_enc) { - /* encrypted */ - dout(" encrypted ticket\n"); - dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf, - TEMP_TICKET_BUF_LEN); - if (dlen < 0) { - ret = dlen; - goto out; - } - dlen = ceph_decode_32(&tp); - } else { - /* unencrypted */ - ceph_decode_32_safe(&p, end, dlen, bad); - ceph_decode_need(&p, end, dlen, bad); - ceph_decode_copy(&p, ticket_buf, dlen); + ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); + ceph_decode_timespec(&validity, &new_validity); + new_expires = get_seconds() + validity.tv_sec; + new_renew_after = new_expires - (validity.tv_sec / 4); + dout(" expires=%lu renew_after=%lu\n", new_expires, + new_renew_after); + + /* ticket blob for service */ + ceph_decode_8_safe(p, end, is_enc, bad); + if (is_enc) { + /* encrypted */ 
+ dout(" encrypted ticket\n"); + dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0); + if (dlen < 0) { + ret = dlen; + goto out; } - tpend = tp + dlen; - dout(" ticket blob is %d bytes\n", dlen); - ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); - blob_struct_v = ceph_decode_8(&tp); - new_secret_id = ceph_decode_64(&tp); - ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); - if (ret) + tp = ticket_buf; + dlen = ceph_decode_32(&tp); + } else { + /* unencrypted */ + ceph_decode_32_safe(p, end, dlen, bad); + ticket_buf = kmalloc(dlen, GFP_NOFS); + if (!ticket_buf) { + ret = -ENOMEM; goto out; - - /* all is well, update our ticket */ - ceph_crypto_key_destroy(&th->session_key); - if (th->ticket_blob) - ceph_buffer_put(th->ticket_blob); - th->session_key = new_session_key; - th->ticket_blob = new_ticket_blob; - th->validity = new_validity; - th->secret_id = new_secret_id; - th->expires = new_expires; - th->renew_after = new_renew_after; - dout(" got ticket service %d (%s) secret_id %lld len %d\n", - type, ceph_entity_type_name(type), th->secret_id, - (int)th->ticket_blob->vec.iov_len); - xi->have_keys |= th->service; + } + tp = ticket_buf; + ceph_decode_need(p, end, dlen, bad); + ceph_decode_copy(p, ticket_buf, dlen); } + tpend = tp + dlen; + dout(" ticket blob is %d bytes\n", dlen); + ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); + blob_struct_v = ceph_decode_8(&tp); + new_secret_id = ceph_decode_64(&tp); + ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); + if (ret) + goto out; + + /* all is well, update our ticket */ + ceph_crypto_key_destroy(&th->session_key); + if (th->ticket_blob) + ceph_buffer_put(th->ticket_blob); + th->session_key = new_session_key; + th->ticket_blob = new_ticket_blob; + th->validity = new_validity; + th->secret_id = new_secret_id; + th->expires = new_expires; + th->renew_after = new_renew_after; + dout(" got ticket service %d (%s) secret_id %lld len %d\n", + type, ceph_entity_type_name(type), th->secret_id, + (int)th->ticket_blob->vec.iov_len); + xi->have_keys |= th->service; - ret = 0; out: kfree(ticket_buf); -out_dbuf: kfree(dbuf); return ret; @@ -270,6 +255,34 @@ bad: goto out; } +static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, + struct ceph_crypto_key *secret, + void *buf, void *end) +{ + void *p = buf; + u8 reply_struct_v; + u32 num; + int ret; + + ceph_decode_8_safe(&p, end, reply_struct_v, bad); + if (reply_struct_v != 1) + return -EINVAL; + + ceph_decode_32_safe(&p, end, num, bad); + dout("%d tickets\n", num); + + while (num--) { + ret = process_one_ticket(ac, secret, &p, end); + if (ret) + return ret; + } + + return 0; + +bad: + return -EINVAL; +} + static int ceph_x_build_authorizer(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th, struct ceph_x_authorizer *au) @@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th; int ret = 0; struct ceph_x_authorize_reply reply; + void *preply = &reply; void *p = au->reply_buf; void *end = p + sizeof(au->reply_buf); th = get_ticket_handler(ac, au->service); if (IS_ERR(th)) return PTR_ERR(th); - ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply)); + ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply)); if (ret < 0) return ret; if (ret != sizeof(reply)) diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index b9d7df175700..66e77f380fce 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -904,7 +904,7 @@ static void 
ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, BUG_ON(page_count > (int)USHRT_MAX); cursor->page_count = (unsigned short)page_count; BUG_ON(length > SIZE_MAX - cursor->page_offset); - cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE; + cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE; } static struct page * @@ -3144,7 +3144,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, INIT_LIST_HEAD(&m->data); /* front */ - m->front_max = front_len; + m->front_alloc_len = front_len; if (front_len) { if (front_len > PAGE_CACHE_SIZE) { m->front.iov_base = __vmalloc(front_len, flags, @@ -3319,8 +3319,8 @@ EXPORT_SYMBOL(ceph_msg_last_put); void ceph_msg_dump(struct ceph_msg *msg) { - pr_debug("msg_dump %p (front_max %d length %zd)\n", msg, - msg->front_max, msg->data_length); + pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, + msg->front_alloc_len, msg->data_length); print_hex_dump(KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 16, 1, &msg->hdr, sizeof(msg->hdr), true); diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 1fe25cd29d0e..dbcbf5a4707f 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -152,7 +152,7 @@ static int __open_session(struct ceph_mon_client *monc) /* initiatiate authentication handshake */ ret = ceph_auth_build_hello(monc->auth, monc->m_auth->front.iov_base, - monc->m_auth->front_max); + monc->m_auth->front_alloc_len); __send_prepared_auth_request(monc, ret); } else { dout("open_session mon%d already open\n", monc->cur_mon); @@ -196,7 +196,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) int num; p = msg->front.iov_base; - end = p + msg->front_max; + end = p + msg->front_alloc_len; num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap; ceph_encode_32(&p, num); @@ -897,7 +897,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc, ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, msg->front.iov_len, monc->m_auth->front.iov_base, - monc->m_auth->front_max); + monc->m_auth->front_alloc_len); if (ret < 0) { monc->client->auth_err = ret; wake_up_all(&monc->client->auth_wq); @@ -939,7 +939,7 @@ static int __validate_auth(struct ceph_mon_client *monc) return 0; ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base, - monc->m_auth->front_max); + monc->m_auth->front_alloc_len); if (ret <= 0) return ret; /* either an error, or no need to authenticate */ __send_prepared_auth_request(monc, ret); @@ -1041,7 +1041,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, if (!m) { pr_info("alloc_msg unknown type %d\n", type); *skip = 1; + } else if (front_len > m->front_alloc_len) { + pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n", + front_len, m->front_alloc_len, + (unsigned int)con->peer_name.type, + le64_to_cpu(con->peer_name.num)); + ceph_msg_put(m); + m = ceph_msg_new(type, front_len, GFP_NOFS, false); } + return m; } diff --git a/security/commoncap.c b/security/commoncap.c index c44b6fe6648e..c9219a66b7c6 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -421,6 +421,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable); } + cpu_caps->permitted.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK; + cpu_caps->inheritable.cap[CAP_LAST_U32] &= CAP_LAST_U32_VALID_MASK; + return 0; } diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index 9b7746c9546f..76bfeb3c3e30 
100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -2234,7 +2234,7 @@ static int max98090_probe(struct snd_soc_codec *codec) /* Register for interrupts */ dev_dbg(codec->dev, "irq = %d\n", max98090->irq); - ret = request_threaded_irq(max98090->irq, NULL, + ret = devm_request_threaded_irq(codec->dev, max98090->irq, NULL, max98090_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "max98090_interrupt", codec); if (ret < 0) { diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 6dbb17d050c9..ca1e999026e5 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1284,3 +1284,5 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs) return 0; } EXPORT_SYMBOL_GPL(wm_adsp2_init); + +MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index 6f4dd7543e82..95a9b07bbe96 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -757,9 +757,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai) SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) -#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S24_LE | \ - SNDRV_PCM_FMTBIT_S32_LE) +#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops pxa_ssp_dai_ops = { .startup = pxa_ssp_startup, diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index 82ebb1a51479..5c9b5e4f94c3 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c @@ -853,11 +853,9 @@ static int i2s_suspend(struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); - if (dai->active) { - i2s->suspend_i2smod = readl(i2s->addr + I2SMOD); - i2s->suspend_i2scon = readl(i2s->addr + I2SCON); - i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR); - } + i2s->suspend_i2smod = readl(i2s->addr + I2SMOD); + i2s->suspend_i2scon = readl(i2s->addr + I2SCON); + i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR); return 0; } @@ -866,11 +864,9 @@ static int i2s_resume(struct snd_soc_dai *dai) { struct i2s_dai *i2s = to_info(dai); - if (dai->active) { - writel(i2s->suspend_i2scon, i2s->addr + I2SCON); - writel(i2s->suspend_i2smod, i2s->addr + I2SMOD); - writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR); - } + writel(i2s->suspend_i2scon, i2s->addr + I2SCON); + writel(i2s->suspend_i2smod, i2s->addr + I2SMOD); + writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR); return 0; } diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index ccb6be4d658d..02d26915b61d 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1886,6 +1886,7 @@ int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *widget) dpcm_be_disconnect(fe, SNDRV_PCM_STREAM_PLAYBACK); } + dpcm_path_put(&list); capture: /* skip if FE doesn't have capture capability */ if (!fe->cpu_dai->driver->capture.channels_min) diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 0a63658065f0..2cee2b79b4de 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -4,6 +4,7 @@ TARGETS += efivarfs TARGETS += kcmp TARGETS += memory-hotplug TARGETS += mqueue +TARGETS += mount TARGETS += net TARGETS += ptrace TARGETS += vm diff --git a/tools/testing/selftests/mount/Makefile b/tools/testing/selftests/mount/Makefile new file mode 100644 index 000000000000..337d853c2b72 --- /dev/null +++ b/tools/testing/selftests/mount/Makefile @@ -0,0 +1,17 @@ +# Makefile for mount selftests. 
+ +all: unprivileged-remount-test + +unprivileged-remount-test: unprivileged-remount-test.c + gcc -Wall -O2 unprivileged-remount-test.c -o unprivileged-remount-test + +# Allow specific tests to be selected. +test_unprivileged_remount: unprivileged-remount-test + @if [ -f /proc/self/uid_map ] ; then ./unprivileged-remount-test ; fi + +run_tests: all test_unprivileged_remount + +clean: + rm -f unprivileged-remount-test + +.PHONY: all test_unprivileged_remount diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c new file mode 100644 index 000000000000..1b3ff2fda4d0 --- /dev/null +++ b/tools/testing/selftests/mount/unprivileged-remount-test.c @@ -0,0 +1,242 @@ +#define _GNU_SOURCE +#include <sched.h> +#include <stdio.h> +#include <errno.h> +#include <string.h> +#include <sys/types.h> +#include <sys/mount.h> +#include <sys/wait.h> +#include <stdlib.h> +#include <unistd.h> +#include <fcntl.h> +#include <grp.h> +#include <stdbool.h> +#include <stdarg.h> + +#ifndef CLONE_NEWNS +# define CLONE_NEWNS 0x00020000 +#endif +#ifndef CLONE_NEWUTS +# define CLONE_NEWUTS 0x04000000 +#endif +#ifndef CLONE_NEWIPC +# define CLONE_NEWIPC 0x08000000 +#endif +#ifndef CLONE_NEWNET +# define CLONE_NEWNET 0x40000000 +#endif +#ifndef CLONE_NEWUSER +# define CLONE_NEWUSER 0x10000000 +#endif +#ifndef CLONE_NEWPID +# define CLONE_NEWPID 0x20000000 +#endif + +#ifndef MS_RELATIME +#define MS_RELATIME (1 << 21) +#endif +#ifndef MS_STRICTATIME +#define MS_STRICTATIME (1 << 24) +#endif + +static void die(char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + exit(EXIT_FAILURE); +} + +static void write_file(char *filename, char *fmt, ...) +{ + char buf[4096]; + int fd; + ssize_t written; + int buf_len; + va_list ap; + + va_start(ap, fmt); + buf_len = vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + if (buf_len < 0) { + die("vsnprintf failed: %s\n", + strerror(errno)); + } + if (buf_len >= sizeof(buf)) { + die("vsnprintf output truncated\n"); + } + + fd = open(filename, O_WRONLY); + if (fd < 0) { + die("open of %s failed: %s\n", + filename, strerror(errno)); + } + written = write(fd, buf, buf_len); + if (written != buf_len) { + if (written >= 0) { + die("short write to %s\n", filename); + } else { + die("write to %s failed: %s\n", + filename, strerror(errno)); + } + } + if (close(fd) != 0) { + die("close of %s failed: %s\n", + filename, strerror(errno)); + } +} + +static void create_and_enter_userns(void) +{ + uid_t uid; + gid_t gid; + + uid = getuid(); + gid = getgid(); + + if (unshare(CLONE_NEWUSER) !=0) { + die("unshare(CLONE_NEWUSER) failed: %s\n", + strerror(errno)); + } + + write_file("/proc/self/uid_map", "0 %d 1", uid); + write_file("/proc/self/gid_map", "0 %d 1", gid); + + if (setgroups(0, NULL) != 0) { + die("setgroups failed: %s\n", + strerror(errno)); + } + if (setgid(0) != 0) { + die ("setgid(0) failed %s\n", + strerror(errno)); + } + if (setuid(0) != 0) { + die("setuid(0) failed %s\n", + strerror(errno)); + } +} + +static +bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags) +{ + pid_t child; + + child = fork(); + if (child == -1) { + die("fork failed: %s\n", + strerror(errno)); + } + if (child != 0) { /* parent */ + pid_t pid; + int status; + pid = waitpid(child, &status, 0); + if (pid == -1) { + die("waitpid failed: %s\n", + strerror(errno)); + } + if (pid != child) { + die("waited for %d got %d\n", + child, pid); + } + if (!WIFEXITED(status)) { + die("child 
did not terminate cleanly\n"); + } + return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false; + } + + create_and_enter_userns(); + if (unshare(CLONE_NEWNS) != 0) { + die("unshare(CLONE_NEWNS) failed: %s\n", + strerror(errno)); + } + + if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) { + die("mount of /tmp failed: %s\n", + strerror(errno)); + } + + create_and_enter_userns(); + + if (unshare(CLONE_NEWNS) != 0) { + die("unshare(CLONE_NEWNS) failed: %s\n", + strerror(errno)); + } + + if (mount("/tmp", "/tmp", "none", + MS_REMOUNT | MS_BIND | remount_flags, NULL) != 0) { + /* system("cat /proc/self/mounts"); */ + die("remount of /tmp failed: %s\n", + strerror(errno)); + } + + if (mount("/tmp", "/tmp", "none", + MS_REMOUNT | MS_BIND | invalid_flags, NULL) == 0) { + /* system("cat /proc/self/mounts"); */ + die("remount of /tmp with invalid flags " + "succeeded unexpectedly\n"); + } + exit(EXIT_SUCCESS); +} + +static bool test_unpriv_remount_simple(int mount_flags) +{ + return test_unpriv_remount(mount_flags, mount_flags, 0); +} + +static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags) +{ + return test_unpriv_remount(mount_flags, mount_flags, invalid_flags); +} + +int main(int argc, char **argv) +{ + if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) { + die("MS_RDONLY malfunctions\n"); + } + if (!test_unpriv_remount_simple(MS_NODEV)) { + die("MS_NODEV malfunctions\n"); + } + if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) { + die("MS_NOSUID malfunctions\n"); + } + if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) { + die("MS_NOEXEC malfunctions\n"); + } + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV, + MS_NOATIME|MS_NODEV)) + { + die("MS_RELATIME malfunctions\n"); + } + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV, + MS_NOATIME|MS_NODEV)) + { + die("MS_STRICTATIME malfunctions\n"); + } + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV, + MS_STRICTATIME|MS_NODEV)) + { + die("MS_RELATIME malfunctions\n"); + } + if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV, + MS_NOATIME|MS_NODEV)) + { + die("MS_RELATIME malfunctions\n"); + } + if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV, + MS_NOATIME|MS_NODEV)) + { + die("MS_RELATIME malfunctions\n"); + } + if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV, + MS_STRICTATIME|MS_NODEV)) + { + die("MS_RELATIME malfunctions\n"); + } + if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV, + MS_NOATIME|MS_NODEV)) + { + die("Default atime malfunctions\n"); + } + return EXIT_SUCCESS; +}